diff --git a/.cproject b/.cproject
deleted file mode 100644
index 10b16f91c..000000000
--- a/.cproject
+++ /dev/null
@@ -1,3563 +0,0 @@
[3563 deleted lines elided: Eclipse CDT .cproject XML consisting of build-target definitions that invoke make, cmake, and cpack for GTSAM's check, test, timing, examples, doc, and wrap targets]
diff --git a/.github/scripts/python.sh b/.github/scripts/python.sh
new file mode 100644
index 000000000..a71e14c97
--- /dev/null
+++ b/.github/scripts/python.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+##########################################################
+# Build and test the GTSAM Python wrapper.
+##########################################################
+
+set -x -e
+
+# install TBB with _debug.so files
+function install_tbb()
+{
+ TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download
+ TBB_VERSION=4.4.5
+ TBB_DIR=tbb44_20160526oss
+ TBB_SAVEPATH="/tmp/tbb.tgz"
+
+ if [ "$(uname)" == "Linux" ]; then
+ OS_SHORT="lin"
+ TBB_LIB_DIR="intel64/gcc4.4"
+ SUDO="sudo"
+
+ elif [ "$(uname)" == "Darwin" ]; then
+ OS_SHORT="osx"
+ TBB_LIB_DIR=""
+ SUDO=""
+
+ fi
+
+ wget "${TBB_BASEURL}/${TBB_VERSION}/${TBB_DIR}_${OS_SHORT}.tgz" -O $TBB_SAVEPATH
+ tar -C /tmp -xf $TBB_SAVEPATH
+
+ TBBROOT=/tmp/$TBB_DIR
+ # Copy the needed files to the correct places.
+ # This works correctly for CI builds, instead of setting path variables.
+ # This is what Homebrew does to install TBB on Macs
+ $SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/
+ $SUDO cp -R $TBBROOT/include/ /usr/local/include/
+
+}
+
+if [ -z ${PYTHON_VERSION+x} ]; then
+ echo "Please provide the Python version to build against!"
+ exit 127 +fi + +PYTHON="python${PYTHON_VERSION}" + +if [[ $(uname) == "Darwin" ]]; then + brew install wget +else + # Install a system package required by our library + sudo apt-get install -y wget libicu-dev python3-pip python3-setuptools +fi + +PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin + +[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb + + +BUILD_PYBIND="ON" +TYPEDEF_POINTS_TO_VECTORS="ON" + +sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt + +mkdir $GITHUB_WORKSPACE/build +cd $GITHUB_WORKSPACE/build + +cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Release \ + -DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_UNSTABLE=ON \ + -DGTSAM_USE_QUATERNIONS=OFF \ + -DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \ + -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ + -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ + -DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \ + -DGTSAM_TYPEDEF_POINTS_TO_VECTORS=${TYPEDEF_POINTS_TO_VECTORS} \ + -DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \ + -DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \ + -DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF \ + -DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install + +make -j$(nproc) install + + +cd $GITHUB_WORKSPACE/build/python +$PYTHON setup.py install --user --prefix= +cd $GITHUB_WORKSPACE/python/gtsam/tests +$PYTHON -m unittest discover diff --git a/.travis.sh b/.github/scripts/unix.sh old mode 100755 new mode 100644 similarity index 69% rename from .travis.sh rename to .github/scripts/unix.sh index 9fc09a3f8..55a8ac372 --- a/.travis.sh +++ b/.github/scripts/unix.sh @@ -1,20 +1,25 @@ #!/bin/bash +########################################################## +# Build and test GTSAM for *nix based systems. +# Specifically Linux and macOS. +########################################################## + # install TBB with _debug.so files function install_tbb() { TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download - TBB_VERSION=4.4.2 - TBB_DIR=tbb44_20151115oss + TBB_VERSION=4.4.5 + TBB_DIR=tbb44_20160526oss TBB_SAVEPATH="/tmp/tbb.tgz" - if [ "$TRAVIS_OS_NAME" == "linux" ]; then + if [ "$(uname)" == "Linux" ]; then OS_SHORT="lin" TBB_LIB_DIR="intel64/gcc4.4" SUDO="sudo" - elif [ "$TRAVIS_OS_NAME" == "osx" ]; then - OS_SHORT="lin" + elif [ "$(uname)" == "Darwin" ]; then + OS_SHORT="osx" TBB_LIB_DIR="" SUDO="" @@ -25,7 +30,7 @@ function install_tbb() TBBROOT=/tmp/$TBB_DIR # Copy the needed files to the correct places. - # This works correctly for travis builds, instead of setting path variables. + # This works correctly for CI builds, instead of setting path variables. # This is what Homebrew does to install TBB on Macs $SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/ $SUDO cp -R $TBBROOT/include/ /usr/local/include/ @@ -38,15 +43,14 @@ function configure() set -e # Make sure any error makes the script to return an error code set -x # echo - SOURCE_DIR=`pwd` - BUILD_DIR=build + SOURCE_DIR=$GITHUB_WORKSPACE + BUILD_DIR=$GITHUB_WORKSPACE/build #env - git clean -fd || true rm -fr $BUILD_DIR || true mkdir $BUILD_DIR && cd $BUILD_DIR - install_tbb + [ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb if [ ! 
-z "$GCC_VERSION" ]; then export CC=gcc-$GCC_VERSION @@ -59,11 +63,15 @@ function configure() -DGTSAM_BUILD_TESTS=${GTSAM_BUILD_TESTS:-OFF} \ -DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \ -DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \ - -DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \ -DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \ - -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V4:-OFF} \ + -DGTSAM_ALLOW_DEPRECATED_SINCE_V41=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \ + -DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \ + -DGTSAM_ROT3_EXPMAP=${GTSAM_ROT3_EXPMAP:-ON} \ + -DGTSAM_POSE3_EXPMAP=${GTSAM_POSE3_EXPMAP:-ON} \ -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ - -DCMAKE_VERBOSE_MAKEFILE=ON + -DBOOST_ROOT=$BOOST_ROOT \ + -DBoost_NO_SYSTEM_PATHS=ON \ + -DBoost_ARCHITECTURE=-x64 } @@ -71,7 +79,7 @@ function configure() function finish () { # Print ccache stats - ccache -s + [ -x "$(command -v ccache)" ] && ccache -s cd $SOURCE_DIR } @@ -111,4 +119,4 @@ case $1 in -t) test ;; -esac +esac \ No newline at end of file diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml new file mode 100644 index 000000000..be1da35bb --- /dev/null +++ b/.github/workflows/build-linux.yml @@ -0,0 +1,85 @@ +name: Linux CI + +on: [push, pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} + + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. + name: [ + ubuntu-18.04-gcc-5, + ubuntu-18.04-gcc-9, + ubuntu-18.04-clang-9, + ] + + build_type: [Debug, Release] + build_unstable: [ON] + include: + - name: ubuntu-18.04-gcc-5 + os: ubuntu-18.04 + compiler: gcc + version: "5" + + - name: ubuntu-18.04-gcc-9 + os: ubuntu-18.04 + compiler: gcc + version: "9" + + - name: ubuntu-18.04-clang-9 + os: ubuntu-18.04 + compiler: clang + version: "9" + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Linux) + if: runner.os == 'Linux' + run: | + # LLVM (clang) 9 is not in Bionic's repositories so we add the official LLVM repository. + if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + # (ipv4|ha).pool.sks-keyservers.net is the SKS GPG global keyserver pool + # ipv4 avoids potential timeouts because of crappy IPv6 infrastructure + # 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository + # This key is not in the keystore by default for Ubuntu so we need to add it. 
+ LLVM_KEY=15CF4D18AF4F7421 + gpg --keyserver ipv4.pool.sks-keyservers.net --recv-key $LLVM_KEY || gpg --keyserver ha.pool.sks-keyservers.net --recv-key $LLVM_KEY + gpg -a --export $LLVM_KEY | sudo apt-key add - + sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" + fi + sudo apt-get -y update + + sudo apt install cmake build-essential pkg-config libpython-dev python-numpy + + echo "BOOST_ROOT=$(echo $BOOST_ROOT_1_72_0)" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=$(echo $BOOST_ROOT_1_72_0/lib)" >> $GITHUB_ENV + + if [ "${{ matrix.compiler }}" = "gcc" ]; then + sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib + echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV + echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV + else + sudo apt-get install -y clang-${{ matrix.version }} g++-multilib + echo "CC=clang-${{ matrix.version }}" >> $GITHUB_ENV + echo "CXX=clang++-${{ matrix.version }}" >> $GITHUB_ENV + fi + - name: Check Boost version + if: runner.os == 'Linux' + run: | + echo "BOOST_ROOT = $BOOST_ROOT" + - name: Build and Test (Linux) + if: runner.os == 'Linux' + run: | + bash .github/scripts/unix.sh -t diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml new file mode 100644 index 000000000..69873980a --- /dev/null +++ b/.github/workflows/build-macos.yml @@ -0,0 +1,53 @@ +name: macOS CI + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. + name: [ + macOS-10.15-xcode-11.3.1, + ] + + build_type: [Debug, Release] + build_unstable: [ON] + include: + - name: macOS-10.15-xcode-11.3.1 + os: macOS-10.15 + compiler: xcode + version: "11.3.1" + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (macOS) + if: runner.os == 'macOS' + run: | + brew tap ProfFan/robotics + brew install cmake ninja + brew install ProfFan/robotics/boost + if [ "${{ matrix.compiler }}" = "gcc" ]; then + brew install gcc@${{ matrix.version }} + echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV + echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV + else + sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app + echo "CC=clang" >> $GITHUB_ENV + echo "CXX=clang++" >> $GITHUB_ENV + fi + - name: Build and Test (macOS) + if: runner.os == 'macOS' + run: | + bash .github/scripts/unix.sh -t diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml new file mode 100644 index 000000000..3f9a2e98a --- /dev/null +++ b/.github/workflows/build-python.yml @@ -0,0 +1,113 @@ +name: Python CI + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} Python ${{ matrix.python_version }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + PYTHON_VERSION: ${{ matrix.python_version }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. 
+ name: [ + ubuntu-18.04-gcc-5, + ubuntu-18.04-gcc-9, + ubuntu-18.04-clang-9, + macOS-10.15-xcode-11.3.1, + ubuntu-18.04-gcc-5-tbb, + ] + + build_type: [Debug, Release] + python_version: [3] + include: + - name: ubuntu-18.04-gcc-5 + os: ubuntu-18.04 + compiler: gcc + version: "5" + + - name: ubuntu-18.04-gcc-9 + os: ubuntu-18.04 + compiler: gcc + version: "9" + + - name: ubuntu-18.04-clang-9 + os: ubuntu-18.04 + compiler: clang + version: "9" + + - name: macOS-10.15-xcode-11.3.1 + os: macOS-10.15 + compiler: xcode + version: "11.3.1" + + - name: ubuntu-18.04-gcc-5-tbb + os: ubuntu-18.04 + compiler: gcc + version: "5" + flag: tbb + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Linux) + if: runner.os == 'Linux' + run: | + if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + # (ipv4|ha).pool.sks-keyservers.net is the SKS GPG global keyserver pool + # ipv4 avoids potential timeouts because of crappy IPv6 infrastructure + # 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository + # This key is not in the keystore by default for Ubuntu so we need to add it. + LLVM_KEY=15CF4D18AF4F7421 + gpg --keyserver ipv4.pool.sks-keyservers.net --recv-key $LLVM_KEY || gpg --keyserver ha.pool.sks-keyservers.net --recv-key $LLVM_KEY + gpg -a --export $LLVM_KEY | sudo apt-key add - + sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" + fi + sudo apt-get -y update + + sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev + + if [ "${{ matrix.compiler }}" = "gcc" ]; then + sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib + echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV + echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV + else + sudo apt-get install -y clang-${{ matrix.version }} g++-multilib + echo "CC=clang-${{ matrix.version }}" >> $GITHUB_ENV + echo "CXX=clang++-${{ matrix.version }}" >> $GITHUB_ENV + fi + - name: Install (macOS) + if: runner.os == 'macOS' + run: | + brew tap ProfFan/robotics + brew install cmake ninja + brew install ProfFan/robotics/boost + if [ "${{ matrix.compiler }}" = "gcc" ]; then + brew install gcc@${{ matrix.version }} + echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV + echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV + else + sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app + echo "CC=clang" >> $GITHUB_ENV + echo "CXX=clang++" >> $GITHUB_ENV + fi + - name: Set GTSAM_WITH_TBB Flag + if: matrix.flag == 'tbb' + run: | + echo "GTSAM_WITH_TBB=ON" >> $GITHUB_ENV + echo "GTSAM Uses TBB" + - name: Build (Linux) + if: runner.os == 'Linux' + run: | + bash .github/scripts/python.sh + - name: Build (macOS) + if: runner.os == 'macOS' + run: | + bash .github/scripts/python.sh diff --git a/.github/workflows/build-special.yml b/.github/workflows/build-special.yml new file mode 100644 index 000000000..532f917c1 --- /dev/null +++ b/.github/workflows/build-special.yml @@ -0,0 +1,128 @@ +name: Special Cases CI + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + GTSAM_BUILD_UNSTABLE: ON + + strategy: + fail-fast: false + + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. 
+ name: + [ + ubuntu-gcc-deprecated, + ubuntu-gcc-quaternions, + ubuntu-gcc-tbb, + ubuntu-cayleymap, + ] + + build_type: [Debug, Release] + + include: + - name: ubuntu-gcc-deprecated + os: ubuntu-18.04 + compiler: gcc + version: "9" + flag: deprecated + + - name: ubuntu-gcc-quaternions + os: ubuntu-18.04 + compiler: gcc + version: "9" + flag: quaternions + + - name: ubuntu-gcc-tbb + os: ubuntu-18.04 + compiler: gcc + version: "9" + flag: tbb + + - name: ubuntu-cayleymap + os: ubuntu-18.04 + compiler: gcc + version: "9" + flag: cayley + + steps: + - name: Checkout + uses: actions/checkout@master + + - name: Install (Linux) + if: runner.os == 'Linux' + run: | + # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository. + if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then + gpg --keyserver pool.sks-keyservers.net --recv-key 15CF4D18AF4F7421 + gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add - + sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main" + fi + sudo apt-get -y update + + sudo apt install cmake build-essential pkg-config libpython-dev python-numpy + + echo "BOOST_ROOT=$(echo $BOOST_ROOT_1_72_0)" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=$(echo $BOOST_ROOT_1_72_0/lib)" >> $GITHUB_ENV + + if [ "${{ matrix.compiler }}" = "gcc" ]; then + sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib + echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV + echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV + else + sudo apt-get install -y clang-${{ matrix.version }} g++-multilib + echo "CC=clang-${{ matrix.version }}" >> $GITHUB_ENV + echo "CXX=clang++-${{ matrix.version }}" >> $GITHUB_ENV + fi + + - name: Install (macOS) + if: runner.os == 'macOS' + run: | + brew install cmake ninja boost + if [ "${{ matrix.compiler }}" = "gcc" ]; then + brew install gcc@${{ matrix.version }} + echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV + echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV + else + sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app + echo "CC=clang" >> $GITHUB_ENV + echo "CXX=clang++" >> $GITHUB_ENV + fi + + - name: Set Allow Deprecated Flag + if: matrix.flag == 'deprecated' + run: | + echo "GTSAM_ALLOW_DEPRECATED_SINCE_V41=ON" >> $GITHUB_ENV + echo "Allow deprecated since version 4.1" + + - name: Set Use Quaternions Flag + if: matrix.flag == 'quaternions' + run: | + echo "GTSAM_USE_QUATERNIONS=ON" >> $GITHUB_ENV + echo "Use Quaternions for rotations" + + - name: Set GTSAM_WITH_TBB Flag + if: matrix.flag == 'tbb' + run: | + echo "GTSAM_WITH_TBB=ON" >> $GITHUB_ENV + echo "GTSAM Uses TBB" + + - name: Use Cayley Transform for Rot3 + if: matrix.flag == 'cayley' + run: | + echo "GTSAM_POSE3_EXPMAP=OFF" >> $GITHUB_ENV + echo "GTSAM_ROT3_EXPMAP=OFF" >> $GITHUB_ENV + echo "GTSAM Uses Cayley map for Rot3" + + - name: Build & Test + run: | + bash .github/scripts/unix.sh -t diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml new file mode 100644 index 000000000..887d41972 --- /dev/null +++ b/.github/workflows/build-windows.yml @@ -0,0 +1,78 @@ +name: Windows CI + +on: [pull_request] + +jobs: + build: + name: ${{ matrix.name }} ${{ matrix.build_type }} + runs-on: ${{ matrix.os }} + + env: + CTEST_OUTPUT_ON_FAILURE: ON + CTEST_PARALLEL_LEVEL: 2 + CMAKE_BUILD_TYPE: ${{ matrix.build_type }} + GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be 
added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. + name: [ + #TODO This build keeps timing out, need to understand why. + # windows-2016-cl, + windows-2019-cl, + ] + + build_type: [Debug, Release] + build_unstable: [ON] + include: + + #TODO This build keeps timing out, need to understand why. + # - name: windows-2016-cl + # os: windows-2016 + # compiler: cl + + - name: windows-2019-cl + os: windows-2019 + compiler: cl + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Windows) + if: runner.os == 'Windows' + shell: powershell + run: | + Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh') + scoop install ninja --global + if ("${{ matrix.compiler }}".StartsWith("clang")) { + scoop install llvm --global + } + if ("${{ matrix.compiler }}" -eq "gcc") { + # Chocolatey GCC is broken on the windows-2019 image. + # See: https://github.com/DaanDeMeyer/doctest/runs/231595515 + # See: https://github.community/t5/GitHub-Actions/Something-is-wrong-with-the-chocolatey-installed-version-of-gcc/td-p/32413 + scoop install gcc --global + echo "CC=gcc" >> $GITHUB_ENV + echo "CXX=g++" >> $GITHUB_ENV + } elseif ("${{ matrix.compiler }}" -eq "clang") { + echo "CC=clang" >> $GITHUB_ENV + echo "CXX=clang++" >> $GITHUB_ENV + } else { + echo "CC=${{ matrix.compiler }}" >> $GITHUB_ENV + echo "CXX=${{ matrix.compiler }}" >> $GITHUB_ENV + } + # Scoop modifies the PATH so we make the modified PATH global. + echo "$env:PATH" >> $GITHUB_PATH + - name: Build (Windows) + if: runner.os == 'Windows' + run: | + cmake -E remove_directory build + echo "BOOST_ROOT_1_72_0: ${env:BOOST_ROOT_1_72_0}" + cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT_1_72_0}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT_1_72_0}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT_1_72_0}\lib" + cmake --build build --config ${{ matrix.build_type }} --target gtsam + cmake --build build --config ${{ matrix.build_type }} --target gtsam_unstable + cmake --build build --config ${{ matrix.build_type }} --target wrap + cmake --build build --config ${{ matrix.build_type }} --target check.base + cmake --build build --config ${{ matrix.build_type }} --target check.base_unstable + cmake --build build --config ${{ matrix.build_type }} --target check.linear diff --git a/.github/workflows/trigger-python.yml b/.github/workflows/trigger-python.yml index 8fad9e7ca..1e8981d99 100644 --- a/.github/workflows/trigger-python.yml +++ b/.github/workflows/trigger-python.yml @@ -1,8 +1,11 @@ -# This triggers Cython builds on `gtsam-manylinux-build` +# This triggers Python builds on `gtsam-manylinux-build` name: Trigger Python Builds -on: push +on: + push: + branches: + - develop jobs: - triggerCython: + triggerPython: runs-on: ubuntu-latest steps: - name: Repository Dispatch @@ -10,5 +13,5 @@ jobs: with: token: ${{ secrets.PYTHON_CI_REPO_ACCESS_TOKEN }} repository: borglab/gtsam-manylinux-build - event-type: cython-wrapper + event-type: python-wrapper client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}' diff --git a/.gitignore b/.gitignore index 1d89cac25..cde059767 100644 --- a/.gitignore +++ b/.gitignore @@ -9,15 +9,10 @@ *.txt.user *.txt.user.6d59f0c *.pydevproject -cython/venv -cython/gtsam.cpp -cython/gtsam.cpython-35m-darwin.so -cython/gtsam.pyx -cython/gtsam.so -cython/gtsam_wrapper.pxd .vscode .env /.vs/ /CMakeSettings.json # for QtCreator: CMakeLists.txt.user* +xcode/ diff --git 
a/.travis.python.sh b/.travis.python.sh deleted file mode 100644 index 1ef5799aa..000000000 --- a/.travis.python.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -set -x -e - -if [ -z ${PYTHON_VERSION+x} ]; then - echo "Please provide the Python version to build against!" - exit 127 -fi - -PYTHON="python${PYTHON_VERSION}" - -if [[ $(uname) == "Darwin" ]]; then - brew install wget -else - # Install a system package required by our library - sudo apt-get install wget libicu-dev python3-pip python3-setuptools -fi - -CURRDIR=$(pwd) - -sudo $PYTHON -m pip install -r ./cython/requirements.txt - -mkdir $CURRDIR/build -cd $CURRDIR/build - -cmake $CURRDIR -DCMAKE_BUILD_TYPE=Release \ - -DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_UNSTABLE=ON \ - -DGTSAM_USE_QUATERNIONS=OFF \ - -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ - -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \ - -DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \ - -DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \ - -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF \ - -DCMAKE_INSTALL_PREFIX=$CURRDIR/../gtsam_install - -make -j$(nproc) install - -cd $CURRDIR/../gtsam_install/cython - -sudo $PYTHON setup.py install - -cd $CURRDIR/cython/gtsam/tests - -$PYTHON -m unittest discover \ No newline at end of file diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index ca6a426ea..000000000 --- a/.travis.yml +++ /dev/null @@ -1,140 +0,0 @@ -language: cpp -cache: ccache -sudo: required -dist: xenial - -addons: - apt: - sources: - - ubuntu-toolchain-r-test - - sourceline: 'deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-9 main' - key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key' - packages: - - g++-9 - - clang-9 - - build-essential pkg-config - - cmake - - libpython-dev python-numpy - - libboost-all-dev - -# before_install: - # - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update; fi - -install: - - if [ "$TRAVIS_OS_NAME" == "osx" ]; then HOMEBREW_NO_AUTO_UPDATE=1 brew install ccache ; fi - - if [ "$TRAVIS_OS_NAME" == "osx" ]; then export PATH="/usr/local/opt/ccache/libexec:$PATH" ; fi - -# We first do the compile stage specified below, then the matrix expansion specified after. -stages: - - compile - - test - - special - -env: - global: - - MAKEFLAGS="-j2" - - CCACHE_SLOPPINESS=pch_defines,time_macros - -# Compile stage without building examples/tests to populate the caches. 
-jobs: -# -------- STAGE 1: COMPILE ----------- - include: -# on Mac, GCC - - stage: compile - os: osx - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: osx - compiler: gcc - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Mac, CLANG - - stage: compile - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Linux, GCC - - stage: compile - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Linux, CLANG - - stage: compile - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -b - - stage: compile - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -b -# on Linux, with deprecated ON to make sure that path still compiles/tests - - stage: special - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_ALLOW_DEPRECATED_SINCE_V4=ON - script: bash .travis.sh -b -# on Linux, with GTSAM_WITH_TBB on to make sure GTSAM still compiles/tests - - stage: special - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_WITH_TBB=ON - script: bash .travis.sh -t -# -------- STAGE 2: TESTS ----------- -# on Mac, GCC - - stage: test - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -t - - stage: test - os: osx - compiler: clang - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -t - - stage: test - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -t - - stage: test - os: linux - compiler: gcc - env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF - script: bash .travis.sh -t - - stage: test - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release - script: bash .travis.sh -t -# on Linux, with quaternions ON to make sure that path still compiles/tests - - stage: special - os: linux - compiler: clang - env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release GTSAM_BUILD_UNSTABLE=OFF GTSAM_USE_QUATERNIONS=ON - script: bash .travis.sh -t - - stage: special - os: linux - compiler: gcc - env: PYTHON_VERSION=3 - script: bash .travis.python.sh - - stage: special - os: osx - compiler: clang - env: PYTHON_VERSION=3 - script: bash .travis.python.sh diff --git a/CMakeLists.txt b/CMakeLists.txt index a810ac9df..0c39089c1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -9,25 +9,23 @@ endif() # Set the version number for the library set (GTSAM_VERSION_MAJOR 4) -set (GTSAM_VERSION_MINOR 0) -set (GTSAM_VERSION_PATCH 2) +set (GTSAM_VERSION_MINOR 1) +set (GTSAM_VERSION_PATCH 0) math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}") set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}") +set (CMAKE_PROJECT_VERSION ${GTSAM_VERSION_STRING}) +set (CMAKE_PROJECT_VERSION_MAJOR ${GTSAM_VERSION_MAJOR}) +set (CMAKE_PROJECT_VERSION_MINOR ${GTSAM_VERSION_MINOR}) +set (CMAKE_PROJECT_VERSION_PATCH ${GTSAM_VERSION_PATCH}) + 
############################################################################### # Gather information, perform checks, set defaults -# Set the default install path to home -#set (CMAKE_INSTALL_PREFIX ${HOME} CACHE PATH "Install prefix for library") - set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${CMAKE_CURRENT_SOURCE_DIR}/cmake") include(GtsamMakeConfigFile) include(GNUInstallDirs) -# Record the root dir for gtsam - needed during external builds, e.g., ROS -set(GTSAM_SOURCE_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}) -message(STATUS "GTSAM_SOURCE_ROOT_DIR: [${GTSAM_SOURCE_ROOT_DIR}]") - # Load build type flags and default to Debug mode include(GtsamBuildTypes) @@ -40,378 +38,21 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR}) message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. ") endif() -# See whether gtsam_unstable is available (it will be present only if we're using a git checkout) -if(EXISTS "${PROJECT_SOURCE_DIR}/gtsam_unstable" AND IS_DIRECTORY "${PROJECT_SOURCE_DIR}/gtsam_unstable") - set(GTSAM_UNSTABLE_AVAILABLE 1) -else() - set(GTSAM_UNSTABLE_AVAILABLE 0) -endif() - -# ---------------------------------------------------------------------------- -# Uninstall target, for "make uninstall" -# ---------------------------------------------------------------------------- -configure_file( - "${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in" - "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" - IMMEDIATE @ONLY) - -add_custom_target(uninstall - "${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake") - - -############################################################################### -# Set up options - -# Configurable Options -if(GTSAM_UNSTABLE_AVAILABLE) - option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" ON) -endif() -option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON) -option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF) -option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." OFF) -option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." 
OFF) -option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF) -option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON) -option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF) -option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF) -option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON) -option(GTSAM_ALLOW_DEPRECATED_SINCE_V4 "Allow use of methods/functions deprecated in GTSAM 4" ON) -option(GTSAM_TYPEDEF_POINTS_TO_VECTORS "Typedef Point2 and Point3 to Eigen::Vector equivalents" OFF) -option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON) -option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON) -if(NOT MSVC AND NOT XCODE_VERSION) - option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON) -endif() - -if(NOT MSVC AND NOT XCODE_VERSION) - # Set the build type to upper case for downstream use - string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_UPPER) - - # Set the GTSAM_BUILD_TAG variable. - # If build type is Release, set to blank (""), else set to the build type. - if(${CMAKE_BUILD_TYPE_UPPER} STREQUAL "RELEASE") - set(GTSAM_BUILD_TAG "") # Don't create release mode tag on installed directory - else() - set(GTSAM_BUILD_TAG "${CMAKE_BUILD_TYPE}") - endif() -endif() - -# Options relating to MATLAB wrapper -# TODO: Check for matlab mex binary before handling building of binaries -option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF) -option(GTSAM_INSTALL_CYTHON_TOOLBOX "Enable/Disable installation of Cython toolbox" OFF) -option(GTSAM_BUILD_WRAP "Enable/Disable building of matlab/cython wrap utility (necessary for matlab/cython interface)" ON) -set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of python to build the cython wrapper for (or Default)") - -# Check / set dependent variables for MATLAB wrapper -if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND NOT GTSAM_BUILD_WRAP) - message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX or GTSAM_INSTALL_CYTHON_TOOLBOX is enabled, please also enable GTSAM_BUILD_WRAP") -endif() -if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND GTSAM_BUILD_TYPE_POSTFIXES) - set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE_UPPER}_POSTFIX}) -endif() -if(GTSAM_INSTALL_WRAP AND NOT GTSAM_BUILD_WRAP) - message(FATAL_ERROR "GTSAM_INSTALL_WRAP is enabled, please also enable GTSAM_BUILD_WRAP") -endif() - -if(GTSAM_INSTALL_MATLAB_TOOLBOX AND NOT BUILD_SHARED_LIBS) - message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.") -endif() - -if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_TYPEDEF_POINTS_TO_VECTORS) - message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and GTSAM_TYPEDEF_POINTS_TO_VECTORS are both enabled. For now, the MATLAB toolbox cannot deal with this yet. Please turn one of the two options off.") -endif() - -if(GTSAM_INSTALL_CYTHON_TOOLBOX AND GTSAM_TYPEDEF_POINTS_TO_VECTORS) - message(FATAL_ERROR "GTSAM_INSTALL_CYTHON_TOOLBOX and GTSAM_TYPEDEF_POINTS_TO_VECTORS are both enabled. For now, the CYTHON toolbox cannot deal with this yet. 
Please turn one of the two options off.") -endif() - -# Flags for choosing default packaging tools -set(CPACK_SOURCE_GENERATOR "TGZ" CACHE STRING "CPack Default Source Generator") -set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator") - -############################################################################### -# Find boost - -# To change the path for boost, you will need to set: -# BOOST_ROOT: path to install prefix for boost -# Boost_NO_SYSTEM_PATHS: set to true to keep the find script from ignoring BOOST_ROOT - -if(MSVC) - # By default, boost only builds static libraries on windows - set(Boost_USE_STATIC_LIBS ON) # only find static libs - # If we ever reset above on windows and, ... - # If we use Boost shared libs, disable auto linking. - # Some libraries, at least Boost Program Options, rely on this to export DLL symbols. - if(NOT Boost_USE_STATIC_LIBS) - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK) - endif() - # Virtual memory range for PCH exceeded on VS2015 - if(MSVC_VERSION LESS 1910) # older than VS2017 - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295) - endif() -endif() - -# If building DLLs in MSVC, we need to avoid EIGEN_STATIC_ASSERT() -# or explicit instantiation will generate build errors. -# See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017 -# -if(MSVC AND BUILD_SHARED_LIBS) - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) -endif() - -# Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such. -set(BOOST_FIND_MINIMUM_VERSION 1.43) -set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex) - -find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS}) - -# Required components -if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR - NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY) - message(FATAL_ERROR "Missing required Boost components >= v1.43, please install/upgrade Boost or configure your search paths.") -endif() - -option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF) -# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library) -set(GTSAM_BOOST_LIBRARIES - Boost::serialization - Boost::system - Boost::filesystem - Boost::thread - Boost::date_time - Boost::regex -) -if (GTSAM_DISABLE_NEW_TIMERS) - message("WARNING: GTSAM timing instrumentation manually disabled") - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC DGTSAM_DISABLE_NEW_TIMERS) -else() - if(Boost_TIMER_LIBRARY) - list(APPEND GTSAM_BOOST_LIBRARIES Boost::timer Boost::chrono) - else() - list(APPEND GTSAM_BOOST_LIBRARIES rt) # When using the header-only boost timer library, need -lrt - message("WARNING: GTSAM timing instrumentation will use the older, less accurate, Boost timer library because boost older than 1.48 was found.") - endif() -endif() - -############################################################################### -# Find TBB -find_package(TBB 4.4 COMPONENTS tbb tbbmalloc) - -# Set up variables if we're using TBB -if(TBB_FOUND AND GTSAM_WITH_TBB) - set(GTSAM_USE_TBB 1) # This will go into config.h - if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020)) - set(TBB_GREATER_EQUAL_2020 1) - else() - set(TBB_GREATER_EQUAL_2020 0) - endif() - # all definitions and link requisites will go via imported 
targets: - # tbb & tbbmalloc - list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc) -else() - set(GTSAM_USE_TBB 0) # This will go into config.h -endif() - -############################################################################### -# Prohibit Timing build mode in combination with TBB -if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE STREQUAL "Timing")) - message(FATAL_ERROR "Timing build mode cannot be used together with TBB. Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.") -endif() - - -############################################################################### -# Find Google perftools -find_package(GooglePerfTools) - -############################################################################### -# Support ccache, if installed -if(NOT MSVC AND NOT XCODE_VERSION) - find_program(CCACHE_FOUND ccache) - if(CCACHE_FOUND) - if(GTSAM_BUILD_WITH_CCACHE) - set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) - set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache) - else() - set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "") - set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "") - endif() - endif(CCACHE_FOUND) -endif() - -############################################################################### -# Find MKL -find_package(MKL) - -if(MKL_FOUND AND GTSAM_WITH_EIGEN_MKL) - set(GTSAM_USE_EIGEN_MKL 1) # This will go into config.h - set(EIGEN_USE_MKL_ALL 1) # This will go into config.h - it makes Eigen use MKL - list(APPEND GTSAM_ADDITIONAL_LIBRARIES ${MKL_LIBRARIES}) - - # --no-as-needed is required with gcc according to the MKL link advisor - if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-as-needed") - endif() -else() - set(GTSAM_USE_EIGEN_MKL 0) - set(EIGEN_USE_MKL_ALL 0) -endif() - -############################################################################### -# Find OpenMP (if we're also using MKL) -find_package(OpenMP) # do this here to generate correct message if disabled - -if(GTSAM_WITH_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP AND GTSAM_USE_EIGEN_MKL) - if(OPENMP_FOUND AND GTSAM_USE_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP) - set(GTSAM_USE_EIGEN_MKL_OPENMP 1) # This will go into config.h - list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC ${OpenMP_CXX_FLAGS}) - endif() -endif() - - -############################################################################### -# Option for using system Eigen or GTSAM-bundled Eigen -### These patches only affect usage of MKL. If you want to enable MKL, you *must* -### use our patched version of Eigen -### See: http://eigen.tuxfamily.org/bz/show_bug.cgi?id=704 (Householder QR MKL selection) -### http://eigen.tuxfamily.org/bz/show_bug.cgi?id=705 (Fix MKL LLT return code) -option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" OFF) -option(GTSAM_WITH_EIGEN_UNSUPPORTED "Install Eigen's unsupported modules" OFF) - -# Switch for using system Eigen or GTSAM-bundled Eigen -if(GTSAM_USE_SYSTEM_EIGEN) - find_package(Eigen3 REQUIRED) - - # Use generic Eigen include paths e.g. - set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}") - - # check if MKL is also enabled - can have one or the other, but not both! - # Note: Eigen >= v3.2.5 includes our patches - if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_LESS 3.2.5)) - message(FATAL_ERROR "MKL requires at least Eigen 3.2.5, and your system appears to have an older version. 
Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, or disable GTSAM_WITH_EIGEN_MKL") - endif() - - # Check for Eigen version which doesn't work with MKL - # See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527 for details. - if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4)) - message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.") - endif() - - # The actual include directory (for BUILD cmake target interface): - set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}") -else() - # Use bundled Eigen include path. - # Clear any variables set by FindEigen3 - if(EIGEN3_INCLUDE_DIR) - set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE) - endif() - - # set full path to be used by external projects - # this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in - set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/") - - # The actual include directory (for BUILD cmake target interface): - set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/") -endif() - -# Detect Eigen version: -set(EIGEN_VER_H "${GTSAM_EIGEN_INCLUDE_FOR_BUILD}/Eigen/src/Core/util/Macros.h") -if (EXISTS ${EIGEN_VER_H}) - file(READ "${EIGEN_VER_H}" STR_EIGEN_VERSION) - - # Extract the Eigen version from the Macros.h file, lines "#define EIGEN_WORLD_VERSION XX", etc... - - string(REGEX MATCH "EIGEN_WORLD_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${STR_EIGEN_VERSION}") - string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${GTSAM_EIGEN_VERSION_WORLD}") - - string(REGEX MATCH "EIGEN_MAJOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${STR_EIGEN_VERSION}") - string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${GTSAM_EIGEN_VERSION_MAJOR}") - - string(REGEX MATCH "EIGEN_MINOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${STR_EIGEN_VERSION}") - string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${GTSAM_EIGEN_VERSION_MINOR}") - - set(GTSAM_EIGEN_VERSION "${GTSAM_EIGEN_VERSION_WORLD}.${GTSAM_EIGEN_VERSION_MAJOR}.${GTSAM_EIGEN_VERSION_MINOR}") - - message(STATUS "Found Eigen version: ${GTSAM_EIGEN_VERSION}") -else() - message(WARNING "Cannot determine Eigen version, missing file: `${EIGEN_VER_H}`") -endif () - -if (MSVC) - if (BUILD_SHARED_LIBS) - # mute eigen static assert to avoid errors in shared lib - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) - endif() - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen -endif() - -if (APPLE AND BUILD_SHARED_LIBS) - # Set the default install directory on macOS - set(CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib") -endif() - -############################################################################### -# Global compile options +include(cmake/HandleBoost.cmake) # Boost +include(cmake/HandleCCache.cmake) # ccache +include(cmake/HandleCPack.cmake) # CPack +include(cmake/HandleEigen.cmake) # Eigen3 +include(cmake/HandleGeneralOptions.cmake) # CMake build options +include(cmake/HandleMKL.cmake) # MKL +include(cmake/HandleOpenMP.cmake) # OpenMP +include(cmake/HandlePerfTools.cmake) # Google perftools +include(cmake/HandlePython.cmake) # Python options and commands +include(cmake/HandleTBB.cmake) # TBB +include(cmake/HandleUninstall.cmake) # for "make uninstall" -# Build list of possible 
allocators -set(possible_allocators "") -if(GTSAM_USE_TBB) - list(APPEND possible_allocators TBB) - set(preferred_allocator TBB) -else() - list(APPEND possible_allocators BoostPool STL) - set(preferred_allocator STL) -endif() -if(GOOGLE_PERFTOOLS_FOUND) - list(APPEND possible_allocators tcmalloc) -endif() +include(cmake/HandleAllocators.cmake) # Must be after tbb, perftools -# Check if current allocator choice is valid and set cache option -list(FIND possible_allocators "${GTSAM_DEFAULT_ALLOCATOR}" allocator_valid) -if(allocator_valid EQUAL -1) - set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator" FORCE) -else() - set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator") -endif() -set_property(CACHE GTSAM_DEFAULT_ALLOCATOR PROPERTY STRINGS ${possible_allocators}) -mark_as_advanced(GTSAM_DEFAULT_ALLOCATOR) - -# Define compile flags depending on allocator -if("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "BoostPool") - set(GTSAM_ALLOCATOR_BOOSTPOOL 1) -elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "STL") - set(GTSAM_ALLOCATOR_STL 1) -elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "TBB") - set(GTSAM_ALLOCATOR_TBB 1) -elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "tcmalloc") - set(GTSAM_ALLOCATOR_STL 1) # tcmalloc replaces malloc, so to use it we use the STL allocator - list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc") -endif() - -if(MSVC) - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS) - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code -endif() - -# GCC 4.8+ complains about local typedefs which we use for shared_ptr etc. -if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8) - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs) - endif() -endif() - -# As of XCode 7, clang also complains about this -if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") - if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0) - list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs) - endif() -endif() - -if(GTSAM_ENABLE_CONSISTENCY_CHECKS) - # This should be made PUBLIC if GTSAM_EXTRA_CONSISTENCY_CHECKS is someday used in a public .h - list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE GTSAM_EXTRA_CONSISTENCY_CHECKS) -endif() +include(cmake/HandleGlobalBuildFlags.cmake) # Build flags ############################################################################### # Add components @@ -419,14 +60,16 @@ endif() # Build CppUnitLite add_subdirectory(CppUnitLite) -# Build wrap -if (GTSAM_BUILD_WRAP) +# This is the new wrapper +if(GTSAM_BUILD_PYTHON) + # Need to set this for the wrap package so we don't use the default value. 
+ set(WRAP_PYTHON_VERSION ${GTSAM_PYTHON_VERSION} + CACHE STRING "The Python version to use for wrapping") + add_subdirectory(wrap) - # suppress warning of cython line being too long - if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-misleading-indentation") - endif() -endif(GTSAM_BUILD_WRAP) + list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/wrap/cmake") + add_subdirectory(python) +endif() # Build GTSAM library add_subdirectory(gtsam) @@ -447,29 +90,13 @@ endif() # Matlab toolbox if (GTSAM_INSTALL_MATLAB_TOOLBOX) - add_subdirectory(matlab) + add_subdirectory(matlab) endif() -# Cython wrap -if (GTSAM_INSTALL_CYTHON_TOOLBOX) - set(GTSAM_INSTALL_CYTHON_TOOLBOX 1) - # Set up cache options - set(GTSAM_CYTHON_INSTALL_PATH "" CACHE PATH "Cython toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/cython") - if(NOT GTSAM_CYTHON_INSTALL_PATH) - set(GTSAM_CYTHON_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/cython") - endif() - set(GTSAM_EIGENCY_INSTALL_PATH ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency) - add_subdirectory(cython) -else() - set(GTSAM_INSTALL_CYTHON_TOOLBOX 0) # This will go into config.h -endif() - - # Install config and export files GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in") export(TARGETS ${GTSAM_EXPORTED_TARGETS} FILE GTSAM-exports.cmake) - # Check for doxygen availability - optional dependency find_package(Doxygen) @@ -481,142 +108,11 @@ endif() # CMake Tools add_subdirectory(cmake) - -############################################################################### -# Set up CPack -set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "GTSAM") -set(CPACK_PACKAGE_VENDOR "Frank Dellaert, Georgia Institute of Technology") -set(CPACK_PACKAGE_CONTACT "Frank Dellaert, dellaert@cc.gatech.edu") -set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.md") -set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE") -set(CPACK_PACKAGE_VERSION_MAJOR ${GTSAM_VERSION_MAJOR}) -set(CPACK_PACKAGE_VERSION_MINOR ${GTSAM_VERSION_MINOR}) -set(CPACK_PACKAGE_VERSION_PATCH ${GTSAM_VERSION_PATCH}) -set(CPACK_PACKAGE_INSTALL_DIRECTORY "CMake ${CMake_VERSION_MAJOR}.${CMake_VERSION_MINOR}") -#set(CPACK_INSTALLED_DIRECTORIES "doc;.") # Include doc directory -#set(CPACK_INSTALLED_DIRECTORIES ".") # FIXME: throws error -set(CPACK_SOURCE_IGNORE_FILES "/build*;/\\\\.;/makestats.sh$") -set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/gtsam_unstable/") -set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/package_scripts/") -set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}") -#set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-aspn${GTSAM_VERSION_PATCH}") # Used for creating ASPN tarballs - -# Deb-package specific cpack -set(CPACK_DEBIAN_PACKAGE_NAME "libgtsam-dev") -set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.43)") #Example: "libc6 (>= 2.3.1-6), libgcc1 (>= 1:3.4.2-12)") - - -############################################################################### # Print configuration variables -message(STATUS "===============================================================") -message(STATUS "================ Configuration Options ======================") -message(STATUS " CMAKE_CXX_COMPILER_ID type : ${CMAKE_CXX_COMPILER_ID}") -message(STATUS " CMAKE_CXX_COMPILER_VERSION : ${CMAKE_CXX_COMPILER_VERSION}") -message(STATUS " CMake version : ${CMAKE_VERSION}") -message(STATUS " CMake generator : ${CMAKE_GENERATOR}") -message(STATUS " CMake build tool 
: ${CMAKE_BUILD_TOOL}") -message(STATUS "Build flags ") -print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ") -print_config_flag(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all' ") -print_config_flag(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'") -if (DOXYGEN_FOUND) - print_config_flag(${GTSAM_BUILD_DOCS} "Build Docs ") -endif() -print_config_flag(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries ") -print_config_flag(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name ") -if(GTSAM_UNSTABLE_AVAILABLE) - print_config_flag(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ") -endif() - -if(NOT MSVC AND NOT XCODE_VERSION) - print_config_flag(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ") - message(STATUS " Build type : ${CMAKE_BUILD_TYPE}") - message(STATUS " C compilation flags : ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") - message(STATUS " C++ compilation flags : ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") -endif() - -print_build_options_for_target(gtsam) - -message(STATUS " Use System Eigen : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})") - -if(GTSAM_USE_TBB) - message(STATUS " Use Intel TBB : Yes") -elseif(TBB_FOUND) - message(STATUS " Use Intel TBB : TBB found but GTSAM_WITH_TBB is disabled") -else() - message(STATUS " Use Intel TBB : TBB not found") -endif() -if(GTSAM_USE_EIGEN_MKL) - message(STATUS " Eigen will use MKL : Yes") -elseif(MKL_FOUND) - message(STATUS " Eigen will use MKL : MKL found but GTSAM_WITH_EIGEN_MKL is disabled") -else() - message(STATUS " Eigen will use MKL : MKL not found") -endif() -if(GTSAM_USE_EIGEN_MKL_OPENMP) - message(STATUS " Eigen will use MKL and OpenMP : Yes") -elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL) - message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled") -elseif(OPENMP_FOUND AND NOT MKL_FOUND) - message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but MKL not found") -elseif(OPENMP_FOUND) - message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled") -else() - message(STATUS " Eigen will use MKL and OpenMP : OpenMP not found") -endif() -message(STATUS " Default allocator : ${GTSAM_DEFAULT_ALLOCATOR}") - -if(GTSAM_THROW_CHEIRALITY_EXCEPTION) - message(STATUS " Cheirality exceptions enabled : YES") -else() - message(STATUS " Cheirality exceptions enabled : NO") -endif() - -if(NOT MSVC AND NOT XCODE_VERSION) - if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE) - message(STATUS " Build with ccache : Yes") - elseif(CCACHE_FOUND) - message(STATUS " Build with ccache : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled") - else() - message(STATUS " Build with ccache : No") - endif() -endif() - -message(STATUS "Packaging flags ") -message(STATUS " CPack Source Generator : ${CPACK_SOURCE_GENERATOR}") -message(STATUS " CPack Generator : ${CPACK_GENERATOR}") - -message(STATUS "GTSAM flags ") -print_config_flag(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ") -print_config_flag(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ") -print_config_flag(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ") -print_config_flag(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ") -print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V4} "Deprecated in GTSAM 4 allowed ") -print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ") -print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION} 
"Metis-based Nested Dissection ") -print_config_flag(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration") -print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ") - -message(STATUS "MATLAB toolbox flags ") -print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install matlab toolbox ") - -message(STATUS "Cython toolbox flags ") -print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ") -if(GTSAM_INSTALL_CYTHON_TOOLBOX) - message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}") -endif() -message(STATUS "===============================================================") +include(cmake/HandlePrintConfiguration.cmake) # Print warnings at the end -if(GTSAM_WITH_TBB AND NOT TBB_FOUND) - message(WARNING "TBB 4.4 or newer was not found - this is ok, but note that GTSAM parallelization will be disabled. Set GTSAM_WITH_TBB to 'Off' to avoid this warning.") -endif() -if(GTSAM_WITH_EIGEN_MKL AND NOT MKL_FOUND) - message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning. See INSTALL.md for notes on performance.") -endif() -if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND) - message(WARNING "Your compiler does not support OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. See INSTALL.md for notes on performance.") -endif() +include(cmake/HandleFinalChecks.cmake) # Include CPack *after* all flags include(CPack) diff --git a/CppUnitLite/Test.h b/CppUnitLite/Test.h index b1fb0cf08..a898c83ef 100644 --- a/CppUnitLite/Test.h +++ b/CppUnitLite/Test.h @@ -64,7 +64,7 @@ class Test class testGroup##testName##Test : public Test \ { public: testGroup##testName##Test () : Test (#testName "Test", __FILE__, __LINE__, true) {} \ virtual ~testGroup##testName##Test () {};\ - void run (TestResult& result_);} \ + void run (TestResult& result_) override;} \ testGroup##testName##Instance; \ void testGroup##testName##Test::run (TestResult& result_) @@ -82,7 +82,7 @@ class Test class testGroup##testName##Test : public Test \ { public: testGroup##testName##Test () : Test (#testName "Test", __FILE__, __LINE__, false) {} \ virtual ~testGroup##testName##Test () {};\ - void run (TestResult& result_);} \ + void run (TestResult& result_) override;} \ testGroup##testName##Instance; \ void testGroup##testName##Test::run (TestResult& result_) diff --git a/GTSAM-Concepts.md b/GTSAM-Concepts.md index a6cfee984..953357ede 100644 --- a/GTSAM-Concepts.md +++ b/GTSAM-Concepts.md @@ -72,9 +72,9 @@ A Lie group is both a manifold *and* a group. Hence, a LIE_GROUP type should imp However, we now also need to be able to evaluate the derivatives of compose and inverse. Hence, we have the following extra valid static functions defined in the struct `gtsam::traits`: -* `r = traits::Compose(p,q,Hq,Hp)` +* `r = traits::Compose(p,q,Hp,Hq)` * `q = traits::Inverse(p,Hp)` -* `r = traits::Between(p,q,Hq,H2p)` +* `r = traits::Between(p,q,Hp,Hq)` where above the *H* arguments stand for optional Jacobian arguments. That makes it possible to create factors implementing priors (PriorFactor) or relations between two instances of a Lie group type (BetweenFactor). diff --git a/INSTALL.md b/INSTALL.md index b8f73f153..1fddf4df0 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -13,7 +13,7 @@ $ make install ## Important Installation Notes 1. 
GTSAM requires the following libraries to be installed on your system: - - BOOST version 1.43 or greater (install through Linux repositories or MacPorts) + - BOOST version 1.58 or greater (install through Linux repositories or MacPorts) - Cmake version 3.0 or higher - Support for XCode 4.3 command line tools on Mac requires CMake 2.8.8 or higher @@ -70,7 +70,34 @@ execute commands as follows for an out-of-source build: This will build the library and unit tests, run all of the unit tests, and then install the library itself. -## CMake Configuration Options and Details +# Windows Installation + +This section details how to build a GTSAM `.sln` file using Visual Studio. + +### Prerequisites + +- Visual Studio with C++ CMake tools for Windows +- All the other pre-requisites listed above. + +### Steps + +1. Open Visual Studio. +2. Select `Open a local folder` and select the GTSAM source directory. +3. Go to `Project -> CMake Settings`. + - (Optional) Set `Configuration name`. + - (Optional) Set `Configuration type`. + - Set the `Toolset` to `msvc_x64_x64`. If you know what toolset you require, then skip this step. + - Update the `Build root` to `${projectDir}\build\${name}`. + - You can optionally create a new configuration for a `Release` build. + - Set the necessary CMake variables for your use case. + - Click on `Show advanced settings`. + - For `CMake generator`, select a version which matches `Visual Studio Win64`, e.g. `Visual Studio 16 2019 Win64`. + - Save the settings (Ctrl + S). +4. Click on `Project -> Generate Cache`. This will generate the CMake build files (as seen in the Output window). +5. The last step will generate a `GTSAM.sln` file in the `build` directory. At this point, GTSAM can be used as a regular Visual Studio project. + + +# CMake Configuration Options and Details GTSAM has a number of options that can be configured, which is best done with one of the following: @@ -78,7 +105,7 @@ one of the following: - ccmake the curses GUI for cmake - cmake-gui a real GUI for cmake -### Important Options: +## Important Options: #### CMAKE_BUILD_TYPE We support several build configurations for GTSAM (case insensitive) @@ -173,7 +200,7 @@ NOTE: If _GLIBCXX_DEBUG is used to compile gtsam, anything that links against g Intel has a guide for installing MKL on Linux through APT repositories at . After following the instructions, add the following to your `~/.bashrc` (and afterwards, open a new terminal before compiling GTSAM): -`LD_PRELOAD` need only be set if you are building the cython wrapper to use GTSAM from python. +`LD_PRELOAD` need only be set if you are building the python wrapper to use GTSAM from python. ```sh source /opt/intel/mkl/bin/mklvars.sh intel64 export LD_PRELOAD="$LD_PRELOAD:/opt/intel/mkl/lib/intel64/libmkl_core.so:/opt/intel/mkl/lib/intel64/libmkl_sequential.so" @@ -190,6 +217,6 @@ Failing to specify `LD_PRELOAD` may lead to errors such as: `ImportError: /opt/intel/mkl/lib/intel64/libmkl_vml_avx2.so: undefined symbol: mkl_serv_getenv` or `Intel MKL FATAL ERROR: Cannot load libmkl_avx2.so or libmkl_def.so.` -when importing GTSAM using the cython wrapper in python. +when importing GTSAM using the python wrapper. diff --git a/LICENSE b/LICENSE index d828deb55..228a6b942 100644 --- a/LICENSE +++ b/LICENSE @@ -23,3 +23,5 @@ ordering library - Included unmodified in gtsam/3rdparty/metis - Licenced under Apache License v 2.0, provided in gtsam/3rdparty/metis/LICENSE.txt +- Spectra v0.9.0: Sparse Eigenvalue Computation Toolkit as a Redesigned ARPACK. 
+ - Licenced under MPL2, provided at https://github.com/yixuan/spectra diff --git a/README.md b/README.md index 093e35f0f..60eff197a 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,11 @@ # README - Georgia Tech Smoothing and Mapping Library +**Important Note** + +As of August 1 2020, the `develop` branch is officially in "Pre 4.1" mode, and features deprecated in 4.0 have been removed. Please use the last [4.0.3 release](https://github.com/borglab/gtsam/releases/tag/4.0.3) if you need those features. + +However, most are easily converted and can be tracked down (in 4.0.3) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V4`. + ## What is GTSAM? GTSAM is a C++ library that implements smoothing and @@ -7,13 +13,16 @@ mapping (SAM) in robotics and vision, using Factor Graphs and Bayes Networks as the underlying computing paradigm rather than sparse matrices. -| Platform | Build Status | -|:---------:|:-------------:| -| gcc/clang | [![Build Status](https://travis-ci.com/borglab/gtsam.svg?branch=develop)](https://travis-ci.com/borglab/gtsam/) | -| MSVC | [![Build status](https://ci.appveyor.com/api/projects/status/3enllitj52jsxwfg/branch/develop?svg=true)](https://ci.appveyor.com/project/dellaert/gtsam) | +The current support matrix is: +| Platform | Compiler | Build Status | +|:------------:|:---------:|:-------------:| +| Ubuntu 18.04 | gcc/clang | ![Linux CI](https://github.com/borglab/gtsam/workflows/Linux%20CI/badge.svg) | +| macOS | clang | ![macOS CI](https://github.com/borglab/gtsam/workflows/macOS%20CI/badge.svg) | +| Windows | MSVC | ![Windows CI](https://github.com/borglab/gtsam/workflows/Windows%20CI/badge.svg) | -On top of the C++ library, GTSAM includes [wrappers for MATLAB & Python](##Wrappers). + +On top of the C++ library, GTSAM includes [wrappers for MATLAB & Python](#wrappers). ## Quickstart @@ -31,7 +40,7 @@ $ make install Prerequisites: -- [Boost](http://www.boost.org/users/download/) >= 1.43 (Ubuntu: `sudo apt-get install libboost-all-dev`) +- [Boost](http://www.boost.org/users/download/) >= 1.58 (Ubuntu: `sudo apt-get install libboost-all-dev`) - [CMake](http://www.cmake.org/cmake/resources/software.html) >= 3.0 (Ubuntu: `sudo apt-get install cmake`) - A modern compiler, i.e., at least gcc 4.7.3 on Linux. @@ -44,13 +53,16 @@ Optional prerequisites - used automatically if findable by CMake: ## GTSAM 4 Compatibility -GTSAM 4 introduces several new features, most notably Expressions and a Python toolbox. We also deprecate some legacy functionality and wrongly named methods, but by default the flag GTSAM_ALLOW_DEPRECATED_SINCE_V4 is enabled, allowing anyone to just pull V4 and compile. To build the python toolbox, however, you will have to explicitly disable that flag. +GTSAM 4 introduces several new features, most notably Expressions and a Python toolbox. It also introduces traits, a C++ technique that allows optimizing with non-GTSAM types. That opens the door to retiring geometric types such as Point2 and Point3 to pure Eigen types, which we also do. A significant change which will not trigger a compile error is that zero-initializing of Point2 and Point3 is deprecated, so please be aware that this might render functions using their default constructor incorrect. + +GTSAM 4 also deprecated some legacy functionality and wrongly named methods. If you are on a 4.0.X release, you can define the flag GTSAM_ALLOW_DEPRECATED_SINCE_V4 to use the deprecated methods. + +GTSAM 4.1 added a new pybind wrapper, and **removed** the deprecated functionality. 
There is a flag GTSAM_ALLOW_DEPRECATED_SINCE_V41 for newly deprecated methods since the 4.1 release, which is on by default, allowing anyone to just pull version 4.1 and compile. -Also, GTSAM 4 introduces traits, a C++ technique that allows optimizing with non-GTSAM types. That opens the door to retiring geometric types such as Point2 and Point3 to pure Eigen types, which we also do. A significant change which will not trigger a compile error is that zero-initializing of Point2 and Point3 is deprecated, so please be aware that this might render functions using their default constructor incorrect. ## Wrappers -We provide support for [MATLAB](matlab/README.md) and [Python](cython/README.md) wrappers for GTSAM. Please refer to the linked documents for more details. +We provide support for [MATLAB](matlab/README.md) and [Python](python/README.md) wrappers for GTSAM. Please refer to the linked documents for more details. ## The Preintegrated IMU Factor diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 2c78ca1f2..000000000 --- a/appveyor.yml +++ /dev/null @@ -1,33 +0,0 @@ -# version format -version: 4.0.2-{branch}-build{build} - -os: Visual Studio 2019 - -clone_folder: c:\projects\gtsam - -platform: x64 -configuration: Release - -environment: - CTEST_OUTPUT_ON_FAILURE: 1 - BOOST_ROOT: C:/Libraries/boost_1_71_0 - -build_script: - - cd c:\projects\gtsam\build - # As of Dec 2019, not all unit tests build cleanly for MSVC, so we'll just - # check that parts of GTSAM build correctly: - #- cmake --build . - - cmake --build . --config Release --target gtsam - - cmake --build . --config Release --target gtsam_unstable - - cmake --build . --config Release --target wrap - #- cmake --build . --target check - - cmake --build . --config Release --target check.base - - cmake --build . --config Release --target check.base_unstable - - cmake --build . --config Release --target check.linear - -before_build: - - cd c:\projects\gtsam - - mkdir build - - cd build - # Disable examples to avoid AppVeyor timeout - - cmake -G "Visual Studio 16 2019" .. -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt index d612e2fae..451ca38a4 100644 --- a/cmake/CMakeLists.txt +++ b/cmake/CMakeLists.txt @@ -17,11 +17,8 @@ install(FILES GtsamBuildTypes.cmake GtsamMakeConfigFile.cmake GtsamMatlabWrap.cmake - GtsamPythonWrap.cmake - GtsamCythonWrap.cmake GtsamTesting.cmake GtsamPrinting.cmake - FindCython.cmake FindNumPy.cmake README.html DESTINATION "${SCRIPT_INSTALL_DIR}/GTSAMCMakeTools") diff --git a/cmake/FindCython.cmake b/cmake/FindCython.cmake deleted file mode 100644 index e5a32c30d..000000000 --- a/cmake/FindCython.cmake +++ /dev/null @@ -1,81 +0,0 @@ -# Modifed from: https://github.com/nest/nest-simulator/blob/master/cmake/FindCython.cmake -# -# Find the Cython compiler. -# -# This code sets the following variables: -# -# CYTHON_FOUND -# CYTHON_PATH -# CYTHON_EXECUTABLE -# CYTHON_VERSION -# -# See also UseCython.cmake - -#============================================================================= -# Copyright 2011 Kitware, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#============================================================================= - -# Use the Cython executable that lives next to the Python executable -# if it is a local installation. -if(GTSAM_PYTHON_VERSION STREQUAL "Default") - find_package(PythonInterp) -else() - find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT) -endif() - -if ( PYTHONINTERP_FOUND ) - execute_process( COMMAND "${PYTHON_EXECUTABLE}" "-c" - "import Cython; print(Cython.__path__[0])" - RESULT_VARIABLE RESULT - OUTPUT_VARIABLE CYTHON_PATH - OUTPUT_STRIP_TRAILING_WHITESPACE - ) -endif () - -# RESULT=0 means ok -if ( NOT RESULT ) - get_filename_component( _python_path ${PYTHON_EXECUTABLE} PATH ) - find_program( CYTHON_EXECUTABLE - NAMES cython cython.bat cython3 - HINTS ${_python_path} - ) -endif () - -# RESULT=0 means ok -if ( NOT RESULT ) - execute_process( COMMAND "${PYTHON_EXECUTABLE}" "-c" - "import Cython; print(Cython.__version__)" - RESULT_VARIABLE RESULT - OUTPUT_VARIABLE CYTHON_VAR_OUTPUT - ERROR_VARIABLE CYTHON_VAR_OUTPUT - OUTPUT_STRIP_TRAILING_WHITESPACE - ) - if ( RESULT EQUAL 0 ) - string( REGEX REPLACE ".* ([0-9]+\\.[0-9]+(\\.[0-9]+)?).*" "\\1" - CYTHON_VERSION "${CYTHON_VAR_OUTPUT}" ) - endif () -endif () - -include( FindPackageHandleStandardArgs ) -find_package_handle_standard_args( Cython - FOUND_VAR - CYTHON_FOUND - REQUIRED_VARS - CYTHON_PATH - CYTHON_EXECUTABLE - VERSION_VAR - CYTHON_VERSION - ) - diff --git a/cmake/FindNumPy.cmake b/cmake/FindNumPy.cmake index 4f5743aa6..d55a760c6 100644 --- a/cmake/FindNumPy.cmake +++ b/cmake/FindNumPy.cmake @@ -40,17 +40,9 @@ # Finding NumPy involves calling the Python interpreter if(NumPy_FIND_REQUIRED) - if(GTSAM_PYTHON_VERSION STREQUAL "Default") - find_package(PythonInterp REQUIRED) - else() - find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED) - endif() + find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED) else() - if(GTSAM_PYTHON_VERSION STREQUAL "Default") - find_package(PythonInterp) - else() - find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT) - endif() + find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT) endif() if(NOT PYTHONINTERP_FOUND) diff --git a/cmake/GtsamBuildTypes.cmake b/cmake/GtsamBuildTypes.cmake index 15a02b6e8..3155161be 100644 --- a/cmake/GtsamBuildTypes.cmake +++ b/cmake/GtsamBuildTypes.cmake @@ -1,3 +1,10 @@ +include(CheckCXXCompilerFlag) # for check_cxx_compiler_flag() + +# Set cmake policy to recognize the AppleClang compiler +# independently from the Clang compiler. +if(POLICY CMP0025) + cmake_policy(SET CMP0025 NEW) +endif() # function: list_append_cache(var [new_values ...]) # Like "list(APPEND ...)" but working for CACHE variables. 
@@ -99,8 +106,27 @@ if(MSVC) set(GTSAM_COMPILE_OPTIONS_PRIVATE_TIMING /MD /O2 CACHE STRING "(User editable) Private compiler flags for Timing configuration.") else() # Common to all configurations, next for each configuration: - # "-fPIC" is to ensure proper code generation for shared libraries - set(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON -Wall -fPIC CACHE STRING "(User editable) Private compiler flags for all configurations.") + + if (NOT MSVC) + check_cxx_compiler_flag(-Wsuggest-override COMPILER_HAS_WSUGGEST_OVERRIDE) + check_cxx_compiler_flag(-Wmissing COMPILER_HAS_WMISSING_OVERRIDE) + if (COMPILER_HAS_WSUGGEST_OVERRIDE) + set(flag_override_ -Wsuggest-override) # -Werror=suggest-override: Add again someday + elseif(COMPILER_HAS_WMISSING_OVERRIDE) + set(flag_override_ -Wmissing-override) # -Werror=missing-override: Add again someday + endif() + endif() + + set(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON + -Wall # Enable common warnings + -fPIC # ensure proper code generation for shared libraries + $<$:-Wreturn-local-addr -Werror=return-local-addr> # Error: return local address + $<$:-Wreturn-stack-address -Werror=return-stack-address> # Error: return local address + -Wreturn-type -Werror=return-type # Error on missing return() + -Wformat -Werror=format-security # Error on wrong printf() arguments + $<$:${flag_override_}> # Enforce the use of the override keyword + # + CACHE STRING "(User editable) Private compiler flags for all configurations.") set(GTSAM_COMPILE_OPTIONS_PRIVATE_DEBUG -g -fno-inline CACHE STRING "(User editable) Private compiler flags for Debug configuration.") set(GTSAM_COMPILE_OPTIONS_PRIVATE_RELWITHDEBINFO -g -O3 CACHE STRING "(User editable) Private compiler flags for RelWithDebInfo configuration.") set(GTSAM_COMPILE_OPTIONS_PRIVATE_RELEASE -O3 CACHE STRING "(User editable) Private compiler flags for Release configuration.") @@ -242,3 +268,17 @@ function(gtsam_apply_build_flags target_name_) target_compile_options(${target_name_} PRIVATE ${GTSAM_COMPILE_OPTIONS_PRIVATE}) endfunction(gtsam_apply_build_flags) + + +if(NOT MSVC AND NOT XCODE_VERSION) + # Set the build type to upper case for downstream use + string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_UPPER) + + # Set the GTSAM_BUILD_TAG variable. + # If build type is Release, set to blank (""), else set to the build type. + if(${CMAKE_BUILD_TYPE_UPPER} STREQUAL "RELEASE") + set(GTSAM_BUILD_TAG "") # Don't create release mode tag on installed directory + else() + set(GTSAM_BUILD_TAG "${CMAKE_BUILD_TYPE}") + endif() +endif() diff --git a/cmake/GtsamCythonWrap.cmake b/cmake/GtsamCythonWrap.cmake deleted file mode 100644 index f1382729f..000000000 --- a/cmake/GtsamCythonWrap.cmake +++ /dev/null @@ -1,281 +0,0 @@ -# Check Cython version, need to be >=0.25.2 -# Unset these cached variables to avoid surprises when the python/cython -# in the current environment are different from the cached! 
-unset(PYTHON_EXECUTABLE CACHE) -unset(CYTHON_EXECUTABLE CACHE) -unset(PYTHON_INCLUDE_DIR CACHE) -unset(PYTHON_MAJOR_VERSION CACHE) -unset(PYTHON_LIBRARY CACHE) - -# Allow override from command line -if(NOT DEFINED GTSAM_USE_CUSTOM_PYTHON_LIBRARY) - if(GTSAM_PYTHON_VERSION STREQUAL "Default") - find_package(PythonInterp REQUIRED) - find_package(PythonLibs REQUIRED) - else() - find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED) - find_package(PythonLibs ${GTSAM_PYTHON_VERSION} EXACT REQUIRED) - endif() -endif() -find_package(Cython 0.25.2 REQUIRED) - -execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" - "from __future__ import print_function;import sys;print(sys.version[0], end='')" - OUTPUT_VARIABLE PYTHON_MAJOR_VERSION -) - -# User-friendly Cython wrapping and installing function. -# Builds a Cython module from the provided interface_header. -# For example, for the interface header gtsam.h, -# this will build the wrap module 'gtsam'. -# -# Arguments: -# -# interface_header: The relative path to the wrapper interface definition file. -# extra_imports: extra header to import in the Cython pxd file. -# For example, to use Cython gtsam.pxd in your own module, -# use "from gtsam cimport *" -# install_path: destination to install the library -# libs: libraries to link with -# dependencies: Dependencies which need to be built before the wrapper -function(wrap_and_install_library_cython interface_header extra_imports install_path libs dependencies) - # Paths for generated files - get_filename_component(module_name "${interface_header}" NAME_WE) - set(generated_files_path "${PROJECT_BINARY_DIR}/cython/${module_name}") - wrap_library_cython("${interface_header}" "${generated_files_path}" "${extra_imports}" "${libs}" "${dependencies}") - install_cython_wrapped_library("${interface_header}" "${generated_files_path}" "${install_path}") -endfunction() - -function(set_up_required_cython_packages) - # Set up building of cython module - include_directories(${PYTHON_INCLUDE_DIRS}) - find_package(NumPy REQUIRED) - include_directories(${NUMPY_INCLUDE_DIRS}) -endfunction() - - -# Convert pyx to cpp by executing cython -# This is the first step to compile cython from the command line -# as described at: http://cython.readthedocs.io/en/latest/src/reference/compilation.html -# -# Arguments: -# - target: The specified target for this step -# - pyx_file: The input pyx_file in full *absolute* path -# - generated_cpp: The output cpp file in full absolute path -# - include_dirs: Directories to include when executing cython -function(pyx_to_cpp target pyx_file generated_cpp include_dirs) - foreach(dir ${include_dirs}) - set(includes_for_cython ${includes_for_cython} -I ${dir}) - endforeach() - - add_custom_command( - OUTPUT ${generated_cpp} - COMMAND - ${CYTHON_EXECUTABLE} -X boundscheck=False -v --fast-fail --cplus -${PYTHON_MAJOR_VERSION} ${includes_for_cython} ${pyx_file} -o ${generated_cpp} - VERBATIM) - add_custom_target(${target} ALL DEPENDS ${generated_cpp}) -endfunction() - -# Build the cpp file generated by converting pyx using cython -# This is the second step to compile cython from the command line -# as described at: http://cython.readthedocs.io/en/latest/src/reference/compilation.html -# -# Arguments: -# - target: The specified target for this step -# - cpp_file: The input cpp_file in full *absolute* path -# - output_lib_we: The output lib filename only (without extension) -# - output_dir: The output directory -function(build_cythonized_cpp target cpp_file output_lib_we output_dir) - 
add_library(${target} MODULE ${cpp_file}) - - if(WIN32) - # Use .pyd extension instead of .dll on Windows - set_target_properties(${target} PROPERTIES SUFFIX ".pyd") - - # Add full path to the Python library - target_link_libraries(${target} ${PYTHON_LIBRARIES}) - endif() - - if(APPLE) - set(link_flags "-undefined dynamic_lookup") - endif() - set_target_properties(${target} - PROPERTIES COMPILE_FLAGS "-w" - LINK_FLAGS "${link_flags}" - OUTPUT_NAME ${output_lib_we} - PREFIX "" - ${CMAKE_BUILD_TYPE_UPPER}_POSTFIX "" - LIBRARY_OUTPUT_DIRECTORY ${output_dir}) -endfunction() - -# Cythonize a pyx from the command line as described at -# http://cython.readthedocs.io/en/latest/src/reference/compilation.html -# Arguments: -# - target: The specified target -# - pyx_file: The input pyx_file in full *absolute* path -# - output_lib_we: The output lib filename only (without extension) -# - output_dir: The output directory -# - include_dirs: Directories to include when executing cython -# - libs: Libraries to link with -# - interface_header: For dependency. Any update in interface header will re-trigger cythonize -function(cythonize target pyx_file output_lib_we output_dir include_dirs libs interface_header dependencies) - get_filename_component(pyx_path "${pyx_file}" DIRECTORY) - get_filename_component(pyx_name "${pyx_file}" NAME_WE) - set(generated_cpp "${output_dir}/${pyx_name}.cpp") - - set_up_required_cython_packages() - pyx_to_cpp(${target}_pyx2cpp ${pyx_file} ${generated_cpp} "${include_dirs}") - - # Late dependency injection, to make sure this gets called whenever the interface header is updated - # See: https://stackoverflow.com/questions/40032593/cmake-does-not-rebuild-dependent-after-prerequisite-changes - add_custom_command(OUTPUT ${generated_cpp} DEPENDS ${interface_header} ${pyx_file} APPEND) - if (NOT "${dependencies}" STREQUAL "") - add_dependencies(${target}_pyx2cpp "${dependencies}") - endif() - - build_cythonized_cpp(${target} ${generated_cpp} ${output_lib_we} ${output_dir}) - if (NOT "${libs}" STREQUAL "") - target_link_libraries(${target} "${libs}") - endif() - add_dependencies(${target} ${target}_pyx2cpp) -endfunction() - -# Internal function that wraps a library and compiles the wrapper -function(wrap_library_cython interface_header generated_files_path extra_imports libs dependencies) - # Wrap codegen interface - # Extract module path and name from interface header file name - # wrap requires interfacePath to be *absolute* - get_filename_component(interface_header "${interface_header}" ABSOLUTE) - get_filename_component(module_path "${interface_header}" PATH) - get_filename_component(module_name "${interface_header}" NAME_WE) - - # Wrap module to Cython pyx - message(STATUS "Cython wrapper generating ${module_name}.pyx") - set(generated_pyx "${generated_files_path}/${module_name}.pyx") - file(MAKE_DIRECTORY "${generated_files_path}") - add_custom_command( - OUTPUT ${generated_pyx} - DEPENDS ${interface_header} wrap - COMMAND - wrap --cython ${module_path} ${module_name} ${generated_files_path} "${extra_imports}" - VERBATIM - WORKING_DIRECTORY ${generated_files_path}/../) - add_custom_target(cython_wrap_${module_name}_pyx ALL DEPENDS ${generated_pyx}) - if(NOT "${dependencies}" STREQUAL "") - add_dependencies(cython_wrap_${module_name}_pyx ${dependencies}) - endif() - - message(STATUS "Cythonize and build ${module_name}.pyx") - get_property(include_dirs DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY INCLUDE_DIRECTORIES) - cythonize(cythonize_${module_name} ${generated_pyx} 
${module_name} - ${generated_files_path} "${include_dirs}" "${libs}" ${interface_header} cython_wrap_${module_name}_pyx) - - # distclean - add_custom_target(wrap_${module_name}_cython_distclean - COMMAND cmake -E remove_directory ${generated_files_path}) -endfunction() - -# Internal function that installs a wrap toolbox -function(install_cython_wrapped_library interface_header generated_files_path install_path) - get_filename_component(module_name "${interface_header}" NAME_WE) - - # NOTE: only installs .pxd and .pyx and binary files (not .cpp) - the trailing slash on the directory name - # here prevents creating the top-level module name directory in the destination. - # Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH/subdirectory if there is one - get_filename_component(location "${install_path}" PATH) - get_filename_component(name "${install_path}" NAME) - message(STATUS "Installing Cython Toolbox to ${location}${GTSAM_BUILD_TAG}/${name}") #${GTSAM_CYTHON_INSTALL_PATH}" - - if(GTSAM_BUILD_TYPE_POSTFIXES) - foreach(build_type ${CMAKE_CONFIGURATION_TYPES}) - string(TOUPPER "${build_type}" build_type_upper) - if(${build_type_upper} STREQUAL "RELEASE") - set(build_type_tag "") # Don't create release mode tag on installed directory - else() - set(build_type_tag "${build_type}") - endif() - - install(DIRECTORY "${generated_files_path}/" DESTINATION "${location}${build_type_tag}/${name}" - CONFIGURATIONS "${build_type}" - PATTERN "build" EXCLUDE - PATTERN "CMakeFiles" EXCLUDE - PATTERN "Makefile" EXCLUDE - PATTERN "*.cmake" EXCLUDE - PATTERN "*.cpp" EXCLUDE - PATTERN "*.py" EXCLUDE) - endforeach() - else() - install(DIRECTORY "${generated_files_path}/" DESTINATION ${install_path} - PATTERN "build" EXCLUDE - PATTERN "CMakeFiles" EXCLUDE - PATTERN "Makefile" EXCLUDE - PATTERN "*.cmake" EXCLUDE - PATTERN "*.cpp" EXCLUDE - PATTERN "*.py" EXCLUDE) - endif() -endfunction() - -# Helper function to install Cython scripts and handle multiple build types where the scripts -# should be installed to all build type toolboxes -# -# Arguments: -# source_directory: The source directory to be installed. "The last component of each directory -# name is appended to the destination directory but a trailing slash may be -# used to avoid this because it leaves the last component empty." -# (https://cmake.org/cmake/help/v3.3/command/install.html?highlight=install#installing-directories) -# dest_directory: The destination directory to install to. 
-# patterns: list of file patterns to install -function(install_cython_scripts source_directory dest_directory patterns) - set(patterns_args "") - set(exclude_patterns "") - - foreach(pattern ${patterns}) - list(APPEND patterns_args PATTERN "${pattern}") - endforeach() - if(GTSAM_BUILD_TYPE_POSTFIXES) - foreach(build_type ${CMAKE_CONFIGURATION_TYPES}) - string(TOUPPER "${build_type}" build_type_upper) - if(${build_type_upper} STREQUAL "RELEASE") - set(build_type_tag "") # Don't create release mode tag on installed directory - else() - set(build_type_tag "${build_type}") - endif() - # Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one - get_filename_component(location "${dest_directory}" PATH) - get_filename_component(name "${dest_directory}" NAME) - install(DIRECTORY "${source_directory}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}" - FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE) - endforeach() - else() - install(DIRECTORY "${source_directory}" DESTINATION "${dest_directory}" FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE) - endif() - -endfunction() - -# Helper function to install specific files and handle multiple build types where the scripts -# should be installed to all build type toolboxes -# -# Arguments: -# source_files: The source files to be installed. -# dest_directory: The destination directory to install to. -function(install_cython_files source_files dest_directory) - - if(GTSAM_BUILD_TYPE_POSTFIXES) - foreach(build_type ${CMAKE_CONFIGURATION_TYPES}) - string(TOUPPER "${build_type}" build_type_upper) - if(${build_type_upper} STREQUAL "RELEASE") - set(build_type_tag "") # Don't create release mode tag on installed directory - else() - set(build_type_tag "${build_type}") - endif() - # Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one - get_filename_component(location "${dest_directory}" PATH) - get_filename_component(name "${dest_directory}" NAME) - install(FILES "${source_files}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}") - endforeach() - else() - install(FILES "${source_files}" DESTINATION "${dest_directory}") - endif() - -endfunction() - diff --git a/cmake/GtsamMatlabWrap.cmake b/cmake/GtsamMatlabWrap.cmake index 5fc829bf2..b76f96a4e 100644 --- a/cmake/GtsamMatlabWrap.cmake +++ b/cmake/GtsamMatlabWrap.cmake @@ -1,46 +1,64 @@ +# Check / set dependent variables for MATLAB wrapper +if(GTSAM_INSTALL_MATLAB_TOOLBOX) + find_package(Matlab COMPONENTS MEX_COMPILER REQUIRED) + if(NOT Matlab_MEX_COMPILER) + message(FATAL_ERROR "Cannot find MEX compiler binary. Please check your Matlab installation and ensure MEX is installed as well.") + endif() + + if(GTSAM_BUILD_TYPE_POSTFIXES) + set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE_UPPER}_POSTFIX}) + endif() + + if(NOT BUILD_SHARED_LIBS) + message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. 
If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.") + endif() +endif() + # Set up cache options option(GTSAM_MEX_BUILD_STATIC_MODULE "Build MATLAB wrapper statically (increases build time)" OFF) set(GTSAM_BUILD_MEX_BINARY_FLAGS "" CACHE STRING "Extra flags for running Matlab MEX compilation") set(GTSAM_TOOLBOX_INSTALL_PATH "" CACHE PATH "Matlab toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/gtsam_toolbox") if(NOT GTSAM_TOOLBOX_INSTALL_PATH) - set(GTSAM_TOOLBOX_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/gtsam_toolbox") + set(GTSAM_TOOLBOX_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/gtsam_toolbox") endif() # GTSAM_MEX_BUILD_STATIC_MODULE is not for Windows - on Windows any static # are already compiled into the library by the linker if(GTSAM_MEX_BUILD_STATIC_MODULE AND WIN32) - message(FATAL_ERROR "GTSAM_MEX_BUILD_STATIC_MODULE should not be set on Windows - the linker already automatically compiles in any dependent static libraries. To create a standalone toolbox package, simply ensure that CMake finds the static versions of all dependent libraries (Boost, etc).") + message(FATAL_ERROR "GTSAM_MEX_BUILD_STATIC_MODULE should not be set on Windows - the linker already automatically compiles in any dependent static libraries. To create a standalone toolbox package, simply ensure that CMake finds the static versions of all dependent libraries (Boost, etc).") endif() -# Try to automatically configure mex path -if(APPLE) - file(GLOB matlab_bin_directories "/Applications/MATLAB*/bin") - set(mex_program_name "mex") -elseif(WIN32) - file(GLOB matlab_bin_directories "C:/Program Files*/MATLAB/*/bin") - set(mex_program_name "mex.bat") -else() - file(GLOB matlab_bin_directories "/usr/local/MATLAB/*/bin") - set(mex_program_name "mex") -endif() -# Run find_program explicitly putting $PATH after our predefined program -# directories using 'ENV PATH' and 'NO_SYSTEM_ENVIRONMENT_PATH' - this prevents -# finding the LaTeX mex program (totally unrelated to MATLAB Mex) when LaTeX is -# on the system path. -list(REVERSE matlab_bin_directories) # Reverse list so the highest version (sorted alphabetically) is preferred -find_program(MEX_COMMAND ${mex_program_name} - PATHS ${matlab_bin_directories} ENV PATH - NO_DEFAULT_PATH) -mark_as_advanced(FORCE MEX_COMMAND) -# Now that we have mex, trace back to find the Matlab installation root -get_filename_component(MEX_COMMAND "${MEX_COMMAND}" REALPATH) -get_filename_component(mex_path "${MEX_COMMAND}" PATH) -if(mex_path MATCHES ".*/win64$") - get_filename_component(MATLAB_ROOT "${mex_path}/../.." ABSOLUTE) -else() - get_filename_component(MATLAB_ROOT "${mex_path}/.." ABSOLUTE) +set(MEX_COMMAND ${Matlab_MEX_COMPILER} CACHE PATH "Path to MATLAB MEX compiler") +set(MATLAB_ROOT ${Matlab_ROOT_DIR} CACHE PATH "Path to MATLAB installation root (e.g. /usr/local/MATLAB/R2012a)") + +# Try to automatically configure mex path from provided custom `bin` path. +if(GTSAM_CUSTOM_MATLAB_PATH) + set(matlab_bin_directory ${GTSAM_CUSTOM_MATLAB_PATH}) + + if(WIN32) + set(mex_program_name "mex.bat") + else() + set(mex_program_name "mex") + endif() + + # Run find_program explicitly putting $PATH after our predefined program + # directories using 'ENV PATH' and 'NO_SYSTEM_ENVIRONMENT_PATH' - this prevents + # finding the LaTeX mex program (totally unrelated to MATLAB Mex) when LaTeX is # on the system path. 
+ find_program(MEX_COMMAND ${mex_program_name} + PATHS ${matlab_bin_directory} ENV PATH + NO_DEFAULT_PATH) + + mark_as_advanced(FORCE MEX_COMMAND) + # Now that we have mex, trace back to find the Matlab installation root + get_filename_component(MEX_COMMAND "${MEX_COMMAND}" REALPATH) + get_filename_component(mex_path "${MEX_COMMAND}" PATH) + if(mex_path MATCHES ".*/win64$") + get_filename_component(MATLAB_ROOT "${mex_path}/../.." ABSOLUTE) + else() + get_filename_component(MATLAB_ROOT "${mex_path}/.." ABSOLUTE) + endif() endif() -set(MATLAB_ROOT "${MATLAB_ROOT}" CACHE PATH "Path to MATLAB installation root (e.g. /usr/local/MATLAB/R2012a)") # User-friendly wrapping function. Builds a mex module from the provided @@ -209,15 +227,34 @@ function(wrap_library_internal interfaceHeader linkLibraries extraIncludeDirs ex # Set up generation of module source file file(MAKE_DIRECTORY "${generated_files_path}") - add_custom_command( + + find_package(PythonInterp + ${GTSAM_PYTHON_VERSION} + EXACT + REQUIRED) + find_package(PythonLibs + ${GTSAM_PYTHON_VERSION} + EXACT + REQUIRED) + + + set(_ignore gtsam::Point2 + gtsam::Point3) + + # set the matlab wrapping script variable + set(MATLAB_WRAP_SCRIPT "${GTSAM_SOURCE_DIR}/wrap/scripts/matlab_wrap.py") + + add_custom_command( OUTPUT ${generated_cpp_file} - DEPENDS ${interfaceHeader} wrap ${module_library_target} ${otherLibraryTargets} ${otherSourcesAndObjects} - COMMAND - wrap --matlab - ${modulePath} - ${moduleName} - ${generated_files_path} - ${matlab_h_path} + DEPENDS ${interfaceHeader} ${module_library_target} ${otherLibraryTargets} ${otherSourcesAndObjects} + COMMAND + ${PYTHON_EXECUTABLE} + ${MATLAB_WRAP_SCRIPT} + --src ${interfaceHeader} + --module_name ${moduleName} + --out ${generated_files_path} + --top_module_namespaces ${moduleName} + --ignore ${_ignore} VERBATIM WORKING_DIRECTORY ${generated_files_path}) diff --git a/cmake/GtsamPrinting.cmake b/cmake/GtsamPrinting.cmake index e53f9c54f..c68679667 100644 --- a/cmake/GtsamPrinting.cmake +++ b/cmake/GtsamPrinting.cmake @@ -1,14 +1,3 @@ -# print configuration variables -# Usage: -#print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ") -function(print_config_flag flag msg) - if (flag) - message(STATUS " ${msg}: Enabled") - else () - message(STATUS " ${msg}: Disabled") - endif () -endfunction() - # Based on https://github.com/jimbraun/XCDF/blob/master/cmake/CMakePadString.cmake function(string_pad RESULT_NAME DESIRED_LENGTH VALUE) string(LENGTH "${VALUE}" VALUE_LENGTH) @@ -26,6 +15,27 @@ endfunction() set(GTSAM_PRINT_SUMMARY_PADDING_LENGTH 50 CACHE STRING "Padding of cmake summary report lines after configuring.") mark_as_advanced(GTSAM_PRINT_SUMMARY_PADDING_LENGTH) +# print configuration variables with automatic padding +# Usage: +# print_config(${GTSAM_BUILD_TESTS} "Build Tests") +function(print_config config msg) + string_pad(padded_config ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${config}") + message(STATUS "${padded_config}: ${msg}") +endfunction() + +# print configuration variable with enabled/disabled value +# Usage: +# print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests ") +function(print_enabled_config config msg) + string_pad(padded_msg ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${msg}") + if (config) + message(STATUS "${padded_msg}: Enabled") + else () + message(STATUS "${padded_msg}: Disabled") + endif () +endfunction() + + # Print " var: ${var}" padding with spaces as needed function(print_padded variable_name) string_pad(padded_prop ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " 
${variable_name}") @@ -36,16 +46,16 @@ endfunction() # Prints all the relevant CMake build options for a given target: function(print_build_options_for_target target_name_) print_padded(GTSAM_COMPILE_FEATURES_PUBLIC) - print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE) + # print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE) print_padded(GTSAM_COMPILE_OPTIONS_PUBLIC) - print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE) + # print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE) print_padded(GTSAM_COMPILE_DEFINITIONS_PUBLIC) foreach(build_type ${GTSAM_CMAKE_CONFIGURATION_TYPES}) string(TOUPPER "${build_type}" build_type_toupper) - print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE_${build_type_toupper}) + # print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE_${build_type_toupper}) print_padded(GTSAM_COMPILE_OPTIONS_PUBLIC_${build_type_toupper}) - print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE_${build_type_toupper}) + # print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE_${build_type_toupper}) print_padded(GTSAM_COMPILE_DEFINITIONS_PUBLIC_${build_type_toupper}) endforeach() endfunction() diff --git a/cmake/GtsamPythonWrap.cmake b/cmake/GtsamPythonWrap.cmake deleted file mode 100644 index 714e37488..000000000 --- a/cmake/GtsamPythonWrap.cmake +++ /dev/null @@ -1,102 +0,0 @@ -#Setup cache options -set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "Target python version for GTSAM python module. Use 'Default' to chose the default version") -set(GTSAM_BUILD_PYTHON_FLAGS "" CACHE STRING "Extra flags for running Matlab PYTHON compilation") -set(GTSAM_PYTHON_INSTALL_PATH "" CACHE PATH "Python toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/borg/python") -if(NOT GTSAM_PYTHON_INSTALL_PATH) - set(GTSAM_PYTHON_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/borg/python") -endif() - -#Author: Paul Furgale Modified by Andrew Melim -function(wrap_python TARGET_NAME PYTHON_MODULE_DIRECTORY) - # # Boost - # find_package(Boost COMPONENTS python filesystem system REQUIRED) - # include_directories(${Boost_INCLUDE_DIRS}) - - # # Find Python - # FIND_PACKAGE(PythonLibs 2.7 REQUIRED) - # INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIRS}) - - IF(APPLE) - # The apple framework headers don't include the numpy headers for some reason. - GET_FILENAME_COMPONENT(REAL_PYTHON_INCLUDE ${PYTHON_INCLUDE_DIRS} REALPATH) - IF( ${REAL_PYTHON_INCLUDE} MATCHES Python.framework) - message("Trying to find extra headers for numpy from ${REAL_PYTHON_INCLUDE}.") - message("Looking in ${REAL_PYTHON_INCLUDE}/../../Extras/lib/python/numpy/core/include/numpy") - FIND_PATH(NUMPY_INCLUDE_DIR arrayobject.h - ${REAL_PYTHON_INCLUDE}/../../Extras/lib/python/numpy/core/include/numpy - ${REAL_PYTHON_INCLUDE}/numpy - ) - IF(${NUMPY_INCLUDE_DIR} MATCHES NOTFOUND) - message("Unable to find numpy include directories: ${NUMPY_INCLUDE_DIR}") - ELSE() - message("Found headers at ${NUMPY_INCLUDE_DIR}") - INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR}) - INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR}/..) 
- ENDIF() - ENDIF() - ENDIF(APPLE) - - if(MSVC) - add_library(${moduleName}_python MODULE ${ARGN}) - set_target_properties(${moduleName}_python PROPERTIES - OUTPUT_NAME ${moduleName}_python - CLEAN_DIRECT_OUTPUT 1 - VERSION 1 - SOVERSION 0 - SUFFIX ".pyd") - target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp - - set(PYLIB_OUTPUT_FILE $) - message(${PYLIB_OUTPUT_FILE}) - get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE) - set(PYLIB_SO_NAME ${PYLIB_OUTPUT_NAME}.pyd) - - ELSE() - # Create a shared library - add_library(${moduleName}_python SHARED ${generated_cpp_file}) - - set_target_properties(${moduleName}_python PROPERTIES - OUTPUT_NAME ${moduleName}_python - CLEAN_DIRECT_OUTPUT 1) - target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp - # On OSX and Linux, the python library must end in the extension .so. Build this - # filename here. - get_property(PYLIB_OUTPUT_FILE TARGET ${moduleName}_python PROPERTY LOCATION) - set(PYLIB_OUTPUT_FILE $) - message(${PYLIB_OUTPUT_FILE}) - get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE) - set(PYLIB_SO_NAME lib${moduleName}_python.so) - ENDIF(MSVC) - - # Installs the library in the gtsam folder, which is used by setup.py to create the gtsam package - set(PYTHON_MODULE_DIRECTORY ${CMAKE_SOURCE_DIR}/python/gtsam) - # Cause the library to be output in the correct directory. - add_custom_command(TARGET ${moduleName}_python - POST_BUILD - COMMAND cp -v ${PYLIB_OUTPUT_FILE} ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME} - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - COMMENT "Copying library files to python directory" ) - - # Cause the library to be output in the correct directory. - add_custom_command(TARGET ${TARGET_NAME} - POST_BUILD - COMMAND cp -v ${PYLIB_OUTPUT_FILE} ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME} - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - COMMENT "Copying library files to python directory" ) - - get_directory_property(AMCF ADDITIONAL_MAKE_CLEAN_FILES) - list(APPEND AMCF ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME}) - set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "${AMCF}") -endfunction(wrap_python) - -# Macro to get list of subdirectories -macro(SUBDIRLIST result curdir) - file(GLOB children RELATIVE ${curdir} ${curdir}/*) - set(dirlist "") - foreach(child ${children}) - if(IS_DIRECTORY ${curdir}/${child}) - list(APPEND dirlist ${child}) - endif() - endforeach() - set(${result} ${dirlist}) -endmacro() diff --git a/cmake/GtsamTesting.cmake b/cmake/GtsamTesting.cmake index 3b42ffa21..573fb696a 100644 --- a/cmake/GtsamTesting.cmake +++ b/cmake/GtsamTesting.cmake @@ -88,36 +88,36 @@ enable_testing() option(GTSAM_BUILD_TESTS "Enable/Disable building of tests" ON) option(GTSAM_BUILD_EXAMPLES_ALWAYS "Build examples with 'make all' (build with 'make examples' if not)" ON) - option(GTSAM_BUILD_TIMING_ALWAYS "Build timing scripts with 'make all' (build with 'make timing' if not" OFF) - - # Add option for combining unit tests - if(MSVC OR XCODE_VERSION) - option(GTSAM_SINGLE_TEST_EXE "Combine unit tests into single executable (faster compile)" ON) - else() - option(GTSAM_SINGLE_TEST_EXE "Combine unit tests into single executable (faster compile)" OFF) - endif() - mark_as_advanced(GTSAM_SINGLE_TEST_EXE) - - # Enable make check (http://www.cmake.org/Wiki/CMakeEmulateMakeCheck) - if(GTSAM_BUILD_TESTS) - add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} -C $ --output-on-failure) - # Also add 
alternative checks using valgrind. - # We don't look for valgrind being installed in the system, since these - # targets are not invoked unless directly instructed by the user. - if (UNIX) - # Run all tests using valgrind: - add_custom_target(check_valgrind) - endif() +option(GTSAM_BUILD_TIMING_ALWAYS "Build timing scripts with 'make all' (build with 'make timing' if not" OFF) + +# Add option for combining unit tests +if(MSVC OR XCODE_VERSION) + option(GTSAM_SINGLE_TEST_EXE "Combine unit tests into single executable (faster compile)" ON) +else() + option(GTSAM_SINGLE_TEST_EXE "Combine unit tests into single executable (faster compile)" OFF) +endif() +mark_as_advanced(GTSAM_SINGLE_TEST_EXE) + +# Enable make check (http://www.cmake.org/Wiki/CMakeEmulateMakeCheck) +if(GTSAM_BUILD_TESTS) + add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} -C $ --output-on-failure) + # Also add alternative checks using valgrind. + # We don't look for valgrind being installed in the system, since these + # targets are not invoked unless directly instructed by the user. + if (UNIX) + # Run all tests using valgrind: + add_custom_target(check_valgrind) + endif() - # Add target to build tests without running - add_custom_target(all.tests) - endif() + # Add target to build tests without running + add_custom_target(all.tests) +endif() - # Add examples target - add_custom_target(examples) +# Add examples target +add_custom_target(examples) - # Add timing target - add_custom_target(timing) +# Add timing target +add_custom_target(timing) # Implementations of this file's macros: diff --git a/cmake/HandleAllocators.cmake b/cmake/HandleAllocators.cmake new file mode 100644 index 000000000..63411b17b --- /dev/null +++ b/cmake/HandleAllocators.cmake @@ -0,0 +1,34 @@ +# Build list of possible allocators +set(possible_allocators "") +if(GTSAM_USE_TBB) + list(APPEND possible_allocators TBB) + set(preferred_allocator TBB) +else() + list(APPEND possible_allocators BoostPool STL) + set(preferred_allocator STL) +endif() +if(GOOGLE_PERFTOOLS_FOUND) + list(APPEND possible_allocators tcmalloc) +endif() + +# Check if current allocator choice is valid and set cache option +list(FIND possible_allocators "${GTSAM_DEFAULT_ALLOCATOR}" allocator_valid) +if(allocator_valid EQUAL -1) + set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator" FORCE) +else() + set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator") +endif() +set_property(CACHE GTSAM_DEFAULT_ALLOCATOR PROPERTY STRINGS ${possible_allocators}) +mark_as_advanced(GTSAM_DEFAULT_ALLOCATOR) + +# Define compile flags depending on allocator +if("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "BoostPool") + set(GTSAM_ALLOCATOR_BOOSTPOOL 1) +elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "STL") + set(GTSAM_ALLOCATOR_STL 1) +elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "TBB") + set(GTSAM_ALLOCATOR_TBB 1) +elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "tcmalloc") + set(GTSAM_ALLOCATOR_STL 1) # tcmalloc replaces malloc, so to use it we use the STL allocator + list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc") +endif() diff --git a/cmake/HandleBoost.cmake b/cmake/HandleBoost.cmake new file mode 100644 index 000000000..e73c2237d --- /dev/null +++ b/cmake/HandleBoost.cmake @@ -0,0 +1,56 @@ +############################################################################### +# Find boost + +# To change the path for boost, you will need to set: +# BOOST_ROOT: path to install prefix for boost +# Boost_NO_SYSTEM_PATHS: set to true to keep the find script 
from ignoring BOOST_ROOT + +if(MSVC) + # By default, boost only builds static libraries on windows + set(Boost_USE_STATIC_LIBS ON) # only find static libs + # If we ever reset above on windows and, ... + # If we use Boost shared libs, disable auto linking. + # Some libraries, at least Boost Program Options, rely on this to export DLL symbols. + if(NOT Boost_USE_STATIC_LIBS) + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK) + endif() + # Virtual memory range for PCH exceeded on VS2015 + if(MSVC_VERSION LESS 1910) # older than VS2017 + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295) + endif() +endif() + + +# Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such. +set(BOOST_FIND_MINIMUM_VERSION 1.58) +set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex) + +find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS}) + +# Required components +if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR + NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY) + message(FATAL_ERROR "Missing required Boost components >= v1.58, please install/upgrade Boost or configure your search paths.") +endif() + +option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF) +# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library) +set(GTSAM_BOOST_LIBRARIES + Boost::serialization + Boost::system + Boost::filesystem + Boost::thread + Boost::date_time + Boost::regex +) +if (GTSAM_DISABLE_NEW_TIMERS) + message("WARNING: GTSAM timing instrumentation manually disabled") + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC DGTSAM_DISABLE_NEW_TIMERS) +else() + if(Boost_TIMER_LIBRARY) + list(APPEND GTSAM_BOOST_LIBRARIES Boost::timer Boost::chrono) + else() + list(APPEND GTSAM_BOOST_LIBRARIES rt) # When using the header-only boost timer library, need -lrt + message("WARNING: GTSAM timing instrumentation will use the older, less accurate, Boost timer library because boost older than 1.48 was found.") + endif() +endif() diff --git a/cmake/HandleCCache.cmake b/cmake/HandleCCache.cmake new file mode 100644 index 000000000..9eabb1905 --- /dev/null +++ b/cmake/HandleCCache.cmake @@ -0,0 +1,14 @@ +############################################################################### +# Support ccache, if installed +if(NOT MSVC AND NOT XCODE_VERSION) + find_program(CCACHE_FOUND ccache) + if(CCACHE_FOUND) + if(GTSAM_BUILD_WITH_CCACHE) + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) + set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache) + else() + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "") + set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "") + endif() + endif(CCACHE_FOUND) +endif() diff --git a/cmake/HandleCPack.cmake b/cmake/HandleCPack.cmake new file mode 100644 index 000000000..1c32433a4 --- /dev/null +++ b/cmake/HandleCPack.cmake @@ -0,0 +1,28 @@ +#JLBC: is all this actually used by someone? could it be removed? 
+ +# Flags for choosing default packaging tools +set(CPACK_SOURCE_GENERATOR "TGZ" CACHE STRING "CPack Default Source Generator") +set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator") + +############################################################################### +# Set up CPack +set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "GTSAM") +set(CPACK_PACKAGE_VENDOR "Frank Dellaert, Georgia Institute of Technology") +set(CPACK_PACKAGE_CONTACT "Frank Dellaert, dellaert@cc.gatech.edu") +set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.md") +set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE") +set(CPACK_PACKAGE_VERSION_MAJOR ${GTSAM_VERSION_MAJOR}) +set(CPACK_PACKAGE_VERSION_MINOR ${GTSAM_VERSION_MINOR}) +set(CPACK_PACKAGE_VERSION_PATCH ${GTSAM_VERSION_PATCH}) +set(CPACK_PACKAGE_INSTALL_DIRECTORY "CMake ${CMake_VERSION_MAJOR}.${CMake_VERSION_MINOR}") +#set(CPACK_INSTALLED_DIRECTORIES "doc;.") # Include doc directory +#set(CPACK_INSTALLED_DIRECTORIES ".") # FIXME: throws error +set(CPACK_SOURCE_IGNORE_FILES "/build*;/\\\\.;/makestats.sh$") +set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/gtsam_unstable/") +set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/package_scripts/") +set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}") +#set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-aspn${GTSAM_VERSION_PATCH}") # Used for creating ASPN tarballs + +# Deb-package specific cpack +set(CPACK_DEBIAN_PACKAGE_NAME "libgtsam-dev") +set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.58)") #Example: "libc6 (>= 2.3.1-6), libgcc1 (>= 1:3.4.2-12)") diff --git a/cmake/HandleEigen.cmake b/cmake/HandleEigen.cmake new file mode 100644 index 000000000..fda441907 --- /dev/null +++ b/cmake/HandleEigen.cmake @@ -0,0 +1,77 @@ +############################################################################### +# Option for using system Eigen or GTSAM-bundled Eigen + +option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" OFF) + +if(NOT GTSAM_USE_SYSTEM_EIGEN) + # This option only makes sense if using the embedded copy of Eigen, it is + # used to decide whether to *install* the "unsupported" module: + option(GTSAM_WITH_EIGEN_UNSUPPORTED "Install Eigen's unsupported modules" OFF) +endif() + +# Switch for using system Eigen or GTSAM-bundled Eigen +if(GTSAM_USE_SYSTEM_EIGEN) + find_package(Eigen3 REQUIRED) + + # Use generic Eigen include paths e.g. + set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}") + + # check if MKL is also enabled - can have one or the other, but not both! + # Note: Eigen >= v3.2.5 includes our patches + if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_LESS 3.2.5)) + message(FATAL_ERROR "MKL requires at least Eigen 3.2.5, and your system appears to have an older version. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, or disable GTSAM_WITH_EIGEN_MKL") + endif() + + # Check for Eigen version which doesn't work with MKL + # See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527 for details. + if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4)) + message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. 
Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.") + endif() + + # The actual include directory (for BUILD cmake target interface): + set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}") +else() + # Use bundled Eigen include path. + # Clear any variables set by FindEigen3 + if(EIGEN3_INCLUDE_DIR) + set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE) + endif() + + # set full path to be used by external projects + # this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in + set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/") + + # The actual include directory (for BUILD cmake target interface): + set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/") +endif() + +# Detect Eigen version: +set(EIGEN_VER_H "${GTSAM_EIGEN_INCLUDE_FOR_BUILD}/Eigen/src/Core/util/Macros.h") +if (EXISTS ${EIGEN_VER_H}) + file(READ "${EIGEN_VER_H}" STR_EIGEN_VERSION) + + # Extract the Eigen version from the Macros.h file, lines "#define EIGEN_WORLD_VERSION XX", etc... + + string(REGEX MATCH "EIGEN_WORLD_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${STR_EIGEN_VERSION}") + string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${GTSAM_EIGEN_VERSION_WORLD}") + + string(REGEX MATCH "EIGEN_MAJOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${STR_EIGEN_VERSION}") + string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${GTSAM_EIGEN_VERSION_MAJOR}") + + string(REGEX MATCH "EIGEN_MINOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${STR_EIGEN_VERSION}") + string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${GTSAM_EIGEN_VERSION_MINOR}") + + set(GTSAM_EIGEN_VERSION "${GTSAM_EIGEN_VERSION_WORLD}.${GTSAM_EIGEN_VERSION_MAJOR}.${GTSAM_EIGEN_VERSION_MINOR}") + + message(STATUS "Found Eigen version: ${GTSAM_EIGEN_VERSION}") +else() + message(WARNING "Cannot determine Eigen version, missing file: `${EIGEN_VER_H}`") +endif () + +if (MSVC) + if (BUILD_SHARED_LIBS) + # mute eigen static assert to avoid errors in shared lib + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) + endif() + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen +endif() diff --git a/cmake/HandleFinalChecks.cmake b/cmake/HandleFinalChecks.cmake new file mode 100644 index 000000000..f91fc7fdb --- /dev/null +++ b/cmake/HandleFinalChecks.cmake @@ -0,0 +1,10 @@ +# Print warnings at the end +if(GTSAM_WITH_TBB AND NOT TBB_FOUND) + message(WARNING "TBB 4.4 or newer was not found - this is ok, but note that GTSAM parallelization will be disabled. Set GTSAM_WITH_TBB to 'Off' to avoid this warning.") +endif() +if(GTSAM_WITH_EIGEN_MKL AND NOT MKL_FOUND) + message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning. See INSTALL.md for notes on performance.") +endif() +if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND) + message(WARNING "Your compiler does not support OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. 
See INSTALL.md for notes on performance.") +endif() diff --git a/cmake/HandleGeneralOptions.cmake b/cmake/HandleGeneralOptions.cmake new file mode 100644 index 000000000..ee86066a2 --- /dev/null +++ b/cmake/HandleGeneralOptions.cmake @@ -0,0 +1,45 @@ +############################################################################### +# Set up options + +# See whether gtsam_unstable is available (it will be present only if we're using a git checkout) +if(EXISTS "${PROJECT_SOURCE_DIR}/gtsam_unstable" AND IS_DIRECTORY "${PROJECT_SOURCE_DIR}/gtsam_unstable") + set(GTSAM_UNSTABLE_AVAILABLE 1) +else() + set(GTSAM_UNSTABLE_AVAILABLE 0) +endif() + +# Configurable Options +if(GTSAM_UNSTABLE_AVAILABLE) + option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" ON) + option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON) + option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF) +endif() +option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON) +option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF) +option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON) +option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON) +option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF) +option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON) +option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF) +option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF) +option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON) +option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF) +option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF) +option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON) +option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON) +option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON) +if(NOT MSVC AND NOT XCODE_VERSION) + option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON) +endif() + +# Enable GTSAM_ROT3_EXPMAP if GTSAM_POSE3_EXPMAP is enabled, and vice versa. +if(GTSAM_POSE3_EXPMAP) + message(STATUS "GTSAM_POSE3_EXPMAP=ON, enabling GTSAM_ROT3_EXPMAP as well") + set(GTSAM_ROT3_EXPMAP 1 CACHE BOOL "" FORCE) +elseif(GTSAM_ROT3_EXPMAP) + message(STATUS "GTSAM_ROT3_EXPMAP=ON, enabling GTSAM_POSE3_EXPMAP as well") + set(GTSAM_POSE3_EXPMAP 1 CACHE BOOL "" FORCE) +endif() + +# Set the default Python version. This is later updated in HandlePython.cmake. +set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of Python to build the wrappers against.") diff --git a/cmake/HandleGlobalBuildFlags.cmake b/cmake/HandleGlobalBuildFlags.cmake new file mode 100644 index 000000000..f33e12b94 --- /dev/null +++ b/cmake/HandleGlobalBuildFlags.cmake @@ -0,0 +1,52 @@ +# JLBC: These should ideally be ported to "modern cmake" via target properties. 
+# + +if (CMAKE_GENERATOR STREQUAL "Ninja" AND + ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) OR + (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5))) + # Force colored warnings in Ninja's output, if the compiler has -fdiagnostics-color support. + # Rationale in https://github.com/ninja-build/ninja/issues/814 + add_compile_options(-fdiagnostics-color=always) +endif() + + +# If building DLLs in MSVC, we need to avoid EIGEN_STATIC_ASSERT() +# or explicit instantiation will generate build errors. +# See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017 +# +if(MSVC AND BUILD_SHARED_LIBS) + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT) +endif() + +if (APPLE AND BUILD_SHARED_LIBS) + # Set the default install directory on macOS + set(CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib") +endif() + +############################################################################### +# Global compile options + +if(MSVC) + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS) + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code +endif() + +# GCC 4.8+ complains about local typedefs which we use for shared_ptr etc. +if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8) + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs) + endif() +endif() + +# As of XCode 7, clang also complains about this +if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0) + list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs) + endif() +endif() + +if(GTSAM_ENABLE_CONSISTENCY_CHECKS) + # This should be made PUBLIC if GTSAM_EXTRA_CONSISTENCY_CHECKS is someday used in a public .h + list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE GTSAM_EXTRA_CONSISTENCY_CHECKS) +endif() diff --git a/cmake/HandleMKL.cmake b/cmake/HandleMKL.cmake new file mode 100644 index 000000000..5d7ec365b --- /dev/null +++ b/cmake/HandleMKL.cmake @@ -0,0 +1,17 @@ +############################################################################### +# Find MKL +find_package(MKL) + +if(MKL_FOUND AND GTSAM_WITH_EIGEN_MKL) + set(GTSAM_USE_EIGEN_MKL 1) # This will go into config.h + set(EIGEN_USE_MKL_ALL 1) # This will go into config.h - it makes Eigen use MKL + list(APPEND GTSAM_ADDITIONAL_LIBRARIES ${MKL_LIBRARIES}) + + # --no-as-needed is required with gcc according to the MKL link advisor + if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-as-needed") + endif() +else() + set(GTSAM_USE_EIGEN_MKL 0) + set(EIGEN_USE_MKL_ALL 0) +endif() diff --git a/cmake/HandleOpenMP.cmake b/cmake/HandleOpenMP.cmake new file mode 100644 index 000000000..4f27aa633 --- /dev/null +++ b/cmake/HandleOpenMP.cmake @@ -0,0 +1,11 @@ + +############################################################################### +# Find OpenMP (if we're also using MKL) +find_package(OpenMP) # do this here to generate correct message if disabled + +if(GTSAM_WITH_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP AND GTSAM_USE_EIGEN_MKL) + if(OPENMP_FOUND AND GTSAM_USE_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP) + 
set(GTSAM_USE_EIGEN_MKL_OPENMP 1) # This will go into config.h + list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC ${OpenMP_CXX_FLAGS}) + endif() +endif() diff --git a/cmake/HandlePerfTools.cmake b/cmake/HandlePerfTools.cmake new file mode 100644 index 000000000..499caf80a --- /dev/null +++ b/cmake/HandlePerfTools.cmake @@ -0,0 +1,4 @@ + +############################################################################### +# Find Google perftools +find_package(GooglePerfTools) diff --git a/cmake/HandlePrintConfiguration.cmake b/cmake/HandlePrintConfiguration.cmake new file mode 100644 index 000000000..4ffd00e54 --- /dev/null +++ b/cmake/HandlePrintConfiguration.cmake @@ -0,0 +1,104 @@ +############################################################################### +# Print configuration variables +message(STATUS "===============================================================") +message(STATUS "================ Configuration Options ======================") +print_config("CMAKE_CXX_COMPILER_ID type" "${CMAKE_CXX_COMPILER_ID}") +print_config("CMAKE_CXX_COMPILER_VERSION" "${CMAKE_CXX_COMPILER_VERSION}") +print_config("CMake version" "${CMAKE_VERSION}") +print_config("CMake generator" "${CMAKE_GENERATOR}") +print_config("CMake build tool" "${CMAKE_BUILD_TOOL}") +message(STATUS "Build flags ") +print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests") +print_enabled_config(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all'") +print_enabled_config(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'") +if (DOXYGEN_FOUND) + print_enabled_config(${GTSAM_BUILD_DOCS} "Build Docs") +endif() +print_enabled_config(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries") +print_enabled_config(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name") +if(GTSAM_UNSTABLE_AVAILABLE) + print_enabled_config(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ") + print_enabled_config(${GTSAM_UNSTABLE_BUILD_PYTHON} "Build GTSAM unstable Python ") + print_enabled_config(${GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX} "Build MATLAB Toolbox for unstable") +endif() + +if(NOT MSVC AND NOT XCODE_VERSION) + print_enabled_config(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ") + print_config("Build type" "${CMAKE_BUILD_TYPE}") + print_config("C compilation flags" "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") + print_config("C++ compilation flags" "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}") +endif() + +print_build_options_for_target(gtsam) + +print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})") + +if(GTSAM_USE_TBB) + print_config("Use Intel TBB" "Yes (Version: ${TBB_VERSION})") +elseif(TBB_FOUND) + print_config("Use Intel TBB" "TBB (Version: ${TBB_VERSION}) found but GTSAM_WITH_TBB is disabled") +else() + print_config("Use Intel TBB" "TBB not found") +endif() +if(GTSAM_USE_EIGEN_MKL) + print_config("Eigen will use MKL" "Yes") +elseif(MKL_FOUND) + print_config("Eigen will use MKL" "MKL found but GTSAM_WITH_EIGEN_MKL is disabled") +else() + print_config("Eigen will use MKL" "MKL not found") +endif() +if(GTSAM_USE_EIGEN_MKL_OPENMP) + print_config("Eigen will use MKL and OpenMP" "Yes") +elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL) + print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled") +elseif(OPENMP_FOUND AND NOT MKL_FOUND) + print_config("Eigen will use MKL and OpenMP" "OpenMP found but MKL not found") +elseif(OPENMP_FOUND) + print_config("Eigen will use MKL 
and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled") +else() + print_config("Eigen will use MKL and OpenMP" "OpenMP not found") +endif() +print_config("Default allocator" "${GTSAM_DEFAULT_ALLOCATOR}") + +if(GTSAM_THROW_CHEIRALITY_EXCEPTION) + print_config("Cheirality exceptions enabled" "YES") +else() + print_config("Cheirality exceptions enabled" "NO") +endif() + +if(NOT MSVC AND NOT XCODE_VERSION) + if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE) + print_config("Build with ccache" "Yes") + elseif(CCACHE_FOUND) + print_config("Build with ccache" "ccache found but GTSAM_BUILD_WITH_CCACHE is disabled") + else() + print_config("Build with ccache" "No") + endif() +endif() + +message(STATUS "Packaging flags") +print_config("CPack Source Generator" "${CPACK_SOURCE_GENERATOR}") +print_config("CPack Generator" "${CPACK_GENERATOR}") + +message(STATUS "GTSAM flags ") +print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ") +print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ") +print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ") +print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ") +print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1") +print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ") +print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration") + +message(STATUS "MATLAB toolbox flags") +print_enabled_config(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ") +if (${GTSAM_INSTALL_MATLAB_TOOLBOX}) + print_config("MATLAB root" "${MATLAB_ROOT}") + print_config("MEX binary" "${MEX_COMMAND}") +endif() +message(STATUS "Python toolbox flags ") +print_enabled_config(${GTSAM_BUILD_PYTHON} "Build Python module with pybind ") +if(GTSAM_BUILD_PYTHON) + print_config("Python version" ${GTSAM_PYTHON_VERSION}) +endif() + +message(STATUS "===============================================================") diff --git a/cmake/HandlePython.cmake b/cmake/HandlePython.cmake new file mode 100644 index 000000000..0c24824bc --- /dev/null +++ b/cmake/HandlePython.cmake @@ -0,0 +1,55 @@ +# Set Python version if either Python or MATLAB wrapper is requested. +if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX) + if(${GTSAM_PYTHON_VERSION} STREQUAL "Default") + + if(${CMAKE_VERSION} VERSION_LESS "3.12.0") + # Use older version of cmake's find_python + find_package(PythonInterp) + + if(NOT ${PYTHONINTERP_FOUND}) + message( + FATAL_ERROR + "Cannot find Python interpreter. Please install Python >= 3.6.") + endif() + + find_package(PythonLibs ${PYTHON_VERSION_STRING}) + + set(Python_VERSION_MAJOR ${PYTHON_VERSION_MAJOR}) + set(Python_VERSION_MINOR ${PYTHON_VERSION_MINOR}) + set(Python_EXECUTABLE ${PYTHON_EXECUTABLE}) + + else() + # Get info about the Python3 interpreter + # https://cmake.org/cmake/help/latest/module/FindPython3.html#module:FindPython3 + find_package(Python3 COMPONENTS Interpreter Development) + + if(NOT ${Python3_FOUND}) + message( + FATAL_ERROR + "Cannot find Python3 interpreter. Please install Python >= 3.6.") + endif() + + set(Python_VERSION_MAJOR ${Python3_VERSION_MAJOR}) + set(Python_VERSION_MINOR ${Python3_VERSION_MINOR}) + + endif() + + set(GTSAM_PYTHON_VERSION + "${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}" + CACHE STRING "The version of Python to build the wrappers against." 
+ FORCE) + + endif() +endif() + +# Check for build of Unstable modules +if(GTSAM_BUILD_PYTHON) + if(GTSAM_UNSTABLE_BUILD_PYTHON) + if (NOT GTSAM_BUILD_UNSTABLE) + message(WARNING "GTSAM_UNSTABLE_BUILD_PYTHON requires the unstable module to be enabled.") + set(GTSAM_UNSTABLE_BUILD_PYTHON OFF) + endif() + endif() + + set(GTSAM_PY_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/python") +endif() diff --git a/cmake/HandleTBB.cmake b/cmake/HandleTBB.cmake new file mode 100644 index 000000000..cedee55ea --- /dev/null +++ b/cmake/HandleTBB.cmake @@ -0,0 +1,24 @@ +############################################################################### +# Find TBB +find_package(TBB 4.4 COMPONENTS tbb tbbmalloc) + +# Set up variables if we're using TBB +if(TBB_FOUND AND GTSAM_WITH_TBB) + set(GTSAM_USE_TBB 1) # This will go into config.h + if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020)) + set(TBB_GREATER_EQUAL_2020 1) + else() + set(TBB_GREATER_EQUAL_2020 0) + endif() + # all definitions and link requisites will go via imported targets: + # tbb & tbbmalloc + list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc) +else() + set(GTSAM_USE_TBB 0) # This will go into config.h +endif() + +############################################################################### +# Prohibit Timing build mode in combination with TBB +if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE STREQUAL "Timing")) + message(FATAL_ERROR "Timing build mode cannot be used together with TBB. Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.") +endif() diff --git a/cmake/HandleUninstall.cmake b/cmake/HandleUninstall.cmake new file mode 100644 index 000000000..1859b0273 --- /dev/null +++ b/cmake/HandleUninstall.cmake @@ -0,0 +1,10 @@ +# ---------------------------------------------------------------------------- +# Uninstall target, for "make uninstall" +# ---------------------------------------------------------------------------- +configure_file( + "${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in" + "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" + IMMEDIATE @ONLY) + +add_custom_target(uninstall + "${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake") diff --git a/cmake/dllexport.h.in b/cmake/dllexport.h.in index 9a0a344b7..7d757edea 100644 --- a/cmake/dllexport.h.in +++ b/cmake/dllexport.h.in @@ -47,9 +47,14 @@ # endif # endif #else +#ifdef __APPLE__ +# define @library_name@_EXPORT __attribute__((visibility("default"))) +# define @library_name@_EXTERN_EXPORT extern +#else # define @library_name@_EXPORT # define @library_name@_EXTERN_EXPORT extern #endif +#endif #undef BUILD_SHARED_LIBS diff --git a/cython/CMakeLists.txt b/cython/CMakeLists.txt deleted file mode 100644 index 4cc9d2f5d..000000000 --- a/cython/CMakeLists.txt +++ /dev/null @@ -1,48 +0,0 @@ -# Install cython components -include(GtsamCythonWrap) - -# Create the cython toolbox for the gtsam library -if (GTSAM_INSTALL_CYTHON_TOOLBOX) - # build and include the eigency version of eigency - add_subdirectory(gtsam_eigency) - include_directories(${PROJECT_BINARY_DIR}/cython/gtsam_eigency) - - # Fix for error "C1128: number of sections exceeded object file format limit" - if(MSVC) - add_compile_options(/bigobj) - endif() - - # wrap gtsam - add_custom_target(gtsam_header DEPENDS "../gtsam.h") - wrap_and_install_library_cython("../gtsam.h" # interface_header - "" # extra imports - "${GTSAM_CYTHON_INSTALL_PATH}/gtsam" # install path - gtsam # library to link with - "wrap;cythonize_eigency;gtsam;gtsam_header" # dependencies 
which need to be built before wrapping - ) - - # wrap gtsam_unstable - if(GTSAM_BUILD_UNSTABLE) - add_custom_target(gtsam_unstable_header DEPENDS "../gtsam_unstable/gtsam_unstable.h") - wrap_and_install_library_cython("../gtsam_unstable/gtsam_unstable.h" # interface_header - "from gtsam.gtsam cimport *" # extra imports - "${GTSAM_CYTHON_INSTALL_PATH}/gtsam_unstable" # install path - gtsam_unstable # library to link with - "gtsam_unstable;gtsam_unstable_header;cythonize_gtsam" # dependencies to be built before wrapping - ) - endif() - - file(READ "${PROJECT_SOURCE_DIR}/cython/requirements.txt" CYTHON_INSTALL_REQUIREMENTS) - file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS) - - # Install the custom-generated __init__.py - # This is to make the build/cython/gtsam folder a python package, so gtsam can be found while wrapping gtsam_unstable - configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam/__init__.py COPYONLY) - configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam_unstable/__init__.py COPYONLY) - configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${PROJECT_BINARY_DIR}/cython/setup.py) - install_cython_files("${PROJECT_BINARY_DIR}/cython/setup.py" "${GTSAM_CYTHON_INSTALL_PATH}") - # install scripts and tests - install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py") - install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam_unstable" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py") - -endif () diff --git a/cython/README.md b/cython/README.md deleted file mode 100644 index bc6e346d9..000000000 --- a/cython/README.md +++ /dev/null @@ -1,155 +0,0 @@ -# Python Wrapper - -This is the Cython/Python wrapper around the GTSAM C++ library. - -## Install - -- if you want to build the gtsam python library for a specific python version (eg 2.7), use the `-DGTSAM_PYTHON_VERSION=2.7` option when running `cmake` otherwise the default interpreter will be used. - - If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment) then the environment should be active while building gtsam. -- This wrapper needs Cython(>=0.25.2), backports_abc>=0.5, and numpy. These can be installed as follows: - -```bash - pip install -r /cython/requirements.txt -``` - -- For compatibility with gtsam's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git), -named **gtsam_eigency**, to interface between C++'s Eigen and Python's numpy. - -- Build and install gtsam using cmake with `GTSAM_INSTALL_CYTHON_TOOLBOX` enabled. -The wrapped module will be installed to `GTSAM_CYTHON_INSTALL_PATH`, which is -by default: `/cython` - -- To use the library without installing system-wide: modify your `PYTHONPATH` to include the `GTSAM_CYTHON_INSTALL_PATH`: -```bash -export PYTHONPATH=$PYTHONPATH: -``` -- To install system-wide: run `make install` then navigate to `GTSAM_CYTHON_INSTALL_PATH` and run `python setup.py install` - - (the same command can be used to install into a virtual environment if it is active) - - note: if you don't want gtsam to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install gtsam to a subdirectory of the build directory. - - if you run `setup.py` from the build directory rather than the installation directory, the script will warn you with the message: `setup.py is being run from an unexpected location`. 
- Before `make install` is run, not all the components of the package have been copied across, so running `setup.py` from the build directory would result in an incomplete package. - -## Unit Tests - -The Cython toolbox also has a small set of unit tests located in the -test directory. To run them: - -```bash - cd - python -m unittest discover -``` - -## Writing Your Own Scripts - -See the tests for examples. - -### Some Important Notes: - -- Vector/Matrix: - + GTSAM expects double-precision floating point vectors and matrices. - Hence, you should pass numpy matrices with dtype=float, or 'float64'. - + Also, GTSAM expects *column-major* matrices, unlike the default storage - scheme in numpy. Hence, you should pass column-major matrices to gtsam using - the flag order='F'. And you always get column-major matrices back. - For more details, see: https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed - + Passing row-major matrices of different dtype, e.g. 'int', will also work - as the wrapper converts them to column-major and dtype float for you, - using numpy.array.astype(float, order='F', copy=False). - However, this will result a copy if your matrix is not in the expected type - and storage order. - -- Inner namespace: Classes in inner namespace will be prefixed by _ in Python. -Examples: noiseModel_Gaussian, noiseModel_mEstimator_Tukey - -- Casting from a base class to a derive class must be done explicitly. -Examples: -```Python - noiseBase = factor.noiseModel() - noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase) -``` - -## Wrapping Your Own Project That Uses GTSAM - -- Set PYTHONPATH to include ${GTSAM_CYTHON_INSTALL_PATH} - + so that it can find gtsam Cython header: gtsam/gtsam.pxd - -- In your CMakeList.txt -```cmake -find_package(GTSAM REQUIRED) # Make sure gtsam's install folder is in your PATH -set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${GTSAM_DIR}/../GTSAMCMakeTools") - -# Wrap -include(GtsamCythonWrap) -include_directories(${GTSAM_EIGENCY_INSTALL_PATH}) -wrap_and_install_library_cython("your_project_interface.h" - "from gtsam.gtsam cimport *" # extra import of gtsam/gtsam.pxd Cython header - "your_install_path" - "libraries_to_link_with_the_cython_module" - "dependencies_which_need_to_be_built_before_the_wrapper" - ) -#Optional: install_cython_scripts and install_cython_files. See GtsamCythonWrap.cmake. -``` - -## KNOWN ISSUES - - - Doesn't work with python3 installed from homebrew - - size-related issue: can only wrap up to a certain number of classes: up to mEstimator! - - Guess: 64 vs 32b? disutils Compiler flags? - - Bug with Cython 0.24: instantiated factor classes return FastVector for keys(), which can't be casted to FastVector - - Upgrading to 0.25 solves the problem - - Need default constructor and default copy constructor for almost every classes... :( - - support these constructors by default and declare "delete" for special classes? - - -### TODO - -- [ ] allow duplication of parent' functions in child classes. Not allowed for now due to conflicts in Cython. -- [ ] a common header for boost shared_ptr? (Or wait until everything is switched to std::shared_ptr in gtsam?) -- [ ] inner namespaces ==> inner packages? -- [ ] Wrap fixed-size Matrices/Vectors? - - -### Completed/Cancelled: - -- [x] Fix Python tests: don't use " import * ": Bad style!!! 
(18-03-17 19:50) -- [x] Unit tests for cython wrappers @done (18-03-17 18:45) -- simply compare generated files -- [x] Wrap unstable @done (18-03-17 15:30) -- [x] Unify cython/gtsam.h and the original gtsam.h @done (18-03-17 15:30) -- [x] 18-03-17: manage to unify the two versions by removing std container stubs from the matlab version,and keeping KeyList/KeyVector/KeySet as in the matlab version. Probably Cython 0.25 fixes the casting problem. -- [x] 06-03-17: manage to remove the requirements for default and copy constructors -- [ ] 25-11-16: Try to unify but failed. Main reasons are: Key/size_t, std containers, KeyVector/KeyList/KeySet. Matlab doesn't need to know about Key, but I can't make Cython to ignore Key as it couldn't cast KeyVector, i.e. FastVector, to FastVector. -- [ ] Marginal and JointMarginal: revert changes @failed (17-03-17 11:00) -- Cython does need a default constructor! It produces cpp code like this: ```gtsam::JointMarginal __pyx_t_1;``` Users don't have to wrap this constructor, however. -- [x] Convert input numpy Matrix/Vector to float dtype and storage order 'F' automatically, cannot crash! @done (15-03-17 13:00) -- [x] Remove requirements.txt - Frank: don't bother with only 2 packages and a special case for eigency! @done (08-03-17 10:30) -- [x] CMake install script @done (25-11-16 02:30) -- [ ] [REFACTOR] better name for uninstantiateClass: very vague!! @cancelled (25-11-16 02:30) -- lazy -- [ ] forward declaration? @cancelled (23-11-16 13:00) - nothing to do, seem to work? -- [x] wrap VariableIndex: why is it in inference? If need to, shouldn't have constructors to specific FactorGraphs @done (23-11-16 13:00) -- [x] Global functions @done (22-11-16 21:00) -- [x] [REFACTOR] typesEqual --> isSameSignature @done (22-11-16 21:00) -- [x] Proper overloads (constructors, static methods, methods) @done (20-11-16 21:00) -- [x] Allow overloading methods. The current solution is annoying!!! @done (20-11-16 21:00) -- [x] Casting from parent and grandparents @done (16-11-16 17:00) -- [x] Allow overloading constructors. The current solution is annoying!!! @done (16-11-16 17:00) -- [x] Support "print obj" @done (16-11-16 17:00) -- [x] methods for FastVector: at, [], ... @done (16-11-16 17:00) -- [x] Cython: Key and size_t: traits doesn't exist @done (16-09-12 18:34) -- [x] KeyVector, KeyList, KeySet... @done (16-09-13 17:19) -- [x] [Nice to have] parse typedef @done (16-09-13 17:19) -- [x] ctypedef at correct places @done (16-09-12 18:34) -- [x] expand template variable type in constructor/static methods? @done (16-09-12 18:34) -- [x] NonlinearOptimizer: copy constructor deleted!!! @done (16-09-13 17:20) -- [x] Value: no default constructor @done (16-09-13 17:20) -- [x] ctypedef PriorFactor[Vector] PriorFactorVector @done (16-09-19 12:25) -- [x] Delete duplicate methods in derived class @done (16-09-12 13:38) -- [x] Fix return properly @done (16-09-11 17:14) -- [x] handle pair @done (16-09-11 17:14) -- [x] Eigency: ambiguous call: A(const T&) A(const Vector& v) and Eigency A(Map[Vector]& v) @done (16-09-11 07:59) -- [x] Eigency: Constructor: ambiguous construct from Vector/Matrix @done (16-09-11 07:59) -- [x] Eigency: Fix method template of Vector/Matrix: template argument is [Vector] while arugment is Map[Vector] @done (16-09-11 08:22) -- [x] Robust noise: copy assignment operator is deleted because of shared_ptr of the abstract Base class @done (16-09-10 09:05) -- [ ] Cython: Constructor: generate default constructor? (hack: if it's serializable?) 
@cancelled (16-09-13 17:20) -- [ ] Eigency: Map[] to Block @created(16-09-10 07:59) @cancelled (16-09-11 08:28) - -- inference before symbolic/linear -- what's the purpose of "virtual" ?? diff --git a/cython/gtsam/__init__.py b/cython/gtsam/__init__.py deleted file mode 100644 index d40ee4502..000000000 --- a/cython/gtsam/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -from .gtsam import * - -try: - import gtsam_unstable - - - def _deprecated_wrapper(item, name): - def wrapper(*args, **kwargs): - from warnings import warn - message = ('importing the unstable item "{}" directly from gtsam is deprecated. '.format(name) + - 'Please import it from gtsam_unstable.') - warn(message) - return item(*args, **kwargs) - return wrapper - - - for name in dir(gtsam_unstable): - if not name.startswith('__'): - item = getattr(gtsam_unstable, name) - if callable(item): - item = _deprecated_wrapper(item, name) - globals()[name] = item - -except ImportError: - pass - diff --git a/cython/gtsam/examples/ImuFactorExample.py b/cython/gtsam/examples/ImuFactorExample.py deleted file mode 100644 index 0e01766e7..000000000 --- a/cython/gtsam/examples/ImuFactorExample.py +++ /dev/null @@ -1,153 +0,0 @@ -""" -GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, -Atlanta, Georgia 30332-0415 -All Rights Reserved - -See LICENSE for the license information - -A script validating and demonstrating the ImuFactor inference. - -Author: Frank Dellaert, Varun Agrawal -""" - -from __future__ import print_function - -import math - -import gtsam -import matplotlib.pyplot as plt -import numpy as np -from gtsam import symbol_shorthand_B as B -from gtsam import symbol_shorthand_V as V -from gtsam import symbol_shorthand_X as X -from gtsam.utils.plot import plot_pose3 -from mpl_toolkits.mplot3d import Axes3D - -from PreintegrationExample import POSES_FIG, PreintegrationExample - -BIAS_KEY = B(0) - - -np.set_printoptions(precision=3, suppress=True) - - -class ImuFactorExample(PreintegrationExample): - - def __init__(self): - self.velocity = np.array([2, 0, 0]) - self.priorNoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) - self.velNoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) - - # Choose one of these twists to change scenario: - zero_twist = (np.zeros(3), np.zeros(3)) - forward_twist = (np.zeros(3), self.velocity) - loop_twist = (np.array([0, -math.radians(30), 0]), self.velocity) - sick_twist = ( - np.array([math.radians(30), -math.radians(30), 0]), self.velocity) - - accBias = np.array([-0.3, 0.1, 0.2]) - gyroBias = np.array([0.1, 0.3, -0.1]) - bias = gtsam.imuBias_ConstantBias(accBias, gyroBias) - - dt = 1e-2 - super(ImuFactorExample, self).__init__(sick_twist, bias, dt) - - def addPrior(self, i, graph): - state = self.scenario.navState(i) - graph.push_back(gtsam.PriorFactorPose3( - X(i), state.pose(), self.priorNoise)) - graph.push_back(gtsam.PriorFactorVector( - V(i), state.velocity(), self.velNoise)) - - def run(self): - graph = gtsam.NonlinearFactorGraph() - - # initialize data structure for pre-integrated IMU measurements - pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias) - - T = 12 - num_poses = T + 1 # assumes 1 factor per second - initial = gtsam.Values() - initial.insert(BIAS_KEY, self.actualBias) - for i in range(num_poses): - state_i = self.scenario.navState(float(i)) - - poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1) - pose = state_i.pose().compose(poseNoise) - - velocity = state_i.velocity() + np.random.randn(3)*0.1 - - initial.insert(X(i), pose) - initial.insert(V(i), velocity) - - 
# simulate the loop - i = 0 # state index - actual_state_i = self.scenario.navState(0) - for k, t in enumerate(np.arange(0, T, self.dt)): - # get measurements and add them to PIM - measuredOmega = self.runner.measuredAngularVelocity(t) - measuredAcc = self.runner.measuredSpecificForce(t) - pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt) - - poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1) - - actual_state_i = gtsam.NavState( - actual_state_i.pose().compose(poseNoise), - actual_state_i.velocity() + np.random.randn(3)*0.1) - - # Plot IMU many times - if k % 10 == 0: - self.plotImu(t, measuredOmega, measuredAcc) - - # Plot every second - if k % int(1 / self.dt) == 0: - self.plotGroundTruthPose(t) - - # create IMU factor every second - if (k + 1) % int(1 / self.dt) == 0: - factor = gtsam.ImuFactor(X(i), V(i), X( - i + 1), V(i + 1), BIAS_KEY, pim) - graph.push_back(factor) - if True: - print(factor) - print(pim.predict(actual_state_i, self.actualBias)) - pim.resetIntegration() - actual_state_i = self.scenario.navState(t + self.dt) - i += 1 - - # add priors on beginning and end - self.addPrior(0, graph) - self.addPrior(num_poses - 1, graph) - - # optimize using Levenberg-Marquardt optimization - params = gtsam.LevenbergMarquardtParams() - params.setVerbosityLM("SUMMARY") - optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params) - result = optimizer.optimize() - - # Calculate and print marginal covariances - marginals = gtsam.Marginals(graph, result) - print("Covariance on bias:\n", marginals.marginalCovariance(BIAS_KEY)) - for i in range(num_poses): - print("Covariance on pose {}:\n{}\n".format( - i, marginals.marginalCovariance(X(i)))) - print("Covariance on vel {}:\n{}\n".format( - i, marginals.marginalCovariance(V(i)))) - - # Plot resulting poses - i = 0 - while result.exists(X(i)): - pose_i = result.atPose3(X(i)) - plot_pose3(POSES_FIG, pose_i, 0.1) - i += 1 - - gtsam.utils.plot.set_axes_equal(POSES_FIG) - - print(result.atimuBias_ConstantBias(BIAS_KEY)) - - plt.ioff() - plt.show() - - -if __name__ == '__main__': - ImuFactorExample().run() diff --git a/cython/gtsam_eigency/CMakeLists.txt b/cython/gtsam_eigency/CMakeLists.txt deleted file mode 100644 index 77bead834..000000000 --- a/cython/gtsam_eigency/CMakeLists.txt +++ /dev/null @@ -1,49 +0,0 @@ -include(GtsamCythonWrap) - -# Copy eigency's sources to the build folder -# so that the cython-generated header "conversions_api.h" can be found when cythonizing eigency's core -# and eigency's cython pxd headers can be found when cythonizing gtsam -file(COPY "." DESTINATION ".") -set(OUTPUT_DIR "${PROJECT_BINARY_DIR}/cython/gtsam_eigency") -set(EIGENCY_INCLUDE_DIR ${OUTPUT_DIR}) - -# This is to make the build/cython/gtsam_eigency folder a python package -configure_file(__init__.py.in ${PROJECT_BINARY_DIR}/cython/gtsam_eigency/__init__.py) - -# include eigency headers -include_directories(${EIGENCY_INCLUDE_DIR}) - -# Cythonize and build eigency -message(STATUS "Cythonize and build eigency") -# Important trick: use "../gtsam_eigency/conversions.pyx" to let cython know that the conversions module is -# a part of the gtsam_eigency package and generate the function call import_gtsam_igency__conversions() -# in conversions_api.h correctly!!! 
-cythonize(cythonize_eigency_conversions "../gtsam_eigency/conversions.pyx" "conversions" - "${OUTPUT_DIR}" "${EIGENCY_INCLUDE_DIR}" "" "" "") -cythonize(cythonize_eigency_core "../gtsam_eigency/core.pyx" "core" - ${OUTPUT_DIR} "${EIGENCY_INCLUDE_DIR}" "" "" "") - -# Include Eigen headers: -target_include_directories(cythonize_eigency_conversions PUBLIC - $ - $ -) -target_include_directories(cythonize_eigency_core PUBLIC - $ - $ -) - -add_dependencies(cythonize_eigency_core cythonize_eigency_conversions) -add_custom_target(cythonize_eigency) -add_dependencies(cythonize_eigency cythonize_eigency_conversions cythonize_eigency_core) - -# install -install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - DESTINATION "${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}" - PATTERN "CMakeLists.txt" EXCLUDE - PATTERN "__init__.py.in" EXCLUDE) -install(TARGETS cythonize_eigency_core cythonize_eigency_conversions - DESTINATION "${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency") -install(FILES ${OUTPUT_DIR}/conversions_api.h DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency) -configure_file(__init__.py.in ${OUTPUT_DIR}/__init__.py) -install(FILES ${OUTPUT_DIR}/__init__.py DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency) diff --git a/cython/gtsam_eigency/LICENSE.txt b/cython/gtsam_eigency/LICENSE.txt deleted file mode 100644 index 71743c864..000000000 --- a/cython/gtsam_eigency/LICENSE.txt +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2016 Wouter Boomsma - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/cython/gtsam_eigency/__init__.py.in b/cython/gtsam_eigency/__init__.py.in deleted file mode 100644 index a59d51eab..000000000 --- a/cython/gtsam_eigency/__init__.py.in +++ /dev/null @@ -1,13 +0,0 @@ -import os -import numpy as np - -__eigen_dir__ = "${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}" - -def get_includes(include_eigen=True): - root = os.path.dirname(__file__) - parent = os.path.join(root, "..") - path = [root, parent, np.get_include()] - if include_eigen: - path.append(os.path.join(root, __eigen_dir__)) - return path - diff --git a/cython/gtsam_eigency/conversions.pxd b/cython/gtsam_eigency/conversions.pxd deleted file mode 100644 index f4445e585..000000000 --- a/cython/gtsam_eigency/conversions.pxd +++ /dev/null @@ -1,62 +0,0 @@ -cimport numpy as np - -cdef api np.ndarray[double, ndim=2] ndarray_double_C(double *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[double, ndim=2] ndarray_double_F(double *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[double, ndim=2] ndarray_copy_double_C(const double *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[double, ndim=2] ndarray_copy_double_F(const double *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[float, ndim=2] ndarray_float_C(float *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[float, ndim=2] ndarray_float_F(float *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[float, ndim=2] ndarray_copy_float_C(const float *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[float, ndim=2] ndarray_copy_float_F(const float *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[long, ndim=2] ndarray_long_C(long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[long, ndim=2] ndarray_long_F(long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[long, ndim=2] ndarray_copy_long_C(const long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[long, ndim=2] ndarray_copy_long_F(const long *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[unsigned long, ndim=2] ndarray_ulong_C(unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned long, ndim=2] ndarray_ulong_F(unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_C(const unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_F(const unsigned long *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[int, ndim=2] ndarray_int_C(int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[int, ndim=2] ndarray_int_F(int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[int, ndim=2] ndarray_copy_int_C(const int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[int, ndim=2] ndarray_copy_int_F(const int *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[unsigned int, ndim=2] ndarray_uint_C(unsigned int *data, long rows, long cols, long outer_stride, 
long inner_stride) -cdef api np.ndarray[unsigned int, ndim=2] ndarray_uint_F(unsigned int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_C(const unsigned int *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_F(const unsigned int *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[short, ndim=2] ndarray_short_C(short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[short, ndim=2] ndarray_short_F(short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[short, ndim=2] ndarray_copy_short_C(const short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[short, ndim=2] ndarray_copy_short_F(const short *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[unsigned short, ndim=2] ndarray_ushort_C(unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned short, ndim=2] ndarray_ushort_F(unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_C(const unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_F(const unsigned short *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[signed char, ndim=2] ndarray_schar_C(signed char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[signed char, ndim=2] ndarray_schar_F(signed char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[signed char, ndim=2] ndarray_copy_schar_C(const signed char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[signed char, ndim=2] ndarray_copy_schar_F(const signed char *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[unsigned char, ndim=2] ndarray_uchar_C(unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned char, ndim=2] ndarray_uchar_F(unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_C(const unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_F(const unsigned char *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_C(np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_F(np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_C(const np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_F(const np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride) - -cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_C(np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api 
np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_F(np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_C(const np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) -cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_F(const np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride) - diff --git a/cython/gtsam_eigency/conversions.pyx b/cython/gtsam_eigency/conversions.pyx deleted file mode 100644 index 55c9ae0cd..000000000 --- a/cython/gtsam_eigency/conversions.pyx +++ /dev/null @@ -1,327 +0,0 @@ -cimport cython -import numpy as np -from numpy.lib.stride_tricks import as_strided - -@cython.boundscheck(False) -cdef np.ndarray[double, ndim=2] ndarray_double_C(double *data, long rows, long cols, long row_stride, long col_stride): - cdef double[:,:] mem_view = data - dtype = 'double' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[double, ndim=2] ndarray_double_F(double *data, long rows, long cols, long row_stride, long col_stride): - cdef double[::1,:] mem_view = data - dtype = 'double' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[double, ndim=2] ndarray_copy_double_C(const double *data, long rows, long cols, long row_stride, long col_stride): - cdef double[:,:] mem_view = data - dtype = 'double' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[double, ndim=2] ndarray_copy_double_F(const double *data, long rows, long cols, long row_stride, long col_stride): - cdef double[::1,:] mem_view = data - dtype = 'double' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[float, ndim=2] ndarray_float_C(float *data, long rows, long cols, long row_stride, long col_stride): - cdef float[:,:] mem_view = data - dtype = 'float' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[float, ndim=2] ndarray_float_F(float *data, long rows, long cols, long row_stride, long col_stride): - cdef float[::1,:] mem_view = data - dtype = 'float' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[float, ndim=2] ndarray_copy_float_C(const float *data, long rows, long cols, long row_stride, long col_stride): - cdef float[:,:] mem_view = data - dtype = 'float' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[float, ndim=2] ndarray_copy_float_F(const float *data, long rows, long cols, long row_stride, long 
col_stride): - cdef float[::1,:] mem_view = data - dtype = 'float' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[long, ndim=2] ndarray_long_C(long *data, long rows, long cols, long row_stride, long col_stride): - cdef long[:,:] mem_view = data - dtype = 'int_' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[long, ndim=2] ndarray_long_F(long *data, long rows, long cols, long row_stride, long col_stride): - cdef long[::1,:] mem_view = data - dtype = 'int_' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[long, ndim=2] ndarray_copy_long_C(const long *data, long rows, long cols, long row_stride, long col_stride): - cdef long[:,:] mem_view = data - dtype = 'int_' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[long, ndim=2] ndarray_copy_long_F(const long *data, long rows, long cols, long row_stride, long col_stride): - cdef long[::1,:] mem_view = data - dtype = 'int_' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[unsigned long, ndim=2] ndarray_ulong_C(unsigned long *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned long[:,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[unsigned long, ndim=2] ndarray_ulong_F(unsigned long *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned long[::1,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_C(const unsigned long *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned long[:,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_F(const unsigned long *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned long[::1,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[int, ndim=2] ndarray_int_C(int *data, long rows, long cols, long row_stride, long col_stride): - cdef int[:,:] mem_view = data - dtype = 'int' - cdef int itemsize = 
np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[int, ndim=2] ndarray_int_F(int *data, long rows, long cols, long row_stride, long col_stride): - cdef int[::1,:] mem_view = data - dtype = 'int' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[int, ndim=2] ndarray_copy_int_C(const int *data, long rows, long cols, long row_stride, long col_stride): - cdef int[:,:] mem_view = data - dtype = 'int' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[int, ndim=2] ndarray_copy_int_F(const int *data, long rows, long cols, long row_stride, long col_stride): - cdef int[::1,:] mem_view = data - dtype = 'int' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[unsigned int, ndim=2] ndarray_uint_C(unsigned int *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned int[:,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[unsigned int, ndim=2] ndarray_uint_F(unsigned int *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned int[::1,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_C(const unsigned int *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned int[:,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_F(const unsigned int *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned int[::1,:] mem_view = data - dtype = 'uint' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[short, ndim=2] ndarray_short_C(short *data, long rows, long cols, long row_stride, long col_stride): - cdef short[:,:] mem_view = data - dtype = 'short' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[short, ndim=2] ndarray_short_F(short *data, long rows, long cols, long row_stride, long col_stride): - cdef short[::1,:] mem_view = data - dtype = 'short' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, 
col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[short, ndim=2] ndarray_copy_short_C(const short *data, long rows, long cols, long row_stride, long col_stride): - cdef short[:,:] mem_view = data - dtype = 'short' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[short, ndim=2] ndarray_copy_short_F(const short *data, long rows, long cols, long row_stride, long col_stride): - cdef short[::1,:] mem_view = data - dtype = 'short' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[unsigned short, ndim=2] ndarray_ushort_C(unsigned short *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned short[:,:] mem_view = data - dtype = 'ushort' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[unsigned short, ndim=2] ndarray_ushort_F(unsigned short *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned short[::1,:] mem_view = data - dtype = 'ushort' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_C(const unsigned short *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned short[:,:] mem_view = data - dtype = 'ushort' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_F(const unsigned short *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned short[::1,:] mem_view = data - dtype = 'ushort' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[signed char, ndim=2] ndarray_schar_C(signed char *data, long rows, long cols, long row_stride, long col_stride): - cdef signed char[:,:] mem_view = data - dtype = 'int8' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[signed char, ndim=2] ndarray_schar_F(signed char *data, long rows, long cols, long row_stride, long col_stride): - cdef signed char[::1,:] mem_view = data - dtype = 'int8' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[signed char, ndim=2] ndarray_copy_schar_C(const signed char *data, long rows, long cols, long row_stride, long col_stride): - cdef signed char[:,:] mem_view = data - dtype = 'int8' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, 
col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[signed char, ndim=2] ndarray_copy_schar_F(const signed char *data, long rows, long cols, long row_stride, long col_stride): - cdef signed char[::1,:] mem_view = data - dtype = 'int8' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[unsigned char, ndim=2] ndarray_uchar_C(unsigned char *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned char[:,:] mem_view = data - dtype = 'uint8' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[unsigned char, ndim=2] ndarray_uchar_F(unsigned char *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned char[::1,:] mem_view = data - dtype = 'uint8' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_C(const unsigned char *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned char[:,:] mem_view = data - dtype = 'uint8' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_F(const unsigned char *data, long rows, long cols, long row_stride, long col_stride): - cdef unsigned char[::1,:] mem_view = data - dtype = 'uint8' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_C(np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex128_t[:,:] mem_view = data - dtype = 'complex128' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_F(np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex128_t[::1,:] mem_view = data - dtype = 'complex128' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_C(const np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex128_t[:,:] mem_view = data - dtype = 'complex128' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_F(const np.complex128_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex128_t[::1,:] mem_view = data - dtype = 'complex128' - cdef int itemsize = 
np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - - -@cython.boundscheck(False) -cdef np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_C(np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex64_t[:,:] mem_view = data - dtype = 'complex64' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]) -@cython.boundscheck(False) -cdef np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_F(np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex64_t[::1,:] mem_view = data - dtype = 'complex64' - cdef int itemsize = np.dtype(dtype).itemsize - return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]) - -@cython.boundscheck(False) -cdef np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_C(const np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex64_t[:,:] mem_view = data - dtype = 'complex64' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])) -@cython.boundscheck(False) -cdef np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_F(const np.complex64_t *data, long rows, long cols, long row_stride, long col_stride): - cdef np.complex64_t[::1,:] mem_view = data - dtype = 'complex64' - cdef int itemsize = np.dtype(dtype).itemsize - return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])) - diff --git a/cython/gtsam_eigency/core.pxd b/cython/gtsam_eigency/core.pxd deleted file mode 100644 index 9a84c3c16..000000000 --- a/cython/gtsam_eigency/core.pxd +++ /dev/null @@ -1,917 +0,0 @@ -cimport cython -cimport numpy as np - -ctypedef signed char schar; -ctypedef unsigned char uchar; - -ctypedef fused dtype: - uchar - schar - short - int - long - float - double - -ctypedef fused DenseType: - Matrix - Array - -ctypedef fused Rows: - _1 - _2 - _3 - _4 - _5 - _6 - _7 - _8 - _9 - _10 - _11 - _12 - _13 - _14 - _15 - _16 - _17 - _18 - _19 - _20 - _21 - _22 - _23 - _24 - _25 - _26 - _27 - _28 - _29 - _30 - _31 - _32 - Dynamic - -ctypedef Rows Cols -ctypedef Rows StrideOuter -ctypedef Rows StrideInner - -ctypedef fused DenseTypeShort: - Vector1i - Vector2i - Vector3i - Vector4i - VectorXi - RowVector1i - RowVector2i - RowVector3i - RowVector4i - RowVectorXi - Matrix1i - Matrix2i - Matrix3i - Matrix4i - MatrixXi - Vector1f - Vector2f - Vector3f - Vector4f - VectorXf - RowVector1f - RowVector2f - RowVector3f - RowVector4f - RowVectorXf - Matrix1f - Matrix2f - Matrix3f - Matrix4f - MatrixXf - Vector1d - Vector2d - Vector3d - Vector4d - VectorXd - RowVector1d - RowVector2d - RowVector3d - RowVector4d - RowVectorXd - Matrix1d - Matrix2d - Matrix3d - Matrix4d - MatrixXd - Vector1cf - Vector2cf - Vector3cf - Vector4cf - VectorXcf - RowVector1cf - RowVector2cf - RowVector3cf - RowVector4cf - RowVectorXcf - Matrix1cf - Matrix2cf - Matrix3cf - Matrix4cf - MatrixXcf - Vector1cd - Vector2cd - Vector3cd - Vector4cd - VectorXcd - RowVector1cd - RowVector2cd - RowVector3cd - RowVector4cd - RowVectorXcd - Matrix1cd - Matrix2cd - Matrix3cd - Matrix4cd - MatrixXcd - Array22i - Array23i - Array24i - Array2Xi - Array32i - Array33i 
- Array34i - Array3Xi - Array42i - Array43i - Array44i - Array4Xi - ArrayX2i - ArrayX3i - ArrayX4i - ArrayXXi - Array2i - Array3i - Array4i - ArrayXi - Array22f - Array23f - Array24f - Array2Xf - Array32f - Array33f - Array34f - Array3Xf - Array42f - Array43f - Array44f - Array4Xf - ArrayX2f - ArrayX3f - ArrayX4f - ArrayXXf - Array2f - Array3f - Array4f - ArrayXf - Array22d - Array23d - Array24d - Array2Xd - Array32d - Array33d - Array34d - Array3Xd - Array42d - Array43d - Array44d - Array4Xd - ArrayX2d - ArrayX3d - ArrayX4d - ArrayXXd - Array2d - Array3d - Array4d - ArrayXd - Array22cf - Array23cf - Array24cf - Array2Xcf - Array32cf - Array33cf - Array34cf - Array3Xcf - Array42cf - Array43cf - Array44cf - Array4Xcf - ArrayX2cf - ArrayX3cf - ArrayX4cf - ArrayXXcf - Array2cf - Array3cf - Array4cf - ArrayXcf - Array22cd - Array23cd - Array24cd - Array2Xcd - Array32cd - Array33cd - Array34cd - Array3Xcd - Array42cd - Array43cd - Array44cd - Array4Xcd - ArrayX2cd - ArrayX3cd - ArrayX4cd - ArrayXXcd - Array2cd - Array3cd - Array4cd - ArrayXcd - -ctypedef fused StorageOrder: - RowMajor - ColMajor - -ctypedef fused MapOptions: - Aligned - Unaligned - -cdef extern from "eigency_cpp.h" namespace "eigency": - - cdef cppclass _1 "1": - pass - - cdef cppclass _2 "2": - pass - - cdef cppclass _3 "3": - pass - - cdef cppclass _4 "4": - pass - - cdef cppclass _5 "5": - pass - - cdef cppclass _6 "6": - pass - - cdef cppclass _7 "7": - pass - - cdef cppclass _8 "8": - pass - - cdef cppclass _9 "9": - pass - - cdef cppclass _10 "10": - pass - - cdef cppclass _11 "11": - pass - - cdef cppclass _12 "12": - pass - - cdef cppclass _13 "13": - pass - - cdef cppclass _14 "14": - pass - - cdef cppclass _15 "15": - pass - - cdef cppclass _16 "16": - pass - - cdef cppclass _17 "17": - pass - - cdef cppclass _18 "18": - pass - - cdef cppclass _19 "19": - pass - - cdef cppclass _20 "20": - pass - - cdef cppclass _21 "21": - pass - - cdef cppclass _22 "22": - pass - - cdef cppclass _23 "23": - pass - - cdef cppclass _24 "24": - pass - - cdef cppclass _25 "25": - pass - - cdef cppclass _26 "26": - pass - - cdef cppclass _27 "27": - pass - - cdef cppclass _28 "28": - pass - - cdef cppclass _29 "29": - pass - - cdef cppclass _30 "30": - pass - - cdef cppclass _31 "31": - pass - - cdef cppclass _32 "32": - pass - - cdef cppclass PlainObjectBase: - pass - - cdef cppclass Map[DenseTypeShort](PlainObjectBase): - Map() except + - Map(np.ndarray array) except + - - cdef cppclass FlattenedMap[DenseType, dtype, Rows, Cols]: - FlattenedMap() except + - FlattenedMap(np.ndarray array) except + - - cdef cppclass FlattenedMapWithOrder "eigency::FlattenedMap" [DenseType, dtype, Rows, Cols, StorageOrder]: - FlattenedMapWithOrder() except + - FlattenedMapWithOrder(np.ndarray array) except + - - cdef cppclass FlattenedMapWithStride "eigency::FlattenedMap" [DenseType, dtype, Rows, Cols, StorageOrder, MapOptions, StrideOuter, StrideInner]: - FlattenedMapWithStride() except + - FlattenedMapWithStride(np.ndarray array) except + - - cdef np.ndarray ndarray_view(PlainObjectBase &) - cdef np.ndarray ndarray_copy(PlainObjectBase &) - cdef np.ndarray ndarray(PlainObjectBase &) - - -cdef extern from "eigency_cpp.h" namespace "Eigen": - - cdef cppclass Dynamic: - pass - - cdef cppclass RowMajor: - pass - - cdef cppclass ColMajor: - pass - - cdef cppclass Aligned: - pass - - cdef cppclass Unaligned: - pass - - cdef cppclass Matrix(PlainObjectBase): - pass - - cdef cppclass Array(PlainObjectBase): - pass - - cdef cppclass VectorXd(PlainObjectBase): - 
pass - - cdef cppclass Vector1i(PlainObjectBase): - pass - - cdef cppclass Vector2i(PlainObjectBase): - pass - - cdef cppclass Vector3i(PlainObjectBase): - pass - - cdef cppclass Vector4i(PlainObjectBase): - pass - - cdef cppclass VectorXi(PlainObjectBase): - pass - - cdef cppclass RowVector1i(PlainObjectBase): - pass - - cdef cppclass RowVector2i(PlainObjectBase): - pass - - cdef cppclass RowVector3i(PlainObjectBase): - pass - - cdef cppclass RowVector4i(PlainObjectBase): - pass - - cdef cppclass RowVectorXi(PlainObjectBase): - pass - - cdef cppclass Matrix1i(PlainObjectBase): - pass - - cdef cppclass Matrix2i(PlainObjectBase): - pass - - cdef cppclass Matrix3i(PlainObjectBase): - pass - - cdef cppclass Matrix4i(PlainObjectBase): - pass - - cdef cppclass MatrixXi(PlainObjectBase): - pass - - cdef cppclass Vector1f(PlainObjectBase): - pass - - cdef cppclass Vector2f(PlainObjectBase): - pass - - cdef cppclass Vector3f(PlainObjectBase): - pass - - cdef cppclass Vector4f(PlainObjectBase): - pass - - cdef cppclass VectorXf(PlainObjectBase): - pass - - cdef cppclass RowVector1f(PlainObjectBase): - pass - - cdef cppclass RowVector2f(PlainObjectBase): - pass - - cdef cppclass RowVector3f(PlainObjectBase): - pass - - cdef cppclass RowVector4f(PlainObjectBase): - pass - - cdef cppclass RowVectorXf(PlainObjectBase): - pass - - cdef cppclass Matrix1f(PlainObjectBase): - pass - - cdef cppclass Matrix2f(PlainObjectBase): - pass - - cdef cppclass Matrix3f(PlainObjectBase): - pass - - cdef cppclass Matrix4f(PlainObjectBase): - pass - - cdef cppclass MatrixXf(PlainObjectBase): - pass - - cdef cppclass Vector1d(PlainObjectBase): - pass - - cdef cppclass Vector2d(PlainObjectBase): - pass - - cdef cppclass Vector3d(PlainObjectBase): - pass - - cdef cppclass Vector4d(PlainObjectBase): - pass - - cdef cppclass VectorXd(PlainObjectBase): - pass - - cdef cppclass RowVector1d(PlainObjectBase): - pass - - cdef cppclass RowVector2d(PlainObjectBase): - pass - - cdef cppclass RowVector3d(PlainObjectBase): - pass - - cdef cppclass RowVector4d(PlainObjectBase): - pass - - cdef cppclass RowVectorXd(PlainObjectBase): - pass - - cdef cppclass Matrix1d(PlainObjectBase): - pass - - cdef cppclass Matrix2d(PlainObjectBase): - pass - - cdef cppclass Matrix3d(PlainObjectBase): - pass - - cdef cppclass Matrix4d(PlainObjectBase): - pass - - cdef cppclass MatrixXd(PlainObjectBase): - pass - - cdef cppclass Vector1cf(PlainObjectBase): - pass - - cdef cppclass Vector2cf(PlainObjectBase): - pass - - cdef cppclass Vector3cf(PlainObjectBase): - pass - - cdef cppclass Vector4cf(PlainObjectBase): - pass - - cdef cppclass VectorXcf(PlainObjectBase): - pass - - cdef cppclass RowVector1cf(PlainObjectBase): - pass - - cdef cppclass RowVector2cf(PlainObjectBase): - pass - - cdef cppclass RowVector3cf(PlainObjectBase): - pass - - cdef cppclass RowVector4cf(PlainObjectBase): - pass - - cdef cppclass RowVectorXcf(PlainObjectBase): - pass - - cdef cppclass Matrix1cf(PlainObjectBase): - pass - - cdef cppclass Matrix2cf(PlainObjectBase): - pass - - cdef cppclass Matrix3cf(PlainObjectBase): - pass - - cdef cppclass Matrix4cf(PlainObjectBase): - pass - - cdef cppclass MatrixXcf(PlainObjectBase): - pass - - cdef cppclass Vector1cd(PlainObjectBase): - pass - - cdef cppclass Vector2cd(PlainObjectBase): - pass - - cdef cppclass Vector3cd(PlainObjectBase): - pass - - cdef cppclass Vector4cd(PlainObjectBase): - pass - - cdef cppclass VectorXcd(PlainObjectBase): - pass - - cdef cppclass RowVector1cd(PlainObjectBase): - pass - - cdef cppclass 
RowVector2cd(PlainObjectBase): - pass - - cdef cppclass RowVector3cd(PlainObjectBase): - pass - - cdef cppclass RowVector4cd(PlainObjectBase): - pass - - cdef cppclass RowVectorXcd(PlainObjectBase): - pass - - cdef cppclass Matrix1cd(PlainObjectBase): - pass - - cdef cppclass Matrix2cd(PlainObjectBase): - pass - - cdef cppclass Matrix3cd(PlainObjectBase): - pass - - cdef cppclass Matrix4cd(PlainObjectBase): - pass - - cdef cppclass MatrixXcd(PlainObjectBase): - pass - - cdef cppclass Array22i(PlainObjectBase): - pass - - cdef cppclass Array23i(PlainObjectBase): - pass - - cdef cppclass Array24i(PlainObjectBase): - pass - - cdef cppclass Array2Xi(PlainObjectBase): - pass - - cdef cppclass Array32i(PlainObjectBase): - pass - - cdef cppclass Array33i(PlainObjectBase): - pass - - cdef cppclass Array34i(PlainObjectBase): - pass - - cdef cppclass Array3Xi(PlainObjectBase): - pass - - cdef cppclass Array42i(PlainObjectBase): - pass - - cdef cppclass Array43i(PlainObjectBase): - pass - - cdef cppclass Array44i(PlainObjectBase): - pass - - cdef cppclass Array4Xi(PlainObjectBase): - pass - - cdef cppclass ArrayX2i(PlainObjectBase): - pass - - cdef cppclass ArrayX3i(PlainObjectBase): - pass - - cdef cppclass ArrayX4i(PlainObjectBase): - pass - - cdef cppclass ArrayXXi(PlainObjectBase): - pass - - cdef cppclass Array2i(PlainObjectBase): - pass - - cdef cppclass Array3i(PlainObjectBase): - pass - - cdef cppclass Array4i(PlainObjectBase): - pass - - cdef cppclass ArrayXi(PlainObjectBase): - pass - - cdef cppclass Array22f(PlainObjectBase): - pass - - cdef cppclass Array23f(PlainObjectBase): - pass - - cdef cppclass Array24f(PlainObjectBase): - pass - - cdef cppclass Array2Xf(PlainObjectBase): - pass - - cdef cppclass Array32f(PlainObjectBase): - pass - - cdef cppclass Array33f(PlainObjectBase): - pass - - cdef cppclass Array34f(PlainObjectBase): - pass - - cdef cppclass Array3Xf(PlainObjectBase): - pass - - cdef cppclass Array42f(PlainObjectBase): - pass - - cdef cppclass Array43f(PlainObjectBase): - pass - - cdef cppclass Array44f(PlainObjectBase): - pass - - cdef cppclass Array4Xf(PlainObjectBase): - pass - - cdef cppclass ArrayX2f(PlainObjectBase): - pass - - cdef cppclass ArrayX3f(PlainObjectBase): - pass - - cdef cppclass ArrayX4f(PlainObjectBase): - pass - - cdef cppclass ArrayXXf(PlainObjectBase): - pass - - cdef cppclass Array2f(PlainObjectBase): - pass - - cdef cppclass Array3f(PlainObjectBase): - pass - - cdef cppclass Array4f(PlainObjectBase): - pass - - cdef cppclass ArrayXf(PlainObjectBase): - pass - - cdef cppclass Array22d(PlainObjectBase): - pass - - cdef cppclass Array23d(PlainObjectBase): - pass - - cdef cppclass Array24d(PlainObjectBase): - pass - - cdef cppclass Array2Xd(PlainObjectBase): - pass - - cdef cppclass Array32d(PlainObjectBase): - pass - - cdef cppclass Array33d(PlainObjectBase): - pass - - cdef cppclass Array34d(PlainObjectBase): - pass - - cdef cppclass Array3Xd(PlainObjectBase): - pass - - cdef cppclass Array42d(PlainObjectBase): - pass - - cdef cppclass Array43d(PlainObjectBase): - pass - - cdef cppclass Array44d(PlainObjectBase): - pass - - cdef cppclass Array4Xd(PlainObjectBase): - pass - - cdef cppclass ArrayX2d(PlainObjectBase): - pass - - cdef cppclass ArrayX3d(PlainObjectBase): - pass - - cdef cppclass ArrayX4d(PlainObjectBase): - pass - - cdef cppclass ArrayXXd(PlainObjectBase): - pass - - cdef cppclass Array2d(PlainObjectBase): - pass - - cdef cppclass Array3d(PlainObjectBase): - pass - - cdef cppclass Array4d(PlainObjectBase): - pass - - cdef cppclass 
ArrayXd(PlainObjectBase): - pass - - cdef cppclass Array22cf(PlainObjectBase): - pass - - cdef cppclass Array23cf(PlainObjectBase): - pass - - cdef cppclass Array24cf(PlainObjectBase): - pass - - cdef cppclass Array2Xcf(PlainObjectBase): - pass - - cdef cppclass Array32cf(PlainObjectBase): - pass - - cdef cppclass Array33cf(PlainObjectBase): - pass - - cdef cppclass Array34cf(PlainObjectBase): - pass - - cdef cppclass Array3Xcf(PlainObjectBase): - pass - - cdef cppclass Array42cf(PlainObjectBase): - pass - - cdef cppclass Array43cf(PlainObjectBase): - pass - - cdef cppclass Array44cf(PlainObjectBase): - pass - - cdef cppclass Array4Xcf(PlainObjectBase): - pass - - cdef cppclass ArrayX2cf(PlainObjectBase): - pass - - cdef cppclass ArrayX3cf(PlainObjectBase): - pass - - cdef cppclass ArrayX4cf(PlainObjectBase): - pass - - cdef cppclass ArrayXXcf(PlainObjectBase): - pass - - cdef cppclass Array2cf(PlainObjectBase): - pass - - cdef cppclass Array3cf(PlainObjectBase): - pass - - cdef cppclass Array4cf(PlainObjectBase): - pass - - cdef cppclass ArrayXcf(PlainObjectBase): - pass - - cdef cppclass Array22cd(PlainObjectBase): - pass - - cdef cppclass Array23cd(PlainObjectBase): - pass - - cdef cppclass Array24cd(PlainObjectBase): - pass - - cdef cppclass Array2Xcd(PlainObjectBase): - pass - - cdef cppclass Array32cd(PlainObjectBase): - pass - - cdef cppclass Array33cd(PlainObjectBase): - pass - - cdef cppclass Array34cd(PlainObjectBase): - pass - - cdef cppclass Array3Xcd(PlainObjectBase): - pass - - cdef cppclass Array42cd(PlainObjectBase): - pass - - cdef cppclass Array43cd(PlainObjectBase): - pass - - cdef cppclass Array44cd(PlainObjectBase): - pass - - cdef cppclass Array4Xcd(PlainObjectBase): - pass - - cdef cppclass ArrayX2cd(PlainObjectBase): - pass - - cdef cppclass ArrayX3cd(PlainObjectBase): - pass - - cdef cppclass ArrayX4cd(PlainObjectBase): - pass - - cdef cppclass ArrayXXcd(PlainObjectBase): - pass - - cdef cppclass Array2cd(PlainObjectBase): - pass - - cdef cppclass Array3cd(PlainObjectBase): - pass - - cdef cppclass Array4cd(PlainObjectBase): - pass - - cdef cppclass ArrayXcd(PlainObjectBase): - pass - - diff --git a/cython/gtsam_eigency/core.pyx b/cython/gtsam_eigency/core.pyx deleted file mode 100644 index 8b1378917..000000000 --- a/cython/gtsam_eigency/core.pyx +++ /dev/null @@ -1 +0,0 @@ - diff --git a/cython/gtsam_eigency/eigency_cpp.h b/cython/gtsam_eigency/eigency_cpp.h deleted file mode 100644 index ce303182e..000000000 --- a/cython/gtsam_eigency/eigency_cpp.h +++ /dev/null @@ -1,504 +0,0 @@ -#include - -#include -#include -#include - -typedef ::std::complex< double > __pyx_t_double_complex; -typedef ::std::complex< float > __pyx_t_float_complex; - -#include "conversions_api.h" - -#ifndef EIGENCY_CPP -#define EIGENCY_CPP - -namespace eigency { - -template -inline PyArrayObject *_ndarray_view(Scalar *, long rows, long cols, bool is_row_major, long outer_stride=0, long inner_stride=0); -template -inline PyArrayObject *_ndarray_copy(const Scalar *, long rows, long cols, bool is_row_major, long outer_stride=0, long inner_stride=0); - -// Strides: -// Eigen and numpy differ in their way of dealing with strides. Eigen has the concept of outer and -// inner strides, which are dependent on whether the array/matrix is row-major of column-major: -// Inner stride: denotes the offset between succeeding elements in each row (row-major) or column (column-major). -// Outer stride: denotes the offset between succeeding rows (row-major) or succeeding columns (column-major). 
-// In contrast, numpy's stride is simply a measure of how fast each dimension should be incremented. -// Consequently, a switch in numpy storage order from row-major to column-major involves a switch -// in strides, while it does not affect the stride in Eigen. -template<> -inline PyArrayObject *_ndarray_view(double *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) { - // Eigen row-major mode: row_stride=outer_stride, and col_stride=inner_stride - // If no stride is given, the row_stride is set to the number of columns. - return ndarray_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - } else { - // Eigen column-major mode: row_stride=inner_stride, and col_stride=outer_stride - // If no stride is given, the col_stride is set to the number of rows. - return ndarray_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); - } -} -template<> -inline PyArrayObject *_ndarray_copy(const double *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(float *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const float *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_long_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_long_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_long_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_long_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(unsigned long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_ulong_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_ulong_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const unsigned long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if 
(is_row_major) - return ndarray_copy_ulong_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_ulong_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_int_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_int_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_int_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_int_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(unsigned int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_uint_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_uint_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const unsigned int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_uint_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_uint_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_short_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_short_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_short_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_short_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(unsigned short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_ushort_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_ushort_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const unsigned short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_ushort_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_ushort_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(signed char *data, long rows, 
long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_schar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_schar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const signed char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_schar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_schar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view(unsigned char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_uchar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_uchar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy(const unsigned char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_uchar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_uchar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view >(std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_complex_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_complex_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy >(const std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_complex_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_complex_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - -template<> -inline PyArrayObject *_ndarray_view >(std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_complex_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_complex_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} -template<> -inline PyArrayObject *_ndarray_copy >(const std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) { - if (is_row_major) - return ndarray_copy_complex_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1); - else - return ndarray_copy_complex_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows); -} - - -template -inline PyArrayObject *ndarray(Eigen::PlainObjectBase &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor); -} -// If C++11 is available, check if m is an r-value reference, in -// which case a copy should always be made -#if 
__cplusplus >= 201103L -template -inline PyArrayObject *ndarray(Eigen::PlainObjectBase &&m) { - import_gtsam_eigency__conversions(); - return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor); -} -#endif -template -inline PyArrayObject *ndarray(const Eigen::PlainObjectBase &m) { - import_gtsam_eigency__conversions(); - return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor); -} -template -inline PyArrayObject *ndarray_view(Eigen::PlainObjectBase &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor); -} -template -inline PyArrayObject *ndarray_view(const Eigen::PlainObjectBase &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(const_cast(m.data()), m.rows(), m.cols(), m.IsRowMajor); -} -template -inline PyArrayObject *ndarray_copy(const Eigen::PlainObjectBase &m) { - import_gtsam_eigency__conversions(); - return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor); -} - -template -inline PyArrayObject *ndarray(Eigen::Map &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); -} -template -inline PyArrayObject *ndarray(const Eigen::Map &m) { - import_gtsam_eigency__conversions(); - // Since this is a map, we assume that ownership is correctly taken care - // of, and we avoid taking a copy - return _ndarray_view(const_cast(m.data()), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); -} -template -inline PyArrayObject *ndarray_view(Eigen::Map &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); -} -template -inline PyArrayObject *ndarray_view(const Eigen::Map &m) { - import_gtsam_eigency__conversions(); - return _ndarray_view(const_cast(m.data()), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); -} -template -inline PyArrayObject *ndarray_copy(const Eigen::Map &m) { - import_gtsam_eigency__conversions(); - return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride()); -} - - -template > -class MapBase: public Eigen::Map { -public: - typedef Eigen::Map Base; - typedef typename Base::Scalar Scalar; - - MapBase(Scalar* data, - long rows, - long cols, - _StrideType stride=_StrideType()) - : Base(data, - // If both dimensions are dynamic or dimensions match, accept dimensions as they are - ((Base::RowsAtCompileTime==Eigen::Dynamic && Base::ColsAtCompileTime==Eigen::Dynamic) || - (Base::RowsAtCompileTime==rows && Base::ColsAtCompileTime==cols)) - ? rows - // otherwise, test if swapping them makes them fit - : ((Base::RowsAtCompileTime==cols || Base::ColsAtCompileTime==rows) - ? cols - : rows), - ((Base::RowsAtCompileTime==Eigen::Dynamic && Base::ColsAtCompileTime==Eigen::Dynamic) || - (Base::RowsAtCompileTime==rows && Base::ColsAtCompileTime==cols)) - ? cols - : ((Base::RowsAtCompileTime==cols || Base::ColsAtCompileTime==rows) - ? rows - : cols), - stride - ) {} - - MapBase &operator=(const MatrixType &other) { - Base::operator=(other); - return *this; - } - - virtual ~MapBase() { } -}; - - -template class EigencyDenseBase, - typename Scalar, - int _Rows, int _Cols, - int _Options = Eigen::AutoAlign | -#if defined(__GNUC__) && __GNUC__==3 && __GNUC_MINOR__==4 - // workaround a bug in at least gcc 3.4.6 - // the innermost ?: ternary operator is misparsed. We write it slightly - // differently and this makes gcc 3.4.6 happy, but it's ugly. 
- // The error would only show up with EIGEN_DEFAULT_TO_ROW_MAJOR is defined - // (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor) - ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor -// EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION contains explicit namespace since Eigen 3.1.19 -#if EIGEN_VERSION_AT_LEAST(3,2,90) - : !(_Cols==1 && _Rows!=1) ? EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION -#else - : !(_Cols==1 && _Rows!=1) ? Eigen::EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION -#endif - : ColMajor ), -#else - ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor - : (_Cols==1 && _Rows!=1) ? Eigen::ColMajor -// EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION contains explicit namespace since Eigen 3.1.19 -#if EIGEN_VERSION_AT_LEAST(3,2,90) - : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ), -#else - : Eigen::EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ), -#endif -#endif - int _MapOptions = Eigen::Unaligned, - int _StrideOuter=0, int _StrideInner=0, - int _MaxRows = _Rows, - int _MaxCols = _Cols> -class FlattenedMap: public MapBase, _MapOptions, Eigen::Stride<_StrideOuter, _StrideInner> > { -public: - typedef MapBase, _MapOptions, Eigen::Stride<_StrideOuter, _StrideInner> > Base; - - FlattenedMap() - : Base(NULL, 0, 0), - object_(NULL) {} - - FlattenedMap(Scalar *data, long rows, long cols, long outer_stride=0, long inner_stride=0) - : Base(data, rows, cols, - Eigen::Stride<_StrideOuter, _StrideInner>(outer_stride, inner_stride)), - object_(NULL) { - } - - FlattenedMap(PyArrayObject *object) - : Base((Scalar *)((PyArrayObject*)object)->data, - // : Base(_from_numpy((PyArrayObject*)object), - (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[0] : 1, - (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[1] : ((PyArrayObject*)object)->dimensions[0], - Eigen::Stride<_StrideOuter, _StrideInner>(_StrideOuter != Eigen::Dynamic ? _StrideOuter : (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[0] : 1, - _StrideInner != Eigen::Dynamic ? _StrideInner : (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[1] : ((PyArrayObject*)object)->dimensions[0])), - object_(object) { - - if (((PyObject*)object != Py_None) && !PyArray_ISONESEGMENT(object)) - throw std::invalid_argument("Numpy array must be a in one contiguous segment to be able to be transferred to a Eigen Map."); - - Py_XINCREF(object_); - } - FlattenedMap &operator=(const FlattenedMap &other) { - if (other.object_) { - new (this) FlattenedMap(other.object_); - } else { - // Replace the memory that we point to (not a memory allocation) - new (this) FlattenedMap(const_cast(other.data()), - other.rows(), - other.cols(), - other.outerStride(), - other.innerStride()); - } - - return *this; - } - - operator Base() const { - return static_cast(*this); - } - - operator Base&() const { - return static_cast(*this); - } - - operator EigencyDenseBase() const { - return EigencyDenseBase(static_cast(*this)); - } - - virtual ~FlattenedMap() { - Py_XDECREF(object_); - } - -private: - PyArrayObject * const object_; -}; - - -template -class Map: public MapBase { -public: - typedef MapBase Base; - typedef typename MatrixType::Scalar Scalar; - - enum { - RowsAtCompileTime = Base::Base::RowsAtCompileTime, - ColsAtCompileTime = Base::Base::ColsAtCompileTime - }; - - Map() - : Base(NULL, - (RowsAtCompileTime == Eigen::Dynamic) ? 0 : RowsAtCompileTime, - (ColsAtCompileTime == Eigen::Dynamic) ? 
0 : ColsAtCompileTime), - object_(NULL) { - } - - Map(Scalar *data, long rows, long cols) - : Base(data, rows, cols), - object_(NULL) {} - - Map(PyArrayObject *object) - : Base((PyObject*)object == Py_None? NULL: (Scalar *)object->data, - // ROW: If array is in row-major order, transpose (see README) - (PyObject*)object == Py_None? 0 : - (!PyArray_IS_F_CONTIGUOUS(object) - ? ((object->nd == 1) - ? 1 // ROW: If 1D row-major numpy array, set to 1 (row vector) - : object->dimensions[1]) - : object->dimensions[0]), - // COLUMN: If array is in row-major order: transpose (see README) - (PyObject*)object == Py_None? 0 : - (!PyArray_IS_F_CONTIGUOUS(object) - ? object->dimensions[0] - : ((object->nd == 1) - ? 1 // COLUMN: If 1D col-major numpy array, set to length (column vector) - : object->dimensions[1]))), - object_(object) { - - if (((PyObject*)object != Py_None) && !PyArray_ISONESEGMENT(object)) - throw std::invalid_argument("Numpy array must be a in one contiguous segment to be able to be transferred to a Eigen Map."); - Py_XINCREF(object_); - } - - Map &operator=(const Map &other) { - if (other.object_) { - new (this) Map(other.object_); - } else { - // Replace the memory that we point to (not a memory allocation) - new (this) Map(const_cast(other.data()), - other.rows(), - other.cols()); - } - - return *this; - } - - Map &operator=(const MatrixType &other) { - MapBase::operator=(other); - return *this; - } - - operator Base() const { - return static_cast(*this); - } - - operator Base&() const { - return static_cast(*this); - } - - operator MatrixType() const { - return MatrixType(static_cast(*this)); - } - - virtual ~Map() { - Py_XDECREF(object_); - } - -private: - PyArrayObject * const object_; -}; - - -} - -#endif - - - diff --git a/cython/requirements.txt b/cython/requirements.txt deleted file mode 100644 index cd77b097d..000000000 --- a/cython/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -Cython>=0.25.2 -backports_abc>=0.5 -numpy>=1.12.0 diff --git a/debian/README.md b/debian/README.md deleted file mode 100644 index 74eb351cd..000000000 --- a/debian/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# How to build a GTSAM debian package - -To use the ``debuild`` command, install the ``devscripts`` package - - sudo apt install devscripts - -Change into the gtsam directory, then run: - - debuild -us -uc -j4 - -Adjust the ``-j4`` depending on how many CPUs you want to build on in -parallel. 
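Note on the stride convention in the deleted eigency_cpp.h above: switching numpy storage order swaps the roles of the two strides, while Eigen keeps separate inner/outer strides tied to its majorness, which is what the `_C` / `_F` converter variants in conversions.pyx encode. A minimal numpy-only sketch (no GTSAM or eigency required) illustrates the swap that the default strides in `ndarray_double_C` / `ndarray_double_F` reproduce:

    import numpy as np

    # 3x4 array of float64 (itemsize 8 bytes). In C (row-major) order the
    # first stride covers a whole row; in F (column-major) order the second
    # stride covers a whole column.
    a_c = np.zeros((3, 4), order="C")
    a_f = np.asfortranarray(a_c)

    print(a_c.strides)  # (32, 8): row stride = 4 cols * 8 bytes, col stride = 8 bytes
    print(a_f.strides)  # (8, 24): now the column stride is the large one (3 rows * 8 bytes)

The Eigen-side defaults used above (outer stride = number of columns for row-major, = number of rows for column-major) correspond exactly to these contiguous layouts when no explicit stride is passed.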
diff --git a/debian/changelog b/debian/changelog deleted file mode 100644 index ef5d5ab97..000000000 --- a/debian/changelog +++ /dev/null @@ -1,5 +0,0 @@ -gtsam (4.0.0-1berndpfrommer) bionic; urgency=medium - - * initial release - - -- Bernd Pfrommer Wed, 18 Jul 2018 20:36:44 -0400 diff --git a/debian/compat b/debian/compat deleted file mode 100644 index ec635144f..000000000 --- a/debian/compat +++ /dev/null @@ -1 +0,0 @@ -9 diff --git a/debian/control b/debian/control deleted file mode 100644 index 9b3ae5308..000000000 --- a/debian/control +++ /dev/null @@ -1,15 +0,0 @@ -Source: gtsam -Section: libs -Priority: optional -Maintainer: Frank Dellaert -Uploaders: Jose Luis Blanco Claraco , Bernd Pfrommer -Build-Depends: cmake, libboost-all-dev (>= 1.58), libeigen3-dev, libtbb-dev, debhelper (>=9) -Standards-Version: 3.9.7 -Homepage: https://github.com/borglab/gtsam -Vcs-Browser: https://github.com/borglab/gtsam - -Package: libgtsam-dev -Architecture: any -Depends: ${shlibs:Depends}, ${misc:Depends}, libboost-serialization-dev, libboost-system-dev, libboost-filesystem-dev, libboost-thread-dev, libboost-program-options-dev, libboost-date-time-dev, libboost-timer-dev, libboost-chrono-dev, libboost-regex-dev -Description: Georgia Tech Smoothing and Mapping Library - gtsam: Georgia Tech Smoothing and Mapping library for SLAM type applications diff --git a/debian/copyright b/debian/copyright deleted file mode 100644 index c2f41d83d..000000000 --- a/debian/copyright +++ /dev/null @@ -1,15 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: gtsam -Source: https://bitbucket.org/gtborg/gtsam.git - -Files: * -Copyright: 2017, Frank Dellaert -License: BSD - -Files: gtsam/3rdparty/CCOLAMD/* -Copyright: 2005-2011, Univ. of Florida. Authors: Timothy A. Davis, Sivasankaran Rajamanickam, and Stefan Larimore. Closely based on COLAMD by Davis, Stefan Larimore, in collaboration with Esmond Ng, and John Gilbert. http://www.cise.ufl.edu/research/sparse -License: GNU LESSER GENERAL PUBLIC LICENSE - -Files: gtsam/3rdparty/Eigen/* -Copyright: 2017, Multiple Authors -License: MPL2 diff --git a/debian/rules b/debian/rules deleted file mode 100755 index fab798f6e..000000000 --- a/debian/rules +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/make -f -# See debhelper(7) (uncomment to enable) -# output every command that modifies files on the build system. 
-export DH_VERBOSE = 1 - -# Makefile target name for running unit tests: -GTSAM_TEST_TARGET = check - -# see FEATURE AREAS in dpkg-buildflags(1) -#export DEB_BUILD_MAINT_OPTIONS = hardening=+all - -# see ENVIRONMENT in dpkg-buildflags(1) -# package maintainers to append CFLAGS -#export DEB_CFLAGS_MAINT_APPEND = -Wall -pedantic -# package maintainers to append LDFLAGS -#export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed - -%: - dh $@ --parallel - -# dh_make generated override targets -# This is example for Cmake (See https://bugs.debian.org/641051 ) -override_dh_auto_configure: - dh_auto_configure -- -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DGTSAM_BUILD_TESTS=ON -DGTSAM_BUILD_WRAP=OFF -DGTSAM_BUILD_DOCS=OFF -DGTSAM_INSTALL_CPPUNITLITE=OFF -DGTSAM_INSTALL_GEOGRAPHICLIB=OFF -DGTSAM_BUILD_TYPE_POSTFIXES=OFF -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF - -override_dh_auto_test-arch: - # Tests for arch-dependent : - echo "[override_dh_auto_test-arch]" - dh_auto_build -O--buildsystem=cmake -- $(GTSAM_TEST_TARGET) diff --git a/debian/source/format b/debian/source/format deleted file mode 100644 index 163aaf8d8..000000000 --- a/debian/source/format +++ /dev/null @@ -1 +0,0 @@ -3.0 (quilt) diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in index d5c969a8a..fd7f4e5f6 100644 --- a/doc/Doxyfile.in +++ b/doc/Doxyfile.in @@ -1188,7 +1188,7 @@ USE_MATHJAX = YES # MathJax, but it is strongly recommended to install a local copy of MathJax # before deployment. -MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest +MATHJAX_RELPATH = https://cdn.mathjax.org/mathjax/latest # The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension # names that should be enabled during MathJax rendering. diff --git a/doc/gtsam-coordinate-frames.lyx b/doc/gtsam-coordinate-frames.lyx index 33d0dd977..cfb44696b 100644 --- a/doc/gtsam-coordinate-frames.lyx +++ b/doc/gtsam-coordinate-frames.lyx @@ -2291,15 +2291,11 @@ uncalibration used in the residual). \end_layout -\begin_layout Standard -\begin_inset Note Note -status collapsed - \begin_layout Section Noise models of prior factors \end_layout -\begin_layout Plain Layout +\begin_layout Standard The simplest way to describe noise models is by an example. Let's take a prior factor on a 3D pose \begin_inset Formula $x\in\SE 3$ @@ -2353,7 +2349,7 @@ e\left(x\right)=\norm{h\left(x\right)}_{\Sigma}^{2}=h\left(x\right)^{\t}\Sigma^{ useful answer out quickly ] \end_layout -\begin_layout Plain Layout +\begin_layout Standard The density induced by a noise model on the prior factor is Gaussian in the tangent space about the linearization point. Suppose that the pose is linearized at @@ -2431,7 +2427,7 @@ Here we see that the update . \end_layout -\begin_layout Plain Layout +\begin_layout Standard This means that to draw random pose samples, we actually draw random samples of \begin_inset Formula $\delta x$ @@ -2456,7 +2452,7 @@ This means that to draw random pose samples, we actually draw random samples Noise models of between factors \end_layout -\begin_layout Plain Layout +\begin_layout Standard The noise model of a BetweenFactor is a bit more complicated. 
The unwhitened error is \begin_inset Formula @@ -2516,11 +2512,6 @@ e\left(\delta x_{1}\right) & \approx\norm{\log\left(z^{-1}\left(x_{1}\exp\delta \end_inset -\end_layout - -\end_inset - - \end_layout \end_body diff --git a/doc/gtsam-coordinate-frames.pdf b/doc/gtsam-coordinate-frames.pdf index 3613ef0ac..77910b4cf 100644 Binary files a/doc/gtsam-coordinate-frames.pdf and b/doc/gtsam-coordinate-frames.pdf differ diff --git a/doc/robust.pdf b/doc/robust.pdf new file mode 100644 index 000000000..67b853f44 Binary files /dev/null and b/doc/robust.pdf differ diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 000000000..0c136f94c --- /dev/null +++ b/docker/README.md @@ -0,0 +1,21 @@ +# Instructions + +Build all docker images, in order: + +```bash +(cd ubuntu-boost-tbb && ./build.sh) +(cd ubuntu-gtsam && ./build.sh) +(cd ubuntu-gtsam-python && ./build.sh) +(cd ubuntu-gtsam-python-vnc && ./build.sh) +``` + +Then launch with: + + docker run -p 5900:5900 dellaert/ubuntu-gtsam-python-vnc:bionic + +Then open a remote VNC X client, for example: + + sudo apt-get install tigervnc-viewer + xtigervncviewer :5900 + + diff --git a/docker/ubuntu-boost-tbb-eigen3/Dockerfile b/docker/ubuntu-boost-tbb-eigen3/Dockerfile deleted file mode 100644 index 33aa1ab96..000000000 --- a/docker/ubuntu-boost-tbb-eigen3/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -# Get the base Ubuntu image from Docker Hub -FROM ubuntu:bionic - -# Update apps on the base image -RUN apt-get -y update && apt-get install -y - -# Install C++ -RUN apt-get -y install build-essential - -# Install boost and cmake -RUN apt-get -y install libboost-all-dev cmake - -# Install TBB -RUN apt-get -y install libtbb-dev - -# Install latest Eigen -RUN apt-get install -y libeigen3-dev - diff --git a/docker/ubuntu-boost-tbb/Dockerfile b/docker/ubuntu-boost-tbb/Dockerfile new file mode 100644 index 000000000..9f6eea3b8 --- /dev/null +++ b/docker/ubuntu-boost-tbb/Dockerfile @@ -0,0 +1,19 @@ +# Basic Ubuntu 18.04 image with Boost and TBB installed. To be used for building further downstream packages. + +# Get the base Ubuntu image from Docker Hub +FROM ubuntu:bionic + +# Disable GUI prompts +ENV DEBIAN_FRONTEND noninteractive + +# Update apps on the base image +RUN apt-get -y update && apt-get -y install + +# Install C++ +RUN apt-get -y install build-essential apt-utils + +# Install boost and cmake +RUN apt-get -y install libboost-all-dev cmake + +# Install TBB +RUN apt-get -y install libtbb-dev diff --git a/docker/ubuntu-boost-tbb/build.sh b/docker/ubuntu-boost-tbb/build.sh new file mode 100755 index 000000000..2dac4c3db --- /dev/null +++ b/docker/ubuntu-boost-tbb/build.sh @@ -0,0 +1,3 @@ +# Build command for Docker image +# TODO(dellaert): use docker compose and/or cmake +docker build --no-cache -t dellaert/ubuntu-boost-tbb:bionic . diff --git a/docker/ubuntu-gtsam-python-vnc/Dockerfile b/docker/ubuntu-gtsam-python-vnc/Dockerfile new file mode 100644 index 000000000..61ecd9b9a --- /dev/null +++ b/docker/ubuntu-gtsam-python-vnc/Dockerfile @@ -0,0 +1,20 @@ +# This GTSAM image connects to the host X-server via VNC to provide a Graphical User Interface for interaction. 
+ +# Get the base Ubuntu/GTSAM image from Docker Hub +FROM dellaert/ubuntu-gtsam-python:bionic + +# Things needed to get a python GUI +ENV DEBIAN_FRONTEND noninteractive +RUN apt install -y python-tk +RUN python3 -m pip install matplotlib + +# Install a VNC X-server, Frame buffer, and windows manager +RUN apt install -y x11vnc xvfb fluxbox + +# Finally, install wmctrl needed for bootstrap script +RUN apt install -y wmctrl + +# Copy bootstrap script and make sure it runs +COPY bootstrap.sh / + +CMD '/bootstrap.sh' diff --git a/docker/ubuntu-gtsam-python-vnc/bootstrap.sh b/docker/ubuntu-gtsam-python-vnc/bootstrap.sh new file mode 100755 index 000000000..21356138f --- /dev/null +++ b/docker/ubuntu-gtsam-python-vnc/bootstrap.sh @@ -0,0 +1,111 @@ +#!/bin/bash + +# Based on: http://www.richud.com/wiki/Ubuntu_Fluxbox_GUI_with_x11vnc_and_Xvfb + +main() { + log_i "Starting xvfb virtual display..." + launch_xvfb + log_i "Starting window manager..." + launch_window_manager + log_i "Starting VNC server..." + run_vnc_server +} + +launch_xvfb() { + local xvfbLockFilePath="/tmp/.X1-lock" + if [ -f "${xvfbLockFilePath}" ] + then + log_i "Removing xvfb lock file '${xvfbLockFilePath}'..." + if ! rm -v "${xvfbLockFilePath}" + then + log_e "Failed to remove xvfb lock file" + exit 1 + fi + fi + + # Set defaults if the user did not specify envs. + export DISPLAY=${XVFB_DISPLAY:-:1} + local screen=${XVFB_SCREEN:-0} + local resolution=${XVFB_RESOLUTION:-1280x960x24} + local timeout=${XVFB_TIMEOUT:-5} + + # Start and wait for either Xvfb to be fully up or we hit the timeout. + Xvfb ${DISPLAY} -screen ${screen} ${resolution} & + local loopCount=0 + until xdpyinfo -display ${DISPLAY} > /dev/null 2>&1 + do + loopCount=$((loopCount+1)) + sleep 1 + if [ ${loopCount} -gt ${timeout} ] + then + log_e "xvfb failed to start" + exit 1 + fi + done +} + +launch_window_manager() { + local timeout=${XVFB_TIMEOUT:-5} + + # Start and wait for either fluxbox to be fully up or we hit the timeout. + fluxbox & + local loopCount=0 + until wmctrl -m > /dev/null 2>&1 + do + loopCount=$((loopCount+1)) + sleep 1 + if [ ${loopCount} -gt ${timeout} ] + then + log_e "fluxbox failed to start" + exit 1 + fi + done +} + +run_vnc_server() { + local passwordArgument='-nopw' + + if [ -n "${VNC_SERVER_PASSWORD}" ] + then + local passwordFilePath="${HOME}/.x11vnc.pass" + if ! x11vnc -storepasswd "${VNC_SERVER_PASSWORD}" "${passwordFilePath}" + then + log_e "Failed to store x11vnc password" + exit 1 + fi + passwordArgument=-"-rfbauth ${passwordFilePath}" + log_i "The VNC server will ask for a password" + else + log_w "The VNC server will NOT ask for a password" + fi + + x11vnc -ncache 10 -ncache_cr -display ${DISPLAY} -forever ${passwordArgument} & + wait $! +} + +log_i() { + log "[INFO] ${@}" +} + +log_w() { + log "[WARN] ${@}" +} + +log_e() { + log "[ERROR] ${@}" +} + +log() { + echo "[$(date '+%Y-%m-%d %H:%M:%S')] ${@}" +} + +control_c() { + echo "" + exit +} + +trap control_c SIGINT SIGTERM SIGHUP + +main + +exit diff --git a/docker/ubuntu-gtsam-python-vnc/build.sh b/docker/ubuntu-gtsam-python-vnc/build.sh new file mode 100755 index 000000000..8d280252f --- /dev/null +++ b/docker/ubuntu-gtsam-python-vnc/build.sh @@ -0,0 +1,4 @@ +# Build command for Docker image +# TODO(dellaert): use docker compose and/or cmake +# Needs to be run in docker/ubuntu-gtsam-python-vnc directory +docker build -t dellaert/ubuntu-gtsam-python-vnc:bionic . 
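The bootstrap script above only enables VNC authentication when `VNC_SERVER_PASSWORD` is set. A minimal launch sketch under that assumption (image tag, port, and viewer command taken from the build script and docker README earlier in this diff):

```bash
# Run the VNC image; x11vnc will ask for the given password (drop -e to run without one)
docker run -e VNC_SERVER_PASSWORD=gtsam -p 5900:5900 dellaert/ubuntu-gtsam-python-vnc:bionic
# ...then point any VNC viewer at the forwarded display
xtigervncviewer :5900
```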
diff --git a/docker/ubuntu-gtsam-python-vnc/vnc.sh b/docker/ubuntu-gtsam-python-vnc/vnc.sh new file mode 100755 index 000000000..c0ab692c6 --- /dev/null +++ b/docker/ubuntu-gtsam-python-vnc/vnc.sh @@ -0,0 +1,5 @@ +# After running this script, connect VNC client to 0.0.0.0:5900 +docker run -it \ + --workdir="/usr/src/gtsam" \ + -p 5900:5900 \ + dellaert/ubuntu-gtsam-python-vnc:bionic \ No newline at end of file diff --git a/docker/ubuntu-gtsam-python/Dockerfile b/docker/ubuntu-gtsam-python/Dockerfile new file mode 100644 index 000000000..ce5d8fdca --- /dev/null +++ b/docker/ubuntu-gtsam-python/Dockerfile @@ -0,0 +1,31 @@ +# GTSAM Ubuntu image with Python wrapper support. + +# Get the base Ubuntu/GTSAM image from Docker Hub +FROM dellaert/ubuntu-gtsam:bionic + +# Install pip +RUN apt-get install -y python3-pip python3-dev + +# Install python wrapper requirements +RUN python3 -m pip install -U -r /usr/src/gtsam/python/requirements.txt + +# Run cmake again, now with python toolbox on +WORKDIR /usr/src/gtsam/build +RUN cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DGTSAM_WITH_EIGEN_MKL=OFF \ + -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ + -DGTSAM_BUILD_TIMING_ALWAYS=OFF \ + -DGTSAM_BUILD_TESTS=OFF \ + -DGTSAM_BUILD_PYTHON=ON \ + -DGTSAM_PYTHON_VERSION=3\ + .. + +# Build again, as ubuntu-gtsam image cleaned +RUN make -j4 install && make clean + +# Needed to run python wrapper: +RUN echo 'export PYTHONPATH=/usr/local/python/:$PYTHONPATH' >> /root/.bashrc + +# Run bash +CMD ["bash"] diff --git a/docker/ubuntu-gtsam-python/build.sh b/docker/ubuntu-gtsam-python/build.sh new file mode 100755 index 000000000..1696f6c61 --- /dev/null +++ b/docker/ubuntu-gtsam-python/build.sh @@ -0,0 +1,3 @@ +# Build command for Docker image +# TODO(dellaert): use docker compose and/or cmake +docker build --no-cache -t dellaert/ubuntu-gtsam-python:bionic . diff --git a/docker/ubuntu-gtsam/Dockerfile b/docker/ubuntu-gtsam/Dockerfile new file mode 100644 index 000000000..f2b476f15 --- /dev/null +++ b/docker/ubuntu-gtsam/Dockerfile @@ -0,0 +1,35 @@ +# Ubuntu image with GTSAM installed. Configured with Boost and TBB support. + +# Get the base Ubuntu image from Docker Hub +FROM dellaert/ubuntu-boost-tbb:bionic + +# Install git +RUN apt-get update && \ + apt-get install -y git + +# Install compiler +RUN apt-get install -y build-essential + +# Clone GTSAM (develop branch) +WORKDIR /usr/src/ +RUN git clone --single-branch --branch develop https://github.com/borglab/gtsam.git + +# Change to build directory. Will be created automatically. +WORKDIR /usr/src/gtsam/build +# Run cmake +RUN cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DGTSAM_WITH_EIGEN_MKL=OFF \ + -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \ + -DGTSAM_BUILD_TIMING_ALWAYS=OFF \ + -DGTSAM_BUILD_TESTS=OFF \ + .. + +# Build +RUN make -j4 install && make clean + +# Needed to link with GTSAM +RUN echo 'export LD_LIBRARY_PATH=/usr/local/lib:LD_LIBRARY_PATH' >> /root/.bashrc + +# Run bash +CMD ["bash"] diff --git a/docker/ubuntu-gtsam/build.sh b/docker/ubuntu-gtsam/build.sh new file mode 100755 index 000000000..bf545e9c2 --- /dev/null +++ b/docker/ubuntu-gtsam/build.sh @@ -0,0 +1,3 @@ +# Build command for Docker image +# TODO(dellaert): use docker compose and/or cmake +docker build --no-cache -t dellaert/ubuntu-gtsam:bionic . 
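As a quick smoke test of the ubuntu-gtsam-python image defined above, something like the following sketch should work; the `PYTHONPATH` value mirrors the export the Dockerfile appends to /root/.bashrc, and `gtsam.Pose3` is only an illustrative symbol, not part of this change:

```bash
docker run -it dellaert/ubuntu-gtsam-python:bionic bash -c \
  'export PYTHONPATH=/usr/local/python/:$PYTHONPATH && python3 -c "import gtsam; print(gtsam.Pose3())"'
```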
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 7251c2b6f..476f4ae21 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -1,7 +1,4 @@ set (excluded_examples - DiscreteBayesNet_FG.cpp - UGM_chain.cpp - UGM_small.cpp elaboratePoint2KalmanFilter.cpp ) diff --git a/examples/CameraResectioning.cpp b/examples/CameraResectioning.cpp index b12418098..7ac2de8b1 100644 --- a/examples/CameraResectioning.cpp +++ b/examples/CameraResectioning.cpp @@ -46,8 +46,8 @@ class ResectioningFactor: public NoiseModelFactor1 { } /// evaluate the error - virtual Vector evaluateError(const Pose3& pose, boost::optional H = - boost::none) const { + Vector evaluateError(const Pose3& pose, boost::optional H = + boost::none) const override { PinholeCamera camera(pose, *K_); return camera.project(P_, H, boost::none, boost::none) - p_; } diff --git a/examples/CombinedImuFactorsExample.cpp b/examples/CombinedImuFactorsExample.cpp new file mode 100644 index 000000000..c9646e64d --- /dev/null +++ b/examples/CombinedImuFactorsExample.cpp @@ -0,0 +1,303 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file CombinedImuFactorsExample + * @brief Test example for using GTSAM ImuCombinedFactor + * navigation code. + * @author Varun Agrawal + */ + +/** + * Example of use of the CombinedImuFactor in + * conjunction with GPS + * - we read IMU and GPS data from a CSV file, with the following format: + * A row starting with "i" is the first initial position formatted with + * N, E, D, qx, qY, qZ, qW, velN, velE, velD + * A row starting with "0" is an imu measurement + * linAccN, linAccE, linAccD, angVelN, angVelE, angVelD + * A row starting with "1" is a gps correction formatted with + * N, E, D, qX, qY, qZ, qW + * Note that for GPS correction, we're only using the position not the + * rotation. The rotation is provided in the file for ground truth comparison. + * + * See usage: ./CombinedImuFactorsExample --help + */ + +#include + +// GTSAM related includes. 
+#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +using namespace gtsam; +using namespace std; + +using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz) +using symbol_shorthand::V; // Vel (xdot,ydot,zdot) +using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y) + +namespace po = boost::program_options; + +po::variables_map parseOptions(int argc, char* argv[]) { + po::options_description desc; + desc.add_options()("help,h", "produce help message")( + "data_csv_path", po::value()->default_value("imuAndGPSdata.csv"), + "path to the CSV file with the IMU data")( + "output_filename", + po::value()->default_value("imuFactorExampleResults.csv"), + "path to the result file to use")("use_isam", po::bool_switch(), + "use ISAM as the optimizer"); + + po::variables_map vm; + po::store(po::parse_command_line(argc, argv, desc), vm); + + if (vm.count("help")) { + cout << desc << "\n"; + exit(1); + } + + return vm; +} + +Vector10 readInitialState(ifstream& file) { + string value; + // Format is (N,E,D,qX,qY,qZ,qW,velN,velE,velD) + Vector10 initial_state; + getline(file, value, ','); // i + for (int i = 0; i < 9; i++) { + getline(file, value, ','); + initial_state(i) = stof(value.c_str()); + } + getline(file, value, '\n'); + initial_state(9) = stof(value.c_str()); + + return initial_state; +} + +boost::shared_ptr imuParams() { + // We use the sensor specs to build the noise model for the IMU factor. + double accel_noise_sigma = 0.0003924; + double gyro_noise_sigma = 0.000205689024915; + double accel_bias_rw_sigma = 0.004905; + double gyro_bias_rw_sigma = 0.000001454441043; + Matrix33 measured_acc_cov = I_3x3 * pow(accel_noise_sigma, 2); + Matrix33 measured_omega_cov = I_3x3 * pow(gyro_noise_sigma, 2); + Matrix33 integration_error_cov = + I_3x3 * 1e-8; // error committed in integrating position from velocities + Matrix33 bias_acc_cov = I_3x3 * pow(accel_bias_rw_sigma, 2); + Matrix33 bias_omega_cov = I_3x3 * pow(gyro_bias_rw_sigma, 2); + Matrix66 bias_acc_omega_int = + I_6x6 * 1e-5; // error in the bias used for preintegration + + auto p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0); + // PreintegrationBase params: + p->accelerometerCovariance = + measured_acc_cov; // acc white noise in continuous + p->integrationCovariance = + integration_error_cov; // integration uncertainty continuous + // should be using 2nd order integration + // PreintegratedRotation params: + p->gyroscopeCovariance = + measured_omega_cov; // gyro white noise in continuous + // PreintegrationCombinedMeasurements params: + p->biasAccCovariance = bias_acc_cov; // acc bias in continuous + p->biasOmegaCovariance = bias_omega_cov; // gyro bias in continuous + p->biasAccOmegaInt = bias_acc_omega_int; + + return p; +} + +int main(int argc, char* argv[]) { + string data_filename, output_filename; + po::variables_map var_map = parseOptions(argc, argv); + + data_filename = findExampleDataFile(var_map["data_csv_path"].as()); + output_filename = var_map["output_filename"].as(); + + // Set up output file for plotting errors + FILE* fp_out = fopen(output_filename.c_str(), "w+"); + fprintf(fp_out, + "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m),gt_qx," + "gt_qy,gt_qz,gt_qw\n"); + + // Begin parsing the CSV file. Input the first line for initialization. + // From there, we'll iterate through the file and we'll preintegrate the IMU + // or add in the GPS given the input. 
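+  // Structure of the graph built below: each GPS row closes one preintegration
+  // interval and adds a new state triplet X(i), V(i), B(i). A CombinedImuFactor
+  // ties it to the previous triplet using the IMU measurements accumulated since
+  // the last GPS row, and a GPSFactor constrains the new position; the priors
+  // added below anchor the very first triplet.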
+ ifstream file(data_filename.c_str()); + + Vector10 initial_state = readInitialState(file); + cout << "initial state:\n" << initial_state.transpose() << "\n\n"; + + // Assemble initial quaternion through GTSAM constructor + // ::Quaternion(w,x,y,z); + Rot3 prior_rotation = Rot3::Quaternion(initial_state(6), initial_state(3), + initial_state(4), initial_state(5)); + Point3 prior_point(initial_state.head<3>()); + Pose3 prior_pose(prior_rotation, prior_point); + Vector3 prior_velocity(initial_state.tail<3>()); + + imuBias::ConstantBias prior_imu_bias; // assume zero initial bias + + int index = 0; + + Values initial_values; + + // insert pose at initialization + initial_values.insert(X(index), prior_pose); + initial_values.insert(V(index), prior_velocity); + initial_values.insert(B(index), prior_imu_bias); + + // Assemble prior noise model and add it the graph.` + auto pose_noise_model = noiseModel::Diagonal::Sigmas( + (Vector(6) << 0.01, 0.01, 0.01, 0.5, 0.5, 0.5) + .finished()); // rad,rad,rad,m, m, m + auto velocity_noise_model = noiseModel::Isotropic::Sigma(3, 0.1); // m/s + auto bias_noise_model = noiseModel::Isotropic::Sigma(6, 1e-3); + + // Add all prior factors (pose, velocity, bias) to the graph. + NonlinearFactorGraph graph; + graph.addPrior(X(index), prior_pose, pose_noise_model); + graph.addPrior(V(index), prior_velocity, velocity_noise_model); + graph.addPrior(B(index), prior_imu_bias, + bias_noise_model); + + auto p = imuParams(); + + std::shared_ptr preintegrated = + std::make_shared(p, prior_imu_bias); + + assert(preintegrated); + + // Store previous state for imu integration and latest predicted outcome. + NavState prev_state(prior_pose, prior_velocity); + NavState prop_state = prev_state; + imuBias::ConstantBias prev_bias = prior_imu_bias; + + // Keep track of total error over the entire run as simple performance metric. + double current_position_error = 0.0, current_orientation_error = 0.0; + + double output_time = 0.0; + double dt = 0.005; // The real system has noise, but here, results are nearly + // exactly the same, so keeping this for simplicity. + + // All priors have been set up, now iterate through the data file. + while (file.good()) { + // Parse out first value + string value; + getline(file, value, ','); + int type = stoi(value.c_str()); + + if (type == 0) { // IMU measurement + Vector6 imu; + for (int i = 0; i < 5; ++i) { + getline(file, value, ','); + imu(i) = stof(value.c_str()); + } + getline(file, value, '\n'); + imu(5) = stof(value.c_str()); + + // Adding the IMU preintegration. + preintegrated->integrateMeasurement(imu.head<3>(), imu.tail<3>(), dt); + + } else if (type == 1) { // GPS measurement + Vector7 gps; + for (int i = 0; i < 6; ++i) { + getline(file, value, ','); + gps(i) = stof(value.c_str()); + } + getline(file, value, '\n'); + gps(6) = stof(value.c_str()); + + index++; + + // Adding IMU factor and GPS factor and optimizing. + auto preint_imu_combined = + dynamic_cast( + *preintegrated); + CombinedImuFactor imu_factor(X(index - 1), V(index - 1), X(index), + V(index), B(index - 1), B(index), + preint_imu_combined); + graph.add(imu_factor); + + auto correction_noise = noiseModel::Isotropic::Sigma(3, 1.0); + GPSFactor gps_factor(X(index), + Point3(gps(0), // N, + gps(1), // E, + gps(2)), // D, + correction_noise); + graph.add(gps_factor); + + // Now optimize and compare results. 
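+      // predict() propagates the last optimized state through the IMU measurements
+      // accumulated since the previous GPS row; the result only seeds the new
+      // initial values, and Levenberg-Marquardt below refines the whole graph now
+      // that the new IMU and GPS factors are in place.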
+ prop_state = preintegrated->predict(prev_state, prev_bias); + initial_values.insert(X(index), prop_state.pose()); + initial_values.insert(V(index), prop_state.v()); + initial_values.insert(B(index), prev_bias); + + LevenbergMarquardtParams params; + params.setVerbosityLM("SUMMARY"); + LevenbergMarquardtOptimizer optimizer(graph, initial_values, params); + Values result = optimizer.optimize(); + + // Overwrite the beginning of the preintegration for the next step. + prev_state = + NavState(result.at(X(index)), result.at(V(index))); + prev_bias = result.at(B(index)); + + // Reset the preintegration object. + preintegrated->resetIntegrationAndSetBias(prev_bias); + + // Print out the position and orientation error for comparison. + Vector3 result_position = prev_state.pose().translation(); + Vector3 position_error = result_position - gps.head<3>(); + current_position_error = position_error.norm(); + + Quaternion result_quat = prev_state.pose().rotation().toQuaternion(); + Quaternion gps_quat(gps(6), gps(3), gps(4), gps(5)); + Quaternion quat_error = result_quat * gps_quat.inverse(); + quat_error.normalize(); + Vector3 euler_angle_error(quat_error.x() * 2, quat_error.y() * 2, + quat_error.z() * 2); + current_orientation_error = euler_angle_error.norm(); + + // display statistics + cout << "Position error:" << current_position_error << "\t " + << "Angular error:" << current_orientation_error << "\n" + << endl; + + fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n", + output_time, result_position(0), result_position(1), + result_position(2), result_quat.x(), result_quat.y(), + result_quat.z(), result_quat.w(), gps(0), gps(1), gps(2), + gps_quat.x(), gps_quat.y(), gps_quat.z(), gps_quat.w()); + + output_time += 1.0; + + } else { + cerr << "ERROR parsing file\n"; + return 1; + } + } + fclose(fp_out); + cout << "Complete, results written to " << output_filename << "\n\n"; + + return 0; +} diff --git a/examples/Data/Klaus3.g2o b/examples/Data/Klaus3.g2o new file mode 100644 index 000000000..83a6e6fd2 --- /dev/null +++ b/examples/Data/Klaus3.g2o @@ -0,0 +1,6 @@ +VERTEX_SE3:QUAT 0 -1.6618596980158338 -0.5736497760548741 -3.3319774096611026 -0.02676080288219576 -0.024497002638379624 -0.015064701622500615 0.9992281076190063 +VERTEX_SE3:QUAT 1 -1.431820463019384 -0.549139761976065 -3.160677992237872 -0.049543805396343954 -0.03232420352077356 -0.004386230477751116 0.998239108728862 +VERTEX_SE3:QUAT 2 -1.0394840214436651 -0.5268841046291037 -2.972143862665523 -0.07993768981394891 0.0825062894866454 -0.04088089479075661 0.9925378735259738 +EDGE_SE3:QUAT 0 1 0.23003923499644974 0.02451001407880915 0.17129941742323052 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 +EDGE_SE3:QUAT 0 2 0.6223756765721686 0.04676567142577037 0.35983354699557957 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 +EDGE_SE3:QUAT 1 2 0.3923364415757189 0.022255657346961222 0.18853412957234905 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0 diff --git a/examples/Data/example_with_vertices.g2o b/examples/Data/example_with_vertices.g2o new file mode 100644 index 000000000..ca7cd86df --- /dev/null +++ b/examples/Data/example_with_vertices.g2o @@ -0,0 
+1,16 @@ +VERTEX_SE3:QUAT 0 40 -1.15443e-13 10 0.557345 0.557345 -0.435162 -0.435162 +VERTEX_SE3:QUAT 1 28.2843 28.2843 10 0.301633 0.728207 -0.568567 -0.235508 +VERTEX_SE3:QUAT 2 -1.6986e-08 40 10 -3.89609e-10 0.788205 -0.615412 -2.07622e-10 +VERTEX_SE3:QUAT 3 -28.2843 28.2843 10 -0.301633 0.728207 -0.568567 0.235508 +VERTEX_SE3:QUAT 4 -40 -2.32554e-10 10 -0.557345 0.557345 -0.435162 0.435162 +VERTEX_SE3:QUAT 5 -28.2843 -28.2843 10 -0.728207 0.301633 -0.235508 0.568567 +VERTEX_SE3:QUAT 6 -2.53531e-09 -40 10 -0.788205 -1.25891e-11 -3.82742e-13 0.615412 +VERTEX_SE3:QUAT 7 28.2843 -28.2843 10 -0.728207 -0.301633 0.235508 0.568567 +VERTEX_TRACKXYZ 0 10 10 10 +VERTEX_TRACKXYZ 1 -10 10 10 +VERTEX_TRACKXYZ 2 -10 -10 10 +VERTEX_TRACKXYZ 3 10 -10 10 +VERTEX_TRACKXYZ 4 10 10 -10 +VERTEX_TRACKXYZ 5 -10 10 -10 +VERTEX_TRACKXYZ 6 -10 -10 -10 +VERTEX_TRACKXYZ 7 10 -10 -10 diff --git a/examples/Data/toyExample.g2o b/examples/Data/toyExample.g2o new file mode 100755 index 000000000..5ff1ba74a --- /dev/null +++ b/examples/Data/toyExample.g2o @@ -0,0 +1,11 @@ +VERTEX_SE3:QUAT 0 0 0 0 0 0 0 1 +VERTEX_SE3:QUAT 1 0 0 0 0 0 0 1 +VERTEX_SE3:QUAT 2 0 0 0 0.00499994 0.00499994 0.00499994 0.999963 +VERTEX_SE3:QUAT 3 0 0 0 -0.00499994 -0.00499994 -0.00499994 0.999963 +VERTEX_SE3:QUAT 4 0 0 0 0.00499994 0.00499994 0.00499994 0.999963 +EDGE_SE3:QUAT 1 2 1 2 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 +EDGE_SE3:QUAT 2 3 -3.26795e-07 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 +EDGE_SE3:QUAT 3 4 1 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 +EDGE_SE3:QUAT 3 1 6.9282e-07 2 0 0 0 1 1.73205e-07 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 +EDGE_SE3:QUAT 1 4 -1 1 0 0 0 -0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 +EDGE_SE3:QUAT 0 1 0 0 0 0 0 0 1 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100 diff --git a/examples/DiscreteBayesNetExample.cpp b/examples/DiscreteBayesNetExample.cpp new file mode 100644 index 000000000..5dca116c3 --- /dev/null +++ b/examples/DiscreteBayesNetExample.cpp @@ -0,0 +1,83 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file DiscreteBayesNetExample.cpp + * @brief Discrete Bayes Net example with famous Asia Bayes Network + * @author Frank Dellaert + * @date JULY 10, 2020 + */ + +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +int main(int argc, char **argv) { + DiscreteBayesNet asia; + DiscreteKey Asia(0, 2), Smoking(4, 2), Tuberculosis(3, 2), LungCancer(6, 2), + Bronchitis(7, 2), Either(5, 2), XRay(2, 2), Dyspnea(1, 2); + asia.add(Asia % "99/1"); + asia.add(Smoking % "50/50"); + + asia.add(Tuberculosis | Asia = "99/1 95/5"); + asia.add(LungCancer | Smoking = "99/1 90/10"); + asia.add(Bronchitis | Smoking = "70/30 40/60"); + + asia.add((Either | Tuberculosis, LungCancer) = "F T T T"); + + asia.add(XRay | Either = "95/5 2/98"); + asia.add((Dyspnea | Either, Bronchitis) = "9/1 2/8 3/7 1/9"); + + // print + vector pretty = {"Asia", "Dyspnea", "XRay", "Tuberculosis", + "Smoking", "Either", "LungCancer", "Bronchitis"}; + auto formatter = [pretty](Key key) { return pretty[key]; }; + asia.print("Asia", formatter); + + // Convert to factor graph + DiscreteFactorGraph fg(asia); + + // Create solver and eliminate + Ordering ordering; + ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7); + DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering); + + // solve + DiscreteFactor::sharedValues mpe = chordal->optimize(); + GTSAM_PRINT(*mpe); + + // We can also build a Bayes tree (directed junction tree). + // The elimination order above will do fine: + auto bayesTree = fg.eliminateMultifrontal(ordering); + bayesTree->print("bayesTree", formatter); + + // add evidence, we were in Asia and we have dyspnea + fg.add(Asia, "0 1"); + fg.add(Dyspnea, "0 1"); + + // solve again, now with evidence + DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering); + DiscreteFactor::sharedValues mpe2 = chordal2->optimize(); + GTSAM_PRINT(*mpe2); + + // We can also sample from it + cout << "\n10 samples:" << endl; + for (size_t i = 0; i < 10; i++) { + DiscreteFactor::sharedValues sample = chordal2->sample(); + GTSAM_PRINT(*sample); + } + return 0; +} diff --git a/examples/DiscreteBayesNet_FG.cpp b/examples/DiscreteBayesNet_FG.cpp index 6eb08c12e..121df4bef 100644 --- a/examples/DiscreteBayesNet_FG.cpp +++ b/examples/DiscreteBayesNet_FG.cpp @@ -15,105 +15,106 @@ * @author Abhijit * @date Jun 4, 2012 * - * We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009, p529] - * You may be familiar with other graphical model packages like BNT (available - * at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this is used as an - * example. The following demo is same as that in the above link, except that - * everything is using GTSAM. + * We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009, + * p529] You may be familiar with other graphical model packages like BNT + * (available at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this + * is used as an example. The following demo is same as that in the above link, + * except that everything is using GTSAM. 
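+ * In this revision the older DiscreteSequentialSolver calls are replaced by
+ * DiscreteFactorGraph::eliminateSequential and DiscreteMarginals queries.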
*/ #include -#include +#include + #include using namespace std; using namespace gtsam; int main(int argc, char **argv) { + // Define keys and a print function + Key C(1), S(2), R(3), W(4); + auto print = [=](DiscreteFactor::sharedValues values) { + cout << boolalpha << "Cloudy = " << static_cast((*values)[C]) + << " Sprinkler = " << static_cast((*values)[S]) + << " Rain = " << boolalpha << static_cast((*values)[R]) + << " WetGrass = " << static_cast((*values)[W]) << endl; + }; // We assume binary state variables // we have 0 == "False" and 1 == "True" const size_t nrStates = 2; // define variables - DiscreteKey Cloudy(1, nrStates), Sprinkler(2, nrStates), Rain(3, nrStates), - WetGrass(4, nrStates); + DiscreteKey Cloudy(C, nrStates), Sprinkler(S, nrStates), Rain(R, nrStates), + WetGrass(W, nrStates); // create Factor Graph of the bayes net DiscreteFactorGraph graph; // add factors - graph.add(Cloudy, "0.5 0.5"); //P(Cloudy) - graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1"); //P(Sprinkler | Cloudy) - graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8"); //P(Rain | Cloudy) + graph.add(Cloudy, "0.5 0.5"); // P(Cloudy) + graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1"); // P(Sprinkler | Cloudy) + graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8"); // P(Rain | Cloudy) graph.add(Sprinkler & Rain & WetGrass, - "1 0 0.1 0.9 0.1 0.9 0.001 0.99"); //P(WetGrass | Sprinkler, Rain) + "1 0 0.1 0.9 0.1 0.9 0.001 0.99"); // P(WetGrass | Sprinkler, Rain) - // Alternatively we can also create a DiscreteBayesNet, add DiscreteConditional - // factors and create a FactorGraph from it. (See testDiscreteBayesNet.cpp) + // Alternatively we can also create a DiscreteBayesNet, add + // DiscreteConditional factors and create a FactorGraph from it. (See + // testDiscreteBayesNet.cpp) // Since this is a relatively small distribution, we can as well print // the whole distribution.. cout << "Distribution of Example: " << endl; cout << setw(11) << "Cloudy(C)" << setw(14) << "Sprinkler(S)" << setw(10) - << "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)" - << endl; + << "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)" + << endl; for (size_t a = 0; a < nrStates; a++) for (size_t m = 0; m < nrStates; m++) for (size_t h = 0; h < nrStates; h++) for (size_t c = 0; c < nrStates; c++) { DiscreteFactor::Values values; - values[Cloudy.first] = c; - values[Sprinkler.first] = h; - values[Rain.first] = m; - values[WetGrass.first] = a; + values[C] = c; + values[S] = h; + values[R] = m; + values[W] = a; double prodPot = graph(values); - cout << boolalpha << setw(8) << (bool) c << setw(14) - << (bool) h << setw(12) << (bool) m << setw(13) - << (bool) a << setw(16) << prodPot << endl; + cout << setw(8) << static_cast(c) << setw(14) + << static_cast(h) << setw(12) << static_cast(m) + << setw(13) << static_cast(a) << setw(16) << prodPot + << endl; } - // "Most Probable Explanation", i.e., configuration with largest value - DiscreteSequentialSolver solver(graph); - DiscreteFactor::sharedValues optimalDecoding = solver.optimize(); - cout <<"\nMost Probable Explanation (MPE):" << endl; - cout << boolalpha << "Cloudy = " << (bool)(*optimalDecoding)[Cloudy.first] - << " Sprinkler = " << (bool)(*optimalDecoding)[Sprinkler.first] - << " Rain = " << boolalpha << (bool)(*optimalDecoding)[Rain.first] - << " WetGrass = " << (bool)(*optimalDecoding)[WetGrass.first]<< endl; - - - // "Inference" We show an inference query like: probability that the Sprinkler was on; - // given that the grass is wet i.e. P( S | W=1) =? 
- cout << "\nInference Query: Probability of Sprinkler being on given Grass is Wet" << endl; - - // Method 1: we can compute the joint marginal P(S,W) and from that we can compute - // P(S | W=1) = P(S,W=1)/P(W=1) We do this in following three steps.. - - //Step1: Compute P(S,W) - DiscreteFactorGraph jointFG; - jointFG = *solver.jointFactorGraph(DiscreteKeys(Sprinkler & WetGrass).indices()); - DecisionTreeFactor probSW = jointFG.product(); - - //Step2: Compute P(W) - DiscreteFactor::shared_ptr probW = solver.marginalFactor(WetGrass.first); - - //Step3: Computer P(S | W=1) = P(S,W=1)/P(W=1) - DiscreteFactor::Values values; - values[WetGrass.first] = 1; - - //print P(S=0|W=1) - values[Sprinkler.first] = 0; - cout << "P(S=0|W=1) = " << probSW(values)/(*probW)(values) << endl; - - //print P(S=1|W=1) - values[Sprinkler.first] = 1; - cout << "P(S=1|W=1) = " << probSW(values)/(*probW)(values) << endl; - - // TODO: Method 2 : One way is to modify the factor graph to - // incorporate the evidence node and compute the marginal - // TODO: graph.addEvidence(Cloudy,0); - + DiscreteFactor::sharedValues mpe = graph.eliminateSequential()->optimize(); + cout << "\nMost Probable Explanation (MPE):" << endl; + print(mpe); + + // "Inference" We show an inference query like: probability that the Sprinkler + // was on; given that the grass is wet i.e. P( S | C=0) = ? + + // add evidence that it is not Cloudy + graph.add(Cloudy, "1 0"); + + // solve again, now with evidence + DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential(); + DiscreteFactor::sharedValues mpe_with_evidence = chordal->optimize(); + + cout << "\nMPE given C=0:" << endl; + print(mpe_with_evidence); + + // we can also calculate arbitrary marginals: + DiscreteMarginals marginals(graph); + cout << "\nP(S=1|C=0):" << marginals.marginalProbabilities(Sprinkler)[1] + << endl; + cout << "\nP(R=0|C=0):" << marginals.marginalProbabilities(Rain)[0] << endl; + cout << "\nP(W=1|C=0):" << marginals.marginalProbabilities(WetGrass)[1] + << endl; + + // We can also sample from it + cout << "\n10 samples:" << endl; + for (size_t i = 0; i < 10; i++) { + DiscreteFactor::sharedValues sample = chordal->sample(); + print(sample); + } return 0; } diff --git a/examples/HMMExample.cpp b/examples/HMMExample.cpp new file mode 100644 index 000000000..ee861e381 --- /dev/null +++ b/examples/HMMExample.cpp @@ -0,0 +1,94 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file DiscreteBayesNetExample.cpp + * @brief Hidden Markov Model example, discrete. 
+ * @author Frank Dellaert + * @date July 12, 2020 + */ + +#include +#include +#include + +#include +#include + +using namespace std; +using namespace gtsam; + +int main(int argc, char **argv) { + const int nrNodes = 4; + const size_t nrStates = 3; + + // Define variables as well as ordering + Ordering ordering; + vector keys; + for (int k = 0; k < nrNodes; k++) { + DiscreteKey key_i(k, nrStates); + keys.push_back(key_i); + ordering.emplace_back(k); + } + + // Create HMM as a DiscreteBayesNet + DiscreteBayesNet hmm; + + // Define backbone + const string transition = "8/1/1 1/8/1 1/1/8"; + for (int k = 1; k < nrNodes; k++) { + hmm.add(keys[k] | keys[k - 1] = transition); + } + + // Add some measurements, not needed for all time steps! + hmm.add(keys[0] % "7/2/1"); + hmm.add(keys[1] % "1/9/0"); + hmm.add(keys.back() % "5/4/1"); + + // print + hmm.print("HMM"); + + // Convert to factor graph + DiscreteFactorGraph factorGraph(hmm); + + // Create solver and eliminate + // This will create a DAG ordered with arrow of time reversed + DiscreteBayesNet::shared_ptr chordal = + factorGraph.eliminateSequential(ordering); + chordal->print("Eliminated"); + + // solve + DiscreteFactor::sharedValues mpe = chordal->optimize(); + GTSAM_PRINT(*mpe); + + // We can also sample from it + cout << "\n10 samples:" << endl; + for (size_t k = 0; k < 10; k++) { + DiscreteFactor::sharedValues sample = chordal->sample(); + GTSAM_PRINT(*sample); + } + + // Or compute the marginals. This re-eliminates the FG into a Bayes tree + cout << "\nComputing Node Marginals .." << endl; + DiscreteMarginals marginals(factorGraph); + for (int k = 0; k < nrNodes; k++) { + Vector margProbs = marginals.marginalProbabilities(keys[k]); + stringstream ss; + ss << "marginal " << k; + print(margProbs, ss.str()); + } + + // TODO(frank): put in the glue to have DiscreteMarginals produce *arbitrary* + // joints efficiently, by the Bayes tree shortcut magic. All the code is there + // but it's not yet connected. + + return 0; +} diff --git a/examples/IMUKittiExampleGPS.cpp b/examples/IMUKittiExampleGPS.cpp new file mode 100644 index 000000000..e2ca49647 --- /dev/null +++ b/examples/IMUKittiExampleGPS.cpp @@ -0,0 +1,359 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file IMUKittiExampleGPS + * @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE + * @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ Electronics + */ + +// GTSAM related includes. 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +using namespace std; +using namespace gtsam; + +using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y) +using symbol_shorthand::V; // Vel (xdot,ydot,zdot) +using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz) + +struct KittiCalibration { + double body_ptx; + double body_pty; + double body_ptz; + double body_prx; + double body_pry; + double body_prz; + double accelerometer_sigma; + double gyroscope_sigma; + double integration_sigma; + double accelerometer_bias_sigma; + double gyroscope_bias_sigma; + double average_delta_t; +}; + +struct ImuMeasurement { + double time; + double dt; + Vector3 accelerometer; + Vector3 gyroscope; // omega +}; + +struct GpsMeasurement { + double time; + Vector3 position; // x,y,z +}; + +const string output_filename = "IMUKittiExampleGPSResults.csv"; + +void loadKittiData(KittiCalibration& kitti_calibration, + vector& imu_measurements, + vector& gps_measurements) { + string line; + + // Read IMU metadata and compute relative sensor pose transforms + // BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma GyroscopeSigma IntegrationSigma + // AccelerometerBiasSigma GyroscopeBiasSigma AverageDeltaT + string imu_metadata_file = findExampleDataFile("KittiEquivBiasedImu_metadata.txt"); + ifstream imu_metadata(imu_metadata_file.c_str()); + + printf("-- Reading sensor metadata\n"); + + getline(imu_metadata, line, '\n'); // ignore the first line + + // Load Kitti calibration + getline(imu_metadata, line, '\n'); + sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf", + &kitti_calibration.body_ptx, + &kitti_calibration.body_pty, + &kitti_calibration.body_ptz, + &kitti_calibration.body_prx, + &kitti_calibration.body_pry, + &kitti_calibration.body_prz, + &kitti_calibration.accelerometer_sigma, + &kitti_calibration.gyroscope_sigma, + &kitti_calibration.integration_sigma, + &kitti_calibration.accelerometer_bias_sigma, + &kitti_calibration.gyroscope_bias_sigma, + &kitti_calibration.average_delta_t); + printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n", + kitti_calibration.body_ptx, + kitti_calibration.body_pty, + kitti_calibration.body_ptz, + kitti_calibration.body_prx, + kitti_calibration.body_pry, + kitti_calibration.body_prz, + kitti_calibration.accelerometer_sigma, + kitti_calibration.gyroscope_sigma, + kitti_calibration.integration_sigma, + kitti_calibration.accelerometer_bias_sigma, + kitti_calibration.gyroscope_bias_sigma, + kitti_calibration.average_delta_t); + + // Read IMU data + // Time dt accelX accelY accelZ omegaX omegaY omegaZ + string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt"); + printf("-- Reading IMU measurements from file\n"); + { + ifstream imu_data(imu_data_file.c_str()); + getline(imu_data, line, '\n'); // ignore the first line + + double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0, gyro_y = 0, gyro_z = 0; + while (!imu_data.eof()) { + getline(imu_data, line, '\n'); + sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf", + &time, &dt, + &acc_x, &acc_y, &acc_z, + &gyro_x, &gyro_y, &gyro_z); + + ImuMeasurement measurement; + measurement.time = time; + measurement.dt = dt; + measurement.accelerometer = Vector3(acc_x, acc_y, acc_z); + measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z); + imu_measurements.push_back(measurement); + } + } + + // Read GPS data + // Time,X,Y,Z + string gps_data_file = 
findExampleDataFile("KittiGps_converted.txt"); + printf("-- Reading GPS measurements from file\n"); + { + ifstream gps_data(gps_data_file.c_str()); + getline(gps_data, line, '\n'); // ignore the first line + + double time = 0, gps_x = 0, gps_y = 0, gps_z = 0; + while (!gps_data.eof()) { + getline(gps_data, line, '\n'); + sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z); + + GpsMeasurement measurement; + measurement.time = time; + measurement.position = Vector3(gps_x, gps_y, gps_z); + gps_measurements.push_back(measurement); + } + } +} + +int main(int argc, char* argv[]) { + KittiCalibration kitti_calibration; + vector imu_measurements; + vector gps_measurements; + loadKittiData(kitti_calibration, imu_measurements, gps_measurements); + + Vector6 BodyP = (Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty, kitti_calibration.body_ptz, + kitti_calibration.body_prx, kitti_calibration.body_pry, kitti_calibration.body_prz) + .finished(); + auto body_T_imu = Pose3::Expmap(BodyP); + if (!body_T_imu.equals(Pose3(), 1e-5)) { + printf("Currently only support IMUinBody is identity, i.e. IMU and body frame are the same"); + exit(-1); + } + + // Configure different variables + // double t_offset = gps_measurements[0].time; + size_t first_gps_pose = 1; + size_t gps_skip = 10; // Skip this many GPS measurements each time + double g = 9.8; + auto w_coriolis = Vector3::Zero(); // zero vector + + // Configure noise models + auto noise_model_gps = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0), + Vector3::Constant(1.0/0.07)) + .finished()); + + // Set initial conditions for the estimated trajectory + // initial pose is the reference frame (navigation frame) + auto current_pose_global = Pose3(Rot3(), gps_measurements[first_gps_pose].position); + // the vehicle is stationary at the beginning at position 0,0,0 + Vector3 current_velocity_global = Vector3::Zero(); + auto current_bias = imuBias::ConstantBias(); // init with zero bias + + auto sigma_init_x = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0), + Vector3::Constant(1.0)) + .finished()); + auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0)); + auto sigma_init_b = noiseModel::Diagonal::Sigmas((Vector6() << Vector3::Constant(0.100), + Vector3::Constant(5.00e-05)) + .finished()); + + // Set IMU preintegration parameters + Matrix33 measured_acc_cov = I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2); + Matrix33 measured_omega_cov = I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2); + // error committed in integrating position from velocities + Matrix33 integration_error_cov = I_3x3 * pow(kitti_calibration.integration_sigma, 2); + + auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g); + imu_params->accelerometerCovariance = measured_acc_cov; // acc white noise in continuous + imu_params->integrationCovariance = integration_error_cov; // integration uncertainty continuous + imu_params->gyroscopeCovariance = measured_omega_cov; // gyro white noise in continuous + imu_params->omegaCoriolis = w_coriolis; + + std::shared_ptr current_summarized_measurement = nullptr; + + // Set ISAM2 parameters and create ISAM2 solver object + ISAM2Params isam_params; + isam_params.factorization = ISAM2Params::CHOLESKY; + isam_params.relinearizeSkip = 10; + + ISAM2 isam(isam_params); + + // Create the factor graph and values object that will store new factors and values to add to the incremental graph + NonlinearFactorGraph new_factors; + Values new_values; // 
values storing the initial estimates of new nodes in the factor graph + + /// Main loop: + /// (1) we read the measurements + /// (2) we create the corresponding factors in the graph + /// (3) we solve the graph to obtain and optimal estimate of robot trajectory + printf("-- Starting main loop: inference is performed at each time step, but we plot trajectory every 10 steps\n"); + size_t j = 0; + for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) { + // At each non=IMU measurement we initialize a new node in the graph + auto current_pose_key = X(i); + auto current_vel_key = V(i); + auto current_bias_key = B(i); + double t = gps_measurements[i].time; + + if (i == first_gps_pose) { + // Create initial estimate and prior on initial pose, velocity, and biases + new_values.insert(current_pose_key, current_pose_global); + new_values.insert(current_vel_key, current_velocity_global); + new_values.insert(current_bias_key, current_bias); + new_factors.emplace_shared>(current_pose_key, current_pose_global, sigma_init_x); + new_factors.emplace_shared>(current_vel_key, current_velocity_global, sigma_init_v); + new_factors.emplace_shared>(current_bias_key, current_bias, sigma_init_b); + } else { + double t_previous = gps_measurements[i-1].time; + + // Summarize IMU data between the previous GPS measurement and now + current_summarized_measurement = std::make_shared(imu_params, current_bias); + static size_t included_imu_measurement_count = 0; + while (j < imu_measurements.size() && imu_measurements[j].time <= t) { + if (imu_measurements[j].time >= t_previous) { + current_summarized_measurement->integrateMeasurement(imu_measurements[j].accelerometer, + imu_measurements[j].gyroscope, + imu_measurements[j].dt); + included_imu_measurement_count++; + } + j++; + } + + // Create IMU factor + auto previous_pose_key = X(i-1); + auto previous_vel_key = V(i-1); + auto previous_bias_key = B(i-1); + + new_factors.emplace_shared(previous_pose_key, previous_vel_key, + current_pose_key, current_vel_key, + previous_bias_key, *current_summarized_measurement); + + // Bias evolution as given in the IMU metadata + auto sigma_between_b = noiseModel::Diagonal::Sigmas((Vector6() << + Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.accelerometer_bias_sigma), + Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.gyroscope_bias_sigma)) + .finished()); + new_factors.emplace_shared>(previous_bias_key, + current_bias_key, + imuBias::ConstantBias(), + sigma_between_b); + + // Create GPS factor + auto gps_pose = Pose3(current_pose_global.rotation(), gps_measurements[i].position); + if ((i % gps_skip) == 0) { + new_factors.emplace_shared>(current_pose_key, gps_pose, noise_model_gps); + new_values.insert(current_pose_key, gps_pose); + + printf("################ POSE INCLUDED AT TIME %lf ################\n", t); + cout << gps_pose.translation(); + printf("\n\n"); + } else { + new_values.insert(current_pose_key, current_pose_global); + } + + // Add initial values for velocity and bias based on the previous estimates + new_values.insert(current_vel_key, current_velocity_global); + new_values.insert(current_bias_key, current_bias); + + // Update solver + // ======================================================================= + // We accumulate 2*GPSskip GPS measurements before updating the solver at + // first so that the heading becomes observable. 
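+      // Each isam.update() call only receives the factors and values created since
+      // the previous call; calculateEstimate() returns the current solution, which
+      // re-seeds the pose, velocity, and bias used for the next preintegration
+      // interval.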
+ if (i > (first_gps_pose + 2*gps_skip)) { + printf("################ NEW FACTORS AT TIME %lf ################\n", t); + new_factors.print(); + + isam.update(new_factors, new_values); + + // Reset the newFactors and newValues list + new_factors.resize(0); + new_values.clear(); + + // Extract the result/current estimates + Values result = isam.calculateEstimate(); + + current_pose_global = result.at(current_pose_key); + current_velocity_global = result.at(current_vel_key); + current_bias = result.at(current_bias_key); + + printf("\n################ POSE AT TIME %lf ################\n", t); + current_pose_global.print(); + printf("\n\n"); + } + } + } + + // Save results to file + printf("\nWriting results to file...\n"); + FILE* fp_out = fopen(output_filename.c_str(), "w+"); + fprintf(fp_out, "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n"); + + Values result = isam.calculateEstimate(); + for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) { + auto pose_key = X(i); + auto vel_key = V(i); + auto bias_key = B(i); + + auto pose = result.at(pose_key); + auto velocity = result.at(vel_key); + auto bias = result.at(bias_key); + + auto pose_quat = pose.rotation().toQuaternion(); + auto gps = gps_measurements[i].position; + + cout << "State at #" << i << endl; + cout << "Pose:" << endl << pose << endl; + cout << "Velocity:" << endl << velocity << endl; + cout << "Bias:" << endl << bias << endl; + + fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n", + gps_measurements[i].time, + pose.x(), pose.y(), pose.z(), + pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(), + gps(0), gps(1), gps(2)); + } + + fclose(fp_out); +} diff --git a/examples/ImuFactorsExample.cpp b/examples/ImuFactorsExample.cpp index a4707ea46..793927d7e 100644 --- a/examples/ImuFactorsExample.cpp +++ b/examples/ImuFactorsExample.cpp @@ -10,7 +10,7 @@ * -------------------------------------------------------------------------- */ /** - * @file imuFactorsExample + * @file ImuFactorsExample * @brief Test example for using GTSAM ImuFactor and ImuCombinedFactor * navigation code. * @author Garrett (ghemann@gmail.com), Luca Carlone @@ -31,17 +31,17 @@ * Note that for GPS correction, we're only using the position not the * rotation. The rotation is provided in the file for ground truth comparison. * - * Usage: ./ImuFactorsExample [data_csv_path] [-c] - * optional arguments: - * data_csv_path path to the CSV file with the IMU data. - * -c use CombinedImuFactor + * See usage: ./ImuFactorsExample --help */ +#include + // GTSAM related includes. 
#include #include #include #include +#include #include #include #include @@ -58,34 +58,87 @@ using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz) using symbol_shorthand::V; // Vel (xdot,ydot,zdot) using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y) -static const char output_filename[] = "imuFactorExampleResults.csv"; -static const char use_combined_imu_flag[3] = "-c"; +namespace po = boost::program_options; + +po::variables_map parseOptions(int argc, char* argv[]) { + po::options_description desc; + desc.add_options()("help,h", "produce help message")( + "data_csv_path", po::value()->default_value("imuAndGPSdata.csv"), + "path to the CSV file with the IMU data")( + "output_filename", + po::value()->default_value("imuFactorExampleResults.csv"), + "path to the result file to use")("use_isam", po::bool_switch(), + "use ISAM as the optimizer"); + + po::variables_map vm; + po::store(po::parse_command_line(argc, argv, desc), vm); + + if (vm.count("help")) { + cout << desc << "\n"; + exit(1); + } + + return vm; +} + +boost::shared_ptr imuParams() { + // We use the sensor specs to build the noise model for the IMU factor. + double accel_noise_sigma = 0.0003924; + double gyro_noise_sigma = 0.000205689024915; + double accel_bias_rw_sigma = 0.004905; + double gyro_bias_rw_sigma = 0.000001454441043; + Matrix33 measured_acc_cov = I_3x3 * pow(accel_noise_sigma, 2); + Matrix33 measured_omega_cov = I_3x3 * pow(gyro_noise_sigma, 2); + Matrix33 integration_error_cov = + I_3x3 * 1e-8; // error committed in integrating position from velocities + Matrix33 bias_acc_cov = I_3x3 * pow(accel_bias_rw_sigma, 2); + Matrix33 bias_omega_cov = I_3x3 * pow(gyro_bias_rw_sigma, 2); + Matrix66 bias_acc_omega_int = + I_6x6 * 1e-5; // error in the bias used for preintegration + + auto p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0); + // PreintegrationBase params: + p->accelerometerCovariance = + measured_acc_cov; // acc white noise in continuous + p->integrationCovariance = + integration_error_cov; // integration uncertainty continuous + // should be using 2nd order integration + // PreintegratedRotation params: + p->gyroscopeCovariance = + measured_omega_cov; // gyro white noise in continuous + // PreintegrationCombinedMeasurements params: + p->biasAccCovariance = bias_acc_cov; // acc bias in continuous + p->biasOmegaCovariance = bias_omega_cov; // gyro bias in continuous + p->biasAccOmegaInt = bias_acc_omega_int; + + return p; +} int main(int argc, char* argv[]) { - string data_filename; - bool use_combined_imu = false; - if (argc < 2) { - printf("using default CSV file\n"); - data_filename = findExampleDataFile("imuAndGPSdata.csv"); - } else if (argc < 3) { - if (strcmp(argv[1], use_combined_imu_flag) == 0) { - printf("using CombinedImuFactor\n"); - use_combined_imu = true; - printf("using default CSV file\n"); - data_filename = findExampleDataFile("imuAndGPSdata.csv"); - } else { - data_filename = argv[1]; - } + string data_filename, output_filename; + + bool use_isam = false; + + po::variables_map var_map = parseOptions(argc, argv); + + data_filename = findExampleDataFile(var_map["data_csv_path"].as()); + output_filename = var_map["output_filename"].as(); + use_isam = var_map["use_isam"].as(); + + ISAM2* isam2 = 0; + if (use_isam) { + printf("Using ISAM2\n"); + ISAM2Params parameters; + parameters.relinearizeThreshold = 0.01; + parameters.relinearizeSkip = 1; + isam2 = new ISAM2(parameters); + } else { - data_filename = argv[1]; - if (strcmp(argv[2], use_combined_imu_flag) == 0) { - printf("using 
CombinedImuFactor\n"); - use_combined_imu = true; - } + printf("Using Levenberg Marquardt Optimizer\n"); } // Set up output file for plotting errors - FILE* fp_out = fopen(output_filename, "w+"); + FILE* fp_out = fopen(output_filename.c_str(), "w+"); fprintf(fp_out, "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m),gt_qx," "gt_qy,gt_qz,gt_qw\n"); @@ -101,10 +154,10 @@ int main(int argc, char* argv[]) { getline(file, value, ','); // i for (int i = 0; i < 9; i++) { getline(file, value, ','); - initial_state(i) = atof(value.c_str()); + initial_state(i) = stof(value.c_str()); } getline(file, value, '\n'); - initial_state(9) = atof(value.c_str()); + initial_state(9) = stof(value.c_str()); cout << "initial state:\n" << initial_state.transpose() << "\n\n"; // Assemble initial quaternion through GTSAM constructor @@ -135,43 +188,11 @@ int main(int argc, char* argv[]) { graph->addPrior(V(correction_count), prior_velocity, velocity_noise_model); graph->addPrior(B(correction_count), prior_imu_bias, bias_noise_model); - // We use the sensor specs to build the noise model for the IMU factor. - double accel_noise_sigma = 0.0003924; - double gyro_noise_sigma = 0.000205689024915; - double accel_bias_rw_sigma = 0.004905; - double gyro_bias_rw_sigma = 0.000001454441043; - Matrix33 measured_acc_cov = I_3x3 * pow(accel_noise_sigma, 2); - Matrix33 measured_omega_cov = I_3x3 * pow(gyro_noise_sigma, 2); - Matrix33 integration_error_cov = - I_3x3 * 1e-8; // error committed in integrating position from velocities - Matrix33 bias_acc_cov = I_3x3 * pow(accel_bias_rw_sigma, 2); - Matrix33 bias_omega_cov = I_3x3 * pow(gyro_bias_rw_sigma, 2); - Matrix66 bias_acc_omega_int = - I_6x6 * 1e-5; // error in the bias used for preintegration + auto p = imuParams(); - auto p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0); - // PreintegrationBase params: - p->accelerometerCovariance = - measured_acc_cov; // acc white noise in continuous - p->integrationCovariance = - integration_error_cov; // integration uncertainty continuous - // should be using 2nd order integration - // PreintegratedRotation params: - p->gyroscopeCovariance = - measured_omega_cov; // gyro white noise in continuous - // PreintegrationCombinedMeasurements params: - p->biasAccCovariance = bias_acc_cov; // acc bias in continuous - p->biasOmegaCovariance = bias_omega_cov; // gyro bias in continuous - p->biasAccOmegaInt = bias_acc_omega_int; + std::shared_ptr preintegrated = + std::make_shared(p, prior_imu_bias); - std::shared_ptr preintegrated = nullptr; - if (use_combined_imu) { - preintegrated = - std::make_shared(p, prior_imu_bias); - } else { - preintegrated = - std::make_shared(p, prior_imu_bias); - } assert(preintegrated); // Store previous state for imu integration and latest predicted outcome. @@ -190,16 +211,16 @@ int main(int argc, char* argv[]) { while (file.good()) { // Parse out first value getline(file, value, ','); - int type = atoi(value.c_str()); + int type = stoi(value.c_str()); if (type == 0) { // IMU measurement Vector6 imu; for (int i = 0; i < 5; ++i) { getline(file, value, ','); - imu(i) = atof(value.c_str()); + imu(i) = stof(value.c_str()); } getline(file, value, '\n'); - imu(5) = atof(value.c_str()); + imu(5) = stof(value.c_str()); // Adding the IMU preintegration. 
preintegrated->integrateMeasurement(imu.head<3>(), imu.tail<3>(), dt); @@ -208,35 +229,24 @@ int main(int argc, char* argv[]) { Vector7 gps; for (int i = 0; i < 6; ++i) { getline(file, value, ','); - gps(i) = atof(value.c_str()); + gps(i) = stof(value.c_str()); } getline(file, value, '\n'); - gps(6) = atof(value.c_str()); + gps(6) = stof(value.c_str()); correction_count++; // Adding IMU factor and GPS factor and optimizing. - if (use_combined_imu) { - auto preint_imu_combined = - dynamic_cast( - *preintegrated); - CombinedImuFactor imu_factor( - X(correction_count - 1), V(correction_count - 1), - X(correction_count), V(correction_count), B(correction_count - 1), - B(correction_count), preint_imu_combined); - graph->add(imu_factor); - } else { - auto preint_imu = - dynamic_cast(*preintegrated); - ImuFactor imu_factor(X(correction_count - 1), V(correction_count - 1), - X(correction_count), V(correction_count), - B(correction_count - 1), preint_imu); - graph->add(imu_factor); - imuBias::ConstantBias zero_bias(Vector3(0, 0, 0), Vector3(0, 0, 0)); - graph->add(BetweenFactor( - B(correction_count - 1), B(correction_count), zero_bias, - bias_noise_model)); - } + auto preint_imu = + dynamic_cast(*preintegrated); + ImuFactor imu_factor(X(correction_count - 1), V(correction_count - 1), + X(correction_count), V(correction_count), + B(correction_count - 1), preint_imu); + graph->add(imu_factor); + imuBias::ConstantBias zero_bias(Vector3(0, 0, 0), Vector3(0, 0, 0)); + graph->add(BetweenFactor( + B(correction_count - 1), B(correction_count), zero_bias, + bias_noise_model)); auto correction_noise = noiseModel::Isotropic::Sigma(3, 1.0); GPSFactor gps_factor(X(correction_count), @@ -252,8 +262,21 @@ int main(int argc, char* argv[]) { initial_values.insert(V(correction_count), prop_state.v()); initial_values.insert(B(correction_count), prev_bias); - LevenbergMarquardtOptimizer optimizer(*graph, initial_values); - Values result = optimizer.optimize(); + Values result; + + if (use_isam) { + isam2->update(*graph, initial_values); + isam2->update(); + result = isam2->calculateEstimate(); + + // reset the graph + graph->resize(0); + initial_values.clear(); + + } else { + LevenbergMarquardtOptimizer optimizer(*graph, initial_values); + result = optimizer.optimize(); + } // Overwrite the beginning of the preintegration for the next step. prev_state = NavState(result.at(X(correction_count)), diff --git a/examples/ImuFactorExample2.cpp b/examples/ImuFactorsExample2.cpp similarity index 89% rename from examples/ImuFactorExample2.cpp rename to examples/ImuFactorsExample2.cpp index cb3650421..0031acbe8 100644 --- a/examples/ImuFactorExample2.cpp +++ b/examples/ImuFactorsExample2.cpp @@ -1,3 +1,19 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ImuFactorExample2 + * @brief Test example for using GTSAM ImuFactor and ImuCombinedFactor with ISAM2. 
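The ISAM2 branch added above follows GTSAM's usual incremental pattern: hand the solver only the factors and initial guesses created since the last call, optionally run one extra refinement pass, read back the current estimate, and clear the local containers. A minimal sketch of that loop, with illustrative names not taken from the example:

#include <gtsam/nonlinear/ISAM2.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>

// Sketch only: one incremental step with ISAM2, mirroring the pattern used in
// the example above. Names are illustrative.
gtsam::Values isamStep(gtsam::ISAM2& isam,
                       gtsam::NonlinearFactorGraph& newFactors,
                       gtsam::Values& newValues) {
  isam.update(newFactors, newValues);  // absorb the new factors and variables
  isam.update();                       // optional extra pass to refine the solution
  gtsam::Values estimate = isam.calculateEstimate();
  newFactors.resize(0);                // the solver keeps its own copy of the graph
  newValues.clear();
  return estimate;
}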
+ * @author Robert Truax + */ #include #include diff --git a/examples/LocalizationExample.cpp b/examples/LocalizationExample.cpp index a28ead5fe..e6a0f6622 100644 --- a/examples/LocalizationExample.cpp +++ b/examples/LocalizationExample.cpp @@ -85,7 +85,7 @@ class UnaryFactor: public NoiseModelFactor1 { // function, returning a vector of errors when evaluated at the provided variable value. It // must also calculate the Jacobians for this measurement function, if requested. Vector evaluateError(const Pose2& q, - boost::optional H = boost::none) const { + boost::optional H = boost::none) const override { // The measurement function for a GPS-like measurement is simple: // error_x = pose.x - measurement.x // error_y = pose.y - measurement.y @@ -99,7 +99,7 @@ class UnaryFactor: public NoiseModelFactor1 { // The second is a 'clone' function that allows the factor to be copied. Under most // circumstances, the following code that employs the default copy constructor should // work fine. - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new UnaryFactor(*this))); } diff --git a/examples/Pose3Localization.cpp b/examples/Pose3Localization.cpp index 1667b43d9..c18a9e9ce 100644 --- a/examples/Pose3Localization.cpp +++ b/examples/Pose3Localization.cpp @@ -43,7 +43,7 @@ int main(const int argc, const char* argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const Values::ConstKeyValuePair& key_value : *initial) { + for (const auto key_value : *initial) { std::cout << "Adding prior to g2o file " << std::endl; firstKey = key_value.key; graph->addPrior(firstKey, Pose3(), priorModel); @@ -74,7 +74,7 @@ int main(const int argc, const char* argv[]) { // Calculate and print marginal covariances for all variables Marginals marginals(*graph, result); - for (const auto& key_value : result) { + for (const auto key_value : result) { auto p = dynamic_cast*>(&key_value.value); if (!p) continue; std::cout << marginals.marginalCovariance(key_value.key) << endl; diff --git a/examples/Pose3SLAMExample_changeKeys.cpp b/examples/Pose3SLAMExample_changeKeys.cpp index abadce975..5d4ed6657 100644 --- a/examples/Pose3SLAMExample_changeKeys.cpp +++ b/examples/Pose3SLAMExample_changeKeys.cpp @@ -55,7 +55,7 @@ int main(const int argc, const char *argv[]) { std::cout << "Rewriting input to file: " << inputFileRewritten << std::endl; // Additional: rewrite input with simplified keys 0,1,... 
Values simpleInitial; - for(const Values::ConstKeyValuePair& key_value: *initial) { + for(const auto key_value: *initial) { Key key; if(add) key = key_value.key + firstKey; diff --git a/examples/Pose3SLAMExample_g2o.cpp b/examples/Pose3SLAMExample_g2o.cpp index 1cca92f59..367964307 100644 --- a/examples/Pose3SLAMExample_g2o.cpp +++ b/examples/Pose3SLAMExample_g2o.cpp @@ -42,7 +42,7 @@ int main(const int argc, const char* argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const Values::ConstKeyValuePair& key_value : *initial) { + for (const auto key_value : *initial) { std::cout << "Adding prior to g2o file " << std::endl; firstKey = key_value.key; graph->addPrior(firstKey, Pose3(), priorModel); diff --git a/examples/Pose3SLAMExample_initializePose3Chordal.cpp b/examples/Pose3SLAMExample_initializePose3Chordal.cpp index 00a546bb2..992750fed 100644 --- a/examples/Pose3SLAMExample_initializePose3Chordal.cpp +++ b/examples/Pose3SLAMExample_initializePose3Chordal.cpp @@ -42,7 +42,7 @@ int main(const int argc, const char* argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const Values::ConstKeyValuePair& key_value : *initial) { + for (const auto key_value : *initial) { std::cout << "Adding prior to g2o file " << std::endl; firstKey = key_value.key; graph->addPrior(firstKey, Pose3(), priorModel); diff --git a/examples/Pose3SLAMExample_initializePose3Gradient.cpp b/examples/Pose3SLAMExample_initializePose3Gradient.cpp index 2f92afb9e..384f290a1 100644 --- a/examples/Pose3SLAMExample_initializePose3Gradient.cpp +++ b/examples/Pose3SLAMExample_initializePose3Gradient.cpp @@ -42,7 +42,7 @@ int main(const int argc, const char* argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const Values::ConstKeyValuePair& key_value : *initial) { + for (const auto key_value : *initial) { std::cout << "Adding prior to g2o file " << std::endl; firstKey = key_value.key; graph->addPrior(firstKey, Pose3(), priorModel); diff --git a/examples/SFMExample.cpp b/examples/SFMExample.cpp index 7f0c79e0e..fddca8169 100644 --- a/examples/SFMExample.cpp +++ b/examples/SFMExample.cpp @@ -109,7 +109,7 @@ int main(int argc, char* argv[]) { Symbol('x', i), corrupted_pose); } for (size_t j = 0; j < points.size(); ++j) { - auto corrupted_point = points[j] + Point3(-0.25, 0.20, 0.15); + Point3 corrupted_point = points[j] + Point3(-0.25, 0.20, 0.15); initialEstimate.insert(Symbol('l', j), corrupted_point); } initialEstimate.print("Initial Estimates:\n"); diff --git a/examples/ShonanAveragingCLI.cpp b/examples/ShonanAveragingCLI.cpp new file mode 100644 index 000000000..c72a32017 --- /dev/null +++ b/examples/ShonanAveragingCLI.cpp @@ -0,0 +1,138 @@ +/* ---------------------------------------------------------------------------- + +* GTSAM Copyright 2010, Georgia Tech Research Corporation, +* Atlanta, Georgia 30332-0415 +* All Rights Reserved +* Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + +* See LICENSE for the license information +* -------------------------------------------------------------------------- */ + +/** + * @file ShonanAveragingCLI.cpp + * @brief Run Shonan Rotation Averaging Algorithm on a file or example dataset + * @author Frank Dellaert + * @date August, 2020 + * + * Example usage: + * + * Running without arguments will run on tiny 3D example pose3example-grid + * ./ShonanAveragingCLI + * + * Read 2D dataset w10000 (in examples/data) and output to w10000-rotations.g2o + * ./ShonanAveragingCLI -d 2 -n w10000 -o w10000-rotations.g2o + * + * Read 3D dataset sphere25000.txt and output to shonan.g2o (default) + * ./ShonanAveragingCLI -i spere2500.txt + * + * If you prefer using a robust Huber loss, you can add the option "-h true", + * for instance + * ./ShonanAveragingCLI -i spere2500.txt -h true + */ + +#include +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; +namespace po = boost::program_options; + +/* ************************************************************************* */ +int main(int argc, char* argv[]) { + string datasetName; + string inputFile; + string outputFile; + int d, seed, pMin; + bool useHuberLoss; + po::options_description desc( + "Shonan Rotation Averaging CLI reads a *pose* graph, extracts the " + "rotation constraints, and runs the Shonan algorithm."); + desc.add_options()("help", "Print help message")( + "named_dataset,n", + po::value(&datasetName)->default_value("pose3example-grid"), + "Find and read frome example dataset file")( + "input_file,i", po::value(&inputFile)->default_value(""), + "Read pose constraints graph from the specified file")( + "output_file,o", + po::value(&outputFile)->default_value("shonan.g2o"), + "Write solution to the specified file")( + "dimension,d", po::value(&d)->default_value(3), + "Optimize over 2D or 3D rotations")( + "useHuberLoss,h", po::value(&useHuberLoss)->default_value(false), + "set True to use Huber loss")("pMin,p", + po::value(&pMin)->default_value(3), + "set to use desired rank pMin")( + "seed,s", po::value(&seed)->default_value(42), + "Random seed for initial estimate"); + po::variables_map vm; + po::store(po::command_line_parser(argc, argv).options(desc).run(), vm); + po::notify(vm); + + if (vm.count("help")) { + cout << desc << "\n"; + return 1; + } + + // Get input file + if (inputFile.empty()) { + if (datasetName.empty()) { + cout << "You must either specify a named dataset or an input file\n" + << desc << endl; + return 1; + } + inputFile = findExampleDataFile(datasetName); + } + + // Seed random number generator + static std::mt19937 rng(seed); + + NonlinearFactorGraph::shared_ptr inputGraph; + Values::shared_ptr posesInFile; + Values poses; + auto lmParams = LevenbergMarquardtParams::CeresDefaults(); + if (d == 2) { + cout << "Running Shonan averaging for SO(2) on " << inputFile << endl; + ShonanAveraging2::Parameters parameters(lmParams); + parameters.setUseHuber(useHuberLoss); + ShonanAveraging2 shonan(inputFile, parameters); + auto initial = shonan.initializeRandomly(rng); + auto result = shonan.run(initial, pMin); + + // Parse file again to set up translation problem, adding a prior + boost::tie(inputGraph, posesInFile) = load2D(inputFile); + auto priorModel = noiseModel::Unit::Create(3); + inputGraph->addPrior(0, posesInFile->at(0), priorModel); + + cout << "recovering 2D translations" << endl; + auto poseGraph = initialize::buildPoseGraph(*inputGraph); + poses = 
initialize::computePoses(result.first, &poseGraph); + } else if (d == 3) { + cout << "Running Shonan averaging for SO(3) on " << inputFile << endl; + ShonanAveraging3::Parameters parameters(lmParams); + parameters.setUseHuber(useHuberLoss); + ShonanAveraging3 shonan(inputFile, parameters); + auto initial = shonan.initializeRandomly(rng); + auto result = shonan.run(initial, pMin); + + // Parse file again to set up translation problem, adding a prior + boost::tie(inputGraph, posesInFile) = load3D(inputFile); + auto priorModel = noiseModel::Unit::Create(6); + inputGraph->addPrior(0, posesInFile->at(0), priorModel); + + cout << "recovering 3D translations" << endl; + auto poseGraph = initialize::buildPoseGraph(*inputGraph); + poses = initialize::computePoses(result.first, &poseGraph); + } else { + cout << "Can only run SO(2) or SO(3) averaging\n" << desc << endl; + return 1; + } + cout << "Writing result to " << outputFile << endl; + writeG2o(*inputGraph, poses, outputFile); + return 0; +} + +/* ************************************************************************* */ diff --git a/examples/SolverComparer.cpp b/examples/SolverComparer.cpp index d1155fe4c..7bae41403 100644 --- a/examples/SolverComparer.cpp +++ b/examples/SolverComparer.cpp @@ -50,11 +50,11 @@ #include #include #include -#include #include #include #include +#include #ifdef GTSAM_USE_TBB #include // tbb::task_arena @@ -554,12 +554,12 @@ void runCompare() void runPerturb() { // Set up random number generator - boost::mt19937 rng; - boost::normal_distribution normal(0.0, perturbationNoise); + std::mt19937 rng; + std::normal_distribution normal(0.0, perturbationNoise); // Perturb values VectorValues noise; - for(const Values::KeyValuePair& key_val: initial) + for(const Values::KeyValuePair key_val: initial) { Vector noisev(key_val.value.dim()); for(Vector::Index i = 0; i < noisev.size(); ++i) diff --git a/examples/UGM_chain.cpp b/examples/UGM_chain.cpp index 4ce4e7af4..3a885a844 100644 --- a/examples/UGM_chain.cpp +++ b/examples/UGM_chain.cpp @@ -10,7 +10,7 @@ * -------------------------------------------------------------------------- */ /** - * @file small.cpp + * @file UGM_chain.cpp * @brief UGM (undirected graphical model) examples: chain * @author Frank Dellaert * @author Abhijit Kundu @@ -19,10 +19,9 @@ * for more explanation. This code demos the same example using GTSAM. 
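SolverComparer's perturbation step above now uses the standard <random> facilities instead of Boost.Random. A minimal sketch of the equivalent noise draw, with an illustrative standard deviation and helper name:

#include <random>
#include <Eigen/Core>

// Sketch: draw an i.i.d. Gaussian perturbation vector, mirroring the
// boost -> std migration above.
Eigen::VectorXd gaussianNoise(int dim, double sigma, std::mt19937& rng) {
  std::normal_distribution<double> normal(0.0, sigma);
  Eigen::VectorXd v(dim);
  for (int i = 0; i < dim; ++i) v(i) = normal(rng);  // one sample per coordinate
  return v;
}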
*/ +#include #include -#include #include -#include #include @@ -30,9 +29,8 @@ using namespace std; using namespace gtsam; int main(int argc, char** argv) { - - // Set Number of Nodes in the Graph - const int nrNodes = 60; + // Set Number of Nodes in the Graph + const int nrNodes = 60; // Each node takes 1 of 7 possible states denoted by 0-6 in following order: // ["VideoGames" "Industry" "GradSchool" "VideoGames(with PhD)" @@ -40,70 +38,55 @@ int main(int argc, char** argv) { const size_t nrStates = 7; // define variables - vector nodes; - for (int i = 0; i < nrNodes; i++){ - DiscreteKey dk(i, nrStates); - nodes.push_back(dk); - } + vector nodes; + for (int i = 0; i < nrNodes; i++) { + DiscreteKey dk(i, nrStates); + nodes.push_back(dk); + } // create graph DiscreteFactorGraph graph; // add node potentials graph.add(nodes[0], ".3 .6 .1 0 0 0 0"); - for (int i = 1; i < nrNodes; i++) - graph.add(nodes[i], "1 1 1 1 1 1 1"); + for (int i = 1; i < nrNodes; i++) graph.add(nodes[i], "1 1 1 1 1 1 1"); - const std::string edgePotential = ".08 .9 .01 0 0 0 .01 " - ".03 .95 .01 0 0 0 .01 " - ".06 .06 .75 .05 .05 .02 .01 " - "0 0 0 .3 .6 .09 .01 " - "0 0 0 .02 .95 .02 .01 " - "0 0 0 .01 .01 .97 .01 " - "0 0 0 0 0 0 1"; + const std::string edgePotential = + ".08 .9 .01 0 0 0 .01 " + ".03 .95 .01 0 0 0 .01 " + ".06 .06 .75 .05 .05 .02 .01 " + "0 0 0 .3 .6 .09 .01 " + "0 0 0 .02 .95 .02 .01 " + "0 0 0 .01 .01 .97 .01 " + "0 0 0 0 0 0 1"; // add edge potentials for (int i = 0; i < nrNodes - 1; i++) graph.add(nodes[i] & nodes[i + 1], edgePotential); cout << "Created Factor Graph with " << nrNodes << " variable nodes and " - << graph.size() << " factors (Unary+Edge)."; + << graph.size() << " factors (Unary+Edge)."; // "Decoding", i.e., configuration with largest value // We use sequential variable elimination - DiscreteSequentialSolver solver(graph); - DiscreteFactor::sharedValues optimalDecoding = solver.optimize(); + DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential(); + DiscreteFactor::sharedValues optimalDecoding = chordal->optimize(); optimalDecoding->print("\nMost Probable Explanation (optimalDecoding)\n"); // "Inference" Computing marginals for each node - - - cout << "\nComputing Node Marginals ..(Sequential Elimination)" << endl; - gttic_(Sequential); - for (vector::iterator itr = nodes.begin(); itr != nodes.end(); - ++itr) { - //Compute the marginal - Vector margProbs = solver.marginalProbabilities(*itr); - - //Print the marginals - cout << "Node#" << setw(4) << itr->first << " : "; - print(margProbs); - } - gttoc_(Sequential); - // Here we'll make use of DiscreteMarginals class, which makes use of // bayes-tree based shortcut evaluation of marginals DiscreteMarginals marginals(graph); cout << "\nComputing Node Marginals ..(BayesTree based)" << endl; gttic_(Multifrontal); - for (vector::iterator itr = nodes.begin(); itr != nodes.end(); - ++itr) { - //Compute the marginal - Vector margProbs = marginals.marginalProbabilities(*itr); + for (vector::iterator it = nodes.begin(); it != nodes.end(); + ++it) { + // Compute the marginal + Vector margProbs = marginals.marginalProbabilities(*it); - //Print the marginals - cout << "Node#" << setw(4) << itr->first << " : "; + // Print the marginals + cout << "Node#" << setw(4) << it->first << " : "; print(margProbs); } gttoc_(Multifrontal); @@ -111,4 +94,3 @@ int main(int argc, char** argv) { tictoc_print_(); return 0; } - diff --git a/examples/UGM_small.cpp b/examples/UGM_small.cpp index f5338bc67..27a6205a3 100644 --- a/examples/UGM_small.cpp 
+++ b/examples/UGM_small.cpp @@ -10,15 +10,16 @@ * -------------------------------------------------------------------------- */ /** - * @file small.cpp + * @file UGM_small.cpp * @brief UGM (undirected graphical model) examples: small * @author Frank Dellaert * * See http://www.di.ens.fr/~mschmidt/Software/UGM/small.html */ +#include #include -#include +#include using namespace std; using namespace gtsam; @@ -61,24 +62,24 @@ int main(int argc, char** argv) { // "Decoding", i.e., configuration with largest value (MPE) // We use sequential variable elimination - DiscreteSequentialSolver solver(graph); - DiscreteFactor::sharedValues optimalDecoding = solver.optimize(); + DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential(); + DiscreteFactor::sharedValues optimalDecoding = chordal->optimize(); optimalDecoding->print("\noptimalDecoding"); // "Inference" Computing marginals cout << "\nComputing Node Marginals .." << endl; - Vector margProbs; + DiscreteMarginals marginals(graph); - margProbs = solver.marginalProbabilities(Cathy); + Vector margProbs = marginals.marginalProbabilities(Cathy); print(margProbs, "Cathy's Node Marginal:"); - margProbs = solver.marginalProbabilities(Heather); + margProbs = marginals.marginalProbabilities(Heather); print(margProbs, "Heather's Node Marginal"); - margProbs = solver.marginalProbabilities(Mark); + margProbs = marginals.marginalProbabilities(Mark); print(margProbs, "Mark's Node Marginal"); - margProbs = solver.marginalProbabilities(Allison); + margProbs = marginals.marginalProbabilities(Allison); print(margProbs, "Allison's Node Marginal"); return 0; diff --git a/gtsam/3rdparty/CMakeLists.txt b/gtsam/3rdparty/CMakeLists.txt index 89149d964..8b356393b 100644 --- a/gtsam/3rdparty/CMakeLists.txt +++ b/gtsam/3rdparty/CMakeLists.txt @@ -1,7 +1,8 @@ -# install CCOLAMD headers +# install CCOLAMD and SuiteSparse_config headers install(FILES CCOLAMD/Include/ccolamd.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam/3rdparty/CCOLAMD) install(FILES SuiteSparse_config/SuiteSparse_config.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/gtsam/3rdparty/SuiteSparse_config) +# install Eigen unless System Eigen is used if(NOT GTSAM_USE_SYSTEM_EIGEN) # Find plain .h files file(GLOB_RECURSE eigen_headers "${CMAKE_CURRENT_SOURCE_DIR}/Eigen/Eigen/*.h") @@ -17,7 +18,7 @@ if(NOT GTSAM_USE_SYSTEM_EIGEN) endforeach(eigen_dir) if(GTSAM_WITH_EIGEN_UNSUPPORTED) - message("-- Installing Eigen Unsupported modules") + message(STATUS "Installing Eigen Unsupported modules") # do the same for the unsupported eigen folder file(GLOB_RECURSE unsupported_eigen_headers "${CMAKE_CURRENT_SOURCE_DIR}/Eigen/unsupported/Eigen/*.h") diff --git a/gtsam/3rdparty/Spectra/GenEigsBase.h b/gtsam/3rdparty/Spectra/GenEigsBase.h new file mode 100644 index 000000000..e084033d7 --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsBase.h @@ -0,0 +1,482 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
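Both UGM examples above replace the removed DiscreteSequentialSolver with sequential elimination on the factor graph plus the DiscreteMarginals helper. A minimal sketch of the new pattern, with illustrative names:

#include <gtsam/discrete/DiscreteBayesNet.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteMarginals.h>

// Sketch: MPE decoding and node marginals after the DiscreteSequentialSolver removal.
void decodeAndMarginalize(const gtsam::DiscreteFactorGraph& graph,
                          const gtsam::DiscreteKey& key) {
  // "Decoding": eliminate to a Bayes net, then optimize it
  gtsam::DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
  gtsam::DiscreteFactor::sharedValues mpe = chordal->optimize();
  mpe->print("Most Probable Explanation\n");

  // Marginals via the Bayes-tree based helper
  gtsam::DiscreteMarginals marginals(graph);
  gtsam::Vector margProbs = marginals.marginalProbabilities(key);
}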
+ +#ifndef GEN_EIGS_BASE_H +#define GEN_EIGS_BASE_H + +#include +#include // std::vector +#include // std::abs, std::pow, std::sqrt +#include // std::min, std::copy +#include // std::complex, std::conj, std::norm, std::abs +#include // std::invalid_argument + +#include "Util/TypeTraits.h" +#include "Util/SelectionRule.h" +#include "Util/CompInfo.h" +#include "Util/SimpleRandom.h" +#include "MatOp/internal/ArnoldiOp.h" +#include "LinAlg/UpperHessenbergQR.h" +#include "LinAlg/DoubleShiftQR.h" +#include "LinAlg/UpperHessenbergEigen.h" +#include "LinAlg/Arnoldi.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This is the base class for general eigen solvers, mainly for internal use. +/// It is kept here to provide the documentation for member functions of concrete eigen solvers +/// such as GenEigsSolver and GenEigsRealShiftSolver. +/// +template +class GenEigsBase +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Array Array; + typedef Eigen::Array BoolArray; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstVec; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + typedef ArnoldiOp ArnoldiOpType; + typedef Arnoldi ArnoldiFac; + +protected: + // clang-format off + OpType* m_op; // object to conduct matrix operation, + // e.g. matrix-vector product + const Index m_n; // dimension of matrix A + const Index m_nev; // number of eigenvalues requested + const Index m_ncv; // dimension of Krylov subspace in the Arnoldi method + Index m_nmatop; // number of matrix operations called + Index m_niter; // number of restarting iterations + + ArnoldiFac m_fac; // Arnoldi factorization + + ComplexVector m_ritz_val; // Ritz values + ComplexMatrix m_ritz_vec; // Ritz vectors + ComplexVector m_ritz_est; // last row of m_ritz_vec + +private: + BoolArray m_ritz_conv; // indicator of the convergence of Ritz values + int m_info; // status of the computation + + const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type + const Scalar m_eps23; // m_eps^(2/3), used to test the convergence + // clang-format on + + // Real Ritz values calculated from UpperHessenbergEigen have exact zero imaginary part + // Complex Ritz values have exact conjugate pairs + // So we use exact tests here + static bool is_complex(const Complex& v) { return v.imag() != Scalar(0); } + static bool is_conj(const Complex& v1, const Complex& v2) { return v1 == Eigen::numext::conj(v2); } + + // Implicitly restarted Arnoldi factorization + void restart(Index k) + { + using std::norm; + + if (k >= m_ncv) + return; + + DoubleShiftQR decomp_ds(m_ncv); + UpperHessenbergQR decomp_hb(m_ncv); + Matrix Q = Matrix::Identity(m_ncv, m_ncv); + + for (Index i = k; i < m_ncv; i++) + { + if (is_complex(m_ritz_val[i]) && is_conj(m_ritz_val[i], m_ritz_val[i + 1])) + { + // H - mu * I = Q1 * R1 + // H <- R1 * Q1 + mu * I = Q1' * H * Q1 + // H - conj(mu) * I = Q2 * R2 + // H <- R2 * Q2 + conj(mu) * I = Q2' * H * Q2 + // + // (H - mu * I) * (H - conj(mu) * I) = Q1 * Q2 * R2 * R1 = Q * R + const Scalar s = Scalar(2) * m_ritz_val[i].real(); + const Scalar t = norm(m_ritz_val[i]); + + decomp_ds.compute(m_fac.matrix_H(), s, t); + + // Q -> Q * Qi + decomp_ds.apply_YQ(Q); + // H -> Q'HQ + // Matrix Q = Matrix::Identity(m_ncv, m_ncv); + // 
decomp_ds.apply_YQ(Q); + // m_fac_H = Q.transpose() * m_fac_H * Q; + m_fac.compress_H(decomp_ds); + + i++; + } + else + { + // QR decomposition of H - mu * I, mu is real + decomp_hb.compute(m_fac.matrix_H(), m_ritz_val[i].real()); + + // Q -> Q * Qi + decomp_hb.apply_YQ(Q); + // H -> Q'HQ = RQ + mu * I + m_fac.compress_H(decomp_hb); + } + } + + m_fac.compress_V(Q); + m_fac.factorize_from(k, m_ncv, m_nmatop); + + retrieve_ritzpair(); + } + + // Calculates the number of converged Ritz values + Index num_converged(Scalar tol) + { + // thresh = tol * max(m_eps23, abs(theta)), theta for Ritz value + Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(m_eps23); + Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm(); + // Converged "wanted" Ritz values + m_ritz_conv = (resid < thresh); + + return m_ritz_conv.cast().sum(); + } + + // Returns the adjusted nev for restarting + Index nev_adjusted(Index nconv) + { + using std::abs; + + Index nev_new = m_nev; + for (Index i = m_nev; i < m_ncv; i++) + if (abs(m_ritz_est[i]) < m_near_0) + nev_new++; + + // Adjust nev_new, according to dnaup2.f line 660~674 in ARPACK + nev_new += std::min(nconv, (m_ncv - nev_new) / 2); + if (nev_new == 1 && m_ncv >= 6) + nev_new = m_ncv / 2; + else if (nev_new == 1 && m_ncv > 3) + nev_new = 2; + + if (nev_new > m_ncv - 2) + nev_new = m_ncv - 2; + + // Increase nev by one if ritz_val[nev - 1] and + // ritz_val[nev] are conjugate pairs + if (is_complex(m_ritz_val[nev_new - 1]) && + is_conj(m_ritz_val[nev_new - 1], m_ritz_val[nev_new])) + { + nev_new++; + } + + return nev_new; + } + + // Retrieves and sorts Ritz values and Ritz vectors + void retrieve_ritzpair() + { + UpperHessenbergEigen decomp(m_fac.matrix_H()); + const ComplexVector& evals = decomp.eigenvalues(); + ComplexMatrix evecs = decomp.eigenvectors(); + + SortEigenvalue sorting(evals.data(), evals.size()); + std::vector ind = sorting.index(); + + // Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively + for (Index i = 0; i < m_ncv; i++) + { + m_ritz_val[i] = evals[ind[i]]; + m_ritz_est[i] = evecs(m_ncv - 1, ind[i]); + } + for (Index i = 0; i < m_nev; i++) + { + m_ritz_vec.col(i).noalias() = evecs.col(ind[i]); + } + } + +protected: + // Sorts the first nev Ritz pairs in the specified order + // This is used to return the final results + virtual void sort_ritzpair(int sort_rule) + { + // First make sure that we have a valid index vector + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + std::vector ind = sorting.index(); + + switch (sort_rule) + { + case LARGEST_MAGN: + break; + case LARGEST_REAL: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case LARGEST_IMAG: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_MAGN: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_REAL: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_IMAG: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + default: + throw std::invalid_argument("unsupported sorting rule"); + } + + ComplexVector new_ritz_val(m_ncv); + ComplexMatrix new_ritz_vec(m_ncv, m_nev); + BoolArray new_ritz_conv(m_nev); + + for (Index i = 0; i < m_nev; i++) + { + new_ritz_val[i] = m_ritz_val[ind[i]]; + new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]); + new_ritz_conv[i] = m_ritz_conv[ind[i]]; + } + 
+ m_ritz_val.swap(new_ritz_val); + m_ritz_vec.swap(new_ritz_vec); + m_ritz_conv.swap(new_ritz_conv); + } + +public: + /// \cond + + GenEigsBase(OpType* op, BOpType* Bop, Index nev, Index ncv) : + m_op(op), + m_n(m_op->rows()), + m_nev(nev), + m_ncv(ncv > m_n ? m_n : ncv), + m_nmatop(0), + m_niter(0), + m_fac(ArnoldiOpType(op, Bop), m_ncv), + m_info(NOT_COMPUTED), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + m_eps23(Eigen::numext::pow(m_eps, Scalar(2.0) / 3)) + { + if (nev < 1 || nev > m_n - 2) + throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 2, n is the size of matrix"); + + if (ncv < nev + 2 || ncv > m_n) + throw std::invalid_argument("ncv must satisfy nev + 2 <= ncv <= n, n is the size of matrix"); + } + + /// + /// Virtual destructor + /// + virtual ~GenEigsBase() {} + + /// \endcond + + /// + /// Initializes the solver by providing an initial residual vector. + /// + /// \param init_resid Pointer to the initial residual vector. + /// + /// **Spectra** (and also **ARPACK**) uses an iterative algorithm + /// to find eigenvalues. This function allows the user to provide the initial + /// residual vector. + /// + void init(const Scalar* init_resid) + { + // Reset all matrices/vectors to zero + m_ritz_val.resize(m_ncv); + m_ritz_vec.resize(m_ncv, m_nev); + m_ritz_est.resize(m_ncv); + m_ritz_conv.resize(m_nev); + + m_ritz_val.setZero(); + m_ritz_vec.setZero(); + m_ritz_est.setZero(); + m_ritz_conv.setZero(); + + m_nmatop = 0; + m_niter = 0; + + // Initialize the Arnoldi factorization + MapConstVec v0(init_resid, m_n); + m_fac.init(v0, m_nmatop); + } + + /// + /// Initializes the solver by providing a random initial residual vector. + /// + /// This overloaded function generates a random initial residual vector + /// (with a fixed random seed) for the algorithm. Elements in the vector + /// follow independent Uniform(-0.5, 0.5) distribution. + /// + void init() + { + SimpleRandom rng(0); + Vector init_resid = rng.random_vec(m_n); + init(init_resid.data()); + } + + /// + /// Conducts the major computation procedure. + /// + /// \param maxit Maximum number of iterations allowed in the algorithm. + /// \param tol Precision parameter for the calculated eigenvalues. + /// \param sort_rule Rule to sort the eigenvalues and eigenvectors. + /// Supported values are + /// `Spectra::LARGEST_MAGN`, `Spectra::LARGEST_REAL`, + /// `Spectra::LARGEST_IMAG`, `Spectra::SMALLEST_MAGN`, + /// `Spectra::SMALLEST_REAL` and `Spectra::SMALLEST_IMAG`, + /// for example `LARGEST_MAGN` indicates that eigenvalues + /// with largest magnitude come first. + /// Note that this argument is only used to + /// **sort** the final result, and the **selection** rule + /// (e.g. selecting the largest or smallest eigenvalues in the + /// full spectrum) is specified by the template parameter + /// `SelectionRule` of GenEigsSolver. + /// + /// \return Number of converged eigenvalues. + /// + Index compute(Index maxit = 1000, Scalar tol = 1e-10, int sort_rule = LARGEST_MAGN) + { + // The m-step Arnoldi factorization + m_fac.factorize_from(1, m_ncv, m_nmatop); + retrieve_ritzpair(); + // Restarting + Index i, nconv = 0, nev_adj; + for (i = 0; i < maxit; i++) + { + nconv = num_converged(tol); + if (nconv >= m_nev) + break; + + nev_adj = nev_adjusted(nconv); + restart(nev_adj); + } + // Sorting results + sort_ritzpair(sort_rule); + + m_niter += i + 1; + m_info = (nconv >= m_nev) ? 
SUCCESSFUL : NOT_CONVERGING; + + return std::min(m_nev, nconv); + } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. + /// + int info() const { return m_info; } + + /// + /// Returns the number of iterations used in the computation. + /// + Index num_iterations() const { return m_niter; } + + /// + /// Returns the number of matrix operations used in the computation. + /// + Index num_operations() const { return m_nmatop; } + + /// + /// Returns the converged eigenvalues. + /// + /// \return A complex-valued vector containing the eigenvalues. + /// Returned vector type will be `Eigen::Vector, ...>`, depending on + /// the template parameter `Scalar` defined. + /// + ComplexVector eigenvalues() const + { + const Index nconv = m_ritz_conv.cast().sum(); + ComplexVector res(nconv); + + if (!nconv) + return res; + + Index j = 0; + for (Index i = 0; i < m_nev; i++) + { + if (m_ritz_conv[i]) + { + res[j] = m_ritz_val[i]; + j++; + } + } + + return res; + } + + /// + /// Returns the eigenvectors associated with the converged eigenvalues. + /// + /// \param nvec The number of eigenvectors to return. + /// + /// \return A complex-valued matrix containing the eigenvectors. + /// Returned matrix type will be `Eigen::Matrix, ...>`, + /// depending on the template parameter `Scalar` defined. + /// + ComplexMatrix eigenvectors(Index nvec) const + { + const Index nconv = m_ritz_conv.cast().sum(); + nvec = std::min(nvec, nconv); + ComplexMatrix res(m_n, nvec); + + if (!nvec) + return res; + + ComplexMatrix ritz_vec_conv(m_ncv, nvec); + Index j = 0; + for (Index i = 0; i < m_nev && j < nvec; i++) + { + if (m_ritz_conv[i]) + { + ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i); + j++; + } + } + + res.noalias() = m_fac.matrix_V() * ritz_vec_conv; + + return res; + } + + /// + /// Returns all converged eigenvectors. + /// + ComplexMatrix eigenvectors() const + { + return eigenvectors(m_nev); + } +}; + +} // namespace Spectra + +#endif // GEN_EIGS_BASE_H diff --git a/gtsam/3rdparty/Spectra/GenEigsComplexShiftSolver.h b/gtsam/3rdparty/Spectra/GenEigsComplexShiftSolver.h new file mode 100644 index 000000000..da7c1b422 --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsComplexShiftSolver.h @@ -0,0 +1,157 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEN_EIGS_COMPLEX_SHIFT_SOLVER_H +#define GEN_EIGS_COMPLEX_SHIFT_SOLVER_H + +#include + +#include "GenEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseGenComplexShiftSolve.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for general real matrices with +/// a complex shift value in the **shift-and-invert mode**. The background +/// knowledge of the shift-and-invert mode can be found in the documentation +/// of the SymEigsShiftSolver class. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the shifted-and-inverted eigenvalues. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. 
Users could either +/// use the DenseGenComplexShiftSolve wrapper class, or define their +/// own that implements all the public member functions as in +/// DenseGenComplexShiftSolve. +/// +template > +class GenEigsComplexShiftSolver : public GenEigsBase +{ +private: + typedef Eigen::Index Index; + typedef std::complex Complex; + typedef Eigen::Matrix Vector; + typedef Eigen::Matrix ComplexVector; + + const Scalar m_sigmar; + const Scalar m_sigmai; + + // First transform back the Ritz values, and then sort + void sort_ritzpair(int sort_rule) + { + using std::abs; + using std::sqrt; + using std::norm; + + // The eigenvalues we get from the iteration is + // nu = 0.5 * (1 / (lambda - sigma) + 1 / (lambda - conj(sigma))) + // So the eigenvalues of the original problem is + // 1 \pm sqrt(1 - 4 * nu^2 * sigmai^2) + // lambda = sigmar + ----------------------------------- + // 2 * nu + // We need to pick the correct root + // Let (lambdaj, vj) be the j-th eigen pair, then A * vj = lambdaj * vj + // and inv(A - r * I) * vj = 1 / (lambdaj - r) * vj + // where r is any shift value. + // We can use this identity to determine lambdaj + // + // op(v) computes Re(inv(A - r * I) * v) for any real v + // If r is real, then op(v) is also real. Let a = Re(vj), b = Im(vj), + // then op(vj) = op(a) + op(b) * i + // By comparing op(vj) and [1 / (lambdaj - r) * vj], we can determine + // which one is the correct root + + // Select a random shift value + SimpleRandom rng(0); + const Scalar shiftr = rng.random() * m_sigmar + rng.random(); + const Complex shift = Complex(shiftr, Scalar(0)); + this->m_op->set_shift(shiftr, Scalar(0)); + + // Calculate inv(A - r * I) * vj + Vector v_real(this->m_n), v_imag(this->m_n), OPv_real(this->m_n), OPv_imag(this->m_n); + const Scalar eps = Eigen::NumTraits::epsilon(); + for (Index i = 0; i < this->m_nev; i++) + { + v_real.noalias() = this->m_fac.matrix_V() * this->m_ritz_vec.col(i).real(); + v_imag.noalias() = this->m_fac.matrix_V() * this->m_ritz_vec.col(i).imag(); + this->m_op->perform_op(v_real.data(), OPv_real.data()); + this->m_op->perform_op(v_imag.data(), OPv_imag.data()); + + // Two roots computed from the quadratic equation + const Complex nu = this->m_ritz_val[i]; + const Complex root_part1 = m_sigmar + Scalar(0.5) / nu; + const Complex root_part2 = Scalar(0.5) * sqrt(Scalar(1) - Scalar(4) * m_sigmai * m_sigmai * (nu * nu)) / nu; + const Complex root1 = root_part1 + root_part2; + const Complex root2 = root_part1 - root_part2; + + // Test roots + Scalar err1 = Scalar(0), err2 = Scalar(0); + for (int k = 0; k < this->m_n; k++) + { + const Complex rhs1 = Complex(v_real[k], v_imag[k]) / (root1 - shift); + const Complex rhs2 = Complex(v_real[k], v_imag[k]) / (root2 - shift); + const Complex OPv = Complex(OPv_real[k], OPv_imag[k]); + err1 += norm(OPv - rhs1); + err2 += norm(OPv - rhs2); + } + + const Complex lambdaj = (err1 < err2) ? root1 : root2; + this->m_ritz_val[i] = lambdaj; + + if (abs(Eigen::numext::imag(lambdaj)) > eps) + { + this->m_ritz_val[i + 1] = Eigen::numext::conj(lambdaj); + i++; + } + else + { + this->m_ritz_val[i] = Complex(Eigen::numext::real(lambdaj), Scalar(0)); + } + } + + GenEigsBase::sort_ritzpair(sort_rule); + } + +public: + /// + /// Constructor to create a eigen solver object using the shift-and-invert mode. + /// + /// \param op Pointer to the matrix operation object. This class should implement + /// the complex shift-solve operation of \f$A\f$: calculating + /// \f$\mathrm{Re}\{(A-\sigma I)^{-1}v\}\f$ for any vector \f$v\f$. 
Users could either + /// create the object from the DenseGenComplexShiftSolve wrapper class, or + /// define their own that implements all the public member functions + /// as in DenseGenComplexShiftSolve. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$. + /// \param sigmar The real part of the shift. + /// \param sigmai The imaginary part of the shift. + /// + GenEigsComplexShiftSolver(OpType* op, Index nev, Index ncv, const Scalar& sigmar, const Scalar& sigmai) : + GenEigsBase(op, NULL, nev, ncv), + m_sigmar(sigmar), m_sigmai(sigmai) + { + this->m_op->set_shift(m_sigmar, m_sigmai); + } +}; + +} // namespace Spectra + +#endif // GEN_EIGS_COMPLEX_SHIFT_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/GenEigsRealShiftSolver.h b/gtsam/3rdparty/Spectra/GenEigsRealShiftSolver.h new file mode 100644 index 000000000..6f28308f1 --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsRealShiftSolver.h @@ -0,0 +1,89 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEN_EIGS_REAL_SHIFT_SOLVER_H +#define GEN_EIGS_REAL_SHIFT_SOLVER_H + +#include + +#include "GenEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseGenRealShiftSolve.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for general real matrices with +/// a real shift value in the **shift-and-invert mode**. The background +/// knowledge of the shift-and-invert mode can be found in the documentation +/// of the SymEigsShiftSolver class. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the shifted-and-inverted eigenvalues. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseGenRealShiftSolve and +/// SparseGenRealShiftSolve, or define their +/// own that implements all the public member functions as in +/// DenseGenRealShiftSolve. +/// +template > +class GenEigsRealShiftSolver : public GenEigsBase +{ +private: + typedef Eigen::Index Index; + typedef std::complex Complex; + typedef Eigen::Array ComplexArray; + + const Scalar m_sigma; + + // First transform back the Ritz values, and then sort + void sort_ritzpair(int sort_rule) + { + // The eigenvalues we get from the iteration is nu = 1 / (lambda - sigma) + // So the eigenvalues of the original problem is lambda = 1 / nu + sigma + ComplexArray ritz_val_org = Scalar(1.0) / this->m_ritz_val.head(this->m_nev).array() + m_sigma; + this->m_ritz_val.head(this->m_nev) = ritz_val_org; + GenEigsBase::sort_ritzpair(sort_rule); + } + +public: + /// + /// Constructor to create a eigen solver object using the shift-and-invert mode. + /// + /// \param op Pointer to the matrix operation object. 
This class should implement + /// the shift-solve operation of \f$A\f$: calculating + /// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class such as DenseGenRealShiftSolve, or + /// define their own that implements all the public member functions + /// as in DenseGenRealShiftSolve. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$. + /// \param sigma The real-valued shift. + /// + GenEigsRealShiftSolver(OpType* op, Index nev, Index ncv, Scalar sigma) : + GenEigsBase(op, NULL, nev, ncv), + m_sigma(sigma) + { + this->m_op->set_shift(m_sigma); + } +}; + +} // namespace Spectra + +#endif // GEN_EIGS_REAL_SHIFT_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/GenEigsSolver.h b/gtsam/3rdparty/Spectra/GenEigsSolver.h new file mode 100644 index 000000000..3ead1dc4d --- /dev/null +++ b/gtsam/3rdparty/Spectra/GenEigsSolver.h @@ -0,0 +1,158 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEN_EIGS_SOLVER_H +#define GEN_EIGS_SOLVER_H + +#include + +#include "GenEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseGenMatProd.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for general real matrices, i.e., +/// to solve \f$Ax=\lambda x\f$ for a possibly non-symmetric \f$A\f$ matrix. +/// +/// Most of the background information documented in the SymEigsSolver class +/// also applies to the GenEigsSolver class here, except that the eigenvalues +/// and eigenvectors of a general matrix can now be complex-valued. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseGenMatProd and +/// SparseGenMatProd, or define their +/// own that implements all the public member functions as in +/// DenseGenMatProd. 
+/// +/// An example that illustrates the usage of GenEigsSolver is give below: +/// +/// \code{.cpp} +/// #include +/// #include +/// // is implicitly included +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // We are going to calculate the eigenvalues of M +/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10); +/// +/// // Construct matrix operation object using the wrapper class +/// DenseGenMatProd op(M); +/// +/// // Construct eigen solver object, requesting the largest +/// // (in magnitude, or norm) three eigenvalues +/// GenEigsSolver< double, LARGEST_MAGN, DenseGenMatProd > eigs(&op, 3, 6); +/// +/// // Initialize and compute +/// eigs.init(); +/// int nconv = eigs.compute(); +/// +/// // Retrieve results +/// Eigen::VectorXcd evalues; +/// if(eigs.info() == SUCCESSFUL) +/// evalues = eigs.eigenvalues(); +/// +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// +/// return 0; +/// } +/// \endcode +/// +/// And also an example for sparse matrices: +/// +/// \code{.cpp} +/// #include +/// #include +/// #include +/// #include +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // A band matrix with 1 on the main diagonal, 2 on the below-main subdiagonal, +/// // and 3 on the above-main subdiagonal +/// const int n = 10; +/// Eigen::SparseMatrix M(n, n); +/// M.reserve(Eigen::VectorXi::Constant(n, 3)); +/// for(int i = 0; i < n; i++) +/// { +/// M.insert(i, i) = 1.0; +/// if(i > 0) +/// M.insert(i - 1, i) = 3.0; +/// if(i < n - 1) +/// M.insert(i + 1, i) = 2.0; +/// } +/// +/// // Construct matrix operation object using the wrapper class SparseGenMatProd +/// SparseGenMatProd op(M); +/// +/// // Construct eigen solver object, requesting the largest three eigenvalues +/// GenEigsSolver< double, LARGEST_MAGN, SparseGenMatProd > eigs(&op, 3, 6); +/// +/// // Initialize and compute +/// eigs.init(); +/// int nconv = eigs.compute(); +/// +/// // Retrieve results +/// Eigen::VectorXcd evalues; +/// if(eigs.info() == SUCCESSFUL) +/// evalues = eigs.eigenvalues(); +/// +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// +/// return 0; +/// } +/// \endcode +template > +class GenEigsSolver : public GenEigsBase +{ +private: + typedef Eigen::Index Index; + +public: + /// + /// Constructor to create a solver object. + /// + /// \param op Pointer to the matrix operation object, which should implement + /// the matrix-vector multiplication operation of \f$A\f$: + /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class such as DenseGenMatProd, or + /// define their own that implements all the public member functions + /// as in DenseGenMatProd. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-2\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev+2 \le ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev + 1\f$. 
+ /// + GenEigsSolver(OpType* op, Index nev, Index ncv) : + GenEigsBase(op, NULL, nev, ncv) + {} +}; + +} // namespace Spectra + +#endif // GEN_EIGS_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h b/gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h new file mode 100644 index 000000000..730ba9556 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/Arnoldi.h @@ -0,0 +1,284 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef ARNOLDI_H +#define ARNOLDI_H + +#include +#include // std::sqrt +#include // std::invalid_argument +#include // std::stringstream + +#include "../MatOp/internal/ArnoldiOp.h" +#include "../Util/TypeTraits.h" +#include "../Util/SimpleRandom.h" +#include "UpperHessenbergQR.h" +#include "DoubleShiftQR.h" + +namespace Spectra { + +// Arnoldi factorization A * V = V * H + f * e' +// A: n x n +// V: n x k +// H: k x k +// f: n x 1 +// e: [0, ..., 0, 1] +// V and H are allocated of dimension m, so the maximum value of k is m +template +class Arnoldi +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstMat; + typedef Eigen::Map MapConstVec; + +protected: + // clang-format off + ArnoldiOpType m_op; // Operators for the Arnoldi factorization + + const Index m_n; // dimension of A + const Index m_m; // maximum dimension of subspace V + Index m_k; // current dimension of subspace V + + Matrix m_fac_V; // V matrix in the Arnoldi factorization + Matrix m_fac_H; // H matrix in the Arnoldi factorization + Vector m_fac_f; // residual in the Arnoldi factorization + Scalar m_beta; // ||f||, B-norm of f + + const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type + // clang-format on + + // Given orthonormal basis functions V, find a nonzero vector f such that V'Bf = 0 + // Assume that f has been properly allocated + void expand_basis(MapConstMat& V, const Index seed, Vector& f, Scalar& fnorm) + { + using std::sqrt; + + const Scalar thresh = m_eps * sqrt(Scalar(m_n)); + Vector Vf(V.cols()); + for (Index iter = 0; iter < 5; iter++) + { + // Randomly generate a new vector and orthogonalize it against V + SimpleRandom rng(seed + 123 * iter); + f.noalias() = rng.random_vec(m_n); + // f <- f - V * V'Bf, so that f is orthogonal to V in B-norm + m_op.trans_product(V, f, Vf); + f.noalias() -= V * Vf; + // fnorm <- ||f|| + fnorm = m_op.norm(f); + + // If fnorm is too close to zero, we try a new random vector, + // otherwise return the result + if (fnorm >= thresh) + return; + } + } + +public: + Arnoldi(const ArnoldiOpType& op, Index m) : + m_op(op), m_n(op.rows()), m_m(m), m_k(0), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()) + {} + + virtual ~Arnoldi() {} + + // Const-reference to internal structures + const Matrix& matrix_V() const { return m_fac_V; } + const Matrix& matrix_H() const { return m_fac_H; } + const Vector& vector_f() const { return m_fac_f; } + Scalar f_norm() const { return m_beta; } + Index subspace_dim() const { return m_k; } + + // Initialize with an operator and an initial vector + void init(MapConstVec& v0, Index& op_counter) + { + m_fac_V.resize(m_n, m_m); + 
m_fac_H.resize(m_m, m_m); + m_fac_f.resize(m_n); + m_fac_H.setZero(); + + // Verify the initial vector + const Scalar v0norm = m_op.norm(v0); + if (v0norm < m_near_0) + throw std::invalid_argument("initial residual vector cannot be zero"); + + // Points to the first column of V + MapVec v(m_fac_V.data(), m_n); + + // Normalize + v.noalias() = v0 / v0norm; + + // Compute H and f + Vector w(m_n); + m_op.perform_op(v.data(), w.data()); + op_counter++; + + m_fac_H(0, 0) = m_op.inner_product(v, w); + m_fac_f.noalias() = w - v * m_fac_H(0, 0); + + // In some cases f is zero in exact arithmetics, but due to rounding errors + // it may contain tiny fluctuations. When this happens, we force f to be zero + if (m_fac_f.cwiseAbs().maxCoeff() < m_eps) + { + m_fac_f.setZero(); + m_beta = Scalar(0); + } + else + { + m_beta = m_op.norm(m_fac_f); + } + + // Indicate that this is a step-1 factorization + m_k = 1; + } + + // Arnoldi factorization starting from step-k + virtual void factorize_from(Index from_k, Index to_m, Index& op_counter) + { + using std::sqrt; + + if (to_m <= from_k) + return; + + if (from_k > m_k) + { + std::stringstream msg; + msg << "Arnoldi: from_k (= " << from_k << ") is larger than the current subspace dimension (= " << m_k << ")"; + throw std::invalid_argument(msg.str()); + } + + const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n)); + + // Pre-allocate vectors + Vector Vf(to_m); + Vector w(m_n); + + // Keep the upperleft k x k submatrix of H and set other elements to 0 + m_fac_H.rightCols(m_m - from_k).setZero(); + m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero(); + + for (Index i = from_k; i <= to_m - 1; i++) + { + bool restart = false; + // If beta = 0, then the next V is not full rank + // We need to generate a new residual vector that is orthogonal + // to the current V, which we call a restart + if (m_beta < m_near_0) + { + MapConstMat V(m_fac_V.data(), m_n, i); // The first i columns + expand_basis(V, 2 * i, m_fac_f, m_beta); + restart = true; + } + + // v <- f / ||f|| + m_fac_V.col(i).noalias() = m_fac_f / m_beta; // The (i+1)-th column + + // Note that H[i+1, i] equals to the unrestarted beta + m_fac_H(i, i - 1) = restart ? Scalar(0) : m_beta; + + // w <- A * v, v = m_fac_V.col(i) + m_op.perform_op(&m_fac_V(0, i), w.data()); + op_counter++; + + const Index i1 = i + 1; + // First i+1 columns of V + MapConstMat Vs(m_fac_V.data(), m_n, i1); + // h = m_fac_H(0:i, i) + MapVec h(&m_fac_H(0, i), i1); + // h <- V'Bw + m_op.trans_product(Vs, w, h); + + // f <- w - V * h + m_fac_f.noalias() = w - Vs * h; + m_beta = m_op.norm(m_fac_f); + + if (m_beta > Scalar(0.717) * m_op.norm(h)) + continue; + + // f/||f|| is going to be the next column of V, so we need to test + // whether V'B(f/||f||) ~= 0 + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + // If not, iteratively correct the residual + int count = 0; + while (count < 5 && ortho_err > m_eps * m_beta) + { + // There is an edge case: when beta=||f|| is close to zero, f mostly consists + // of noises of rounding errors, so the test [ortho_err < eps * beta] is very + // likely to fail. In particular, if beta=0, then the test is ensured to fail. + // Hence when this happens, we force f to be zero, and then restart in the + // next iteration. 
+ if (m_beta < beta_thresh) + { + m_fac_f.setZero(); + m_beta = Scalar(0); + break; + } + + // f <- f - V * Vf + m_fac_f.noalias() -= Vs * Vf.head(i1); + // h <- h + Vf + h.noalias() += Vf.head(i1); + // beta <- ||f|| + m_beta = m_op.norm(m_fac_f); + + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + count++; + } + } + + // Indicate that this is a step-m factorization + m_k = to_m; + } + + // Apply H -> Q'HQ, where Q is from a double shift QR decomposition + void compress_H(const DoubleShiftQR& decomp) + { + decomp.matrix_QtHQ(m_fac_H); + m_k -= 2; + } + + // Apply H -> Q'HQ, where Q is from an upper Hessenberg QR decomposition + void compress_H(const UpperHessenbergQR& decomp) + { + decomp.matrix_QtHQ(m_fac_H); + m_k--; + } + + // Apply V -> VQ and compute the new f. + // Should be called after compress_H(), since m_k is updated there. + // Only need to update the first k+1 columns of V + // The first (m - k + i) elements of the i-th column of Q are non-zero, + // and the rest are zero + void compress_V(const Matrix& Q) + { + Matrix Vs(m_n, m_k + 1); + for (Index i = 0; i < m_k; i++) + { + const Index nnz = m_m - m_k + i + 1; + MapConstVec q(&Q(0, i), nnz); + Vs.col(i).noalias() = m_fac_V.leftCols(nnz) * q; + } + Vs.col(m_k).noalias() = m_fac_V * Q.col(m_k); + m_fac_V.leftCols(m_k + 1).noalias() = Vs; + + Vector fk = m_fac_f * Q(m_m - 1, m_k - 1) + m_fac_V.col(m_k) * m_fac_H(m_k, m_k - 1); + m_fac_f.swap(fk); + m_beta = m_op.norm(m_fac_f); + } +}; + +} // namespace Spectra + +#endif // ARNOLDI_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h b/gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h new file mode 100644 index 000000000..2345e0fda --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/BKLDLT.h @@ -0,0 +1,530 @@ +// Copyright (C) 2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef BK_LDLT_H +#define BK_LDLT_H + +#include +#include +#include + +#include "../Util/CompInfo.h" + +namespace Spectra { + +// Bunch-Kaufman LDLT decomposition +// References: +// 1. Bunch, J. R., & Kaufman, L. (1977). Some stable methods for calculating inertia and solving symmetric linear systems. +// Mathematics of computation, 31(137), 163-179. +// 2. Golub, G. H., & Van Loan, C. F. (2012). Matrix computations (Vol. 3). JHU press. Section 4.4. +// 3. Bunch-Parlett diagonal pivoting +// 4. Ashcraft, C., Grimes, R. G., & Lewis, J. G. (1998). Accurate symmetric indefinite linear equation solvers. +// SIAM Journal on Matrix Analysis and Applications, 20(2), 513-561. 
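+// The decomposition computed below is P * (A - shift * I) * P' = L * D * L', where P is a
+// permutation matrix, L is unit lower triangular, and D is block diagonal with 1x1 and
+// 2x2 pivot blocks chosen by the Bunch-Kaufman strategy.
+//
+// Minimal usage sketch (comment only; the matrix and right-hand side are illustrative):
+//   Eigen::MatrixXd A = ...;                                 // symmetric matrix
+//   Eigen::VectorXd b = ...;
+//   Spectra::BKLDLT<double> solver(A, Eigen::Lower, 0.0);    // factorize A - 0 * I
+//   if (solver.info() == Spectra::SUCCESSFUL)
+//       Eigen::VectorXd x = solver.solve(b);                 // solves (A - shift * I) x = b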
+template +class BKLDLT +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstVec; + + typedef Eigen::Matrix IntVector; + typedef Eigen::Ref GenericVector; + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + typedef const Eigen::Ref ConstGenericVector; + + Index m_n; + Vector m_data; // storage for a lower-triangular matrix + std::vector m_colptr; // pointers to columns + IntVector m_perm; // [-2, -1, 3, 1, 4, 5]: 0 <-> 2, 1 <-> 1, 2 <-> 3, 3 <-> 1, 4 <-> 4, 5 <-> 5 + std::vector > m_permc; // compressed version of m_perm: [(0, 2), (2, 3), (3, 1)] + + bool m_computed; + int m_info; + + // Access to elements + // Pointer to the k-th column + Scalar* col_pointer(Index k) { return m_colptr[k]; } + // A[i, j] -> m_colptr[j][i - j], i >= j + Scalar& coeff(Index i, Index j) { return m_colptr[j][i - j]; } + const Scalar& coeff(Index i, Index j) const { return m_colptr[j][i - j]; } + // A[i, i] -> m_colptr[i][0] + Scalar& diag_coeff(Index i) { return m_colptr[i][0]; } + const Scalar& diag_coeff(Index i) const { return m_colptr[i][0]; } + + // Compute column pointers + void compute_pointer() + { + m_colptr.clear(); + m_colptr.reserve(m_n); + Scalar* head = m_data.data(); + + for (Index i = 0; i < m_n; i++) + { + m_colptr.push_back(head); + head += (m_n - i); + } + } + + // Copy mat - shift * I to m_data + void copy_data(ConstGenericMatrix& mat, int uplo, const Scalar& shift) + { + if (uplo == Eigen::Lower) + { + for (Index j = 0; j < m_n; j++) + { + const Scalar* begin = &mat.coeffRef(j, j); + const Index len = m_n - j; + std::copy(begin, begin + len, col_pointer(j)); + diag_coeff(j) -= shift; + } + } + else + { + Scalar* dest = m_data.data(); + for (Index i = 0; i < m_n; i++) + { + for (Index j = i; j < m_n; j++, dest++) + { + *dest = mat.coeff(i, j); + } + diag_coeff(i) -= shift; + } + } + } + + // Compute compressed permutations + void compress_permutation() + { + for (Index i = 0; i < m_n; i++) + { + // Recover the permutation action + const Index perm = (m_perm[i] >= 0) ? 
(m_perm[i]) : (-m_perm[i] - 1); + if (perm != i) + m_permc.push_back(std::make_pair(i, perm)); + } + } + + // Working on the A[k:end, k:end] submatrix + // Exchange k <-> r + // Assume r >= k + void pivoting_1x1(Index k, Index r) + { + // No permutation + if (k == r) + { + m_perm[k] = r; + return; + } + + // A[k, k] <-> A[r, r] + std::swap(diag_coeff(k), diag_coeff(r)); + + // A[(r+1):end, k] <-> A[(r+1):end, r] + std::swap_ranges(&coeff(r + 1, k), col_pointer(k + 1), &coeff(r + 1, r)); + + // A[(k+1):(r-1), k] <-> A[r, (k+1):(r-1)] + Scalar* src = &coeff(k + 1, k); + for (Index j = k + 1; j < r; j++, src++) + { + std::swap(*src, coeff(r, j)); + } + + m_perm[k] = r; + } + + // Working on the A[k:end, k:end] submatrix + // Exchange [k+1, k] <-> [r, p] + // Assume p >= k, r >= k+1 + void pivoting_2x2(Index k, Index r, Index p) + { + pivoting_1x1(k, p); + pivoting_1x1(k + 1, r); + + // A[k+1, k] <-> A[r, k] + std::swap(coeff(k + 1, k), coeff(r, k)); + + // Use negative signs to indicate a 2x2 block + // Also minus one to distinguish a negative zero from a positive zero + m_perm[k] = -m_perm[k] - 1; + m_perm[k + 1] = -m_perm[k + 1] - 1; + } + + // A[r1, c1:c2] <-> A[r2, c1:c2] + // Assume r2 >= r1 > c2 >= c1 + void interchange_rows(Index r1, Index r2, Index c1, Index c2) + { + if (r1 == r2) + return; + + for (Index j = c1; j <= c2; j++) + { + std::swap(coeff(r1, j), coeff(r2, j)); + } + } + + // lambda = |A[r, k]| = max{|A[k+1, k]|, ..., |A[end, k]|} + // Largest (in magnitude) off-diagonal element in the first column of the current reduced matrix + // r is the row index + // Assume k < end + Scalar find_lambda(Index k, Index& r) + { + using std::abs; + + const Scalar* head = col_pointer(k); // => A[k, k] + const Scalar* end = col_pointer(k + 1); + // Start with r=k+1, lambda=A[k+1, k] + r = k + 1; + Scalar lambda = abs(head[1]); + // Scan remaining elements + for (const Scalar* ptr = head + 2; ptr < end; ptr++) + { + const Scalar abs_elem = abs(*ptr); + if (lambda < abs_elem) + { + lambda = abs_elem; + r = k + (ptr - head); + } + } + + return lambda; + } + + // sigma = |A[p, r]| = max {|A[k, r]|, ..., |A[end, r]|} \ {A[r, r]} + // Largest (in magnitude) off-diagonal element in the r-th column of the current reduced matrix + // p is the row index + // Assume k < r < end + Scalar find_sigma(Index k, Index r, Index& p) + { + using std::abs; + + // First search A[r+1, r], ..., A[end, r], which has the same task as find_lambda() + // If r == end, we skip this search + Scalar sigma = Scalar(-1); + if (r < m_n - 1) + sigma = find_lambda(r, p); + + // Then search A[k, r], ..., A[r-1, r], which maps to A[r, k], ..., A[r, r-1] + for (Index j = k; j < r; j++) + { + const Scalar abs_elem = abs(coeff(r, j)); + if (sigma < abs_elem) + { + sigma = abs_elem; + p = j; + } + } + + return sigma; + } + + // Generate permutations and apply to A + // Return true if the resulting pivoting is 1x1, and false if 2x2 + bool permutate_mat(Index k, const Scalar& alpha) + { + using std::abs; + + Index r = k, p = k; + const Scalar lambda = find_lambda(k, r); + + // If lambda=0, no need to interchange + if (lambda > Scalar(0)) + { + const Scalar abs_akk = abs(diag_coeff(k)); + // If |A[k, k]| >= alpha * lambda, no need to interchange + if (abs_akk < alpha * lambda) + { + const Scalar sigma = find_sigma(k, r, p); + + // If sigma * |A[k, k]| >= alpha * lambda^2, no need to interchange + if (sigma * abs_akk < alpha * lambda * lambda) + { + if (abs_akk >= alpha * sigma) + { + // Permutation on A + pivoting_1x1(k, r); + + // 
Permutation on L + interchange_rows(k, r, 0, k - 1); + return true; + } + else + { + // There are two versions of permutation here + // 1. A[k+1, k] <-> A[r, k] + // 2. A[k+1, k] <-> A[r, p], where p >= k and r >= k+1 + // + // Version 1 and 2 are used by Ref[1] and Ref[2], respectively + + // Version 1 implementation + p = k; + + // Version 2 implementation + // [r, p] and [p, r] are symmetric, but we need to make sure + // p >= k and r >= k+1, so it is safe to always make r > p + // One exception is when min{r,p} == k+1, in which case we make + // r = k+1, so that only one permutation needs to be performed + /* const Index rp_min = std::min(r, p); + const Index rp_max = std::max(r, p); + if(rp_min == k + 1) + { + r = rp_min; p = rp_max; + } else { + r = rp_max; p = rp_min; + } */ + + // Right now we use Version 1 since it reduces the overhead of interchange + + // Permutation on A + pivoting_2x2(k, r, p); + // Permutation on L + interchange_rows(k, p, 0, k - 1); + interchange_rows(k + 1, r, 0, k - 1); + return false; + } + } + } + } + + return true; + } + + // E = [e11, e12] + // [e21, e22] + // Overwrite E with inv(E) + void inverse_inplace_2x2(Scalar& e11, Scalar& e21, Scalar& e22) const + { + // inv(E) = [d11, d12], d11 = e22/delta, d21 = -e21/delta, d22 = e11/delta + // [d21, d22] + const Scalar delta = e11 * e22 - e21 * e21; + std::swap(e11, e22); + e11 /= delta; + e22 /= delta; + e21 = -e21 / delta; + } + + // Return value is the status, SUCCESSFUL/NUMERICAL_ISSUE + int gaussian_elimination_1x1(Index k) + { + // D = 1 / A[k, k] + const Scalar akk = diag_coeff(k); + // Return NUMERICAL_ISSUE if not invertible + if (akk == Scalar(0)) + return NUMERICAL_ISSUE; + + diag_coeff(k) = Scalar(1) / akk; + + // B -= l * l' / A[k, k], B := A[(k+1):end, (k+1):end], l := L[(k+1):end, k] + Scalar* lptr = col_pointer(k) + 1; + const Index ldim = m_n - k - 1; + MapVec l(lptr, ldim); + for (Index j = 0; j < ldim; j++) + { + MapVec(col_pointer(j + k + 1), ldim - j).noalias() -= (lptr[j] / akk) * l.tail(ldim - j); + } + + // l /= A[k, k] + l /= akk; + + return SUCCESSFUL; + } + + // Return value is the status, SUCCESSFUL/NUMERICAL_ISSUE + int gaussian_elimination_2x2(Index k) + { + // D = inv(E) + Scalar& e11 = diag_coeff(k); + Scalar& e21 = coeff(k + 1, k); + Scalar& e22 = diag_coeff(k + 1); + // Return NUMERICAL_ISSUE if not invertible + if (e11 * e22 - e21 * e21 == Scalar(0)) + return NUMERICAL_ISSUE; + + inverse_inplace_2x2(e11, e21, e22); + + // X = l * inv(E), l := L[(k+2):end, k:(k+1)] + Scalar* l1ptr = &coeff(k + 2, k); + Scalar* l2ptr = &coeff(k + 2, k + 1); + const Index ldim = m_n - k - 2; + MapVec l1(l1ptr, ldim), l2(l2ptr, ldim); + + Eigen::Matrix X(ldim, 2); + X.col(0).noalias() = l1 * e11 + l2 * e21; + X.col(1).noalias() = l1 * e21 + l2 * e22; + + // B -= l * inv(E) * l' = X * l', B = A[(k+2):end, (k+2):end] + for (Index j = 0; j < ldim; j++) + { + MapVec(col_pointer(j + k + 2), ldim - j).noalias() -= (X.col(0).tail(ldim - j) * l1ptr[j] + X.col(1).tail(ldim - j) * l2ptr[j]); + } + + // l = X + l1.noalias() = X.col(0); + l2.noalias() = X.col(1); + + return SUCCESSFUL; + } + +public: + BKLDLT() : + m_n(0), m_computed(false), m_info(NOT_COMPUTED) + {} + + // Factorize mat - shift * I + BKLDLT(ConstGenericMatrix& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0)) : + m_n(mat.rows()), m_computed(false), m_info(NOT_COMPUTED) + { + compute(mat, uplo, shift); + } + + void compute(ConstGenericMatrix& mat, int uplo = Eigen::Lower, const Scalar& shift = Scalar(0)) + { + using std::abs; 
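+        // Outline of compute(): copy the lower triangle of (mat - shift * I) into packed
+        // column-major storage, then for k = 0, 1, ... perform Bunch-Kaufman pivoting with
+        // threshold alpha = (1 + sqrt(17)) / 8, followed by a 1x1 or 2x2 block Gaussian
+        // elimination step; the permutations are recorded in m_perm / m_permc for use in
+        // solve_inplace().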
+ + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("BKLDLT: matrix must be square"); + + m_perm.setLinSpaced(m_n, 0, m_n - 1); + m_permc.clear(); + + // Copy data + m_data.resize((m_n * (m_n + 1)) / 2); + compute_pointer(); + copy_data(mat, uplo, shift); + + const Scalar alpha = (1.0 + std::sqrt(17.0)) / 8.0; + Index k = 0; + for (k = 0; k < m_n - 1; k++) + { + // 1. Interchange rows and columns of A, and save the result to m_perm + bool is_1x1 = permutate_mat(k, alpha); + + // 2. Gaussian elimination + if (is_1x1) + { + m_info = gaussian_elimination_1x1(k); + } + else + { + m_info = gaussian_elimination_2x2(k); + k++; + } + + // 3. Check status + if (m_info != SUCCESSFUL) + break; + } + // Invert the last 1x1 block if it exists + if (k == m_n - 1) + { + const Scalar akk = diag_coeff(k); + if (akk == Scalar(0)) + m_info = NUMERICAL_ISSUE; + + diag_coeff(k) = Scalar(1) / diag_coeff(k); + } + + compress_permutation(); + + m_computed = true; + } + + // Solve Ax=b + void solve_inplace(GenericVector b) const + { + if (!m_computed) + throw std::logic_error("BKLDLT: need to call compute() first"); + + // PAP' = LDL' + // 1. b -> Pb + Scalar* x = b.data(); + MapVec res(x, m_n); + Index npermc = m_permc.size(); + for (Index i = 0; i < npermc; i++) + { + std::swap(x[m_permc[i].first], x[m_permc[i].second]); + } + + // 2. Lz = Pb + // If m_perm[end] < 0, then end with m_n - 3, otherwise end with m_n - 2 + const Index end = (m_perm[m_n - 1] < 0) ? (m_n - 3) : (m_n - 2); + for (Index i = 0; i <= end; i++) + { + const Index b1size = m_n - i - 1; + const Index b2size = b1size - 1; + if (m_perm[i] >= 0) + { + MapConstVec l(&coeff(i + 1, i), b1size); + res.segment(i + 1, b1size).noalias() -= l * x[i]; + } + else + { + MapConstVec l1(&coeff(i + 2, i), b2size); + MapConstVec l2(&coeff(i + 2, i + 1), b2size); + res.segment(i + 2, b2size).noalias() -= (l1 * x[i] + l2 * x[i + 1]); + i++; + } + } + + // 3. Dw = z + for (Index i = 0; i < m_n; i++) + { + const Scalar e11 = diag_coeff(i); + if (m_perm[i] >= 0) + { + x[i] *= e11; + } + else + { + const Scalar e21 = coeff(i + 1, i), e22 = diag_coeff(i + 1); + const Scalar wi = x[i] * e11 + x[i + 1] * e21; + x[i + 1] = x[i] * e21 + x[i + 1] * e22; + x[i] = wi; + i++; + } + } + + // 4. L'y = w + // If m_perm[end] < 0, then start with m_n - 3, otherwise start with m_n - 2 + Index i = (m_perm[m_n - 1] < 0) ? (m_n - 3) : (m_n - 2); + for (; i >= 0; i--) + { + const Index ldim = m_n - i - 1; + MapConstVec l(&coeff(i + 1, i), ldim); + x[i] -= res.segment(i + 1, ldim).dot(l); + + if (m_perm[i] < 0) + { + MapConstVec l2(&coeff(i + 1, i - 1), ldim); + x[i - 1] -= res.segment(i + 1, ldim).dot(l2); + i--; + } + } + + // 5. x = P'y + for (Index i = npermc - 1; i >= 0; i--) + { + std::swap(x[m_permc[i].first], x[m_permc[i].second]); + } + } + + Vector solve(ConstGenericVector& b) const + { + Vector res = b; + solve_inplace(res); + return res; + } + + int info() const { return m_info; } +}; + +} // namespace Spectra + +#endif // BK_LDLT_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h b/gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h new file mode 100644 index 000000000..2845688b4 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/DoubleShiftQR.h @@ -0,0 +1,384 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
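+// Implicit (Francis) double-shift QR step for a real upper Hessenberg matrix H.
+// Given the shift constants s and t (the sum and the product of the two implicit shifts),
+// the class effectively performs the QR decomposition of H^2 - s * H + t * I using
+// Householder reflectors that act on at most three rows, and exposes the similarity
+// transform Q'HQ as well as the products Q'y and Y * Q.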
+ +#ifndef DOUBLE_SHIFT_QR_H +#define DOUBLE_SHIFT_QR_H + +#include +#include // std::vector +#include // std::min, std::fill, std::copy +#include // std::abs, std::sqrt, std::pow +#include // std::invalid_argument, std::logic_error + +#include "../Util/TypeTraits.h" + +namespace Spectra { + +template +class DoubleShiftQR +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Matrix3X; + typedef Eigen::Matrix Vector; + typedef Eigen::Array IntArray; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + Index m_n; // Dimension of the matrix + Matrix m_mat_H; // A copy of the matrix to be factorized + Scalar m_shift_s; // Shift constant + Scalar m_shift_t; // Shift constant + Matrix3X m_ref_u; // Householder reflectors + IntArray m_ref_nr; // How many rows does each reflector affects + // 3 - A general reflector + // 2 - A Givens rotation + // 1 - An identity transformation + const Scalar m_near_0; // a very small value, but 1.0 / m_safe_min does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, + // e.g. ~= 1e-16 for the "double" type + const Scalar m_eps_rel; + const Scalar m_eps_abs; + bool m_computed; // Whether matrix has been factorized + + void compute_reflector(const Scalar& x1, const Scalar& x2, const Scalar& x3, Index ind) + { + using std::abs; + + Scalar* u = &m_ref_u.coeffRef(0, ind); + unsigned char* nr = m_ref_nr.data(); + // In general case the reflector affects 3 rows + nr[ind] = 3; + Scalar x2x3 = Scalar(0); + // If x3 is zero, decrease nr by 1 + if (abs(x3) < m_near_0) + { + // If x2 is also zero, nr will be 1, and we can exit this function + if (abs(x2) < m_near_0) + { + nr[ind] = 1; + return; + } + else + { + nr[ind] = 2; + } + x2x3 = abs(x2); + } + else + { + x2x3 = Eigen::numext::hypot(x2, x3); + } + + // x1' = x1 - rho * ||x|| + // rho = -sign(x1), if x1 == 0, we choose rho = 1 + Scalar x1_new = x1 - ((x1 <= 0) - (x1 > 0)) * Eigen::numext::hypot(x1, x2x3); + Scalar x_norm = Eigen::numext::hypot(x1_new, x2x3); + // Double check the norm of new x + if (x_norm < m_near_0) + { + nr[ind] = 1; + return; + } + u[0] = x1_new / x_norm; + u[1] = x2 / x_norm; + u[2] = x3 / x_norm; + } + + void compute_reflector(const Scalar* x, Index ind) + { + compute_reflector(x[0], x[1], x[2], ind); + } + + // Update the block X = H(il:iu, il:iu) + void update_block(Index il, Index iu) + { + // Block size + const Index bsize = iu - il + 1; + + // If block size == 1, there is no need to apply reflectors + if (bsize == 1) + { + m_ref_nr.coeffRef(il) = 1; + return; + } + + const Scalar x00 = m_mat_H.coeff(il, il), + x01 = m_mat_H.coeff(il, il + 1), + x10 = m_mat_H.coeff(il + 1, il), + x11 = m_mat_H.coeff(il + 1, il + 1); + // m00 = x00 * (x00 - s) + x01 * x10 + t + const Scalar m00 = x00 * (x00 - m_shift_s) + x01 * x10 + m_shift_t; + // m10 = x10 * (x00 + x11 - s) + const Scalar m10 = x10 * (x00 + x11 - m_shift_s); + + // For block size == 2, do a Givens rotation on M = X * X - s * X + t * I + if (bsize == 2) + { + // This causes nr=2 + compute_reflector(m00, m10, 0, il); + // Apply the reflector to X + apply_PX(m_mat_H.block(il, il, 2, m_n - il), m_n, il); + apply_XP(m_mat_H.block(0, il, il + 2, 2), m_n, il); + + m_ref_nr.coeffRef(il + 1) = 1; + return; + } + + // For block size >=3, use the regular strategy + // m20 = x21 * x10 + const Scalar m20 = m_mat_H.coeff(il + 2, il + 1) * m_mat_H.coeff(il + 1, il); + compute_reflector(m00, m10, m20, il); + + // Apply the first 
reflector + apply_PX(m_mat_H.block(il, il, 3, m_n - il), m_n, il); + apply_XP(m_mat_H.block(0, il, il + std::min(bsize, Index(4)), 3), m_n, il); + + // Calculate the following reflectors + // If entering this loop, block size is at least 4. + for (Index i = 1; i < bsize - 2; i++) + { + compute_reflector(&m_mat_H.coeffRef(il + i, il + i - 1), il + i); + // Apply the reflector to X + apply_PX(m_mat_H.block(il + i, il + i - 1, 3, m_n - il - i + 1), m_n, il + i); + apply_XP(m_mat_H.block(0, il + i, il + std::min(bsize, Index(i + 4)), 3), m_n, il + i); + } + + // The last reflector + // This causes nr=2 + compute_reflector(m_mat_H.coeff(iu - 1, iu - 2), m_mat_H.coeff(iu, iu - 2), 0, iu - 1); + // Apply the reflector to X + apply_PX(m_mat_H.block(iu - 1, iu - 2, 2, m_n - iu + 2), m_n, iu - 1); + apply_XP(m_mat_H.block(0, iu - 1, il + bsize, 2), m_n, iu - 1); + + m_ref_nr.coeffRef(iu) = 1; + } + + // P = I - 2 * u * u' = P' + // PX = X - 2 * u * (u'X) + void apply_PX(GenericMatrix X, Index stride, Index u_ind) const + { + const Index nr = m_ref_nr.coeff(u_ind); + if (nr == 1) + return; + + const Scalar u0 = m_ref_u.coeff(0, u_ind), + u1 = m_ref_u.coeff(1, u_ind); + const Scalar u0_2 = Scalar(2) * u0, + u1_2 = Scalar(2) * u1; + + const Index nrow = X.rows(); + const Index ncol = X.cols(); + + Scalar* xptr = X.data(); + if (nr == 2 || nrow == 2) + { + for (Index i = 0; i < ncol; i++, xptr += stride) + { + const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1]; + xptr[0] -= tmp * u0; + xptr[1] -= tmp * u1; + } + } + else + { + const Scalar u2 = m_ref_u.coeff(2, u_ind); + const Scalar u2_2 = Scalar(2) * u2; + for (Index i = 0; i < ncol; i++, xptr += stride) + { + const Scalar tmp = u0_2 * xptr[0] + u1_2 * xptr[1] + u2_2 * xptr[2]; + xptr[0] -= tmp * u0; + xptr[1] -= tmp * u1; + xptr[2] -= tmp * u2; + } + } + } + + // x is a pointer to a vector + // Px = x - 2 * dot(x, u) * u + void apply_PX(Scalar* x, Index u_ind) const + { + const Index nr = m_ref_nr.coeff(u_ind); + if (nr == 1) + return; + + const Scalar u0 = m_ref_u.coeff(0, u_ind), + u1 = m_ref_u.coeff(1, u_ind), + u2 = m_ref_u.coeff(2, u_ind); + + // When the reflector only contains two elements, u2 has been set to zero + const bool nr_is_2 = (nr == 2); + const Scalar dot2 = Scalar(2) * (x[0] * u0 + x[1] * u1 + (nr_is_2 ? 
0 : (x[2] * u2))); + x[0] -= dot2 * u0; + x[1] -= dot2 * u1; + if (!nr_is_2) + x[2] -= dot2 * u2; + } + + // XP = X - 2 * (X * u) * u' + void apply_XP(GenericMatrix X, Index stride, Index u_ind) const + { + const Index nr = m_ref_nr.coeff(u_ind); + if (nr == 1) + return; + + const Scalar u0 = m_ref_u.coeff(0, u_ind), + u1 = m_ref_u.coeff(1, u_ind); + const Scalar u0_2 = Scalar(2) * u0, + u1_2 = Scalar(2) * u1; + + const int nrow = X.rows(); + const int ncol = X.cols(); + Scalar *X0 = X.data(), *X1 = X0 + stride; // X0 => X.col(0), X1 => X.col(1) + + if (nr == 2 || ncol == 2) + { + // tmp = 2 * u0 * X0 + 2 * u1 * X1 + // X0 => X0 - u0 * tmp + // X1 => X1 - u1 * tmp + for (Index i = 0; i < nrow; i++) + { + const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i]; + X0[i] -= tmp * u0; + X1[i] -= tmp * u1; + } + } + else + { + Scalar* X2 = X1 + stride; // X2 => X.col(2) + const Scalar u2 = m_ref_u.coeff(2, u_ind); + const Scalar u2_2 = Scalar(2) * u2; + for (Index i = 0; i < nrow; i++) + { + const Scalar tmp = u0_2 * X0[i] + u1_2 * X1[i] + u2_2 * X2[i]; + X0[i] -= tmp * u0; + X1[i] -= tmp * u1; + X2[i] -= tmp * u2; + } + } + } + +public: + DoubleShiftQR(Index size) : + m_n(size), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + m_eps_rel(m_eps), + m_eps_abs(m_near_0 * (m_n / m_eps)), + m_computed(false) + {} + + DoubleShiftQR(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t) : + m_n(mat.rows()), + m_mat_H(m_n, m_n), + m_shift_s(s), + m_shift_t(t), + m_ref_u(3, m_n), + m_ref_nr(m_n), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + m_eps_rel(m_eps), + m_eps_abs(m_near_0 * (m_n / m_eps)), + m_computed(false) + { + compute(mat, s, t); + } + + void compute(ConstGenericMatrix& mat, const Scalar& s, const Scalar& t) + { + using std::abs; + + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("DoubleShiftQR: matrix must be square"); + + m_mat_H.resize(m_n, m_n); + m_shift_s = s; + m_shift_t = t; + m_ref_u.resize(3, m_n); + m_ref_nr.resize(m_n); + + // Make a copy of mat + std::copy(mat.data(), mat.data() + mat.size(), m_mat_H.data()); + + // Obtain the indices of zero elements in the subdiagonal, + // so that H can be divided into several blocks + std::vector zero_ind; + zero_ind.reserve(m_n - 1); + zero_ind.push_back(0); + Scalar* Hii = m_mat_H.data(); + for (Index i = 0; i < m_n - 2; i++, Hii += (m_n + 1)) + { + // Hii[1] => m_mat_H(i + 1, i) + const Scalar h = abs(Hii[1]); + if (h <= 0 || h <= m_eps_rel * (abs(Hii[0]) + abs(Hii[m_n + 1]))) + { + Hii[1] = 0; + zero_ind.push_back(i + 1); + } + // Make sure m_mat_H is upper Hessenberg + // Zero the elements below m_mat_H(i + 1, i) + std::fill(Hii + 2, Hii + m_n - i, Scalar(0)); + } + zero_ind.push_back(m_n); + + for (std::vector::size_type i = 0; i < zero_ind.size() - 1; i++) + { + const Index start = zero_ind[i]; + const Index end = zero_ind[i + 1] - 1; + // Compute refelctors and update each block + update_block(start, end); + } + + m_computed = true; + } + + void matrix_QtHQ(Matrix& dest) const + { + if (!m_computed) + throw std::logic_error("DoubleShiftQR: need to call compute() first"); + + dest.noalias() = m_mat_H; + } + + // Q = P0 * P1 * ... + // Q'y = P_{n-2} * ... 
* P1 * P0 * y + void apply_QtY(Vector& y) const + { + if (!m_computed) + throw std::logic_error("DoubleShiftQR: need to call compute() first"); + + Scalar* y_ptr = y.data(); + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++, y_ptr++) + { + apply_PX(y_ptr, i); + } + } + + // Q = P0 * P1 * ... + // YQ = Y * P0 * P1 * ... + void apply_YQ(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("DoubleShiftQR: need to call compute() first"); + + const Index nrow = Y.rows(); + const Index n2 = m_n - 2; + for (Index i = 0; i < n2; i++) + { + apply_XP(Y.block(0, i, nrow, 3), nrow, i); + } + apply_XP(Y.block(0, n2, nrow, 2), nrow, n2); + } +}; + +} // namespace Spectra + +#endif // DOUBLE_SHIFT_QR_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/Lanczos.h b/gtsam/3rdparty/Spectra/LinAlg/Lanczos.h new file mode 100644 index 000000000..53cf52be8 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/Lanczos.h @@ -0,0 +1,167 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef LANCZOS_H +#define LANCZOS_H + +#include +#include // std::sqrt +#include // std::invalid_argument +#include // std::stringstream + +#include "Arnoldi.h" + +namespace Spectra { + +// Lanczos factorization A * V = V * H + f * e' +// A: n x n +// V: n x k +// H: k x k +// f: n x 1 +// e: [0, ..., 0, 1] +// V and H are allocated of dimension m, so the maximum value of k is m +template +class Lanczos : public Arnoldi +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstMat; + typedef Eigen::Map MapConstVec; + + using Arnoldi::m_op; + using Arnoldi::m_n; + using Arnoldi::m_m; + using Arnoldi::m_k; + using Arnoldi::m_fac_V; + using Arnoldi::m_fac_H; + using Arnoldi::m_fac_f; + using Arnoldi::m_beta; + using Arnoldi::m_near_0; + using Arnoldi::m_eps; + +public: + Lanczos(const ArnoldiOpType& op, Index m) : + Arnoldi(op, m) + {} + + // Lanczos factorization starting from step-k + void factorize_from(Index from_k, Index to_m, Index& op_counter) + { + using std::sqrt; + + if (to_m <= from_k) + return; + + if (from_k > m_k) + { + std::stringstream msg; + msg << "Lanczos: from_k (= " << from_k << ") is larger than the current subspace dimension (= " << m_k << ")"; + throw std::invalid_argument(msg.str()); + } + + const Scalar beta_thresh = m_eps * sqrt(Scalar(m_n)); + + // Pre-allocate vectors + Vector Vf(to_m); + Vector w(m_n); + + // Keep the upperleft k x k submatrix of H and set other elements to 0 + m_fac_H.rightCols(m_m - from_k).setZero(); + m_fac_H.block(from_k, 0, m_m - from_k, from_k).setZero(); + + for (Index i = from_k; i <= to_m - 1; i++) + { + bool restart = false; + // If beta = 0, then the next V is not full rank + // We need to generate a new residual vector that is orthogonal + // to the current V, which we call a restart + if (m_beta < m_near_0) + { + MapConstMat V(m_fac_V.data(), m_n, i); // The first i columns + this->expand_basis(V, 2 * i, m_fac_f, m_beta); + restart = true; + } + + // v <- f / ||f|| + MapVec v(&m_fac_V(0, i), m_n); // The (i+1)-th column + v.noalias() = m_fac_f / m_beta; + + // Note that H[i+1, i] equals to the unrestarted beta + m_fac_H(i, i - 1) = restart ? 
Scalar(0) : m_beta; + + // w <- A * v + m_op.perform_op(v.data(), w.data()); + op_counter++; + + // H[i+1, i+1] = = v'Bw + m_fac_H(i - 1, i) = m_fac_H(i, i - 1); // Due to symmetry + m_fac_H(i, i) = m_op.inner_product(v, w); + + // f <- w - V * V'Bw = w - H[i+1, i] * V{i} - H[i+1, i+1] * V{i+1} + // If restarting, we know that H[i+1, i] = 0 + if (restart) + m_fac_f.noalias() = w - m_fac_H(i, i) * v; + else + m_fac_f.noalias() = w - m_fac_H(i, i - 1) * m_fac_V.col(i - 1) - m_fac_H(i, i) * v; + + m_beta = m_op.norm(m_fac_f); + + // f/||f|| is going to be the next column of V, so we need to test + // whether V'B(f/||f||) ~= 0 + const Index i1 = i + 1; + MapMat Vs(m_fac_V.data(), m_n, i1); // The first (i+1) columns + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + Scalar ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + // If not, iteratively correct the residual + int count = 0; + while (count < 5 && ortho_err > m_eps * m_beta) + { + // There is an edge case: when beta=||f|| is close to zero, f mostly consists + // of noises of rounding errors, so the test [ortho_err < eps * beta] is very + // likely to fail. In particular, if beta=0, then the test is ensured to fail. + // Hence when this happens, we force f to be zero, and then restart in the + // next iteration. + if (m_beta < beta_thresh) + { + m_fac_f.setZero(); + m_beta = Scalar(0); + break; + } + + // f <- f - V * Vf + m_fac_f.noalias() -= Vs * Vf.head(i1); + // h <- h + Vf + m_fac_H(i - 1, i) += Vf[i - 1]; + m_fac_H(i, i - 1) = m_fac_H(i - 1, i); + m_fac_H(i, i) += Vf[i]; + // beta <- ||f|| + m_beta = m_op.norm(m_fac_f); + + m_op.trans_product(Vs, m_fac_f, Vf.head(i1)); + ortho_err = Vf.head(i1).cwiseAbs().maxCoeff(); + count++; + } + } + + // Indicate that this is a step-m factorization + m_k = to_m; + } + + // Apply H -> Q'HQ, where Q is from a tridiagonal QR decomposition + void compress_H(const TridiagQR& decomp) + { + decomp.matrix_QtHQ(m_fac_H); + m_k--; + } +}; + +} // namespace Spectra + +#endif // LANCZOS_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h b/gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h new file mode 100644 index 000000000..122e0e551 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/TridiagEigen.h @@ -0,0 +1,219 @@ +// The code was adapted from Eigen/src/Eigenvaleus/SelfAdjointEigenSolver.h +// +// Copyright (C) 2008-2010 Gael Guennebaud +// Copyright (C) 2010 Jitse Niesen +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
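+// Eigen decomposition of a real symmetric tridiagonal matrix, computed with implicitly
+// shifted QR iterations built from Givens rotations (the shift mu below is the Wilkinson
+// shift, as in Eigen's SelfAdjointEigenSolver). After compute(), eigenvalues() returns the
+// converged diagonal and eigenvectors() the accumulated rotations.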
+ +#ifndef TRIDIAG_EIGEN_H +#define TRIDIAG_EIGEN_H + +#include +#include +#include + +#include "../Util/TypeTraits.h" + +namespace Spectra { + +template +class TridiagEigen +{ +private: + typedef Eigen::Index Index; + // For convenience in adapting the tridiagonal_qr_step() function + typedef Scalar RealScalar; + + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + Index m_n; + Vector m_main_diag; // Main diagonal elements of the matrix + Vector m_sub_diag; // Sub-diagonal elements of the matrix + Matrix m_evecs; // To store eigenvectors + + bool m_computed; + const Scalar m_near_0; // a very small value, ~= 1e-307 for the "double" type + + // Adapted from Eigen/src/Eigenvaleus/SelfAdjointEigenSolver.h + static void tridiagonal_qr_step(RealScalar* diag, + RealScalar* subdiag, Index start, + Index end, Scalar* matrixQ, + Index n) + { + using std::abs; + + RealScalar td = (diag[end - 1] - diag[end]) * RealScalar(0.5); + RealScalar e = subdiag[end - 1]; + // Note that thanks to scaling, e^2 or td^2 cannot overflow, however they can still + // underflow thus leading to inf/NaN values when using the following commented code: + // RealScalar e2 = numext::abs2(subdiag[end-1]); + // RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2)); + // This explain the following, somewhat more complicated, version: + RealScalar mu = diag[end]; + if (td == Scalar(0)) + mu -= abs(e); + else + { + RealScalar e2 = Eigen::numext::abs2(subdiag[end - 1]); + RealScalar h = Eigen::numext::hypot(td, e); + if (e2 == RealScalar(0)) + mu -= (e / (td + (td > RealScalar(0) ? RealScalar(1) : RealScalar(-1)))) * (e / h); + else + mu -= e2 / (td + (td > RealScalar(0) ? 
h : -h)); + } + + RealScalar x = diag[start] - mu; + RealScalar z = subdiag[start]; + Eigen::Map q(matrixQ, n, n); + for (Index k = start; k < end; ++k) + { + Eigen::JacobiRotation rot; + rot.makeGivens(x, z); + + const RealScalar s = rot.s(); + const RealScalar c = rot.c(); + + // do T = G' T G + RealScalar sdk = s * diag[k] + c * subdiag[k]; + RealScalar dkp1 = s * subdiag[k] + c * diag[k + 1]; + + diag[k] = c * (c * diag[k] - s * subdiag[k]) - s * (c * subdiag[k] - s * diag[k + 1]); + diag[k + 1] = s * sdk + c * dkp1; + subdiag[k] = c * sdk - s * dkp1; + + if (k > start) + subdiag[k - 1] = c * subdiag[k - 1] - s * z; + + x = subdiag[k]; + + if (k < end - 1) + { + z = -s * subdiag[k + 1]; + subdiag[k + 1] = c * subdiag[k + 1]; + } + + // apply the givens rotation to the unit matrix Q = Q * G + if (matrixQ) + q.applyOnTheRight(k, k + 1, rot); + } + } + +public: + TridiagEigen() : + m_n(0), m_computed(false), + m_near_0(TypeTraits::min() * Scalar(10)) + {} + + TridiagEigen(ConstGenericMatrix& mat) : + m_n(mat.rows()), m_computed(false), + m_near_0(TypeTraits::min() * Scalar(10)) + { + compute(mat); + } + + void compute(ConstGenericMatrix& mat) + { + using std::abs; + + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("TridiagEigen: matrix must be square"); + + m_main_diag.resize(m_n); + m_sub_diag.resize(m_n - 1); + m_evecs.resize(m_n, m_n); + m_evecs.setIdentity(); + + // Scale matrix to improve stability + const Scalar scale = std::max(mat.diagonal().cwiseAbs().maxCoeff(), + mat.diagonal(-1).cwiseAbs().maxCoeff()); + // If scale=0, mat is a zero matrix, so we can early stop + if (scale < m_near_0) + { + // m_main_diag contains eigenvalues + m_main_diag.setZero(); + // m_evecs has been set identity + // m_evecs.setIdentity(); + m_computed = true; + return; + } + m_main_diag.noalias() = mat.diagonal() / scale; + m_sub_diag.noalias() = mat.diagonal(-1) / scale; + + Scalar* diag = m_main_diag.data(); + Scalar* subdiag = m_sub_diag.data(); + + Index end = m_n - 1; + Index start = 0; + Index iter = 0; // total number of iterations + int info = 0; // 0 for success, 1 for failure + + const Scalar considerAsZero = TypeTraits::min(); + const Scalar precision = Scalar(2) * Eigen::NumTraits::epsilon(); + + while (end > 0) + { + for (Index i = start; i < end; i++) + if (abs(subdiag[i]) <= considerAsZero || + abs(subdiag[i]) <= (abs(diag[i]) + abs(diag[i + 1])) * precision) + subdiag[i] = 0; + + // find the largest unreduced block + while (end > 0 && subdiag[end - 1] == Scalar(0)) + end--; + + if (end <= 0) + break; + + // if we spent too many iterations, we give up + iter++; + if (iter > 30 * m_n) + { + info = 1; + break; + } + + start = end - 1; + while (start > 0 && subdiag[start - 1] != Scalar(0)) + start--; + + tridiagonal_qr_step(diag, subdiag, start, end, m_evecs.data(), m_n); + } + + if (info > 0) + throw std::runtime_error("TridiagEigen: eigen decomposition failed"); + + // Scale eigenvalues back + m_main_diag *= scale; + + m_computed = true; + } + + const Vector& eigenvalues() const + { + if (!m_computed) + throw std::logic_error("TridiagEigen: need to call compute() first"); + + // After calling compute(), main_diag will contain the eigenvalues. 
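+        // The values are reported in the order produced by the QR iteration (no sorting
+        // is applied here), and have already been scaled back inside compute().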
+ return m_main_diag; + } + + const Matrix& eigenvectors() const + { + if (!m_computed) + throw std::logic_error("TridiagEigen: need to call compute() first"); + + return m_evecs; + } +}; + +} // namespace Spectra + +#endif // TRIDIAG_EIGEN_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergEigen.h b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergEigen.h new file mode 100644 index 000000000..4865e9db8 --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergEigen.h @@ -0,0 +1,319 @@ +// The code was adapted from Eigen/src/Eigenvaleus/EigenSolver.h +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2010,2012 Jitse Niesen +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef UPPER_HESSENBERG_EIGEN_H +#define UPPER_HESSENBERG_EIGEN_H + +#include +#include +#include + +namespace Spectra { + +template +class UpperHessenbergEigen +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + Index m_n; // Size of the matrix + Eigen::RealSchur m_realSchur; // Schur decomposition solver + Matrix m_matT; // Schur T matrix + Matrix m_eivec; // Storing eigenvectors + ComplexVector m_eivalues; // Eigenvalues + + bool m_computed; + + void doComputeEigenvectors() + { + using std::abs; + + const Index size = m_eivec.cols(); + const Scalar eps = Eigen::NumTraits::epsilon(); + + // inefficient! this is already computed in RealSchur + Scalar norm(0); + for (Index j = 0; j < size; ++j) + { + norm += m_matT.row(j).segment((std::max)(j - 1, Index(0)), size - (std::max)(j - 1, Index(0))).cwiseAbs().sum(); + } + + // Backsubstitute to find vectors of upper triangular form + if (norm == Scalar(0)) + return; + + for (Index n = size - 1; n >= 0; n--) + { + Scalar p = m_eivalues.coeff(n).real(); + Scalar q = m_eivalues.coeff(n).imag(); + + // Scalar vector + if (q == Scalar(0)) + { + Scalar lastr(0), lastw(0); + Index l = n; + + m_matT.coeffRef(n, n) = Scalar(1); + for (Index i = n - 1; i >= 0; i--) + { + Scalar w = m_matT.coeff(i, i) - p; + Scalar r = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1)); + + if (m_eivalues.coeff(i).imag() < Scalar(0)) + { + lastw = w; + lastr = r; + } + else + { + l = i; + if (m_eivalues.coeff(i).imag() == Scalar(0)) + { + if (w != Scalar(0)) + m_matT.coeffRef(i, n) = -r / w; + else + m_matT.coeffRef(i, n) = -r / (eps * norm); + } + else // Solve real equations + { + Scalar x = m_matT.coeff(i, i + 1); + Scalar y = m_matT.coeff(i + 1, i); + Scalar denom = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag(); + Scalar t = (x * lastr - lastw * r) / denom; + m_matT.coeffRef(i, n) = t; + if (abs(x) > abs(lastw)) + m_matT.coeffRef(i + 1, n) = (-r - w * t) / x; + else + m_matT.coeffRef(i + 1, n) = (-lastr - y * t) / lastw; + } + + // Overflow control + Scalar t = abs(m_matT.coeff(i, n)); + if ((eps * t) * t > Scalar(1)) + m_matT.col(n).tail(size - i) /= t; + } + } + } + else if (q < Scalar(0) && n > 0) + { // Complex vector + Scalar lastra(0), lastsa(0), lastw(0); + Index l = n - 1; + + // Last vector component imaginary so 
matrix is triangular + if (abs(m_matT.coeff(n, n - 1)) > abs(m_matT.coeff(n - 1, n))) + { + m_matT.coeffRef(n - 1, n - 1) = q / m_matT.coeff(n, n - 1); + m_matT.coeffRef(n - 1, n) = -(m_matT.coeff(n, n) - p) / m_matT.coeff(n, n - 1); + } + else + { + Complex cc = Complex(Scalar(0), -m_matT.coeff(n - 1, n)) / Complex(m_matT.coeff(n - 1, n - 1) - p, q); + m_matT.coeffRef(n - 1, n - 1) = Eigen::numext::real(cc); + m_matT.coeffRef(n - 1, n) = Eigen::numext::imag(cc); + } + m_matT.coeffRef(n, n - 1) = Scalar(0); + m_matT.coeffRef(n, n) = Scalar(1); + for (Index i = n - 2; i >= 0; i--) + { + Scalar ra = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n - 1).segment(l, n - l + 1)); + Scalar sa = m_matT.row(i).segment(l, n - l + 1).dot(m_matT.col(n).segment(l, n - l + 1)); + Scalar w = m_matT.coeff(i, i) - p; + + if (m_eivalues.coeff(i).imag() < Scalar(0)) + { + lastw = w; + lastra = ra; + lastsa = sa; + } + else + { + l = i; + if (m_eivalues.coeff(i).imag() == Scalar(0)) + { + Complex cc = Complex(-ra, -sa) / Complex(w, q); + m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc); + m_matT.coeffRef(i, n) = Eigen::numext::imag(cc); + } + else + { + // Solve complex equations + Scalar x = m_matT.coeff(i, i + 1); + Scalar y = m_matT.coeff(i + 1, i); + Scalar vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q; + Scalar vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q; + if ((vr == Scalar(0)) && (vi == Scalar(0))) + vr = eps * norm * (abs(w) + abs(q) + abs(x) + abs(y) + abs(lastw)); + + Complex cc = Complex(x * lastra - lastw * ra + q * sa, x * lastsa - lastw * sa - q * ra) / Complex(vr, vi); + m_matT.coeffRef(i, n - 1) = Eigen::numext::real(cc); + m_matT.coeffRef(i, n) = Eigen::numext::imag(cc); + if (abs(x) > (abs(lastw) + abs(q))) + { + m_matT.coeffRef(i + 1, n - 1) = (-ra - w * m_matT.coeff(i, n - 1) + q * m_matT.coeff(i, n)) / x; + m_matT.coeffRef(i + 1, n) = (-sa - w * m_matT.coeff(i, n) - q * m_matT.coeff(i, n - 1)) / x; + } + else + { + cc = Complex(-lastra - y * m_matT.coeff(i, n - 1), -lastsa - y * m_matT.coeff(i, n)) / Complex(lastw, q); + m_matT.coeffRef(i + 1, n - 1) = Eigen::numext::real(cc); + m_matT.coeffRef(i + 1, n) = Eigen::numext::imag(cc); + } + } + + // Overflow control + Scalar t = std::max(abs(m_matT.coeff(i, n - 1)), abs(m_matT.coeff(i, n))); + if ((eps * t) * t > Scalar(1)) + m_matT.block(i, n - 1, size - i, 2) /= t; + } + } + + // We handled a pair of complex conjugate eigenvalues, so need to skip them both + n--; + } + } + + // Back transformation to get eigenvectors of original matrix + Vector m_tmp(size); + for (Index j = size - 1; j >= 0; j--) + { + m_tmp.noalias() = m_eivec.leftCols(j + 1) * m_matT.col(j).segment(0, j + 1); + m_eivec.col(j) = m_tmp; + } + } + +public: + UpperHessenbergEigen() : + m_n(0), m_computed(false) + {} + + UpperHessenbergEigen(ConstGenericMatrix& mat) : + m_n(mat.rows()), m_computed(false) + { + compute(mat); + } + + void compute(ConstGenericMatrix& mat) + { + using std::abs; + using std::sqrt; + + if (mat.rows() != mat.cols()) + throw std::invalid_argument("UpperHessenbergEigen: matrix must be square"); + + m_n = mat.rows(); + // Scale matrix prior to the Schur decomposition + const Scalar scale = mat.cwiseAbs().maxCoeff(); + + // Reduce to real Schur form + Matrix Q = Matrix::Identity(m_n, m_n); + m_realSchur.computeFromHessenberg(mat / scale, Q, true); + if (m_realSchur.info() != Eigen::Success) + throw std::runtime_error("UpperHessenbergEigen: eigen 
decomposition failed"); + + m_matT = m_realSchur.matrixT(); + m_eivec = m_realSchur.matrixU(); + + // Compute eigenvalues from matT + m_eivalues.resize(m_n); + Index i = 0; + while (i < m_n) + { + // Real eigenvalue + if (i == m_n - 1 || m_matT.coeff(i + 1, i) == Scalar(0)) + { + m_eivalues.coeffRef(i) = m_matT.coeff(i, i); + ++i; + } + else // Complex eigenvalues + { + Scalar p = Scalar(0.5) * (m_matT.coeff(i, i) - m_matT.coeff(i + 1, i + 1)); + Scalar z; + // Compute z = sqrt(abs(p * p + m_matT.coeff(i+1, i) * m_matT.coeff(i, i+1))); + // without overflow + { + Scalar t0 = m_matT.coeff(i + 1, i); + Scalar t1 = m_matT.coeff(i, i + 1); + Scalar maxval = std::max(abs(p), std::max(abs(t0), abs(t1))); + t0 /= maxval; + t1 /= maxval; + Scalar p0 = p / maxval; + z = maxval * sqrt(abs(p0 * p0 + t0 * t1)); + } + m_eivalues.coeffRef(i) = Complex(m_matT.coeff(i + 1, i + 1) + p, z); + m_eivalues.coeffRef(i + 1) = Complex(m_matT.coeff(i + 1, i + 1) + p, -z); + i += 2; + } + } + + // Compute eigenvectors + doComputeEigenvectors(); + + // Scale eigenvalues back + m_eivalues *= scale; + + m_computed = true; + } + + const ComplexVector& eigenvalues() const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergEigen: need to call compute() first"); + + return m_eivalues; + } + + ComplexMatrix eigenvectors() + { + using std::abs; + + if (!m_computed) + throw std::logic_error("UpperHessenbergEigen: need to call compute() first"); + + Index n = m_eivec.cols(); + ComplexMatrix matV(n, n); + for (Index j = 0; j < n; ++j) + { + // imaginary part of real eigenvalue is already set to exact zero + if (Eigen::numext::imag(m_eivalues.coeff(j)) == Scalar(0) || j + 1 == n) + { + // we have a real eigen value + matV.col(j) = m_eivec.col(j).template cast(); + matV.col(j).normalize(); + } + else + { + // we have a pair of complex eigen values + for (Index i = 0; i < n; ++i) + { + matV.coeffRef(i, j) = Complex(m_eivec.coeff(i, j), m_eivec.coeff(i, j + 1)); + matV.coeffRef(i, j + 1) = Complex(m_eivec.coeff(i, j), -m_eivec.coeff(i, j + 1)); + } + matV.col(j).normalize(); + matV.col(j + 1).normalize(); + ++j; + } + } + + return matV; + } +}; + +} // namespace Spectra + +#endif // UPPER_HESSENBERG_EIGEN_H diff --git a/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergQR.h b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergQR.h new file mode 100644 index 000000000..5778f11dc --- /dev/null +++ b/gtsam/3rdparty/Spectra/LinAlg/UpperHessenbergQR.h @@ -0,0 +1,670 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef UPPER_HESSENBERG_QR_H +#define UPPER_HESSENBERG_QR_H + +#include +#include // std::sqrt +#include // std::fill, std::copy +#include // std::logic_error + +namespace Spectra { + +/// +/// \defgroup Internals Internal Classes +/// +/// Classes for internal use. May be useful to developers. +/// + +/// +/// \ingroup Internals +/// @{ +/// + +/// +/// \defgroup LinearAlgebra Linear Algebra +/// +/// A number of classes for linear algebra operations. +/// + +/// +/// \ingroup LinearAlgebra +/// +/// Perform the QR decomposition of an upper Hessenberg matrix. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. 
+/// +template +class UpperHessenbergQR +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Matrix RowVector; + typedef Eigen::Array Array; + + typedef Eigen::Ref GenericMatrix; + typedef const Eigen::Ref ConstGenericMatrix; + + Matrix m_mat_T; + +protected: + Index m_n; + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + // Q = G1 * G2 * ... * G_{n-1} + Scalar m_shift; + Array m_rot_cos; + Array m_rot_sin; + bool m_computed; + + // Given x and y, compute 1) r = sqrt(x^2 + y^2), 2) c = x / r, 3) s = -y / r + // If both x and y are zero, set c = 1 and s = 0 + // We must implement it in a numerically stable way + static void compute_rotation(const Scalar& x, const Scalar& y, Scalar& r, Scalar& c, Scalar& s) + { + using std::sqrt; + + const Scalar xsign = (x > Scalar(0)) - (x < Scalar(0)); + const Scalar ysign = (y > Scalar(0)) - (y < Scalar(0)); + const Scalar xabs = x * xsign; + const Scalar yabs = y * ysign; + if (xabs > yabs) + { + // In this case xabs != 0 + const Scalar ratio = yabs / xabs; // so that 0 <= ratio < 1 + const Scalar common = sqrt(Scalar(1) + ratio * ratio); + c = xsign / common; + r = xabs * common; + s = -y / r; + } + else + { + if (yabs == Scalar(0)) + { + r = Scalar(0); + c = Scalar(1); + s = Scalar(0); + return; + } + const Scalar ratio = xabs / yabs; // so that 0 <= ratio <= 1 + const Scalar common = sqrt(Scalar(1) + ratio * ratio); + s = -ysign / common; + r = yabs * common; + c = x / r; + } + } + +public: + /// + /// Constructor to preallocate memory. Computation can + /// be performed later by calling the compute() method. + /// + UpperHessenbergQR(Index size) : + m_n(size), + m_rot_cos(m_n - 1), + m_rot_sin(m_n - 1), + m_computed(false) + {} + + /// + /// Constructor to create an object that performs and stores the + /// QR decomposition of an upper Hessenberg matrix `mat`, with an + /// optional shift: \f$H-sI=QR\f$. Here \f$H\f$ stands for the matrix + /// `mat`, and \f$s\f$ is the shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// Only the upper triangular and the lower subdiagonal parts of + /// the matrix are used. + /// + UpperHessenbergQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) : + m_n(mat.rows()), + m_shift(shift), + m_rot_cos(m_n - 1), + m_rot_sin(m_n - 1), + m_computed(false) + { + compute(mat, shift); + } + + /// + /// Virtual destructor. + /// + virtual ~UpperHessenbergQR(){}; + + /// + /// Conduct the QR factorization of an upper Hessenberg matrix with + /// an optional shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// Only the upper triangular and the lower subdiagonal parts of + /// the matrix are used. 
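+    ///
+    /// Internally the factorization is represented by \f$n-1\f$ Givens rotations,
+    /// \f$G_{n-1}'\cdots G_2'G_1'(H-sI)=R\f$; only the cosine/sine sequences and the
+    /// \f$R\f$ factor are stored.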
+ /// + virtual void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) + { + m_n = mat.rows(); + if (m_n != mat.cols()) + throw std::invalid_argument("UpperHessenbergQR: matrix must be square"); + + m_shift = shift; + m_mat_T.resize(m_n, m_n); + m_rot_cos.resize(m_n - 1); + m_rot_sin.resize(m_n - 1); + + // Make a copy of mat - s * I + std::copy(mat.data(), mat.data() + mat.size(), m_mat_T.data()); + m_mat_T.diagonal().array() -= m_shift; + + Scalar xi, xj, r, c, s; + Scalar *Tii, *ptr; + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + Tii = &m_mat_T.coeffRef(i, i); + + // Make sure mat_T is upper Hessenberg + // Zero the elements below mat_T(i + 1, i) + std::fill(Tii + 2, Tii + m_n - i, Scalar(0)); + + xi = Tii[0]; // mat_T(i, i) + xj = Tii[1]; // mat_T(i + 1, i) + compute_rotation(xi, xj, r, c, s); + m_rot_cos[i] = c; + m_rot_sin[i] = s; + + // For a complete QR decomposition, + // we first obtain the rotation matrix + // G = [ cos sin] + // [-sin cos] + // and then do T[i:(i + 1), i:(n - 1)] = G' * T[i:(i + 1), i:(n - 1)] + + // Gt << c, -s, s, c; + // m_mat_T.block(i, i, 2, m_n - i) = Gt * m_mat_T.block(i, i, 2, m_n - i); + Tii[0] = r; // m_mat_T(i, i) => r + Tii[1] = 0; // m_mat_T(i + 1, i) => 0 + ptr = Tii + m_n; // m_mat_T(i, k), k = i+1, i+2, ..., n-1 + for (Index j = i + 1; j < m_n; j++, ptr += m_n) + { + Scalar tmp = ptr[0]; + ptr[0] = c * tmp - s * ptr[1]; + ptr[1] = s * tmp + c * ptr[1]; + } + + // If we do not need to calculate the R matrix, then + // only the cos and sin sequences are required. + // In such case we only update T[i + 1, (i + 1):(n - 1)] + // m_mat_T.block(i + 1, i + 1, 1, m_n - i - 1) *= c; + // m_mat_T.block(i + 1, i + 1, 1, m_n - i - 1) += s * mat_T.block(i, i + 1, 1, m_n - i - 1); + } + + m_computed = true; + } + + /// + /// Return the \f$R\f$ matrix in the QR decomposition, which is an + /// upper triangular matrix. + /// + /// \return Returned matrix type will be `Eigen::Matrix`, depending on + /// the template parameter `Scalar` defined. + /// + virtual Matrix matrix_R() const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + return m_mat_T; + } + + /// + /// Overwrite `dest` with \f$Q'HQ = RQ + sI\f$, where \f$H\f$ is the input matrix `mat`, + /// and \f$s\f$ is the shift. The result is an upper Hessenberg matrix. + /// + /// \param mat The matrix to be overwritten, whose type should be `Eigen::Matrix`, + /// depending on the template parameter `Scalar` defined. 
+ /// + virtual void matrix_QtHQ(Matrix& dest) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + // Make a copy of the R matrix + dest.resize(m_n, m_n); + std::copy(m_mat_T.data(), m_mat_T.data() + m_mat_T.size(), dest.data()); + + // Compute the RQ matrix + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // RQ[, i:(i + 1)] = RQ[, i:(i + 1)] * Gi + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Scalar *Yi, *Yi1; + Yi = &dest.coeffRef(0, i); + Yi1 = Yi + m_n; // RQ(0, i + 1) + const Index i2 = i + 2; + for (Index j = 0; j < i2; j++) + { + const Scalar tmp = Yi[j]; + Yi[j] = c * tmp - s * Yi1[j]; + Yi1[j] = s * tmp + c * Yi1[j]; + } + + /* Vector dest = RQ.block(0, i, i + 2, 1); + dest.block(0, i, i + 2, 1) = c * Yi - s * dest.block(0, i + 1, i + 2, 1); + dest.block(0, i + 1, i + 2, 1) = s * Yi + c * dest.block(0, i + 1, i + 2, 1); */ + } + + // Add the shift to the diagonal + dest.diagonal().array() += m_shift; + } + + /// + /// Apply the \f$Q\f$ matrix to a vector \f$y\f$. + /// + /// \param Y A vector that will be overwritten by the matrix product \f$Qy\f$. + /// + /// Vector type can be `Eigen::Vector`, depending on + /// the template parameter `Scalar` defined. + /// + // Y -> QY = G1 * G2 * ... * Y + void apply_QY(Vector& Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + for (Index i = m_n - 2; i >= 0; i--) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1)] = Gi * Y[i:(i + 1)] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + const Scalar tmp = Y[i]; + Y[i] = c * tmp + s * Y[i + 1]; + Y[i + 1] = -s * tmp + c * Y[i + 1]; + } + } + + /// + /// Apply the \f$Q\f$ matrix to a vector \f$y\f$. + /// + /// \param Y A vector that will be overwritten by the matrix product \f$Q'y\f$. + /// + /// Vector type can be `Eigen::Vector`, depending on + /// the template parameter `Scalar` defined. + /// + // Y -> Q'Y = G_{n-1}' * ... * G2' * G1' * Y + void apply_QtY(Vector& Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1)] = Gi' * Y[i:(i + 1)] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + const Scalar tmp = Y[i]; + Y[i] = c * tmp - s * Y[i + 1]; + Y[i + 1] = s * tmp + c * Y[i + 1]; + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. + /// + /// \param Y A matrix that will be overwritten by the matrix product \f$QY\f$. + /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> QY = G1 * G2 * ... * Y + void apply_QY(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + RowVector Yi(Y.cols()), Yi1(Y.cols()); + for (Index i = m_n - 2; i >= 0; i--) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1), ] = Gi * Y[i:(i + 1), ] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.row(i); + Yi1.noalias() = Y.row(i + 1); + Y.row(i) = c * Yi + s * Yi1; + Y.row(i + 1) = -s * Yi + c * Yi1; + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. 
+ /// + /// \param Y A matrix that will be overwritten by the matrix product \f$Q'Y\f$. + /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> Q'Y = G_{n-1}' * ... * G2' * G1' * Y + void apply_QtY(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + RowVector Yi(Y.cols()), Yi1(Y.cols()); + const Index n1 = m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[i:(i + 1), ] = Gi' * Y[i:(i + 1), ] + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.row(i); + Yi1.noalias() = Y.row(i + 1); + Y.row(i) = c * Yi - s * Yi1; + Y.row(i + 1) = s * Yi + c * Yi1; + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. + /// + /// \param Y A matrix that will be overwritten by the matrix product \f$YQ\f$. + /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> YQ = Y * G1 * G2 * ... + void apply_YQ(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + /*Vector Yi(Y.rows()); + for(Index i = 0; i < m_n - 1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.col(i); + Y.col(i) = c * Yi - s * Y.col(i + 1); + Y.col(i + 1) = s * Yi + c * Y.col(i + 1); + }*/ + Scalar *Y_col_i, *Y_col_i1; + const Index n1 = m_n - 1; + const Index nrow = Y.rows(); + for (Index i = 0; i < n1; i++) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + + Y_col_i = &Y.coeffRef(0, i); + Y_col_i1 = &Y.coeffRef(0, i + 1); + for (Index j = 0; j < nrow; j++) + { + Scalar tmp = Y_col_i[j]; + Y_col_i[j] = c * tmp - s * Y_col_i1[j]; + Y_col_i1[j] = s * tmp + c * Y_col_i1[j]; + } + } + } + + /// + /// Apply the \f$Q\f$ matrix to another matrix \f$Y\f$. + /// + /// \param Y A matrix that will be overwritten by the matrix product \f$YQ'\f$. + /// + /// Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + // Y -> YQ' = Y * G_{n-1}' * ... * G2' * G1' + void apply_YQt(GenericMatrix Y) const + { + if (!m_computed) + throw std::logic_error("UpperHessenbergQR: need to call compute() first"); + + Vector Yi(Y.rows()); + for (Index i = m_n - 2; i >= 0; i--) + { + const Scalar c = m_rot_cos.coeff(i); + const Scalar s = m_rot_sin.coeff(i); + // Y[, i:(i + 1)] = Y[, i:(i + 1)] * Gi' + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + Yi.noalias() = Y.col(i); + Y.col(i) = c * Yi + s * Y.col(i + 1); + Y.col(i + 1) = -s * Yi + c * Y.col(i + 1); + } + } +}; + +/// +/// \ingroup LinearAlgebra +/// +/// Perform the QR decomposition of a tridiagonal matrix, a special +/// case of upper Hessenberg matrices. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. 
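+///
+/// Because the input matrix is tridiagonal, the \f$R\f$ factor has at most three nonzero
+/// diagonals (the main diagonal and the first two superdiagonals). This class stores and
+/// updates only those bands, so one QR step costs \f$O(n)\f$ operations rather than the
+/// \f$O(n^2)\f$ of the general upper Hessenberg case above.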
+/// +template +class TridiagQR : public UpperHessenbergQR +{ +private: + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef const Eigen::Ref ConstGenericMatrix; + + typedef typename Matrix::Index Index; + + Vector m_T_diag; // diagonal elements of T + Vector m_T_lsub; // lower subdiagonal of T + Vector m_T_usub; // upper subdiagonal of T + Vector m_T_usub2; // 2nd upper subdiagonal of T + +public: + /// + /// Constructor to preallocate memory. Computation can + /// be performed later by calling the compute() method. + /// + TridiagQR(Index size) : + UpperHessenbergQR(size) + {} + + /// + /// Constructor to create an object that performs and stores the + /// QR decomposition of an upper Hessenberg matrix `mat`, with an + /// optional shift: \f$H-sI=QR\f$. Here \f$H\f$ stands for the matrix + /// `mat`, and \f$s\f$ is the shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// Only the major- and sub- diagonal parts of + /// the matrix are used. + /// + TridiagQR(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) : + UpperHessenbergQR(mat.rows()) + { + this->compute(mat, shift); + } + + /// + /// Conduct the QR factorization of a tridiagonal matrix with an + /// optional shift. + /// + /// \param mat Matrix type can be `Eigen::Matrix` (e.g. + /// `Eigen::MatrixXd` and `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// Only the major- and sub- diagonal parts of + /// the matrix are used. + /// + void compute(ConstGenericMatrix& mat, const Scalar& shift = Scalar(0)) + { + this->m_n = mat.rows(); + if (this->m_n != mat.cols()) + throw std::invalid_argument("TridiagQR: matrix must be square"); + + this->m_shift = shift; + m_T_diag.resize(this->m_n); + m_T_lsub.resize(this->m_n - 1); + m_T_usub.resize(this->m_n - 1); + m_T_usub2.resize(this->m_n - 2); + this->m_rot_cos.resize(this->m_n - 1); + this->m_rot_sin.resize(this->m_n - 1); + + m_T_diag.array() = mat.diagonal().array() - this->m_shift; + m_T_lsub.noalias() = mat.diagonal(-1); + m_T_usub.noalias() = m_T_lsub; + + // A number of pointers to avoid repeated address calculation + Scalar *c = this->m_rot_cos.data(), // pointer to the cosine vector + *s = this->m_rot_sin.data(), // pointer to the sine vector + r; + const Index n1 = this->m_n - 1; + for (Index i = 0; i < n1; i++) + { + // diag[i] == T[i, i] + // lsub[i] == T[i + 1, i] + // r = sqrt(T[i, i]^2 + T[i + 1, i]^2) + // c = T[i, i] / r, s = -T[i + 1, i] / r + this->compute_rotation(m_T_diag.coeff(i), m_T_lsub.coeff(i), r, *c, *s); + + // For a complete QR decomposition, + // we first obtain the rotation matrix + // G = [ cos sin] + // [-sin cos] + // and then do T[i:(i + 1), i:(i + 2)] = G' * T[i:(i + 1), i:(i + 2)] + + // Update T[i, i] and T[i + 1, i] + // The updated value of T[i, i] is known to be r + // The updated value of T[i + 1, i] is known to be 0 + m_T_diag.coeffRef(i) = r; + m_T_lsub.coeffRef(i) = Scalar(0); + // Update T[i, i + 1] and T[i + 1, i + 1] + // usub[i] == T[i, i + 1] + // diag[i + 1] == T[i + 1, i + 1] + const Scalar tmp = m_T_usub.coeff(i); + m_T_usub.coeffRef(i) = (*c) * tmp - (*s) * m_T_diag.coeff(i + 1); + m_T_diag.coeffRef(i + 1) = (*s) * tmp + (*c) * m_T_diag.coeff(i + 1); + // Update T[i, i + 2] and T[i + 1, i + 2] + // usub2[i] == T[i, i + 2] + // usub[i + 1] == T[i + 1, i + 2] + if (i < n1 - 1) + { + m_T_usub2.coeffRef(i) = -(*s) * m_T_usub.coeff(i + 1); + m_T_usub.coeffRef(i + 1) *= 
(*c); + } + + c++; + s++; + + // If we do not need to calculate the R matrix, then + // only the cos and sin sequences are required. + // In such case we only update T[i + 1, (i + 1):(i + 2)] + // T[i + 1, i + 1] = c * T[i + 1, i + 1] + s * T[i, i + 1]; + // T[i + 1, i + 2] *= c; + } + + this->m_computed = true; + } + + /// + /// Return the \f$R\f$ matrix in the QR decomposition, which is an + /// upper triangular matrix. + /// + /// \return Returned matrix type will be `Eigen::Matrix`, depending on + /// the template parameter `Scalar` defined. + /// + Matrix matrix_R() const + { + if (!this->m_computed) + throw std::logic_error("TridiagQR: need to call compute() first"); + + Matrix R = Matrix::Zero(this->m_n, this->m_n); + R.diagonal().noalias() = m_T_diag; + R.diagonal(1).noalias() = m_T_usub; + R.diagonal(2).noalias() = m_T_usub2; + + return R; + } + + /// + /// Overwrite `dest` with \f$Q'HQ = RQ + sI\f$, where \f$H\f$ is the input matrix `mat`, + /// and \f$s\f$ is the shift. The result is a tridiagonal matrix. + /// + /// \param mat The matrix to be overwritten, whose type should be `Eigen::Matrix`, + /// depending on the template parameter `Scalar` defined. + /// + void matrix_QtHQ(Matrix& dest) const + { + if (!this->m_computed) + throw std::logic_error("TridiagQR: need to call compute() first"); + + // Make a copy of the R matrix + dest.resize(this->m_n, this->m_n); + dest.setZero(); + dest.diagonal().noalias() = m_T_diag; + // The upper diagonal refers to m_T_usub + // The 2nd upper subdiagonal will be zero in RQ + + // Compute the RQ matrix + // [m11 m12] points to RQ[i:(i+1), i:(i+1)] + // [0 m22] + // + // Gi = [ cos[i] sin[i]] + // [-sin[i] cos[i]] + const Index n1 = this->m_n - 1; + for (Index i = 0; i < n1; i++) + { + const Scalar c = this->m_rot_cos.coeff(i); + const Scalar s = this->m_rot_sin.coeff(i); + const Scalar m11 = dest.coeff(i, i), + m12 = m_T_usub.coeff(i), + m22 = m_T_diag.coeff(i + 1); + + // Update the diagonal and the lower subdiagonal of dest + dest.coeffRef(i, i) = c * m11 - s * m12; + dest.coeffRef(i + 1, i) = -s * m22; + dest.coeffRef(i + 1, i + 1) = c * m22; + } + + // Copy the lower subdiagonal to upper subdiagonal + dest.diagonal(1).noalias() = dest.diagonal(-1); + + // Add the shift to the diagonal + dest.diagonal().array() += this->m_shift; + } +}; + +/// +/// @} +/// + +} // namespace Spectra + +#endif // UPPER_HESSENBERG_QR_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseCholesky.h b/gtsam/3rdparty/Spectra/MatOp/DenseCholesky.h new file mode 100644 index 000000000..c41cae729 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseCholesky.h @@ -0,0 +1,108 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_CHOLESKY_H +#define DENSE_CHOLESKY_H + +#include +#include +#include +#include "../Util/CompInfo.h" + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the operations related to Cholesky decomposition on a +/// positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular +/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver +/// in the Cholesky decomposition mode. 
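+///
+/// A minimal sketch of that use (assuming the SymGEigsSolver header and the
+/// Cholesky mode constant GEIGS_CHOLESKY from the same library are available;
+/// the matrices below are only illustrations):
+/// \code{.cpp}
+/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(10, 10);
+/// Eigen::MatrixXd A = M + M.transpose();                        // symmetric
+/// Eigen::MatrixXd B = Eigen::MatrixXd::Identity(10, 10) * 2.0;  // positive definite
+///
+/// Spectra::DenseSymMatProd<double> op(A);
+/// Spectra::DenseCholesky<double>   Bop(B);
+/// Spectra::SymGEigsSolver<double, Spectra::LARGEST_ALGE,
+///     Spectra::DenseSymMatProd<double>, Spectra::DenseCholesky<double>,
+///     Spectra::GEIGS_CHOLESKY> geigs(&op, &Bop, 3, 6);
+/// geigs.init();
+/// geigs.compute();
+/// if (geigs.info() == Spectra::SUCCESSFUL)
+/// {
+///     Eigen::VectorXd evals = geigs.eigenvalues();  // generalized eigenvalues of (A, B)
+/// }
+/// \endcode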
+/// +template +class DenseCholesky +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstMat; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + const Index m_n; + Eigen::LLT m_decomp; + int m_info; // status of the decomposition + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseCholesky(ConstGenericMatrix& mat) : + m_n(mat.rows()), m_info(NOT_COMPUTED) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseCholesky: matrix must be square"); + + m_decomp.compute(mat); + m_info = (m_decomp.info() == Eigen::Success) ? + SUCCESSFUL : + NUMERICAL_ISSUE; + } + + /// + /// Returns the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Returns the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. + /// + int info() const { return m_info; } + + /// + /// Performs the lower triangular solving operation \f$y=L^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L) * x_in + void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.matrixL().solve(x); + } + + /// + /// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L') * x_in + void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.matrixU().solve(x); + } +}; + +} // namespace Spectra + +#endif // DENSE_CHOLESKY_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseGenComplexShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/DenseGenComplexShiftSolve.h new file mode 100644 index 000000000..02ad32f8f --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseGenComplexShiftSolve.h @@ -0,0 +1,102 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_GEN_COMPLEX_SHIFT_SOLVE_H +#define DENSE_GEN_COMPLEX_SHIFT_SOLVE_H + +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the complex shift-solve operation on a general real matrix \f$A\f$, +/// i.e., calculating \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$ for any complex-valued +/// \f$\sigma\f$ and real-valued vector \f$x\f$. It is mainly used in the +/// GenEigsComplexShiftSolver eigen solver. 
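+///
+/// A small sketch of the operation itself (the matrix and the shift are
+/// arbitrary illustrations):
+/// \code{.cpp}
+/// Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 10);
+/// Spectra::DenseGenComplexShiftSolve<double> op(A);
+/// op.set_shift(1.0, 2.0);                    // sigma = 1 + 2i
+///
+/// Eigen::VectorXd x = Eigen::VectorXd::Random(10), y(10);
+/// op.perform_op(x.data(), y.data());         // y = Re{ (A - sigma I)^{-1} x }
+/// \endcode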
+/// +template +class DenseGenComplexShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + typedef Eigen::PartialPivLU ComplexSolver; + + ConstGenericMatrix m_mat; + const Index m_n; + ComplexSolver m_solver; + ComplexVector m_x_cache; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseGenComplexShiftSolve(ConstGenericMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseGenComplexShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the complex shift \f$\sigma\f$. + /// + /// \param sigmar Real part of \f$\sigma\f$. + /// \param sigmai Imaginary part of \f$\sigma\f$. + /// + void set_shift(Scalar sigmar, Scalar sigmai) + { + m_solver.compute(m_mat.template cast() - Complex(sigmar, sigmai) * ComplexMatrix::Identity(m_n, m_n)); + m_x_cache.resize(m_n); + m_x_cache.setZero(); + } + + /// + /// Perform the complex shift-solve operation + /// \f$y=\mathrm{Re}\{(A-\sigma I)^{-1}x\}\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = Re( inv(A - sigma * I) * x_in ) + void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_x_cache.real() = MapConstVec(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(m_x_cache).real(); + } +}; + +} // namespace Spectra + +#endif // DENSE_GEN_COMPLEX_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseGenMatProd.h b/gtsam/3rdparty/Spectra/MatOp/DenseGenMatProd.h new file mode 100644 index 000000000..c4ade80a3 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseGenMatProd.h @@ -0,0 +1,80 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_GEN_MAT_PROD_H +#define DENSE_GEN_MAT_PROD_H + +#include + +namespace Spectra { + +/// +/// \defgroup MatOp Matrix Operations +/// +/// Define matrix operations on existing matrix objects +/// + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// general real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the GenEigsSolver and +/// SymEigsSolver eigen solvers. +/// +template +class DenseGenMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. 
`Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseGenMatProd(ConstGenericMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat * x; + } +}; + +} // namespace Spectra + +#endif // DENSE_GEN_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseGenRealShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/DenseGenRealShiftSolve.h new file mode 100644 index 000000000..9c93ecc7a --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseGenRealShiftSolve.h @@ -0,0 +1,88 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_GEN_REAL_SHIFT_SOLVE_H +#define DENSE_GEN_REAL_SHIFT_SOLVE_H + +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a general real matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver. +/// +template +class DenseGenRealShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const Index m_n; + Eigen::PartialPivLU m_solver; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseGenRealShiftSolve(ConstGenericMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseGenRealShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. + /// + void set_shift(Scalar sigma) + { + m_solver.compute(m_mat - sigma * Matrix::Identity(m_n, m_n)); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. 
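+    ///
+    /// For example (a sketch only; the matrix and shift values are arbitrary):
+    /// \code{.cpp}
+    /// Eigen::MatrixXd A = Eigen::MatrixXd::Random(8, 8);
+    /// Spectra::DenseGenRealShiftSolve<double> op(A);
+    /// op.set_shift(0.5);                   // factorizes A - 0.5 * I
+    ///
+    /// Eigen::VectorXd x = Eigen::VectorXd::Random(8), y(8);
+    /// op.perform_op(x.data(), y.data());   // y = (A - 0.5 I)^{-1} x
+    /// \endcode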
+ /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // DENSE_GEN_REAL_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseSymMatProd.h b/gtsam/3rdparty/Spectra/MatOp/DenseSymMatProd.h new file mode 100644 index 000000000..76c792686 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseSymMatProd.h @@ -0,0 +1,73 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_SYM_MAT_PROD_H +#define DENSE_SYM_MAT_PROD_H + +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// symmetric real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver. +/// +template +class DenseSymMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseSymMatProd(ConstGenericMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat.template selfadjointView() * x; + } +}; + +} // namespace Spectra + +#endif // DENSE_SYM_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/DenseSymShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/DenseSymShiftSolve.h new file mode 100644 index 000000000..620aa9413 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/DenseSymShiftSolve.h @@ -0,0 +1,92 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef DENSE_SYM_SHIFT_SOLVE_H +#define DENSE_SYM_SHIFT_SOLVE_H + +#include +#include + +#include "../LinAlg/BKLDLT.h" +#include "../Util/CompInfo.h" + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a real symmetric matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver. 
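+///
+/// A small sketch of the two-step usage, shift first and then solve (the test
+/// matrix below is arbitrary):
+/// \code{.cpp}
+/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(6, 6);
+/// Eigen::MatrixXd A = M + M.transpose();    // symmetric
+///
+/// Spectra::DenseSymShiftSolve<double> op(A);
+/// op.set_shift(0.1);                        // factorizes A - 0.1 * I
+///
+/// Eigen::VectorXd x = Eigen::VectorXd::Random(6), y(6);
+/// op.perform_op(x.data(), y.data());        // y = (A - 0.1 I)^{-1} x
+/// \endcode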
+/// +template +class DenseSymShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_n; + BKLDLT m_solver; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** matrix object, whose type can be + /// `Eigen::Matrix` (e.g. `Eigen::MatrixXd` and + /// `Eigen::MatrixXf`), or its mapped version + /// (e.g. `Eigen::Map`). + /// + DenseSymShiftSolve(ConstGenericMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("DenseSymShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. + /// + void set_shift(Scalar sigma) + { + m_solver.compute(m_mat, Uplo, sigma); + if (m_solver.info() != SUCCESSFUL) + throw std::invalid_argument("DenseSymShiftSolve: factorization failed with the given shift"); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // DENSE_SYM_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseCholesky.h b/gtsam/3rdparty/Spectra/MatOp/SparseCholesky.h new file mode 100644 index 000000000..a73efc389 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseCholesky.h @@ -0,0 +1,109 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_CHOLESKY_H +#define SPARSE_CHOLESKY_H + +#include +#include +#include +#include +#include "../Util/CompInfo.h" + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the operations related to Cholesky decomposition on a +/// sparse positive definite matrix, \f$B=LL'\f$, where \f$L\f$ is a lower triangular +/// matrix. It is mainly used in the SymGEigsSolver generalized eigen solver +/// in the Cholesky decomposition mode. +/// +template +class SparseCholesky +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + const Index m_n; + Eigen::SimplicialLLT m_decomp; + int m_info; // status of the decomposition + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseCholesky(ConstGenericSparseMatrix& mat) : + m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseCholesky: matrix must be square"); + + m_decomp.compute(mat); + m_info = (m_decomp.info() == Eigen::Success) ? 
+ SUCCESSFUL : + NUMERICAL_ISSUE; + } + + /// + /// Returns the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Returns the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. + /// + int info() const { return m_info; } + + /// + /// Performs the lower triangular solving operation \f$y=L^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L) * x_in + void lower_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.permutationP() * x; + m_decomp.matrixL().solveInPlace(y); + } + + /// + /// Performs the upper triangular solving operation \f$y=(L')^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(L') * x_in + void upper_triangular_solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_decomp.matrixU().solve(x); + y = m_decomp.permutationPinv() * y; + } +}; + +} // namespace Spectra + +#endif // SPARSE_CHOLESKY_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseGenMatProd.h b/gtsam/3rdparty/Spectra/MatOp/SparseGenMatProd.h new file mode 100644 index 000000000..0cc1f6674 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseGenMatProd.h @@ -0,0 +1,74 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_GEN_MAT_PROD_H +#define SPARSE_GEN_MAT_PROD_H + +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// sparse real matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the GenEigsSolver and SymEigsSolver +/// eigen solvers. +/// +template +class SparseGenMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseGenMatProd(ConstGenericSparseMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. 
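+    ///
+    /// A tiny sketch (triplet values chosen arbitrarily):
+    /// \code{.cpp}
+    /// Eigen::SparseMatrix<double> A(3, 3);
+    /// A.insert(0, 0) = 2.0;
+    /// A.insert(1, 1) = 3.0;
+    /// A.insert(2, 0) = 1.0;
+    /// A.makeCompressed();
+    ///
+    /// Spectra::SparseGenMatProd<double> op(A);
+    /// Eigen::VectorXd x = Eigen::VectorXd::Ones(3), y(3);
+    /// op.perform_op(x.data(), y.data());   // y = A * x, here (2, 3, 1)
+    /// \endcode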
+ /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat * x; + } +}; + +} // namespace Spectra + +#endif // SPARSE_GEN_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseGenRealShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/SparseGenRealShiftSolve.h new file mode 100644 index 000000000..fc6e3c508 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseGenRealShiftSolve.h @@ -0,0 +1,93 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_GEN_REAL_SHIFT_SOLVE_H +#define SPARSE_GEN_REAL_SHIFT_SOLVE_H + +#include +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a sparse real matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the GenEigsRealShiftSolver eigen solver. +/// +template +class SparseGenRealShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + const int m_n; + Eigen::SparseLU m_solver; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseGenRealShiftSolve(ConstGenericSparseMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseGenRealShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. + /// + void set_shift(Scalar sigma) + { + SparseMatrix I(m_n, m_n); + I.setIdentity(); + + m_solver.compute(m_mat - sigma * I); + if (m_solver.info() != Eigen::Success) + throw std::invalid_argument("SparseGenRealShiftSolve: factorization failed with the given shift"); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // SPARSE_GEN_REAL_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseRegularInverse.h b/gtsam/3rdparty/Spectra/MatOp/SparseRegularInverse.h new file mode 100644 index 000000000..3abd0c177 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseRegularInverse.h @@ -0,0 +1,100 @@ +// Copyright (C) 2017-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +#ifndef SPARSE_REGULAR_INVERSE_H +#define SPARSE_REGULAR_INVERSE_H + +#include +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines matrix operations required by the generalized eigen solver +/// in the regular inverse mode. For a sparse and positive definite matrix \f$B\f$, +/// it implements the matrix-vector product \f$y=Bx\f$ and the linear equation +/// solving operation \f$y=B^{-1}x\f$. +/// +/// This class is intended to be used with the SymGEigsSolver generalized eigen solver +/// in the regular inverse mode. +/// +template +class SparseRegularInverse +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + const int m_n; + Eigen::ConjugateGradient m_cg; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseRegularInverse(ConstGenericSparseMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseRegularInverse: matrix must be square"); + + m_cg.compute(mat); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Perform the solving operation \f$y=B^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(B) * x_in + void solve(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_cg.solve(x); + } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Bx\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = B * x_in + void mat_prod(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_mat.template selfadjointView() * x; + } +}; + +} // namespace Spectra + +#endif // SPARSE_REGULAR_INVERSE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseSymMatProd.h b/gtsam/3rdparty/Spectra/MatOp/SparseSymMatProd.h new file mode 100644 index 000000000..2dd8799eb --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseSymMatProd.h @@ -0,0 +1,73 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_SYM_MAT_PROD_H +#define SPARSE_SYM_MAT_PROD_H + +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the matrix-vector multiplication operation on a +/// sparse real symmetric matrix \f$A\f$, i.e., calculating \f$y=Ax\f$ for any vector +/// \f$x\f$. It is mainly used in the SymEigsSolver eigen solver. 
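+///
+/// A rough sketch of that use (assuming the SymEigsSolver header from the
+/// same library is available; the diagonal test matrix is only an
+/// illustration):
+/// \code{.cpp}
+/// Eigen::SparseMatrix<double> A(100, 100);
+/// for (int i = 0; i < 100; i++)
+///     A.insert(i, i) = i + 1.0;
+/// A.makeCompressed();
+///
+/// Spectra::SparseSymMatProd<double> op(A);
+/// Spectra::SymEigsSolver<double, Spectra::LARGEST_ALGE,
+///     Spectra::SparseSymMatProd<double> > eigs(&op, 3, 10);
+/// eigs.init();
+/// eigs.compute();
+/// if (eigs.info() == Spectra::SUCCESSFUL)
+/// {
+///     Eigen::VectorXd evals = eigs.eigenvalues();  // expected: 100, 99, 98
+/// }
+/// \endcode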
+/// +template +class SparseSymMatProd +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseSymMatProd(ConstGenericSparseMatrix& mat) : + m_mat(mat) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_mat.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_mat.cols(); } + + /// + /// Perform the matrix-vector multiplication operation \f$y=Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.rows()); + y.noalias() = m_mat.template selfadjointView() * x; + } +}; + +} // namespace Spectra + +#endif // SPARSE_SYM_MAT_PROD_H diff --git a/gtsam/3rdparty/Spectra/MatOp/SparseSymShiftSolve.h b/gtsam/3rdparty/Spectra/MatOp/SparseSymShiftSolve.h new file mode 100644 index 000000000..674c6ac52 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/SparseSymShiftSolve.h @@ -0,0 +1,95 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SPARSE_SYM_SHIFT_SOLVE_H +#define SPARSE_SYM_SHIFT_SOLVE_H + +#include +#include +#include +#include + +namespace Spectra { + +/// +/// \ingroup MatOp +/// +/// This class defines the shift-solve operation on a sparse real symmetric matrix \f$A\f$, +/// i.e., calculating \f$y=(A-\sigma I)^{-1}x\f$ for any real \f$\sigma\f$ and +/// vector \f$x\f$. It is mainly used in the SymEigsShiftSolver eigen solver. +/// +template +class SparseSymShiftSolve +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef Eigen::SparseMatrix SparseMatrix; + typedef const Eigen::Ref ConstGenericSparseMatrix; + + ConstGenericSparseMatrix m_mat; + const int m_n; + Eigen::SparseLU m_solver; + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param mat An **Eigen** sparse matrix object, whose type can be + /// `Eigen::SparseMatrix` or its mapped version + /// `Eigen::Map >`. + /// + SparseSymShiftSolve(ConstGenericSparseMatrix& mat) : + m_mat(mat), m_n(mat.rows()) + { + if (mat.rows() != mat.cols()) + throw std::invalid_argument("SparseSymShiftSolve: matrix must be square"); + } + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_n; } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_n; } + + /// + /// Set the real shift \f$\sigma\f$. 
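+    ///
+    /// A short sketch of the shift-solve this enables (matrix values are
+    /// arbitrary):
+    /// \code{.cpp}
+    /// Eigen::SparseMatrix<double> A(4, 4);
+    /// for (int i = 0; i < 4; i++)
+    ///     A.insert(i, i) = i + 1.0;        // eigenvalues 1, 2, 3, 4
+    /// A.makeCompressed();
+    ///
+    /// Spectra::SparseSymShiftSolve<double> op(A);
+    /// op.set_shift(2.5);                   // factorizes A - 2.5 * I
+    /// Eigen::VectorXd x = Eigen::VectorXd::Ones(4), y(4);
+    /// op.perform_op(x.data(), y.data());   // y = (A - 2.5 I)^{-1} x
+    /// \endcode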
+ /// + void set_shift(Scalar sigma) + { + SparseMatrix mat = m_mat.template selfadjointView(); + SparseMatrix identity(m_n, m_n); + identity.setIdentity(); + mat = mat - sigma * identity; + m_solver.isSymmetric(true); + m_solver.compute(mat); + if (m_solver.info() != Eigen::Success) + throw std::invalid_argument("SparseSymShiftSolve: factorization failed with the given shift"); + } + + /// + /// Perform the shift-solve operation \f$y=(A-\sigma I)^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(A - sigma * I) * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) const + { + MapConstVec x(x_in, m_n); + MapVec y(y_out, m_n); + y.noalias() = m_solver.solve(x); + } +}; + +} // namespace Spectra + +#endif // SPARSE_SYM_SHIFT_SOLVE_H diff --git a/gtsam/3rdparty/Spectra/MatOp/internal/ArnoldiOp.h b/gtsam/3rdparty/Spectra/MatOp/internal/ArnoldiOp.h new file mode 100644 index 000000000..68654aafd --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/internal/ArnoldiOp.h @@ -0,0 +1,150 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef ARNOLDI_OP_H +#define ARNOLDI_OP_H + +#include +#include // std::sqrt + +namespace Spectra { + +/// +/// \ingroup Internals +/// @{ +/// + +/// +/// \defgroup Operators Operators +/// +/// Different types of operators. +/// + +/// +/// \ingroup Operators +/// +/// Operators used in the Arnoldi factorization. +/// +template +class ArnoldiOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + + OpType& m_op; + BOpType& m_Bop; + Vector m_cache; + +public: + ArnoldiOp(OpType* op, BOpType* Bop) : + m_op(*op), m_Bop(*Bop), m_cache(op->rows()) + {} + + inline Index rows() const { return m_op.rows(); } + + // In generalized eigenvalue problem Ax=lambda*Bx, define the inner product to be = x'By. + // For regular eigenvalue problems, it is the usual inner product = x'y + + // Compute = x'By + // x and y are two vectors + template + Scalar inner_product(const Arg1& x, const Arg2& y) + { + m_Bop.mat_prod(y.data(), m_cache.data()); + return x.dot(m_cache); + } + + // Compute res = = X'By + // X is a matrix, y is a vector, res is a vector + template + void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref res) + { + m_Bop.mat_prod(y.data(), m_cache.data()); + res.noalias() = x.transpose() * m_cache; + } + + // B-norm of a vector, ||x||_B = sqrt(x'Bx) + template + Scalar norm(const Arg& x) + { + using std::sqrt; + return sqrt(inner_product(x, x)); + } + + // The "A" operator to generate the Krylov subspace + inline void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_op.perform_op(x_in, y_out); + } +}; + +/// +/// \ingroup Operators +/// +/// Placeholder for the B-operator when \f$B = I\f$. +/// +class IdentityBOp +{}; + +/// +/// \ingroup Operators +/// +/// Partial specialization for the case \f$B = I\f$. 
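+///
+/// With \f$B=I\f$ the \f$B\f$-inner product and \f$B\f$-norm used by the
+/// factorization reduce to the ordinary Euclidean ones,
+/// \f[\langle x, y\rangle = x'y, \qquad \|x\|_B = \sqrt{x'x} = \|x\|_2,\f]
+/// so the extra matrix-vector product and the cache vector of the general
+/// case are not needed.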
+/// +template +class ArnoldiOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + + OpType& m_op; + +public: + ArnoldiOp(OpType* op, IdentityBOp* /*Bop*/) : + m_op(*op) + {} + + inline Index rows() const { return m_op.rows(); } + + // Compute = x'y + // x and y are two vectors + template + Scalar inner_product(const Arg1& x, const Arg2& y) const + { + return x.dot(y); + } + + // Compute res = = X'y + // X is a matrix, y is a vector, res is a vector + template + void trans_product(const Arg1& x, const Arg2& y, Eigen::Ref res) const + { + res.noalias() = x.transpose() * y; + } + + // B-norm of a vector. For regular eigenvalue problems it is simply the L2 norm + template + Scalar norm(const Arg& x) + { + return x.norm(); + } + + // The "A" operator to generate the Krylov subspace + inline void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_op.perform_op(x_in, y_out); + } +}; + +/// +/// @} +/// + +} // namespace Spectra + +#endif // ARNOLDI_OP_H diff --git a/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsCholeskyOp.h b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsCholeskyOp.h new file mode 100644 index 000000000..fa9958352 --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsCholeskyOp.h @@ -0,0 +1,75 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_GEIGS_CHOLESKY_OP_H +#define SYM_GEIGS_CHOLESKY_OP_H + +#include +#include "../DenseSymMatProd.h" +#include "../DenseCholesky.h" + +namespace Spectra { + +/// +/// \ingroup Operators +/// +/// This class defines the matrix operation for generalized eigen solver in the +/// Cholesky decomposition mode. It calculates \f$y=L^{-1}A(L')^{-1}x\f$ for any +/// vector \f$x\f$, where \f$L\f$ is the Cholesky decomposition of \f$B\f$. +/// This class is intended for internal use. +/// +template , + typename BOpType = DenseCholesky > +class SymGEigsCholeskyOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + OpType& m_op; + BOpType& m_Bop; + Vector m_cache; // temporary working space + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. + /// \param Bop Pointer to the \f$B\f$ matrix operation object. + /// + SymGEigsCholeskyOp(OpType& op, BOpType& Bop) : + m_op(op), m_Bop(Bop), m_cache(op.rows()) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_Bop.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_Bop.rows(); } + + /// + /// Perform the matrix operation \f$y=L^{-1}A(L')^{-1}x\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. 
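+    ///
+    /// Why this transformation works: if \f$B=LL'\f$ and \f$Ax=\lambda Bx\f$,
+    /// then substituting \f$y=L'x\f$ gives
+    /// \f[L^{-1}A(L')^{-1}y=\lambda y,\f]
+    /// so the ordinary eigenvalues of this operator are exactly the
+    /// generalized eigenvalues of \f$(A,B)\f$, and the generalized
+    /// eigenvectors are recovered as \f$x=(L')^{-1}y\f$.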
+ /// + // y_out = inv(L) * A * inv(L') * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_Bop.upper_triangular_solve(x_in, y_out); + m_op.perform_op(y_out, m_cache.data()); + m_Bop.lower_triangular_solve(m_cache.data(), y_out); + } +}; + +} // namespace Spectra + +#endif // SYM_GEIGS_CHOLESKY_OP_H diff --git a/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsRegInvOp.h b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsRegInvOp.h new file mode 100644 index 000000000..514269c7f --- /dev/null +++ b/gtsam/3rdparty/Spectra/MatOp/internal/SymGEigsRegInvOp.h @@ -0,0 +1,72 @@ +// Copyright (C) 2017-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_GEIGS_REG_INV_OP_H +#define SYM_GEIGS_REG_INV_OP_H + +#include +#include "../SparseSymMatProd.h" +#include "../SparseRegularInverse.h" + +namespace Spectra { + +/// +/// \ingroup Operators +/// +/// This class defines the matrix operation for generalized eigen solver in the +/// regular inverse mode. This class is intended for internal use. +/// +template , + typename BOpType = SparseRegularInverse > +class SymGEigsRegInvOp +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + OpType& m_op; + BOpType& m_Bop; + Vector m_cache; // temporary working space + +public: + /// + /// Constructor to create the matrix operation object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. + /// \param Bop Pointer to the \f$B\f$ matrix operation object. + /// + SymGEigsRegInvOp(OpType& op, BOpType& Bop) : + m_op(op), m_Bop(Bop), m_cache(op.rows()) + {} + + /// + /// Return the number of rows of the underlying matrix. + /// + Index rows() const { return m_Bop.rows(); } + /// + /// Return the number of columns of the underlying matrix. + /// + Index cols() const { return m_Bop.rows(); } + + /// + /// Perform the matrix operation \f$y=B^{-1}Ax\f$. + /// + /// \param x_in Pointer to the \f$x\f$ vector. + /// \param y_out Pointer to the \f$y\f$ vector. + /// + // y_out = inv(B) * A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + m_op.perform_op(x_in, m_cache.data()); + m_Bop.solve(m_cache.data(), y_out); + } +}; + +} // namespace Spectra + +#endif // SYM_GEIGS_REG_INV_OP_H diff --git a/gtsam/3rdparty/Spectra/SymEigsBase.h b/gtsam/3rdparty/Spectra/SymEigsBase.h new file mode 100644 index 000000000..9601425d5 --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymEigsBase.h @@ -0,0 +1,448 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_EIGS_BASE_H +#define SYM_EIGS_BASE_H + +#include +#include // std::vector +#include // std::abs, std::pow, std::sqrt +#include // std::min, std::copy +#include // std::invalid_argument + +#include "Util/TypeTraits.h" +#include "Util/SelectionRule.h" +#include "Util/CompInfo.h" +#include "Util/SimpleRandom.h" +#include "MatOp/internal/ArnoldiOp.h" +#include "LinAlg/UpperHessenbergQR.h" +#include "LinAlg/TridiagEigen.h" +#include "LinAlg/Lanczos.h" + +namespace Spectra { + +/// +/// \defgroup EigenSolver Eigen Solvers +/// +/// Eigen solvers for different types of problems. 
+/// + +/// +/// \ingroup EigenSolver +/// +/// This is the base class for symmetric eigen solvers, mainly for internal use. +/// It is kept here to provide the documentation for member functions of concrete eigen solvers +/// such as SymEigsSolver and SymEigsShiftSolver. +/// +template +class SymEigsBase +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef Eigen::Array Array; + typedef Eigen::Array BoolArray; + typedef Eigen::Map MapMat; + typedef Eigen::Map MapVec; + typedef Eigen::Map MapConstVec; + + typedef ArnoldiOp ArnoldiOpType; + typedef Lanczos LanczosFac; + +protected: + // clang-format off + OpType* m_op; // object to conduct matrix operation, + // e.g. matrix-vector product + const Index m_n; // dimension of matrix A + const Index m_nev; // number of eigenvalues requested + const Index m_ncv; // dimension of Krylov subspace in the Lanczos method + Index m_nmatop; // number of matrix operations called + Index m_niter; // number of restarting iterations + + LanczosFac m_fac; // Lanczos factorization + Vector m_ritz_val; // Ritz values + +private: + Matrix m_ritz_vec; // Ritz vectors + Vector m_ritz_est; // last row of m_ritz_vec, also called the Ritz estimates + BoolArray m_ritz_conv; // indicator of the convergence of Ritz values + int m_info; // status of the computation + + const Scalar m_near_0; // a very small value, but 1.0 / m_near_0 does not overflow + // ~= 1e-307 for the "double" type + const Scalar m_eps; // the machine precision, ~= 1e-16 for the "double" type + const Scalar m_eps23; // m_eps^(2/3), used to test the convergence + // clang-format on + + // Implicitly restarted Lanczos factorization + void restart(Index k) + { + if (k >= m_ncv) + return; + + TridiagQR decomp(m_ncv); + Matrix Q = Matrix::Identity(m_ncv, m_ncv); + + for (Index i = k; i < m_ncv; i++) + { + // QR decomposition of H-mu*I, mu is the shift + decomp.compute(m_fac.matrix_H(), m_ritz_val[i]); + + // Q -> Q * Qi + decomp.apply_YQ(Q); + // H -> Q'HQ + // Since QR = H - mu * I, we have H = QR + mu * I + // and therefore Q'HQ = RQ + mu * I + m_fac.compress_H(decomp); + } + + m_fac.compress_V(Q); + m_fac.factorize_from(k, m_ncv, m_nmatop); + + retrieve_ritzpair(); + } + + // Calculates the number of converged Ritz values + Index num_converged(Scalar tol) + { + // thresh = tol * max(m_eps23, abs(theta)), theta for Ritz value + Array thresh = tol * m_ritz_val.head(m_nev).array().abs().max(m_eps23); + Array resid = m_ritz_est.head(m_nev).array().abs() * m_fac.f_norm(); + // Converged "wanted" Ritz values + m_ritz_conv = (resid < thresh); + + return m_ritz_conv.cast().sum(); + } + + // Returns the adjusted nev for restarting + Index nev_adjusted(Index nconv) + { + using std::abs; + + Index nev_new = m_nev; + for (Index i = m_nev; i < m_ncv; i++) + if (abs(m_ritz_est[i]) < m_near_0) + nev_new++; + + // Adjust nev_new, according to dsaup2.f line 677~684 in ARPACK + nev_new += std::min(nconv, (m_ncv - nev_new) / 2); + if (nev_new == 1 && m_ncv >= 6) + nev_new = m_ncv / 2; + else if (nev_new == 1 && m_ncv > 2) + nev_new = 2; + + if (nev_new > m_ncv - 1) + nev_new = m_ncv - 1; + + return nev_new; + } + + // Retrieves and sorts Ritz values and Ritz vectors + void retrieve_ritzpair() + { + TridiagEigen decomp(m_fac.matrix_H()); + const Vector& evals = decomp.eigenvalues(); + const Matrix& evecs = decomp.eigenvectors(); + + SortEigenvalue sorting(evals.data(), evals.size()); + std::vector ind = sorting.index(); + + // For BOTH_ENDS, the eigenvalues 
are sorted according + // to the LARGEST_ALGE rule, so we need to move those smallest + // values to the left + // The order would be + // Largest => Smallest => 2nd largest => 2nd smallest => ... + // We keep this order since the first k values will always be + // the wanted collection, no matter k is nev_updated (used in restart()) + // or is nev (used in sort_ritzpair()) + if (SelectionRule == BOTH_ENDS) + { + std::vector ind_copy(ind); + for (Index i = 0; i < m_ncv; i++) + { + // If i is even, pick values from the left (large values) + // If i is odd, pick values from the right (small values) + if (i % 2 == 0) + ind[i] = ind_copy[i / 2]; + else + ind[i] = ind_copy[m_ncv - 1 - i / 2]; + } + } + + // Copy the Ritz values and vectors to m_ritz_val and m_ritz_vec, respectively + for (Index i = 0; i < m_ncv; i++) + { + m_ritz_val[i] = evals[ind[i]]; + m_ritz_est[i] = evecs(m_ncv - 1, ind[i]); + } + for (Index i = 0; i < m_nev; i++) + { + m_ritz_vec.col(i).noalias() = evecs.col(ind[i]); + } + } + +protected: + // Sorts the first nev Ritz pairs in the specified order + // This is used to return the final results + virtual void sort_ritzpair(int sort_rule) + { + // First make sure that we have a valid index vector + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + std::vector ind = sorting.index(); + + switch (sort_rule) + { + case LARGEST_ALGE: + break; + case LARGEST_MAGN: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_ALGE: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + case SMALLEST_MAGN: + { + SortEigenvalue sorting(m_ritz_val.data(), m_nev); + ind = sorting.index(); + break; + } + default: + throw std::invalid_argument("unsupported sorting rule"); + } + + Vector new_ritz_val(m_ncv); + Matrix new_ritz_vec(m_ncv, m_nev); + BoolArray new_ritz_conv(m_nev); + + for (Index i = 0; i < m_nev; i++) + { + new_ritz_val[i] = m_ritz_val[ind[i]]; + new_ritz_vec.col(i).noalias() = m_ritz_vec.col(ind[i]); + new_ritz_conv[i] = m_ritz_conv[ind[i]]; + } + + m_ritz_val.swap(new_ritz_val); + m_ritz_vec.swap(new_ritz_vec); + m_ritz_conv.swap(new_ritz_conv); + } + +public: + /// \cond + + SymEigsBase(OpType* op, BOpType* Bop, Index nev, Index ncv) : + m_op(op), + m_n(m_op->rows()), + m_nev(nev), + m_ncv(ncv > m_n ? m_n : ncv), + m_nmatop(0), + m_niter(0), + m_fac(ArnoldiOpType(op, Bop), m_ncv), + m_info(NOT_COMPUTED), + m_near_0(TypeTraits::min() * Scalar(10)), + m_eps(Eigen::NumTraits::epsilon()), + m_eps23(Eigen::numext::pow(m_eps, Scalar(2.0) / 3)) + { + if (nev < 1 || nev > m_n - 1) + throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1, n is the size of matrix"); + + if (ncv <= nev || ncv > m_n) + throw std::invalid_argument("ncv must satisfy nev < ncv <= n, n is the size of matrix"); + } + + /// + /// Virtual destructor + /// + virtual ~SymEigsBase() {} + + /// \endcond + + /// + /// Initializes the solver by providing an initial residual vector. + /// + /// \param init_resid Pointer to the initial residual vector. + /// + /// **Spectra** (and also **ARPACK**) uses an iterative algorithm + /// to find eigenvalues. This function allows the user to provide the initial + /// residual vector. 
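+    ///
+    /// The pointer is read as a vector of \f$n\f$ scalars, where \f$n\f$ is
+    /// the dimension of the matrix. A sketch, with `eigs` standing for a
+    /// solver object built on an operator of dimension `n` (both names are
+    /// placeholders):
+    /// \code{.cpp}
+    /// Eigen::VectorXd v0 = Eigen::VectorXd::Constant(n, 1.0);  // any nonzero vector of length n
+    /// eigs.init(v0.data());
+    /// \endcode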
+ /// + void init(const Scalar* init_resid) + { + // Reset all matrices/vectors to zero + m_ritz_val.resize(m_ncv); + m_ritz_vec.resize(m_ncv, m_nev); + m_ritz_est.resize(m_ncv); + m_ritz_conv.resize(m_nev); + + m_ritz_val.setZero(); + m_ritz_vec.setZero(); + m_ritz_est.setZero(); + m_ritz_conv.setZero(); + + m_nmatop = 0; + m_niter = 0; + + // Initialize the Lanczos factorization + MapConstVec v0(init_resid, m_n); + m_fac.init(v0, m_nmatop); + } + + /// + /// Initializes the solver by providing a random initial residual vector. + /// + /// This overloaded function generates a random initial residual vector + /// (with a fixed random seed) for the algorithm. Elements in the vector + /// follow independent Uniform(-0.5, 0.5) distribution. + /// + void init() + { + SimpleRandom rng(0); + Vector init_resid = rng.random_vec(m_n); + init(init_resid.data()); + } + + /// + /// Conducts the major computation procedure. + /// + /// \param maxit Maximum number of iterations allowed in the algorithm. + /// \param tol Precision parameter for the calculated eigenvalues. + /// \param sort_rule Rule to sort the eigenvalues and eigenvectors. + /// Supported values are + /// `Spectra::LARGEST_ALGE`, `Spectra::LARGEST_MAGN`, + /// `Spectra::SMALLEST_ALGE` and `Spectra::SMALLEST_MAGN`, + /// for example `LARGEST_ALGE` indicates that largest eigenvalues + /// come first. Note that this argument is only used to + /// **sort** the final result, and the **selection** rule + /// (e.g. selecting the largest or smallest eigenvalues in the + /// full spectrum) is specified by the template parameter + /// `SelectionRule` of SymEigsSolver. + /// + /// \return Number of converged eigenvalues. + /// + Index compute(Index maxit = 1000, Scalar tol = 1e-10, int sort_rule = LARGEST_ALGE) + { + // The m-step Lanczos factorization + m_fac.factorize_from(1, m_ncv, m_nmatop); + retrieve_ritzpair(); + // Restarting + Index i, nconv = 0, nev_adj; + for (i = 0; i < maxit; i++) + { + nconv = num_converged(tol); + if (nconv >= m_nev) + break; + + nev_adj = nev_adjusted(nconv); + restart(nev_adj); + } + // Sorting results + sort_ritzpair(sort_rule); + + m_niter += i + 1; + m_info = (nconv >= m_nev) ? SUCCESSFUL : NOT_CONVERGING; + + return std::min(m_nev, nconv); + } + + /// + /// Returns the status of the computation. + /// The full list of enumeration values can be found in \ref Enumerations. + /// + int info() const { return m_info; } + + /// + /// Returns the number of iterations used in the computation. + /// + Index num_iterations() const { return m_niter; } + + /// + /// Returns the number of matrix operations used in the computation. + /// + Index num_operations() const { return m_nmatop; } + + /// + /// Returns the converged eigenvalues. + /// + /// \return A vector containing the eigenvalues. + /// Returned vector type will be `Eigen::Vector`, depending on + /// the template parameter `Scalar` defined. + /// + Vector eigenvalues() const + { + const Index nconv = m_ritz_conv.cast().sum(); + Vector res(nconv); + + if (!nconv) + return res; + + Index j = 0; + for (Index i = 0; i < m_nev; i++) + { + if (m_ritz_conv[i]) + { + res[j] = m_ritz_val[i]; + j++; + } + } + + return res; + } + + /// + /// Returns the eigenvectors associated with the converged eigenvalues. + /// + /// \param nvec The number of eigenvectors to return. + /// + /// \return A matrix containing the eigenvectors. + /// Returned matrix type will be `Eigen::Matrix`, + /// depending on the template parameter `Scalar` defined. 
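+    ///
+    /// The number of columns returned is capped at the number of converged
+    /// eigenvalues. A one-line sketch (`eigs` is a placeholder for a solver
+    /// object on which compute() has already been called):
+    /// \code{.cpp}
+    /// Eigen::MatrixXd V = eigs.eigenvectors(2);  // at most 2 columns, fewer if fewer converged
+    /// \endcode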
+ /// + virtual Matrix eigenvectors(Index nvec) const + { + const Index nconv = m_ritz_conv.cast().sum(); + nvec = std::min(nvec, nconv); + Matrix res(m_n, nvec); + + if (!nvec) + return res; + + Matrix ritz_vec_conv(m_ncv, nvec); + Index j = 0; + for (Index i = 0; i < m_nev && j < nvec; i++) + { + if (m_ritz_conv[i]) + { + ritz_vec_conv.col(j).noalias() = m_ritz_vec.col(i); + j++; + } + } + + res.noalias() = m_fac.matrix_V() * ritz_vec_conv; + + return res; + } + + /// + /// Returns all converged eigenvectors. + /// + virtual Matrix eigenvectors() const + { + return eigenvectors(m_nev); + } +}; + +} // namespace Spectra + +#endif // SYM_EIGS_BASE_H diff --git a/gtsam/3rdparty/Spectra/SymEigsShiftSolver.h b/gtsam/3rdparty/Spectra/SymEigsShiftSolver.h new file mode 100644 index 000000000..e2b410cb1 --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymEigsShiftSolver.h @@ -0,0 +1,203 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_EIGS_SHIFT_SOLVER_H +#define SYM_EIGS_SHIFT_SOLVER_H + +#include + +#include "SymEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseSymShiftSolve.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for real symmetric matrices using +/// the **shift-and-invert mode**. The background information of the symmetric +/// eigen solver is documented in the SymEigsSolver class. Here we focus on +/// explaining the shift-and-invert mode. +/// +/// The shift-and-invert mode is based on the following fact: +/// If \f$\lambda\f$ and \f$x\f$ are a pair of eigenvalue and eigenvector of +/// matrix \f$A\f$, such that \f$Ax=\lambda x\f$, then for any \f$\sigma\f$, +/// we have +/// \f[(A-\sigma I)^{-1}x=\nu x\f] +/// where +/// \f[\nu=\frac{1}{\lambda-\sigma}\f] +/// which indicates that \f$(\nu, x)\f$ is an eigenpair of the matrix +/// \f$(A-\sigma I)^{-1}\f$. +/// +/// Therefore, if we pass the matrix operation \f$(A-\sigma I)^{-1}y\f$ +/// (rather than \f$Ay\f$) to the eigen solver, then we would get the desired +/// values of \f$\nu\f$, and \f$\lambda\f$ can also be easily obtained by noting +/// that \f$\lambda=\sigma+\nu^{-1}\f$. +/// +/// The reason why we need this type of manipulation is that +/// the algorithm of **Spectra** (and also **ARPACK**) +/// is good at finding eigenvalues with large magnitude, but may fail in looking +/// for eigenvalues that are close to zero. However, if we really need them, we +/// can set \f$\sigma=0\f$, find the largest eigenvalues of \f$A^{-1}\f$, and then +/// transform back to \f$\lambda\f$, since in this case largest values of \f$\nu\f$ +/// implies smallest values of \f$\lambda\f$. +/// +/// To summarize, in the shift-and-invert mode, the selection rule will apply to +/// \f$\nu=1/(\lambda-\sigma)\f$ rather than \f$\lambda\f$. So a selection rule +/// of `LARGEST_MAGN` combined with shift \f$\sigma\f$ will find eigenvalues of +/// \f$A\f$ that are closest to \f$\sigma\f$. But note that the eigenvalues() +/// method will always return the eigenvalues in the original problem (i.e., +/// returning \f$\lambda\f$ rather than \f$\nu\f$), and eigenvectors are the +/// same for both the original problem and the shifted-and-inverted problem. +/// +/// \tparam Scalar The element type of the matrix. 
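The spectral transform described above can be verified with plain arithmetic. A minimal standalone sketch (independent of Spectra) showing that the eigenvalue closest to the shift maps to the nu of largest magnitude, and that lambda = sigma + 1/nu recovers the original eigenvalue exactly:

\code{.cpp}
#include <cstdio>

int main()
{
    const double sigma = 3.1;
    const double lambdas[] = {1.0, 2.0, 3.0, 4.0, 5.0};
    for (double lambda : lambdas)
    {
        // nu = 1 / (lambda - sigma): largest |nu| corresponds to lambda closest to sigma
        const double nu = 1.0 / (lambda - sigma);
        // Back-transform sigma + 1/nu recovers the original eigenvalue
        std::printf("lambda = %.1f  ->  nu = %+8.3f  ->  sigma + 1/nu = %.1f\n",
                    lambda, nu, sigma + 1.0 / nu);
    }
    return 0;
}
\endcode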
+/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the shifted-and-inverted eigenvalues. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseSymShiftSolve and +/// SparseSymShiftSolve, or define their +/// own that implements all the public member functions as in +/// DenseSymShiftSolve. +/// +/// Below is an example that illustrates the use of the shift-and-invert mode: +/// +/// \code{.cpp} +/// #include +/// #include +/// // is implicitly included +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // A size-10 diagonal matrix with elements 1, 2, ..., 10 +/// Eigen::MatrixXd M = Eigen::MatrixXd::Zero(10, 10); +/// for(int i = 0; i < M.rows(); i++) +/// M(i, i) = i + 1; +/// +/// // Construct matrix operation object using the wrapper class +/// DenseSymShiftSolve op(M); +/// +/// // Construct eigen solver object with shift 0 +/// // This will find eigenvalues that are closest to 0 +/// SymEigsShiftSolver< double, LARGEST_MAGN, +/// DenseSymShiftSolve > eigs(&op, 3, 6, 0.0); +/// +/// eigs.init(); +/// eigs.compute(); +/// if(eigs.info() == SUCCESSFUL) +/// { +/// Eigen::VectorXd evalues = eigs.eigenvalues(); +/// // Will get (3.0, 2.0, 1.0) +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// } +/// +/// return 0; +/// } +/// \endcode +/// +/// Also an example for user-supplied matrix shift-solve operation class: +/// +/// \code{.cpp} +/// #include +/// #include +/// #include +/// +/// using namespace Spectra; +/// +/// // M = diag(1, 2, ..., 10) +/// class MyDiagonalTenShiftSolve +/// { +/// private: +/// double sigma_; +/// public: +/// int rows() { return 10; } +/// int cols() { return 10; } +/// void set_shift(double sigma) { sigma_ = sigma; } +/// // y_out = inv(A - sigma * I) * x_in +/// // inv(A - sigma * I) = diag(1/(1-sigma), 1/(2-sigma), ...) +/// void perform_op(double *x_in, double *y_out) +/// { +/// for(int i = 0; i < rows(); i++) +/// { +/// y_out[i] = x_in[i] / (i + 1 - sigma_); +/// } +/// } +/// }; +/// +/// int main() +/// { +/// MyDiagonalTenShiftSolve op; +/// // Find three eigenvalues that are closest to 3.14 +/// SymEigsShiftSolver eigs(&op, 3, 6, 3.14); +/// eigs.init(); +/// eigs.compute(); +/// if(eigs.info() == SUCCESSFUL) +/// { +/// Eigen::VectorXd evalues = eigs.eigenvalues(); +/// // Will get (4.0, 3.0, 2.0) +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// } +/// +/// return 0; +/// } +/// \endcode +/// +template > +class SymEigsShiftSolver : public SymEigsBase +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Array Array; + + const Scalar m_sigma; + + // First transform back the Ritz values, and then sort + void sort_ritzpair(int sort_rule) + { + Array m_ritz_val_org = Scalar(1.0) / this->m_ritz_val.head(this->m_nev).array() + m_sigma; + this->m_ritz_val.head(this->m_nev) = m_ritz_val_org; + SymEigsBase::sort_ritzpair(sort_rule); + } + +public: + /// + /// Constructor to create a eigen solver object using the shift-and-invert mode. + /// + /// \param op Pointer to the matrix operation object, which should implement + /// the shift-solve operation of \f$A\f$: calculating + /// \f$(A-\sigma I)^{-1}v\f$ for any vector \f$v\f$. 
Users could either + /// create the object from the wrapper class such as DenseSymShiftSolve, or + /// define their own that implements all the public member functions + /// as in DenseSymShiftSolve. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv_` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. + /// \param sigma The value of the shift. + /// + SymEigsShiftSolver(OpType* op, Index nev, Index ncv, Scalar sigma) : + SymEigsBase(op, NULL, nev, ncv), + m_sigma(sigma) + { + this->m_op->set_shift(m_sigma); + } +}; + +} // namespace Spectra + +#endif // SYM_EIGS_SHIFT_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/SymEigsSolver.h b/gtsam/3rdparty/Spectra/SymEigsSolver.h new file mode 100644 index 000000000..404df1e52 --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymEigsSolver.h @@ -0,0 +1,171 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_EIGS_SOLVER_H +#define SYM_EIGS_SOLVER_H + +#include + +#include "SymEigsBase.h" +#include "Util/SelectionRule.h" +#include "MatOp/DenseSymMatProd.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// +/// This class implements the eigen solver for real symmetric matrices, i.e., +/// to solve \f$Ax=\lambda x\f$ where \f$A\f$ is symmetric. +/// +/// **Spectra** is designed to calculate a specified number (\f$k\f$) +/// of eigenvalues of a large square matrix (\f$A\f$). Usually \f$k\f$ is much +/// less than the size of the matrix (\f$n\f$), so that only a few eigenvalues +/// and eigenvectors are computed. +/// +/// Rather than providing the whole \f$A\f$ matrix, the algorithm only requires +/// the matrix-vector multiplication operation of \f$A\f$. Therefore, users of +/// this solver need to supply a class that computes the result of \f$Av\f$ +/// for any given vector \f$v\f$. The name of this class should be given to +/// the template parameter `OpType`, and instance of this class passed to +/// the constructor of SymEigsSolver. +/// +/// If the matrix \f$A\f$ is already stored as a matrix object in **Eigen**, +/// for example `Eigen::MatrixXd`, then there is an easy way to construct such +/// matrix operation class, by using the built-in wrapper class DenseSymMatProd +/// which wraps an existing matrix object in **Eigen**. This is also the +/// default template parameter for SymEigsSolver. For sparse matrices, the +/// wrapper class SparseSymMatProd can be used similarly. +/// +/// If the users need to define their own matrix-vector multiplication operation +/// class, it should implement all the public member functions as in DenseSymMatProd. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. +/// The full list of enumeration values can be found in +/// \ref Enumerations. 
+/// \tparam OpType The name of the matrix operation class. Users could either +/// use the wrapper classes such as DenseSymMatProd and +/// SparseSymMatProd, or define their +/// own that implements all the public member functions as in +/// DenseSymMatProd. +/// +/// Below is an example that demonstrates the usage of this class. +/// +/// \code{.cpp} +/// #include +/// #include +/// // is implicitly included +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // We are going to calculate the eigenvalues of M +/// Eigen::MatrixXd A = Eigen::MatrixXd::Random(10, 10); +/// Eigen::MatrixXd M = A + A.transpose(); +/// +/// // Construct matrix operation object using the wrapper class DenseSymMatProd +/// DenseSymMatProd op(M); +/// +/// // Construct eigen solver object, requesting the largest three eigenvalues +/// SymEigsSolver< double, LARGEST_ALGE, DenseSymMatProd > eigs(&op, 3, 6); +/// +/// // Initialize and compute +/// eigs.init(); +/// int nconv = eigs.compute(); +/// +/// // Retrieve results +/// Eigen::VectorXd evalues; +/// if(eigs.info() == SUCCESSFUL) +/// evalues = eigs.eigenvalues(); +/// +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// +/// return 0; +/// } +/// \endcode +/// +/// And here is an example for user-supplied matrix operation class. +/// +/// \code{.cpp} +/// #include +/// #include +/// #include +/// +/// using namespace Spectra; +/// +/// // M = diag(1, 2, ..., 10) +/// class MyDiagonalTen +/// { +/// public: +/// int rows() { return 10; } +/// int cols() { return 10; } +/// // y_out = M * x_in +/// void perform_op(double *x_in, double *y_out) +/// { +/// for(int i = 0; i < rows(); i++) +/// { +/// y_out[i] = x_in[i] * (i + 1); +/// } +/// } +/// }; +/// +/// int main() +/// { +/// MyDiagonalTen op; +/// SymEigsSolver eigs(&op, 3, 6); +/// eigs.init(); +/// eigs.compute(); +/// if(eigs.info() == SUCCESSFUL) +/// { +/// Eigen::VectorXd evalues = eigs.eigenvalues(); +/// // Will get (10, 9, 8) +/// std::cout << "Eigenvalues found:\n" << evalues << std::endl; +/// } +/// +/// return 0; +/// } +/// \endcode +/// +template > +class SymEigsSolver : public SymEigsBase +{ +private: + typedef Eigen::Index Index; + +public: + /// + /// Constructor to create a solver object. + /// + /// \param op Pointer to the matrix operation object, which should implement + /// the matrix-vector multiplication operation of \f$A\f$: + /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class such as DenseSymMatProd, or + /// define their own that implements all the public member functions + /// as in DenseSymMatProd. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. 
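The nev/ncv constraints repeated in the parameter documentation above can be wrapped in a small helper. The sketch below is not part of Spectra; the ncv = max(2*nev + 1, 20) heuristic is a common rule of thumb borrowed from ARPACK-style usage, not something this class mandates:

\code{.cpp}
#include <algorithm>
#include <cstdio>
#include <stdexcept>

struct EigsParams { int nev; int ncv; };

// Pick (nev, ncv) satisfying 1 <= nev <= n - 1 and nev < ncv <= n.
inline EigsParams choose_params(int n, int nev)
{
    if (nev < 1 || nev > n - 1)
        throw std::invalid_argument("nev must satisfy 1 <= nev <= n - 1");

    // Heuristic starting point; always satisfies nev < ncv <= n because nev <= n - 1
    const int ncv = std::min(n, std::max(2 * nev + 1, 20));
    return EigsParams{nev, ncv};
}

int main()
{
    EigsParams p = choose_params(1000, 6);
    std::printf("nev = %d, ncv = %d\n", p.nev, p.ncv);  // nev = 6, ncv = 20
    return 0;
}
\endcode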
+ /// + SymEigsSolver(OpType* op, Index nev, Index ncv) : + SymEigsBase(op, NULL, nev, ncv) + {} +}; + +} // namespace Spectra + +#endif // SYM_EIGS_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/SymGEigsSolver.h b/gtsam/3rdparty/Spectra/SymGEigsSolver.h new file mode 100644 index 000000000..68aa37cfc --- /dev/null +++ b/gtsam/3rdparty/Spectra/SymGEigsSolver.h @@ -0,0 +1,326 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SYM_GEIGS_SOLVER_H +#define SYM_GEIGS_SOLVER_H + +#include "SymEigsBase.h" +#include "Util/GEigsMode.h" +#include "MatOp/internal/SymGEigsCholeskyOp.h" +#include "MatOp/internal/SymGEigsRegInvOp.h" + +namespace Spectra { + +/// +/// \defgroup GEigenSolver Generalized Eigen Solvers +/// +/// Generalized eigen solvers for different types of problems. +/// + +/// +/// \ingroup GEigenSolver +/// +/// This class implements the generalized eigen solver for real symmetric +/// matrices, i.e., to solve \f$Ax=\lambda Bx\f$ where \f$A\f$ is symmetric and +/// \f$B\f$ is positive definite. +/// +/// There are two modes of this solver, specified by the template parameter +/// GEigsMode. See the pages for the specialized classes for details. +/// - The Cholesky mode assumes that \f$B\f$ can be factorized using Cholesky +/// decomposition, which is the preferred mode when the decomposition is +/// available. (This can be easily done in Eigen using the dense or sparse +/// Cholesky solver.) +/// See \ref SymGEigsSolver "SymGEigsSolver (Cholesky mode)" for more details. +/// - The regular inverse mode requires the matrix-vector product \f$Bv\f$ and the +/// linear equation solving operation \f$B^{-1}v\f$. This mode should only be +/// used when the Cholesky decomposition of \f$B\f$ is hard to implement, or +/// when computing \f$B^{-1}v\f$ is much faster than the Cholesky decomposition. +/// See \ref SymGEigsSolver "SymGEigsSolver (Regular inverse mode)" for more details. + +// Empty class template +template +class SymGEigsSolver +{}; + +/// +/// \ingroup GEigenSolver +/// +/// This class implements the generalized eigen solver for real symmetric +/// matrices using Cholesky decomposition, i.e., to solve \f$Ax=\lambda Bx\f$ +/// where \f$A\f$ is symmetric and \f$B\f$ is positive definite with the Cholesky +/// decomposition \f$B=LL'\f$. +/// +/// This solver requires two matrix operation objects: one for \f$A\f$ that implements +/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the lower +/// and upper triangular solving \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$. +/// +/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation +/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and +/// the second operation can be created using the DenseCholesky or SparseCholesky +/// classes. If the users need to define their own operation classes, then they +/// should implement all the public member functions as in those built-in classes. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. 
+/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either +/// use the wrapper classes such as DenseSymMatProd and +/// SparseSymMatProd, or define their +/// own that implements all the public member functions as in +/// DenseSymMatProd. +/// \tparam BOpType The name of the matrix operation class for \f$B\f$. Users could either +/// use the wrapper classes such as DenseCholesky and +/// SparseCholesky, or define their +/// own that implements all the public member functions as in +/// DenseCholesky. +/// \tparam GEigsMode Mode of the generalized eigen solver. In this solver +/// it is Spectra::GEIGS_CHOLESKY. +/// +/// Below is an example that demonstrates the usage of this class. +/// +/// \code{.cpp} +/// #include +/// #include +/// #include +/// #include +/// #include +/// #include +/// #include +/// +/// using namespace Spectra; +/// +/// int main() +/// { +/// // We are going to solve the generalized eigenvalue problem A * x = lambda * B * x +/// const int n = 100; +/// +/// // Define the A matrix +/// Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n); +/// Eigen::MatrixXd A = M + M.transpose(); +/// +/// // Define the B matrix, a band matrix with 2 on the diagonal and 1 on the subdiagonals +/// Eigen::SparseMatrix B(n, n); +/// B.reserve(Eigen::VectorXi::Constant(n, 3)); +/// for(int i = 0; i < n; i++) +/// { +/// B.insert(i, i) = 2.0; +/// if(i > 0) +/// B.insert(i - 1, i) = 1.0; +/// if(i < n - 1) +/// B.insert(i + 1, i) = 1.0; +/// } +/// +/// // Construct matrix operation object using the wrapper classes +/// DenseSymMatProd op(A); +/// SparseCholesky Bop(B); +/// +/// // Construct generalized eigen solver object, requesting the largest three generalized eigenvalues +/// SymGEigsSolver, SparseCholesky, GEIGS_CHOLESKY> +/// geigs(&op, &Bop, 3, 6); +/// +/// // Initialize and compute +/// geigs.init(); +/// int nconv = geigs.compute(); +/// +/// // Retrieve results +/// Eigen::VectorXd evalues; +/// Eigen::MatrixXd evecs; +/// if(geigs.info() == SUCCESSFUL) +/// { +/// evalues = geigs.eigenvalues(); +/// evecs = geigs.eigenvectors(); +/// } +/// +/// std::cout << "Generalized eigenvalues found:\n" << evalues << std::endl; +/// std::cout << "Generalized eigenvectors found:\n" << evecs.topRows(10) << std::endl; +/// +/// // Verify results using the generalized eigen solver in Eigen +/// Eigen::MatrixXd Bdense = B; +/// Eigen::GeneralizedSelfAdjointEigenSolver es(A, Bdense); +/// +/// std::cout << "Generalized eigenvalues:\n" << es.eigenvalues().tail(3) << std::endl; +/// std::cout << "Generalized eigenvectors:\n" << es.eigenvectors().rightCols(3).topRows(10) << std::endl; +/// +/// return 0; +/// } +/// \endcode + +// Partial specialization for GEigsMode = GEIGS_CHOLESKY +template +class SymGEigsSolver : + public SymEigsBase, IdentityBOp> +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + BOpType* m_Bop; + +public: + /// + /// Constructor to create a solver object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. It + /// should implement the matrix-vector multiplication operation of \f$A\f$: + /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper classes such as DenseSymMatProd, or + /// define their own that implements all the public member functions + /// as in DenseSymMatProd. 
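For a dense positive definite B, the DenseCholesky wrapper plays the role that SparseCholesky plays in the sparse example above. A hedged sketch follows; the include paths, and the assumption that the DenseCholesky wrapper from upstream Spectra is present in the vendored subset, may not match this copy exactly:

\code{.cpp}
#include <Eigen/Core>
#include <iostream>
#include <Spectra/SymGEigsSolver.h>          // assumed include paths
#include <Spectra/MatOp/DenseSymMatProd.h>
#include <Spectra/MatOp/DenseCholesky.h>

using namespace Spectra;

int main()
{
    const int n = 100;
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
    Eigen::MatrixXd A = M + M.transpose();

    // A dense SPD B: 2 on the diagonal, 1 on the sub/super-diagonals
    Eigen::MatrixXd B = Eigen::MatrixXd::Identity(n, n) * 2.0;
    for (int i = 0; i + 1 < n; i++) { B(i, i + 1) = 1.0; B(i + 1, i) = 1.0; }

    DenseSymMatProd<double> op(A);
    DenseCholesky<double> Bop(B);

    SymGEigsSolver<double, LARGEST_ALGE, DenseSymMatProd<double>,
                   DenseCholesky<double>, GEIGS_CHOLESKY> geigs(&op, &Bop, 3, 6);
    geigs.init();
    geigs.compute();
    if (geigs.info() == SUCCESSFUL)
        std::cout << geigs.eigenvalues().transpose() << std::endl;
    return 0;
}
\endcode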
+ /// \param Bop Pointer to the \f$B\f$ matrix operation object. It + /// represents a Cholesky decomposition of \f$B\f$, and should + /// implement the lower and upper triangular solving operations: + /// calculating \f$L^{-1}v\f$ and \f$(L')^{-1}v\f$ for any vector + /// \f$v\f$, where \f$LL'=B\f$. Users could either + /// create the object from the wrapper classes such as DenseCholesky, or + /// define their own that implements all the public member functions + /// as in DenseCholesky. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. + /// + SymGEigsSolver(OpType* op, BOpType* Bop, Index nev, Index ncv) : + SymEigsBase, IdentityBOp>( + new SymGEigsCholeskyOp(*op, *Bop), NULL, nev, ncv), + m_Bop(Bop) + {} + + /// \cond + + ~SymGEigsSolver() + { + // m_op contains the constructed SymGEigsCholeskyOp object + delete this->m_op; + } + + Matrix eigenvectors(Index nvec) const + { + Matrix res = SymEigsBase, IdentityBOp>::eigenvectors(nvec); + Vector tmp(res.rows()); + const Index nconv = res.cols(); + for (Index i = 0; i < nconv; i++) + { + m_Bop->upper_triangular_solve(&res(0, i), tmp.data()); + res.col(i).noalias() = tmp; + } + + return res; + } + + Matrix eigenvectors() const + { + return SymGEigsSolver::eigenvectors(this->m_nev); + } + + /// \endcond +}; + +/// +/// \ingroup GEigenSolver +/// +/// This class implements the generalized eigen solver for real symmetric +/// matrices in the regular inverse mode, i.e., to solve \f$Ax=\lambda Bx\f$ +/// where \f$A\f$ is symmetric, and \f$B\f$ is positive definite with the operations +/// defined below. +/// +/// This solver requires two matrix operation objects: one for \f$A\f$ that implements +/// the matrix multiplication \f$Av\f$, and one for \f$B\f$ that implements the +/// matrix-vector product \f$Bv\f$ and the linear equation solving operation \f$B^{-1}v\f$. +/// +/// If \f$A\f$ and \f$B\f$ are stored as Eigen matrices, then the first operation +/// can be created using the DenseSymMatProd or SparseSymMatProd classes, and +/// the second operation can be created using the SparseRegularInverse class. There is no +/// wrapper class for a dense \f$B\f$ matrix since in this case the Cholesky mode +/// is always preferred. If the users need to define their own operation classes, then they +/// should implement all the public member functions as in those built-in classes. +/// +/// \tparam Scalar The element type of the matrix. +/// Currently supported types are `float`, `double` and `long double`. +/// \tparam SelectionRule An enumeration value indicating the selection rule of +/// the requested eigenvalues, for example `LARGEST_MAGN` +/// to retrieve eigenvalues with the largest magnitude. +/// The full list of enumeration values can be found in +/// \ref Enumerations. +/// \tparam OpType The name of the matrix operation class for \f$A\f$. Users could either +/// use the wrapper classes such as DenseSymMatProd and +/// SparseSymMatProd, or define their +/// own that implements all the public member functions as in +/// DenseSymMatProd. +/// \tparam BOpType The name of the matrix operation class for \f$B\f$. 
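The eigenvectors() override above undoes the congruence transform: the base class solves the standard problem for C = L^{-1} A L^{-T}, and the generalized eigenvectors are recovered as x = (L')^{-1} y. A small Eigen-only sketch (no Spectra) verifying that identity numerically:

\code{.cpp}
#include <Eigen/Dense>
#include <iostream>

int main()
{
    const int n = 5;
    Eigen::MatrixXd M = Eigen::MatrixXd::Random(n, n);
    Eigen::MatrixXd A = M + M.transpose();
    Eigen::MatrixXd B = Eigen::MatrixXd::Identity(n, n) * 2.0;   // any SPD B works here

    Eigen::LLT<Eigen::MatrixXd> llt(B);                          // B = L * L'
    Eigen::MatrixXd L = llt.matrixL();
    Eigen::MatrixXd U = llt.matrixU();

    // Standard problem matrix C = L^{-1} * A * L^{-T}
    Eigen::MatrixXd C = L.triangularView<Eigen::Lower>().solve(A);
    C = L.triangularView<Eigen::Lower>().solve(C.transpose()).transpose();

    Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(C);
    const double lambda = es.eigenvalues()(n - 1);               // largest eigenvalue of C
    Eigen::VectorXd y = es.eigenvectors().col(n - 1);

    // Back-transform: x = (L')^{-1} * y solves the generalized problem A x = lambda B x
    Eigen::VectorXd x = U.triangularView<Eigen::Upper>().solve(y);
    std::cout << "residual: " << (A * x - lambda * B * x).norm() << std::endl;  // ~ machine precision
    return 0;
}
\endcode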
Users could either +/// use the wrapper class SparseRegularInverse, or define their +/// own that implements all the public member functions as in +/// SparseRegularInverse. +/// \tparam GEigsMode Mode of the generalized eigen solver. In this solver +/// it is Spectra::GEIGS_REGULAR_INVERSE. +/// + +// Partial specialization for GEigsMode = GEIGS_REGULAR_INVERSE +template +class SymGEigsSolver : + public SymEigsBase, BOpType> +{ +private: + typedef Eigen::Index Index; + +public: + /// + /// Constructor to create a solver object. + /// + /// \param op Pointer to the \f$A\f$ matrix operation object. It + /// should implement the matrix-vector multiplication operation of \f$A\f$: + /// calculating \f$Av\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper classes such as DenseSymMatProd, or + /// define their own that implements all the public member functions + /// as in DenseSymMatProd. + /// \param Bop Pointer to the \f$B\f$ matrix operation object. It should + /// implement the multiplication operation \f$Bv\f$ and the linear equation + /// solving operation \f$B^{-1}v\f$ for any vector \f$v\f$. Users could either + /// create the object from the wrapper class SparseRegularInverse, or + /// define their own that implements all the public member functions + /// as in SparseRegularInverse. + /// \param nev Number of eigenvalues requested. This should satisfy \f$1\le nev \le n-1\f$, + /// where \f$n\f$ is the size of matrix. + /// \param ncv Parameter that controls the convergence speed of the algorithm. + /// Typically a larger `ncv` means faster convergence, but it may + /// also result in greater memory use and more matrix operations + /// in each iteration. This parameter must satisfy \f$nev < ncv \le n\f$, + /// and is advised to take \f$ncv \ge 2\cdot nev\f$. + /// + SymGEigsSolver(OpType* op, BOpType* Bop, Index nev, Index ncv) : + SymEigsBase, BOpType>( + new SymGEigsRegInvOp(*op, *Bop), Bop, nev, ncv) + {} + + /// \cond + ~SymGEigsSolver() + { + // m_op contains the constructed SymGEigsRegInvOp object + delete this->m_op; + } + /// \endcond +}; + +} // namespace Spectra + +#endif // SYM_GEIGS_SOLVER_H diff --git a/gtsam/3rdparty/Spectra/Util/CompInfo.h b/gtsam/3rdparty/Spectra/Util/CompInfo.h new file mode 100644 index 000000000..07b8399a1 --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/CompInfo.h @@ -0,0 +1,35 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef COMP_INFO_H +#define COMP_INFO_H + +namespace Spectra { + +/// +/// \ingroup Enumerations +/// +/// The enumeration to report the status of computation. +/// +enum COMPUTATION_INFO +{ + SUCCESSFUL = 0, ///< Computation was successful. + + NOT_COMPUTED, ///< Used in eigen solvers, indicating that computation + ///< has not been conducted. Users should call + ///< the `compute()` member function of solvers. + + NOT_CONVERGING, ///< Used in eigen solvers, indicating that some eigenvalues + ///< did not converge. The `compute()` + ///< function returns the number of converged eigenvalues. + + NUMERICAL_ISSUE ///< Used in Cholesky decomposition, indicating that the + ///< matrix is not positive definite. 
+}; + +} // namespace Spectra + +#endif // COMP_INFO_H diff --git a/gtsam/3rdparty/Spectra/Util/GEigsMode.h b/gtsam/3rdparty/Spectra/Util/GEigsMode.h new file mode 100644 index 000000000..a547ac0bf --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/GEigsMode.h @@ -0,0 +1,32 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef GEIGS_MODE_H +#define GEIGS_MODE_H + +namespace Spectra { + +/// +/// \ingroup Enumerations +/// +/// The enumeration to specify the mode of generalized eigenvalue solver. +/// +enum GEIGS_MODE +{ + GEIGS_CHOLESKY = 0, ///< Using Cholesky decomposition to solve generalized eigenvalues. + + GEIGS_REGULAR_INVERSE, ///< Regular inverse mode for generalized eigenvalue solver. + + GEIGS_SHIFT_INVERT, ///< Shift-and-invert mode for generalized eigenvalue solver. + + GEIGS_BUCKLING, ///< Buckling mode for generalized eigenvalue solver. + + GEIGS_CAYLEY ///< Cayley transformation mode for generalized eigenvalue solver. +}; + +} // namespace Spectra + +#endif // GEIGS_MODE_H diff --git a/gtsam/3rdparty/Spectra/Util/SelectionRule.h b/gtsam/3rdparty/Spectra/Util/SelectionRule.h new file mode 100644 index 000000000..237950b4d --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/SelectionRule.h @@ -0,0 +1,275 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SELECTION_RULE_H +#define SELECTION_RULE_H + +#include // std::vector +#include // std::abs +#include // std::sort +#include // std::complex +#include // std::pair +#include // std::invalid_argument + +namespace Spectra { + +/// +/// \defgroup Enumerations +/// +/// Enumeration types for the selection rule of eigenvalues. +/// + +/// +/// \ingroup Enumerations +/// +/// The enumeration of selection rules of desired eigenvalues. +/// +enum SELECT_EIGENVALUE +{ + LARGEST_MAGN = 0, ///< Select eigenvalues with largest magnitude. Magnitude + ///< means the absolute value for real numbers and norm for + ///< complex numbers. Applies to both symmetric and general + ///< eigen solvers. + + LARGEST_REAL, ///< Select eigenvalues with largest real part. Only for general eigen solvers. + + LARGEST_IMAG, ///< Select eigenvalues with largest imaginary part (in magnitude). Only for general eigen solvers. + + LARGEST_ALGE, ///< Select eigenvalues with largest algebraic value, considering + ///< any negative sign. Only for symmetric eigen solvers. + + SMALLEST_MAGN, ///< Select eigenvalues with smallest magnitude. Applies to both symmetric and general + ///< eigen solvers. + + SMALLEST_REAL, ///< Select eigenvalues with smallest real part. Only for general eigen solvers. + + SMALLEST_IMAG, ///< Select eigenvalues with smallest imaginary part (in magnitude). Only for general eigen solvers. + + SMALLEST_ALGE, ///< Select eigenvalues with smallest algebraic value. Only for symmetric eigen solvers. + + BOTH_ENDS ///< Select eigenvalues half from each end of the spectrum. When + ///< `nev` is odd, compute more from the high end. Only for symmetric eigen solvers. +}; + +/// +/// \ingroup Enumerations +/// +/// The enumeration of selection rules of desired eigenvalues. Alias for `SELECT_EIGENVALUE`. 
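Among the rules listed above, BOTH_ENDS is the one symmetric-solver rule that mixes both ends of the spectrum. A hedged usage sketch (solver class and include path assumed, as in the earlier examples): requesting nev = 4 should return the two largest and the two smallest algebraic eigenvalues.

\code{.cpp}
#include <Eigen/Core>
#include <iostream>
#include <Spectra/SymEigsSolver.h>  // assumed include path

using namespace Spectra;

int main()
{
    // Diagonal matrix with eigenvalues 1, 2, ..., 10
    Eigen::MatrixXd D = Eigen::MatrixXd::Zero(10, 10);
    for (int i = 0; i < 10; i++)
        D(i, i) = i + 1;

    DenseSymMatProd<double> op(D);
    SymEigsSolver<double, BOTH_ENDS, DenseSymMatProd<double> > eigs(&op, 4, 8);
    eigs.init();
    eigs.compute();
    if (eigs.info() == SUCCESSFUL)
        // Expected to contain the two ends {10, 9, 2, 1}, sorted by LARGEST_ALGE
        std::cout << eigs.eigenvalues().transpose() << std::endl;
    return 0;
}
\endcode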
+/// +enum SELECT_EIGENVALUE_ALIAS +{ + WHICH_LM = 0, ///< Alias for `LARGEST_MAGN` + WHICH_LR, ///< Alias for `LARGEST_REAL` + WHICH_LI, ///< Alias for `LARGEST_IMAG` + WHICH_LA, ///< Alias for `LARGEST_ALGE` + WHICH_SM, ///< Alias for `SMALLEST_MAGN` + WHICH_SR, ///< Alias for `SMALLEST_REAL` + WHICH_SI, ///< Alias for `SMALLEST_IMAG` + WHICH_SA, ///< Alias for `SMALLEST_ALGE` + WHICH_BE ///< Alias for `BOTH_ENDS` +}; + +/// \cond + +// Get the element type of a "scalar" +// ElemType => double +// ElemType< std::complex > => double +template +class ElemType +{ +public: + typedef T type; +}; + +template +class ElemType > +{ +public: + typedef T type; +}; + +// When comparing eigenvalues, we first calculate the "target" +// to sort. For example, if we want to choose the eigenvalues with +// largest magnitude, the target will be -abs(x). +// The minus sign is due to the fact that std::sort() sorts in ascending order. + +// Default target: throw an exception +template +class SortingTarget +{ +public: + static typename ElemType::type get(const Scalar& val) + { + using std::abs; + throw std::invalid_argument("incompatible selection rule"); + return -abs(val); + } +}; + +// Specialization for LARGEST_MAGN +// This covers [float, double, complex] x [LARGEST_MAGN] +template +class SortingTarget +{ +public: + static typename ElemType::type get(const Scalar& val) + { + using std::abs; + return -abs(val); + } +}; + +// Specialization for LARGEST_REAL +// This covers [complex] x [LARGEST_REAL] +template +class SortingTarget, LARGEST_REAL> +{ +public: + static RealType get(const std::complex& val) + { + return -val.real(); + } +}; + +// Specialization for LARGEST_IMAG +// This covers [complex] x [LARGEST_IMAG] +template +class SortingTarget, LARGEST_IMAG> +{ +public: + static RealType get(const std::complex& val) + { + using std::abs; + return -abs(val.imag()); + } +}; + +// Specialization for LARGEST_ALGE +// This covers [float, double] x [LARGEST_ALGE] +template +class SortingTarget +{ +public: + static Scalar get(const Scalar& val) + { + return -val; + } +}; + +// Here BOTH_ENDS is the same as LARGEST_ALGE, but +// we need some additional steps, which are done in +// SymEigsSolver.h => retrieve_ritzpair(). +// There we move the smallest values to the proper locations. 
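The negation trick described in the comments above (sort on -|x| or -x so that std::sort's ascending order yields a descending arrangement) can be seen in a few lines. A standalone sketch mirroring what SortEigenvalue does for the LARGEST_MAGN target:

\code{.cpp}
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <utility>
#include <vector>

int main()
{
    const double vals[] = {1.5, -4.0, 0.5, 3.0};

    // (target, original index) pairs; target = -|value| for LARGEST_MAGN
    std::vector<std::pair<double, int> > pairs;
    for (int i = 0; i < 4; i++)
        pairs.push_back(std::make_pair(-std::abs(vals[i]), i));

    std::sort(pairs.begin(), pairs.end());   // ascending targets == descending magnitudes

    for (const auto& p : pairs)
        std::printf("%d ", p.second);        // prints: 1 3 0 2 (indices by decreasing |value|)
    std::printf("\n");
    return 0;
}
\endcode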
+template +class SortingTarget +{ +public: + static Scalar get(const Scalar& val) + { + return -val; + } +}; + +// Specialization for SMALLEST_MAGN +// This covers [float, double, complex] x [SMALLEST_MAGN] +template +class SortingTarget +{ +public: + static typename ElemType::type get(const Scalar& val) + { + using std::abs; + return abs(val); + } +}; + +// Specialization for SMALLEST_REAL +// This covers [complex] x [SMALLEST_REAL] +template +class SortingTarget, SMALLEST_REAL> +{ +public: + static RealType get(const std::complex& val) + { + return val.real(); + } +}; + +// Specialization for SMALLEST_IMAG +// This covers [complex] x [SMALLEST_IMAG] +template +class SortingTarget, SMALLEST_IMAG> +{ +public: + static RealType get(const std::complex& val) + { + using std::abs; + return abs(val.imag()); + } +}; + +// Specialization for SMALLEST_ALGE +// This covers [float, double] x [SMALLEST_ALGE] +template +class SortingTarget +{ +public: + static Scalar get(const Scalar& val) + { + return val; + } +}; + +// Sort eigenvalues and return the order index +template +class PairComparator +{ +public: + bool operator()(const PairType& v1, const PairType& v2) + { + return v1.first < v2.first; + } +}; + +template +class SortEigenvalue +{ +private: + typedef typename ElemType::type TargetType; // Type of the sorting target, will be + // a floating number type, e.g. "double" + typedef std::pair PairType; // Type of the sorting pair, including + // the sorting target and the index + + std::vector pair_sort; + +public: + SortEigenvalue(const T* start, int size) : + pair_sort(size) + { + for (int i = 0; i < size; i++) + { + pair_sort[i].first = SortingTarget::get(start[i]); + pair_sort[i].second = i; + } + PairComparator comp; + std::sort(pair_sort.begin(), pair_sort.end(), comp); + } + + std::vector index() + { + std::vector ind(pair_sort.size()); + for (unsigned int i = 0; i < ind.size(); i++) + ind[i] = pair_sort[i].second; + + return ind; + } +}; + +/// \endcond + +} // namespace Spectra + +#endif // SELECTION_RULE_H diff --git a/gtsam/3rdparty/Spectra/Util/SimpleRandom.h b/gtsam/3rdparty/Spectra/Util/SimpleRandom.h new file mode 100644 index 000000000..83fa7c86f --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/SimpleRandom.h @@ -0,0 +1,91 @@ +// Copyright (C) 2016-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef SIMPLE_RANDOM_H +#define SIMPLE_RANDOM_H + +#include + +/// \cond + +namespace Spectra { + +// We need a simple pseudo random number generator here: +// 1. It is used to generate initial and restarted residual vector. +// 2. It is not necessary to be so "random" and advanced. All we hope +// is that the residual vector is not in the space spanned by the +// current Krylov space. This should be met almost surely. +// 3. We don't want to call RNG in C++, since we actually want the +// algorithm to be deterministic. Also, calling RNG in C/C++ is not +// allowed in R packages submitted to CRAN. +// 4. The method should be as simple as possible, so an LCG is enough. +// 5. 
Based on public domain code by Ray Gardner +// http://stjarnhimlen.se/snippets/rg_rand.c + +template +class SimpleRandom +{ +private: + typedef Eigen::Index Index; + typedef Eigen::Matrix Vector; + + const unsigned int m_a; // multiplier + const unsigned long m_max; // 2^31 - 1 + long m_rand; + + inline long next_long_rand(long seed) + { + unsigned long lo, hi; + + lo = m_a * (long) (seed & 0xFFFF); + hi = m_a * (long) ((unsigned long) seed >> 16); + lo += (hi & 0x7FFF) << 16; + if (lo > m_max) + { + lo &= m_max; + ++lo; + } + lo += hi >> 15; + if (lo > m_max) + { + lo &= m_max; + ++lo; + } + return (long) lo; + } + +public: + SimpleRandom(unsigned long init_seed) : + m_a(16807), + m_max(2147483647L), + m_rand(init_seed ? (init_seed & m_max) : 1) + {} + + Scalar random() + { + m_rand = next_long_rand(m_rand); + return Scalar(m_rand) / Scalar(m_max) - Scalar(0.5); + } + + // Vector of random numbers of type Scalar + // Ranging from -0.5 to 0.5 + Vector random_vec(const Index len) + { + Vector res(len); + for (Index i = 0; i < len; i++) + { + m_rand = next_long_rand(m_rand); + res[i] = Scalar(m_rand) / Scalar(m_max) - Scalar(0.5); + } + return res; + } +}; + +} // namespace Spectra + +/// \endcond + +#endif // SIMPLE_RANDOM_H diff --git a/gtsam/3rdparty/Spectra/Util/TypeTraits.h b/gtsam/3rdparty/Spectra/Util/TypeTraits.h new file mode 100644 index 000000000..29288c5a6 --- /dev/null +++ b/gtsam/3rdparty/Spectra/Util/TypeTraits.h @@ -0,0 +1,71 @@ +// Copyright (C) 2018-2019 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. + +#ifndef TYPE_TRAITS_H +#define TYPE_TRAITS_H + +#include +#include + +/// \cond + +namespace Spectra { + +// For a real value type "Scalar", we want to know its smallest +// positive value, i.e., std::numeric_limits::min(). +// However, we must take non-standard value types into account, +// so we rely on Eigen::NumTraits. +// +// Eigen::NumTraits has defined epsilon() and lowest(), but +// lowest() means negative highest(), which is a very small +// negative value. +// +// Therefore, we manually define this limit, and use eplison()^3 +// to mimic it for non-standard types. 
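The two branches described in the comment above can be compared directly. A standalone sketch (Eigen plus the standard library only, not part of the header) printing the smallest positive double next to the epsilon-cubed fallback used for non-standard scalar types:

\code{.cpp}
#include <Eigen/Core>
#include <cstdio>
#include <limits>

int main()
{
    // Branch taken by the full specialization for double
    std::printf("numeric_limits<double>::min(): %g\n", std::numeric_limits<double>::min());

    // Branch taken by the generic definition: epsilon()^3
    std::printf("epsilon()^3 fallback:          %g\n",
                Eigen::numext::pow(Eigen::NumTraits<double>::epsilon(), 3.0));
    return 0;
}
\endcode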
+ +// Generic definition +template +struct TypeTraits +{ + static inline Scalar min() + { + return Eigen::numext::pow(Eigen::NumTraits::epsilon(), Scalar(3)); + } +}; + +// Full specialization +template <> +struct TypeTraits +{ + static inline float min() + { + return std::numeric_limits::min(); + } +}; + +template <> +struct TypeTraits +{ + static inline double min() + { + return std::numeric_limits::min(); + } +}; + +template <> +struct TypeTraits +{ + static inline long double min() + { + return std::numeric_limits::min(); + } +}; + +} // namespace Spectra + +/// \endcond + +#endif // TYPE_TRAITS_H diff --git a/gtsam/3rdparty/Spectra/contrib/LOBPCGSolver.h b/gtsam/3rdparty/Spectra/contrib/LOBPCGSolver.h new file mode 100644 index 000000000..69c4d92c0 --- /dev/null +++ b/gtsam/3rdparty/Spectra/contrib/LOBPCGSolver.h @@ -0,0 +1,552 @@ +// Written by Anna Araslanova +// Modified by Yixuan Qiu +// License: MIT + +#ifndef LOBPCG_SOLVER +#define LOBPCG_SOLVER + +#include +#include + +#include +#include +#include +#include +#include + +#include "../SymGEigsSolver.h" + +namespace Spectra { + +/// +/// \ingroup EigenSolver +/// + +/// *** METHOD +/// The class represent the LOBPCG algorithm, which was invented by Andrew Knyazev +/// Theoretical background of the procedure can be found in the articles below +/// - Knyazev, A.V., 2001. Toward the optimal preconditioned eigensolver : Locally optimal block preconditioned conjugate gradient method.SIAM journal on scientific computing, 23(2), pp.517 - 541. +/// - Knyazev, A.V., Argentati, M.E., Lashuk, I. and Ovtchinnikov, E.E., 2007. Block locally optimal preconditioned eigenvalue xolvers(BLOPEX) in HYPRE and PETSc.SIAM Journal on Scientific Computing, 29(5), pp.2224 - 2239. +/// +/// *** CONDITIONS OF USE +/// Locally Optimal Block Preconditioned Conjugate Gradient(LOBPCG) is a method for finding the M smallest eigenvalues +/// and eigenvectors of a large symmetric positive definite generalized eigenvalue problem +/// \f$Ax=\lambda Bx,\f$ +/// where \f$A_{NxN}\f$ is a symmetric matrix, \f$B\f$ is symmetric and positive - definite. 
\f$A and B\f$ are also assumed large and sparse +/// \f$\textit{M}\f$ should be \f$\<< textit{N}\f$ (at least \f$\textit{5M} < \textit{N} \f$) +/// +/// *** ARGUMENTS +/// Eigen::SparseMatrix A; // N*N - Ax = lambda*Bx, lrage and sparse +/// Eigen::SparseMatrix X; // N*M - initial approximations to eigenvectors (random in general case) +/// Spectra::LOBPCGSolver solver(A, X); +/// *Eigen::SparseMatrix B; // N*N - Ax = lambda*Bx, sparse, positive definite +/// solver.setConstraints(B); +/// *Eigen::SparseMatrix Y; // N*K - constraints, already found eigenvectors +/// solver.setB(B); +/// *Eigen::SparseMatrix T; // N*N - preconditioner ~ A^-1 +/// solver.setPreconditioner(T); +/// +/// *** OUTCOMES +/// solver.solve(); // compute eigenpairs // void +/// solver.info(); // state of converjance // int +/// solver.residuals(); // get residuals to evaluate biases // Eigen::Matrix +/// solver.eigenvalues(); // get eigenvalues // Eigen::Matrix +/// solver.eigenvectors(); // get eigenvectors // Eigen::Matrix +/// +/// *** EXAMPLE +/// \code{.cpp} +/// #include +/// +/// // random A +/// Matrix a; +/// a = (Matrix::Random(10, 10).array() > 0.6).cast() * Matrix::Random(10, 10).array() * 5; +/// a = Matrix((a).triangularView()) + Matrix((a).triangularView()).transpose(); +/// for (int i = 0; i < 10; i++) +/// a(i, i) = i + 0.5; +/// std::cout << a << "\n"; +/// Eigen::SparseMatrix A(a.sparseView()); +/// // random X +/// Eigen::Matrix x; +/// x = Matrix::Random(10, 2).array(); +/// Eigen::SparseMatrix X(x.sparseView()); +/// // solve Ax = lambda*x +/// Spectra::LOBPCGSolver solver(A, X); +/// solver.compute(10, 1e-4); // 10 iterations, L2_tolerance = 1e-4*N +/// std::cout << "info\n" << solver.info() << std::endl; +/// std::cout << "eigenvalues\n" << solver.eigenvalues() << std::endl; +/// std::cout << "eigenvectors\n" << solver.eigenvectors() << std::endl; +/// std::cout << "residuals\n" << solver.residuals() << std::endl; +/// \endcode +/// + +template +class LOBPCGSolver +{ +private: + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + + typedef std::complex Complex; + typedef Eigen::Matrix ComplexMatrix; + typedef Eigen::Matrix ComplexVector; + + typedef Eigen::SparseMatrix SparseMatrix; + typedef Eigen::SparseMatrix SparseComplexMatrix; + + const int m_n; // dimension of matrix A + const int m_nev; // number of eigenvalues requested + SparseMatrix A, X; + SparseMatrix m_Y, m_B, m_preconditioner; + bool flag_with_constraints, flag_with_B, flag_with_preconditioner; + +public: + SparseMatrix m_residuals; + Matrix m_evectors; + Vector m_evalues; + int m_info; + +private: + // B-orthonormalize matrix M + int orthogonalizeInPlace(SparseMatrix& M, SparseMatrix& B, + SparseMatrix& true_BM, bool has_true_BM = false) + { + SparseMatrix BM; + + if (has_true_BM == false) + { + if (flag_with_B) + { + BM = B * M; + } + else + { + BM = M; + } + } + else + { + BM = true_BM; + } + + Eigen::SimplicialLDLT chol_MBM(M.transpose() * BM); + + if (chol_MBM.info() != SUCCESSFUL) + { + // LDLT decomposition fail + m_info = chol_MBM.info(); + return chol_MBM.info(); + } + + SparseComplexMatrix Upper_MBM = chol_MBM.matrixU().template cast(); + ComplexVector D_MBM_vec = chol_MBM.vectorD().template cast(); + + D_MBM_vec = D_MBM_vec.cwiseSqrt(); + + for (int i = 0; i < D_MBM_vec.rows(); i++) + { + D_MBM_vec(i) = Complex(1.0, 0.0) / D_MBM_vec(i); + } + + SparseComplexMatrix D_MBM_mat(D_MBM_vec.asDiagonal()); + + SparseComplexMatrix U_inv(Upper_MBM.rows(), Upper_MBM.cols()); + U_inv.setIdentity(); + 
Upper_MBM.template triangularView().solveInPlace(U_inv); + + SparseComplexMatrix right_product = U_inv * D_MBM_mat; + M = M * right_product.real(); + if (flag_with_B) + { + true_BM = B * M; + } + else + { + true_BM = M; + } + + return SUCCESSFUL; + } + + void applyConstraintsInPlace(SparseMatrix& X, SparseMatrix& Y, + SparseMatrix& B) + { + SparseMatrix BY; + if (flag_with_B) + { + BY = B * Y; + } + else + { + BY = Y; + } + + SparseMatrix YBY = Y.transpose() * BY; + SparseMatrix BYX = BY.transpose() * X; + + SparseMatrix YBY_XYX = (Matrix(YBY).bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(Matrix(BYX))).sparseView(); + X = X - Y * YBY_XYX; + } + + /* + return + 'AB + CD' + */ + Matrix stack_4_matricies(Matrix A, Matrix B, + Matrix C, Matrix D) + { + Matrix result(A.rows() + C.rows(), A.cols() + B.cols()); + result.topLeftCorner(A.rows(), A.cols()) = A; + result.topRightCorner(B.rows(), B.cols()) = B; + result.bottomLeftCorner(C.rows(), C.cols()) = C; + result.bottomRightCorner(D.rows(), D.cols()) = D; + return result; + } + + Matrix stack_9_matricies(Matrix A, Matrix B, Matrix C, + Matrix D, Matrix E, Matrix F, + Matrix G, Matrix H, Matrix I) + { + Matrix result(A.rows() + D.rows() + G.rows(), A.cols() + B.cols() + C.cols()); + result.block(0, 0, A.rows(), A.cols()) = A; + result.block(0, A.cols(), B.rows(), B.cols()) = B; + result.block(0, A.cols() + B.cols(), C.rows(), C.cols()) = C; + result.block(A.rows(), 0, D.rows(), D.cols()) = D; + result.block(A.rows(), A.cols(), E.rows(), E.cols()) = E; + result.block(A.rows(), A.cols() + B.cols(), F.rows(), F.cols()) = F; + result.block(A.rows() + D.rows(), 0, G.rows(), G.cols()) = G; + result.block(A.rows() + D.rows(), A.cols(), H.rows(), H.cols()) = H; + result.block(A.rows() + D.rows(), A.cols() + B.cols(), I.rows(), I.cols()) = I; + + return result; + } + + void sort_epairs(Vector& evalues, Matrix& evectors, int SelectionRule) + { + std::function cmp; + if (SelectionRule == SMALLEST_ALGE) + cmp = std::less{}; + else + cmp = std::greater{}; + + std::map epairs(cmp); + for (int i = 0; i < m_evectors.cols(); ++i) + epairs.insert(std::make_pair(evalues(i), evectors.col(i))); + + int i = 0; + for (auto& epair : epairs) + { + evectors.col(i) = epair.second; + evalues(i) = epair.first; + i++; + } + } + + void removeColumns(SparseMatrix& matrix, std::vector& colToRemove) + { + // remove columns through matrix multiplication + SparseMatrix new_matrix(matrix.cols(), matrix.cols() - int(colToRemove.size())); + int iCol = 0; + std::vector> tripletList; + tripletList.reserve(matrix.cols() - int(colToRemove.size())); + + for (int iRow = 0; iRow < matrix.cols(); iRow++) + { + if (std::find(colToRemove.begin(), colToRemove.end(), iRow) == colToRemove.end()) + { + tripletList.push_back(Eigen::Triplet(iRow, iCol, 1)); + iCol++; + } + } + + new_matrix.setFromTriplets(tripletList.begin(), tripletList.end()); + matrix = matrix * new_matrix; + } + + int checkConvergence_getBlocksize(SparseMatrix& m_residuals, Scalar tolerance_L2, std::vector& columnsToDelete) + { + // square roots from sum of squares by column + int BlockSize = m_nev; + Scalar sum, buffer; + + for (int iCol = 0; iCol < m_nev; iCol++) + { + sum = 0; + for (int iRow = 0; iRow < m_n; iRow++) + { + buffer = m_residuals.coeff(iRow, iCol); + sum += buffer * buffer; + } + + if (sqrt(sum) < tolerance_L2) + { + BlockSize--; + columnsToDelete.push_back(iCol); + } + } + return BlockSize; + } + +public: + LOBPCGSolver(const SparseMatrix& A, const SparseMatrix X) : + m_n(A.rows()), + m_nev(X.cols()), 
+ m_info(NOT_COMPUTED), + flag_with_constraints(false), + flag_with_B(false), + flag_with_preconditioner(false), + A(A), + X(X) + { + if (A.rows() != X.rows() || A.rows() != A.cols()) + throw std::invalid_argument("Wrong size"); + + //if (m_n < 5* m_nev) + // throw std::invalid_argument("The problem size is small compared to the block size. Use standard eigensolver"); + } + + void setConstraints(const SparseMatrix& Y) + { + m_Y = Y; + flag_with_constraints = true; + } + + void setB(const SparseMatrix& B) + { + if (B.rows() != A.rows() || B.cols() != A.cols()) + throw std::invalid_argument("Wrong size"); + m_B = B; + flag_with_B = true; + } + + void setPreconditioner(const SparseMatrix& preconditioner) + { + m_preconditioner = preconditioner; + flag_with_preconditioner = true; + } + + void compute(int maxit = 10, Scalar tol_div_n = 1e-7) + { + Scalar tolerance_L2 = tol_div_n * m_n; + int BlockSize; + int max_iter = std::min(m_n, maxit); + + SparseMatrix directions, AX, AR, BX, AD, ADD, DD, BDD, BD, XAD, RAD, DAD, XBD, RBD, BR, sparse_eVecX, sparse_eVecR, sparse_eVecD, inverse_matrix; + Matrix XAR, RAR, XBR, gramA, gramB, eVecX, eVecR, eVecD; + std::vector columnsToDelete; + + if (flag_with_constraints) + { + // Apply the constraints Y to X + applyConstraintsInPlace(X, m_Y, m_B); + } + + // Make initial vectors orthonormal + // implicit BX declaration + if (orthogonalizeInPlace(X, m_B, BX) != SUCCESSFUL) + { + max_iter = 0; + } + + AX = A * X; + // Solve the following NxN eigenvalue problem for all N eigenvalues and -vectors: + // first approximation via a dense problem + Eigen::EigenSolver eigs(Matrix(X.transpose() * AX)); + + if (eigs.info() != SUCCESSFUL) + { + m_info = eigs.info(); + max_iter = 0; + } + else + { + m_evalues = eigs.eigenvalues().real(); + m_evectors = eigs.eigenvectors().real(); + sort_epairs(m_evalues, m_evectors, SMALLEST_ALGE); + sparse_eVecX = m_evectors.sparseView(); + + X = X * sparse_eVecX; + AX = AX * sparse_eVecX; + BX = BX * sparse_eVecX; + } + + for (int iter_num = 0; iter_num < max_iter; iter_num++) + { + m_residuals.resize(m_n, m_nev); + for (int i = 0; i < m_nev; i++) + { + m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i); + } + BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete); + + if (BlockSize == 0) + { + m_info = SUCCESSFUL; + break; + } + + // substitution of the original active mask + if (columnsToDelete.size() > 0) + { + removeColumns(m_residuals, columnsToDelete); + if (iter_num > 0) + { + removeColumns(directions, columnsToDelete); + removeColumns(AD, columnsToDelete); + removeColumns(BD, columnsToDelete); + } + columnsToDelete.clear(); // for next iteration + } + + if (flag_with_preconditioner) + { + // Apply the preconditioner to the residuals + m_residuals = m_preconditioner * m_residuals; + } + + if (flag_with_constraints) + { + // Apply the constraints Y to residuals + applyConstraintsInPlace(m_residuals, m_Y, m_B); + } + + if (orthogonalizeInPlace(m_residuals, m_B, BR) != SUCCESSFUL) + { + break; + } + AR = A * m_residuals; + + // Orthonormalize conjugate directions + if (iter_num > 0) + { + if (orthogonalizeInPlace(directions, m_B, BD, true) != SUCCESSFUL) + { + break; + } + AD = A * directions; + } + + // Perform the Rayleigh Ritz Procedure + XAR = Matrix(X.transpose() * AR); + RAR = Matrix(m_residuals.transpose() * AR); + XBR = Matrix(X.transpose() * BR); + + if (iter_num > 0) + { + XAD = X.transpose() * AD; + RAD = m_residuals.transpose() * AD; + DAD = directions.transpose() * AD; + XBD = 
X.transpose() * BD; + RBD = m_residuals.transpose() * BD; + + gramA = stack_9_matricies(m_evalues.asDiagonal(), XAR, XAD, XAR.transpose(), RAR, RAD, XAD.transpose(), RAD.transpose(), DAD.transpose()); + gramB = stack_9_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBD, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize), RBD, XBD.transpose(), RBD.transpose(), Matrix::Identity(BlockSize, BlockSize)); + } + else + { + gramA = stack_4_matricies(m_evalues.asDiagonal(), XAR, XAR.transpose(), RAR); + gramB = stack_4_matricies(Matrix::Identity(m_nev, m_nev), XBR, XBR.transpose(), Matrix::Identity(BlockSize, BlockSize)); + } + + //calculate the lowest/largest m eigenpairs; Solve the generalized eigenvalue problem. + DenseSymMatProd Aop(gramA); + DenseCholesky Bop(gramB); + + SymGEigsSolver, + DenseCholesky, GEIGS_CHOLESKY> + geigs(&Aop, &Bop, m_nev, std::min(10, int(gramA.rows()) - 1)); + + geigs.init(); + int nconv = geigs.compute(); + + //Mat evecs; + if (geigs.info() == SUCCESSFUL) + { + m_evalues = geigs.eigenvalues(); + m_evectors = geigs.eigenvectors(); + sort_epairs(m_evalues, m_evectors, SMALLEST_ALGE); + } + else + { + // Problem With General EgenVec + m_info = geigs.info(); + break; + } + + // Compute Ritz vectors + if (iter_num > 0) + { + eVecX = m_evectors.block(0, 0, m_nev, m_nev); + eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev); + eVecD = m_evectors.block(m_nev + BlockSize, 0, BlockSize, m_nev); + + sparse_eVecX = eVecX.sparseView(); + sparse_eVecR = eVecR.sparseView(); + sparse_eVecD = eVecD.sparseView(); + + DD = m_residuals * sparse_eVecR; // new conjugate directions + ADD = AR * sparse_eVecR; + BDD = BR * sparse_eVecR; + + DD = DD + directions * sparse_eVecD; + ADD = ADD + AD * sparse_eVecD; + BDD = BDD + BD * sparse_eVecD; + } + else + { + eVecX = m_evectors.block(0, 0, m_nev, m_nev); + eVecR = m_evectors.block(m_nev, 0, BlockSize, m_nev); + + sparse_eVecX = eVecX.sparseView(); + sparse_eVecR = eVecR.sparseView(); + + DD = m_residuals * sparse_eVecR; + ADD = AR * sparse_eVecR; + BDD = BR * sparse_eVecR; + } + + X = X * sparse_eVecX + DD; + AX = AX * sparse_eVecX + ADD; + BX = BX * sparse_eVecX + BDD; + + directions = DD; + AD = ADD; + BD = BDD; + + } // iteration loop + + // calculate last residuals + m_residuals.resize(m_n, m_nev); + for (int i = 0; i < m_nev; i++) + { + m_residuals.col(i) = AX.col(i) - m_evalues(i) * BX.col(i); + } + BlockSize = checkConvergence_getBlocksize(m_residuals, tolerance_L2, columnsToDelete); + + if (BlockSize == 0) + { + m_info = SUCCESSFUL; + } + } // compute + + Vector eigenvalues() + { + return m_evalues; + } + + Matrix eigenvectors() + { + return m_evectors; + } + + Matrix residuals() + { + return Matrix(m_residuals); + } + + int info() { return m_info; } +}; + +} // namespace Spectra + +#endif // LOBPCG_SOLVER diff --git a/gtsam/3rdparty/Spectra/contrib/PartialSVDSolver.h b/gtsam/3rdparty/Spectra/contrib/PartialSVDSolver.h new file mode 100644 index 000000000..2fab26b97 --- /dev/null +++ b/gtsam/3rdparty/Spectra/contrib/PartialSVDSolver.h @@ -0,0 +1,202 @@ +// Copyright (C) 2018 Yixuan Qiu +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 
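The per-column convergence test used by checkConvergence_getBlocksize() above boils down to comparing each residual column's L2 norm against tolerance_L2 = tol_div_n * n. A standalone Eigen sketch of that check (an illustration, not Spectra code):

\code{.cpp}
#include <Eigen/Core>
#include <iostream>

int main()
{
    const int n = 100, nev = 3;
    Eigen::MatrixXd residuals = Eigen::MatrixXd::Random(n, nev) * 1e-8;
    residuals.col(1).setConstant(0.1);              // one clearly unconverged column

    const double tol_div_n = 1e-7;
    const double tolerance_L2 = tol_div_n * n;      // same scaling as LOBPCGSolver::compute()

    int converged = 0;
    for (int c = 0; c < residuals.cols(); c++)
        if (residuals.col(c).norm() < tolerance_L2)
            converged++;

    std::cout << converged << " of " << nev << " columns converged\n";  // expect 2 of 3
    return 0;
}
\endcode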
+ +#ifndef PARTIAL_SVD_SOLVER_H +#define PARTIAL_SVD_SOLVER_H + +#include +#include "../SymEigsSolver.h" + +namespace Spectra { + +// Abstract class for matrix operation +template +class SVDMatOp +{ +public: + virtual int rows() const = 0; + virtual int cols() const = 0; + + // y_out = A' * A * x_in or y_out = A * A' * x_in + virtual void perform_op(const Scalar* x_in, Scalar* y_out) = 0; + + virtual ~SVDMatOp() {} +}; + +// Operation of a tall matrix in SVD +// We compute the eigenvalues of A' * A +// MatrixType is either Eigen::Matrix or Eigen::SparseMatrix +template +class SVDTallMatOp : public SVDMatOp +{ +private: + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_dim; + Vector m_cache; + +public: + // Constructor + SVDTallMatOp(ConstGenericMatrix& mat) : + m_mat(mat), + m_dim(std::min(mat.rows(), mat.cols())), + m_cache(mat.rows()) + {} + + // These are the rows and columns of A' * A + int rows() const { return m_dim; } + int cols() const { return m_dim; } + + // y_out = A' * A * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + MapConstVec x(x_in, m_mat.cols()); + MapVec y(y_out, m_mat.cols()); + m_cache.noalias() = m_mat * x; + y.noalias() = m_mat.transpose() * m_cache; + } +}; + +// Operation of a wide matrix in SVD +// We compute the eigenvalues of A * A' +// MatrixType is either Eigen::Matrix or Eigen::SparseMatrix +template +class SVDWideMatOp : public SVDMatOp +{ +private: + typedef Eigen::Matrix Vector; + typedef Eigen::Map MapConstVec; + typedef Eigen::Map MapVec; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_dim; + Vector m_cache; + +public: + // Constructor + SVDWideMatOp(ConstGenericMatrix& mat) : + m_mat(mat), + m_dim(std::min(mat.rows(), mat.cols())), + m_cache(mat.cols()) + {} + + // These are the rows and columns of A * A' + int rows() const { return m_dim; } + int cols() const { return m_dim; } + + // y_out = A * A' * x_in + void perform_op(const Scalar* x_in, Scalar* y_out) + { + MapConstVec x(x_in, m_mat.rows()); + MapVec y(y_out, m_mat.rows()); + m_cache.noalias() = m_mat.transpose() * x; + y.noalias() = m_mat * m_cache; + } +}; + +// Partial SVD solver +// MatrixType is either Eigen::Matrix or Eigen::SparseMatrix +template > +class PartialSVDSolver +{ +private: + typedef Eigen::Matrix Matrix; + typedef Eigen::Matrix Vector; + typedef const Eigen::Ref ConstGenericMatrix; + + ConstGenericMatrix m_mat; + const int m_m; + const int m_n; + SVDMatOp* m_op; + SymEigsSolver >* m_eigs; + int m_nconv; + Matrix m_evecs; + +public: + // Constructor + PartialSVDSolver(ConstGenericMatrix& mat, int ncomp, int ncv) : + m_mat(mat), m_m(mat.rows()), m_n(mat.cols()), m_evecs(0, 0) + { + // Determine the matrix type, tall or wide + if (m_m > m_n) + { + m_op = new SVDTallMatOp(mat); + } + else + { + m_op = new SVDWideMatOp(mat); + } + + // Solver object + m_eigs = new SymEigsSolver >(m_op, ncomp, ncv); + } + + // Destructor + virtual ~PartialSVDSolver() + { + delete m_eigs; + delete m_op; + } + + // Computation + int compute(int maxit = 1000, Scalar tol = 1e-10) + { + m_eigs->init(); + m_nconv = m_eigs->compute(maxit, tol); + + return m_nconv; + } + + // The converged singular values + Vector singular_values() const + { + Vector svals = m_eigs->eigenvalues().cwiseSqrt(); + + return svals; + } + + // The converged left singular vectors + Matrix matrix_U(int nu) + { + if (m_evecs.cols() < 1) + { 
+ m_evecs = m_eigs->eigenvectors(); + } + nu = std::min(nu, m_nconv); + if (m_m <= m_n) + { + return m_evecs.leftCols(nu); + } + + return m_mat * (m_evecs.leftCols(nu).array().rowwise() / m_eigs->eigenvalues().head(nu).transpose().array().sqrt()).matrix(); + } + + // The converged right singular vectors + Matrix matrix_V(int nv) + { + if (m_evecs.cols() < 1) + { + m_evecs = m_eigs->eigenvectors(); + } + nv = std::min(nv, m_nconv); + if (m_m > m_n) + { + return m_evecs.leftCols(nv); + } + + return m_mat.transpose() * (m_evecs.leftCols(nv).array().rowwise() / m_eigs->eigenvalues().head(nv).transpose().array().sqrt()).matrix(); + } +}; + +} // namespace Spectra + +#endif // PARTIAL_SVD_SOLVER_H diff --git a/gtsam/3rdparty/metis/CMakeLists.txt b/gtsam/3rdparty/metis/CMakeLists.txt index de46165ff..dc26aecb2 100644 --- a/gtsam/3rdparty/metis/CMakeLists.txt +++ b/gtsam/3rdparty/metis/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 2.8) +cmake_minimum_required(VERSION 3.0) project(METIS) # Add flags for currect directory and below diff --git a/gtsam/3rdparty/metis/libmetis/CMakeLists.txt b/gtsam/3rdparty/metis/libmetis/CMakeLists.txt index 330e989fa..92f931b98 100644 --- a/gtsam/3rdparty/metis/libmetis/CMakeLists.txt +++ b/gtsam/3rdparty/metis/libmetis/CMakeLists.txt @@ -12,6 +12,7 @@ endif() if(WIN32) set_target_properties(metis-gtsam PROPERTIES PREFIX "" + COMPILE_FLAGS /w RUNTIME_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/../../../bin") endif() diff --git a/gtsam/CMakeLists.txt b/gtsam/CMakeLists.txt index 3d1bbd2a7..e0e037f52 100644 --- a/gtsam/CMakeLists.txt +++ b/gtsam/CMakeLists.txt @@ -14,8 +14,7 @@ set (gtsam_subdirs sam sfm slam - smart - navigation + navigation ) set(gtsam_srcs) @@ -134,12 +133,6 @@ endif() # paths so that the compiler uses GTSAM headers in our source directory instead # of any previously installed GTSAM headers. target_include_directories(gtsam BEFORE PUBLIC - # SuiteSparse_config - $ - $ - # CCOLAMD - $ - $ # main gtsam includes: $ $ @@ -148,6 +141,19 @@ target_include_directories(gtsam BEFORE PUBLIC # unit tests: $ ) +# 3rdparty libraries: use the "system" flag so they are included via "-isystem" +# and warnings (and warnings-considered-errors) in those headers are not +# reported as warnings/errors in our targets: +target_include_directories(gtsam SYSTEM BEFORE PUBLIC + # SuiteSparse_config + $ + $ + # Spectra + $ + # CCOLAMD + $ + $ +) if(GTSAM_SUPPORT_NESTED_DISSECTION) target_include_directories(gtsam BEFORE PUBLIC $ @@ -186,11 +192,17 @@ install( list(APPEND GTSAM_EXPORTED_TARGETS gtsam) set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE) -# make sure that ccolamd compiles even in face of warnings +# Make sure that ccolamd compiles even in face of warnings +# and suppress all warnings from 3rd party code if Release build if(WIN32) - set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "-w") + set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "/w") else() + if("${CMAKE_BUILD_TYPE}" STREQUAL "Release") + # Suppress all warnings from 3rd party sources. 
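A minimal usage sketch for the PartialSVDSolver added above. The include path, matrix sizes, and ncv choice are illustrative; the constructor, compute(), singular_values(), matrix_U() and matrix_V() calls follow the header in this patch.

```cpp
#include <Eigen/Dense>
#include <iostream>
#include "PartialSVDSolver.h"  // header added above; adjust path to your include layout

int main() {
  // Tall 1000 x 50 matrix: the solver internally applies A' * A (a 50 x 50 operator).
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(1000, 50);

  // Ask for the 5 largest singular triplets, using 20 Lanczos basis vectors (ncv).
  Spectra::PartialSVDSolver<double, Eigen::MatrixXd> svds(A, 5, 20);
  int nconv = svds.compute();  // defaults: maxit = 1000, tol = 1e-10

  Eigen::VectorXd s = svds.singular_values();  // square roots of the converged eigenvalues
  Eigen::MatrixXd U = svds.matrix_U(5);        // 1000 x 5 left singular vectors
  Eigen::MatrixXd V = svds.matrix_V(5);        // 50 x 5 right singular vectors

  std::cout << nconv << " converged: " << s.transpose() << std::endl;
  return 0;
}
```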
+ set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "-w -Wno-everything") + else() set_source_files_properties(${3rdparty_srcs} PROPERTIES COMPILE_FLAGS "-Wno-error") + endif() endif() # Create the matlab toolbox for the gtsam library @@ -205,5 +217,5 @@ if (GTSAM_INSTALL_MATLAB_TOOLBOX) endif() # Wrap - wrap_and_install_library(../gtsam.h "${GTSAM_ADDITIONAL_LIBRARIES}" "" "${mexFlags}") + wrap_and_install_library(gtsam.i "${GTSAM_ADDITIONAL_LIBRARIES}" "" "${mexFlags}") endif () diff --git a/gtsam/base/DSFMap.h b/gtsam/base/DSFMap.h index 1d6bfdefa..3a9d21aad 100644 --- a/gtsam/base/DSFMap.h +++ b/gtsam/base/DSFMap.h @@ -21,6 +21,7 @@ #include // Provides size_t #include #include +#include namespace gtsam { @@ -120,4 +121,12 @@ class IndexPair : public std::pair { inline size_t i() const { return first; }; inline size_t j() const { return second; }; }; + +typedef std::vector IndexPairVector; +typedef std::set IndexPairSet; + +inline IndexPairVector IndexPairSetAsArray(IndexPairSet& set) { return IndexPairVector(set.begin(), set.end()); } + +typedef std::map IndexPairSetMap; +typedef DSFMap DSFMapIndexPair; } // namespace gtsam diff --git a/gtsam/base/DSFVector.h b/gtsam/base/DSFVector.h index fdbd96dc8..6e9bc5c6b 100644 --- a/gtsam/base/DSFVector.h +++ b/gtsam/base/DSFVector.h @@ -55,11 +55,6 @@ class GTSAM_EXPORT DSFBase { /// Merge the sets containing i1 and i2. Does nothing if i1 and i2 are already in the same set. void merge(const size_t& i1, const size_t& i2); - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - inline size_t findSet(size_t key) const {return find(key);} - inline void makeUnionInPlace(const size_t& i1, const size_t& i2) {return merge(i1,i2);} -#endif }; /** diff --git a/gtsam/base/GenericValue.h b/gtsam/base/GenericValue.h index e1cb3bc2c..0e928765f 100644 --- a/gtsam/base/GenericValue.h +++ b/gtsam/base/GenericValue.h @@ -30,6 +30,14 @@ #include #include // operator typeid +#ifdef _WIN32 +#define GENERICVALUE_VISIBILITY +#else +// This will trigger a LNKxxxx on MSVC, so disable for MSVC build +// Please refer to https://github.com/borglab/gtsam/blob/develop/Using-GTSAM-EXPORT.md +#define GENERICVALUE_VISIBILITY GTSAM_EXPORT +#endif + namespace gtsam { /** @@ -70,7 +78,7 @@ class GenericValue: public Value { } /// equals implementing generic Value interface - virtual bool equals_(const Value& p, double tol = 1e-9) const { + bool equals_(const Value& p, double tol = 1e-9) const override { // Cast the base class Value pointer to a templated generic class pointer const GenericValue& genericValue2 = static_cast(p); // Return the result of using the equals traits for the derived class @@ -83,15 +91,15 @@ class GenericValue: public Value { } /// Virtual print function, uses traits - virtual void print(const std::string& str) const { - std::cout << "(" << demangle(typeid(T).name()) << ") "; + void print(const std::string& str) const override { + std::cout << "(" << demangle(typeid(T).name()) << ")\n"; traits::Print(value_, str); } /** * Create a duplicate object returned as a pointer to the generic Value interface. */ - virtual Value* clone_() const { + Value* clone_() const override { GenericValue* ptr = new GenericValue(*this); // calls copy constructor to fill in return ptr; } @@ -99,19 +107,19 @@ class GenericValue: public Value { /** * Destroy and deallocate this object, only if it was originally allocated using clone_(). 
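To illustrate the IndexPair typedefs added to DSFMap.h above, a hypothetical clustering snippet; it assumes DSFMap's merge()/sets() interface (not shown in this patch) and made-up (image, feature) indices.

```cpp
#include <gtsam/base/DSFMap.h>

using namespace gtsam;

void clusterMatches() {
  // DSFMapIndexPair is the new shorthand for DSFMap<IndexPair>.
  DSFMapIndexPair dsf;
  dsf.merge(IndexPair(0, 1), IndexPair(1, 2));  // same track seen in images 0 and 1
  dsf.merge(IndexPair(1, 2), IndexPair(2, 5));  // ...and in image 2

  // Group keys by representative; IndexPairSetMap names the result type.
  IndexPairSetMap groups = dsf.sets();
  for (const auto& kv : groups) {
    IndexPairSet group = kv.second;
    // Convert to a vector when random access is convenient (e.g. for the wrappers).
    IndexPairVector members = IndexPairSetAsArray(group);
  }
}
```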
*/ - virtual void deallocate_() const { + void deallocate_() const override { delete this; } /** * Clone this value (normal clone on the heap, delete with 'delete' operator) */ - virtual boost::shared_ptr clone() const { + boost::shared_ptr clone() const override { return boost::allocate_shared(Eigen::aligned_allocator(), *this); } /// Generic Value interface version of retract - virtual Value* retract_(const Vector& delta) const { + Value* retract_(const Vector& delta) const override { // Call retract on the derived class using the retract trait function const T retractResult = traits::Retract(GenericValue::value(), delta); @@ -122,7 +130,7 @@ class GenericValue: public Value { } /// Generic Value interface version of localCoordinates - virtual Vector localCoordinates_(const Value& value2) const { + Vector localCoordinates_(const Value& value2) const override { // Cast the base class Value pointer to a templated generic class pointer const GenericValue& genericValue2 = static_cast&>(value2); @@ -142,12 +150,12 @@ class GenericValue: public Value { } /// Return run-time dimensionality - virtual size_t dim() const { + size_t dim() const override { return traits::GetDimension(value_); } /// Assignment operator - virtual Value& operator=(const Value& rhs) { + Value& operator=(const Value& rhs) override { // Cast the base class Value pointer to a derived class pointer const GenericValue& derivedRhs = static_cast(rhs); @@ -181,7 +189,7 @@ class GenericValue: public Value { // Alignment, see https://eigen.tuxfamily.org/dox/group__TopicStructHavingEigenMembers.html enum { NeedsToAlign = (sizeof(T) % 16) == 0 }; public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) + GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) }; /// use this macro instead of BOOST_CLASS_EXPORT for GenericValues @@ -198,4 +206,12 @@ const ValueType& Value::cast() const { return dynamic_cast&>(*this).value(); } +/** Functional constructor of GenericValue so T can be automatically deduced + */ +template +GenericValue genericValue(const T& v) { + return GenericValue(v); +} + + } /* namespace gtsam */ diff --git a/gtsam/base/Manifold.h b/gtsam/base/Manifold.h index f3653f099..dbe497005 100644 --- a/gtsam/base/Manifold.h +++ b/gtsam/base/Manifold.h @@ -167,62 +167,6 @@ struct FixedDimension { BOOST_STATIC_ASSERT_MSG(value != Eigen::Dynamic, "FixedDimension instantiated for dymanically-sized type."); }; - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -/// Helper class to construct the product manifold of two other manifolds, M1 and M2 -/// Deprecated because of limited usefulness, maximum obfuscation -template -class ProductManifold: public std::pair { - BOOST_CONCEPT_ASSERT((IsManifold)); - BOOST_CONCEPT_ASSERT((IsManifold)); - -protected: - enum { dimension1 = traits::dimension }; - enum { dimension2 = traits::dimension }; - -public: - enum { dimension = dimension1 + dimension2 }; - inline static size_t Dim() { return dimension;} - inline size_t dim() const { return dimension;} - - typedef Eigen::Matrix TangentVector; - typedef OptionalJacobian ChartJacobian; - - /// Default constructor needs default constructors to be defined - ProductManifold():std::pair(M1(),M2()) {} - - // Construct from two original manifold values - ProductManifold(const M1& m1, const M2& m2):std::pair(m1,m2) {} - - /// Retract delta to manifold - ProductManifold retract(const TangentVector& xi) const { - M1 m1 = traits::Retract(this->first, xi.template head()); - M2 m2 = traits::Retract(this->second, xi.template tail()); - return ProductManifold(m1,m2); - } 
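The new genericValue() helper above mainly saves spelling out the template argument; a short sketch (Pose2 is just an illustrative payload):

```cpp
#include <gtsam/base/GenericValue.h>
#include <gtsam/geometry/Pose2.h>

using namespace gtsam;

// Before: the type parameter must be written explicitly.
GenericValue<Pose2> v1(Pose2(1.0, 2.0, 0.3));

// After: T is deduced from the argument.
auto v2 = genericValue(Pose2(1.0, 2.0, 0.3));
```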
- - /// Compute the coordinates in the tangent space - TangentVector localCoordinates(const ProductManifold& other) const { - typename traits::TangentVector v1 = traits::Local(this->first, other.first); - typename traits::TangentVector v2 = traits::Local(this->second, other.second); - TangentVector v; - v << v1, v2; - return v; - } - - // Alignment, see https://eigen.tuxfamily.org/dox/group__TopicStructHavingEigenMembers.html - enum { NeedsToAlign = (sizeof(M1) % 16) == 0 || (sizeof(M2) % 16) == 0 - }; -public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) -}; - -// Define any direct product group to be a model of the multiplicative Group concept -template -struct traits > : internal::Manifold > { -}; -#endif - } // \ namespace gtsam ///** diff --git a/gtsam/base/Matrix.cpp b/gtsam/base/Matrix.cpp index e2d8f71d1..551bdac10 100644 --- a/gtsam/base/Matrix.cpp +++ b/gtsam/base/Matrix.cpp @@ -136,20 +136,24 @@ Vector operator^(const Matrix& A, const Vector & v) { return A.transpose() * v; } +const Eigen::IOFormat& matlabFormat() { + static const Eigen::IOFormat matlab( + Eigen::StreamPrecision, // precision + Eigen::DontAlignCols, // flags set such that rowSpacers are not added + ", ", // coeffSeparator + ";\n", // rowSeparator + "\t", // rowPrefix + "", // rowSuffix + "[\n", // matPrefix + "\n]" // matSuffix + ); + return matlab; +} + /* ************************************************************************* */ //3 argument call void print(const Matrix& A, const string &s, ostream& stream) { - static const Eigen::IOFormat matlab( - Eigen::StreamPrecision, // precision - 0, // flags - ", ", // coeffSeparator - ";\n", // rowSeparator - "\t", // rowPrefix - "", // rowSuffix - "[\n", // matPrefix - "\n]" // matSuffix - ); - cout << s << A.format(matlab) << endl; + cout << s << A.format(matlabFormat()) << endl; } /* ************************************************************************* */ diff --git a/gtsam/base/Matrix.h b/gtsam/base/Matrix.h index fa70e5b00..a3a14c6c3 100644 --- a/gtsam/base/Matrix.h +++ b/gtsam/base/Matrix.h @@ -23,14 +23,11 @@ // \callgraph #pragma once + #include #include #include -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -#include -#include -#include -#endif + #include #include #include @@ -76,6 +73,10 @@ GTSAM_MAKE_MATRIX_DEFS(9); typedef Eigen::Block SubMatrix; typedef Eigen::Block ConstSubMatrix; +// Matrix formatting arguments when printing. +// Akin to Matlab style. 
+const Eigen::IOFormat& matlabFormat(); + /** * equals with a tolerance */ @@ -89,7 +90,7 @@ bool equal_with_abs_tol(const Eigen::DenseBase& A, const Eigen::DenseBas for(size_t i=0; i - void save(Archive & ar, const gtsam::Matrix & m, unsigned int /*version*/) { + template + void save(Archive & ar, + const Eigen::Matrix & m, + const unsigned int /*version*/) { const size_t rows = m.rows(), cols = m.cols(); ar << BOOST_SERIALIZATION_NVP(rows); ar << BOOST_SERIALIZATION_NVP(cols); ar << make_nvp("data", make_array(m.data(), m.size())); } - template - void load(Archive & ar, gtsam::Matrix & m, unsigned int /*version*/) { + template + void load(Archive & ar, + Eigen::Matrix & m, + const unsigned int /*version*/) { size_t rows, cols; ar >> BOOST_SERIALIZATION_NVP(rows); ar >> BOOST_SERIALIZATION_NVP(cols); @@ -566,8 +580,25 @@ namespace boost { ar >> make_nvp("data", make_array(m.data(), m.size())); } - } // namespace serialization -} // namespace boost + // templated version of BOOST_SERIALIZATION_SPLIT_FREE(Eigen::Matrix); + template + void serialize(Archive & ar, + Eigen::Matrix & m, + const unsigned int version) { + split_free(ar, m, version); + } -BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Matrix); + // specialized to Matrix for MATLAB wrapper + template + void serialize(Archive& ar, gtsam::Matrix& m, const unsigned int version) { + split_free(ar, m, version); + } + } // namespace serialization +} // namespace boost diff --git a/gtsam/base/OptionalJacobian.h b/gtsam/base/OptionalJacobian.h index 602c5915e..4b580f82e 100644 --- a/gtsam/base/OptionalJacobian.h +++ b/gtsam/base/OptionalJacobian.h @@ -112,7 +112,7 @@ class OptionalJacobian { // template // OptionalJacobian(Eigen::Block block) : map_(nullptr) { ?? } - /// Return true is allocated, false if default constructor was used + /// Return true if allocated, false if default constructor was used operator bool() const { return map_.data() != nullptr; } @@ -197,7 +197,7 @@ class OptionalJacobian { #endif - /// Return true is allocated, false if default constructor was used + /// Return true if allocated, false if default constructor was used operator bool() const { return pointer_!=nullptr; } diff --git a/gtsam/base/SymmetricBlockMatrix.h b/gtsam/base/SymmetricBlockMatrix.h index 1ec9a5ad3..4e030606d 100644 --- a/gtsam/base/SymmetricBlockMatrix.h +++ b/gtsam/base/SymmetricBlockMatrix.h @@ -76,7 +76,7 @@ namespace gtsam { blockStart_(0) { fillOffsets(dimensions.begin(), dimensions.end(), appendOneDimension); - matrix_.setZero(variableColOffsets_.back(), variableColOffsets_.back()); + matrix_.resize(variableColOffsets_.back(), variableColOffsets_.back()); assertInvariants(); } @@ -86,7 +86,7 @@ namespace gtsam { blockStart_(0) { fillOffsets(firstBlockDim, lastBlockDim, appendOneDimension); - matrix_.setZero(variableColOffsets_.back(), variableColOffsets_.back()); + matrix_.resize(variableColOffsets_.back(), variableColOffsets_.back()); assertInvariants(); } @@ -95,7 +95,7 @@ namespace gtsam { SymmetricBlockMatrix(const CONTAINER& dimensions, const Matrix& matrix, bool appendOneDimension = false) : blockStart_(0) { - matrix_.setZero(matrix.rows(), matrix.cols()); + matrix_.resize(matrix.rows(), matrix.cols()); matrix_.triangularView() = matrix.triangularView(); fillOffsets(dimensions.begin(), dimensions.end(), appendOneDimension); if(matrix_.rows() != matrix_.cols()) @@ -416,4 +416,3 @@ namespace gtsam { class CholeskyFailed; } - diff --git a/gtsam/base/TestableAssertions.h b/gtsam/base/TestableAssertions.h index 1a31aa284..88dd619e5 100644 --- 
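A small sketch of the matlabFormat() helper factored out above; any Eigen expression can reuse it instead of re-declaring a local IOFormat, and the output shape follows the separators defined in Matrix.cpp.

```cpp
#include <gtsam/base/Matrix.h>
#include <iostream>

void printMatlabStyle() {
  gtsam::Matrix A = (gtsam::Matrix(2, 2) << 1, 2, 3, 4).finished();
  std::cout << A.format(gtsam::matlabFormat()) << std::endl;
  // [
  //     1, 2;
  //     3, 4
  // ]
}
```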
a/gtsam/base/TestableAssertions.h +++ b/gtsam/base/TestableAssertions.h @@ -23,6 +23,7 @@ #include #include #include +#include #include namespace gtsam { @@ -349,4 +350,44 @@ bool assert_inequal(const V& expected, const V& actual, double tol = 1e-9) { return false; } +/** + * Capture std out via cout stream and compare against string. + */ +template +bool assert_stdout_equal(const std::string& expected, const V& actual) { + // Redirect output to buffer so we can compare + std::stringstream buffer; + // Save the original output stream so we can reset later + std::streambuf* old = std::cout.rdbuf(buffer.rdbuf()); + + // We test against actual std::cout for faithful reproduction + std::cout << actual; + + // Get output string and reset stdout + std::string actual_ = buffer.str(); + std::cout.rdbuf(old); + + return assert_equal(expected, actual_); +} + +/** + * Capture print function output and compare against string. + */ +template +bool assert_print_equal(const std::string& expected, const V& actual) { + // Redirect output to buffer so we can compare + std::stringstream buffer; + // Save the original output stream so we can reset later + std::streambuf* old = std::cout.rdbuf(buffer.rdbuf()); + + // We test against actual std::cout for faithful reproduction + actual.print(); + + // Get output string and reset stdout + std::string actual_ = buffer.str(); + std::cout.rdbuf(old); + + return assert_equal(expected, actual_); +} + } // \namespace gtsam diff --git a/gtsam/base/ThreadsafeException.h b/gtsam/base/ThreadsafeException.h index 0f7c6131d..744759f5b 100644 --- a/gtsam/base/ThreadsafeException.h +++ b/gtsam/base/ThreadsafeException.h @@ -71,12 +71,12 @@ public std::exception String(description.begin(), description.end())) { } - /// Default destructor doesn't have the throw() - virtual ~ThreadsafeException() throw () { + /// Default destructor doesn't have the noexcept + virtual ~ThreadsafeException() noexcept { } public: - virtual const char* what() const throw () { + const char* what() const noexcept override { return description_ ? description_->c_str() : ""; } }; @@ -113,8 +113,8 @@ class InvalidArgumentThreadsafe: public ThreadsafeException< class CholeskyFailed : public gtsam::ThreadsafeException { public: - CholeskyFailed() throw() {} - virtual ~CholeskyFailed() throw() {} + CholeskyFailed() noexcept {} + virtual ~CholeskyFailed() noexcept {} }; } // namespace gtsam diff --git a/gtsam/base/Vector.cpp b/gtsam/base/Vector.cpp index beec030ba..658ab9a0d 100644 --- a/gtsam/base/Vector.cpp +++ b/gtsam/base/Vector.cpp @@ -39,7 +39,7 @@ namespace gtsam { * 1. https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ * 2. https://floating-point-gui.de/errors/comparison/ * ************************************************************************* */ -bool fpEqual(double a, double b, double tol) { +bool fpEqual(double a, double b, double tol, bool check_relative_also) { using std::abs; using std::isnan; using std::isinf; @@ -48,7 +48,7 @@ bool fpEqual(double a, double b, double tol) { double larger = (abs(b) > abs(a)) ? 
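The two assertion helpers added to TestableAssertions.h above redirect std::cout into a buffer and compare the captured text against an expected string. A hedged sketch follows; Greeter and its output are made up.

```cpp
#include <gtsam/base/TestableAssertions.h>
#include <iostream>
#include <string>

// Hypothetical type that both streams and prints the same text.
struct Greeter {
  void print(const std::string& prefix = "") const { std::cout << prefix << "hello"; }
};
std::ostream& operator<<(std::ostream& os, const Greeter&) { return os << "hello"; }

// Inside a CppUnitLite TEST body one could then write:
//   EXPECT(gtsam::assert_stdout_equal("hello", Greeter()));  // captures `std::cout << actual`
//   EXPECT(gtsam::assert_print_equal("hello", Greeter()));   // captures `actual.print()`
```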
abs(b) : abs(a); // handle NaNs - if(std::isnan(a) || isnan(b)) { + if(isnan(a) || isnan(b)) { return isnan(a) && isnan(b); } // handle inf @@ -60,13 +60,15 @@ bool fpEqual(double a, double b, double tol) { else if(a == 0 || b == 0 || (abs(a) + abs(b)) < DOUBLE_MIN_NORMAL) { return abs(a-b) <= tol * DOUBLE_MIN_NORMAL; } - // Check if the numbers are really close - // Needed when comparing numbers near zero or tol is in vicinity - else if(abs(a-b) <= tol) { + // Check if the numbers are really close. + // Needed when comparing numbers near zero or tol is in vicinity. + else if (abs(a - b) <= tol) { return true; } - // Use relative error - else if(abs(a-b) <= tol * min(larger, std::numeric_limits::max())) { + // Check for relative error + else if (abs(a - b) <= + tol * min(larger, std::numeric_limits::max()) && + check_relative_also) { return true; } diff --git a/gtsam/base/Vector.h b/gtsam/base/Vector.h index 81be36c0a..ed90a7126 100644 --- a/gtsam/base/Vector.h +++ b/gtsam/base/Vector.h @@ -85,9 +85,15 @@ static_assert( * respectively for the comparison to be true. * If one is NaN/Inf and the other is not, returns false. * + * @param check_relative_also is a flag which toggles additional checking for + * relative error. This means that if either the absolute error or the relative + * error is within the tolerance, the result will be true. + * By default, the flag is true. + * * Return true if two numbers are close wrt tol. */ -GTSAM_EXPORT bool fpEqual(double a, double b, double tol); +GTSAM_EXPORT bool fpEqual(double a, double b, double tol, + bool check_relative_also = true); /** * print without optional string, must specify cout yourself @@ -256,26 +262,6 @@ GTSAM_EXPORT Vector concatVectors(const std::list& vs); * concatenate Vectors */ GTSAM_EXPORT Vector concatVectors(size_t nrVectors, ...); - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -inline Vector abs(const Vector& v){return v.cwiseAbs();} -inline Vector basis(size_t n, size_t i) { return Vector::Unit(n,i); } -inline Vector delta(size_t n, size_t i, double value){ return Vector::Unit(n, i) * value;} -inline size_t dim(const Vector& v) { return v.size(); } -inline Vector ediv(const Vector &a, const Vector &b) {assert (b.size()==a.size()); return a.cwiseQuotient(b);} -inline Vector esqrt(const Vector& v) { return v.cwiseSqrt();} -inline Vector emul(const Vector &a, const Vector &b) {assert (b.size()==a.size()); return a.cwiseProduct(b);} -inline double max(const Vector &a){return a.maxCoeff();} -inline double norm_2(const Vector& v) {return v.norm();} -inline Vector ones(size_t n) { return Vector::Ones(n); } -inline Vector reciprocal(const Vector &a) {return a.array().inverse();} -inline Vector repeat(size_t n, double value) {return Vector::Constant(n, value);} -inline const Vector sub(const Vector &v, size_t i1, size_t i2) {return v.segment(i1,i2-i1);} -inline void subInsert(Vector& fullVector, const Vector& subVector, size_t i) {fullVector.segment(i, subVector.size()) = subVector;} -inline double sum(const Vector &a){return a.sum();} -inline bool zero(const Vector& v){ return v.isZero(); } -inline Vector zero(size_t n) { return Vector::Zero(n); } -#endif } // namespace gtsam #include diff --git a/gtsam/base/make_shared.h b/gtsam/base/make_shared.h new file mode 100644 index 000000000..5281eec6d --- /dev/null +++ b/gtsam/base/make_shared.h @@ -0,0 +1,67 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * 
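The check_relative_also flag introduced for fpEqual mirrors the AbsoluteError unit test added later in this patch: with a = 2000, b = 1997 and tol = 0.1, the absolute difference (3) fails but the relative difference (3/2000) passes.

```cpp
#include <gtsam/base/Vector.h>

// Absolute-only comparison: |a - b| = 3 > 0.1, so this is false.
bool absOnly = gtsam::fpEqual(2000.0, 1997.0, 1e-1, false);

// Default (absolute OR relative): 3 <= 0.1 * 2000, so this is true.
bool absOrRelative = gtsam::fpEqual(2000.0, 1997.0, 1e-1);
```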
All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file make_shared.h + * @brief make_shared trampoline function to ensure proper alignment + * @author Fan Jiang + */ + +#pragma once + +#include + +#include + +#include + +#include + +namespace gtsam { + /// An shorthand alias for accessing the ::type inside std::enable_if that can be used in a template directly + template + using enable_if_t = typename std::enable_if::type; +} + +namespace gtsam { + + /** + * Add our own `make_shared` as a layer of wrapping on `boost::make_shared` + * This solves the problem with the stock `make_shared` that custom alignment is not respected, causing SEGFAULTs + * at runtime, which is notoriously hard to debug. + * + * Explanation + * =============== + * The template `needs_eigen_aligned_allocator::value` will evaluate to `std::true_type` if the type alias + * `_eigen_aligned_allocator_trait = void` is present in a class, which is automatically added by the + * `GTSAM_MAKE_ALIGNED_OPERATOR_NEW` macro. + * + * This function declaration will only be taken when the above condition is true, so if some object does not need to + * be aligned, `gtsam::make_shared` will fall back to the next definition, which is a simple wrapper for + * `boost::make_shared`. + * + * @tparam T The type of object being constructed + * @tparam Args Type of the arguments of the constructor + * @param args Arguments of the constructor + * @return The object created as a boost::shared_ptr + */ + template + gtsam::enable_if_t::value, boost::shared_ptr> make_shared(Args &&... args) { + return boost::allocate_shared(Eigen::aligned_allocator(), std::forward(args)...); + } + + /// Fall back to the boost version if no need for alignment + template + gtsam::enable_if_t::value, boost::shared_ptr> make_shared(Args &&... 
args) { + return boost::make_shared(std::forward(args)...); + } + +} diff --git a/gtsam/base/numericalDerivative.h b/gtsam/base/numericalDerivative.h index 831bb6bcc..fc5531cdc 100644 --- a/gtsam/base/numericalDerivative.h +++ b/gtsam/base/numericalDerivative.h @@ -109,7 +109,7 @@ typename Eigen::Matrix numericalGradient( * @param delta increment for numerical derivative * Class Y is the output argument * Class X is the input argument - * int N is the dimension of the X input value if variable dimension type but known at test time + * @tparam int N is the dimension of the X input value if variable dimension type but known at test time * @return m*n Jacobian computed via central differencing */ @@ -167,15 +167,16 @@ typename internal::FixedSizeMatrix::type numericalDerivative11(Y (*h)(const * @param x2 second argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X1 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative21(const boost::function& h, const X1& x1, const X2& x2, double delta = 1e-5) { BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); - return numericalDerivative11(boost::bind(h, _1, boost::cref(x2)), x1, delta); + return numericalDerivative11(boost::bind(h, _1, boost::cref(x2)), x1, delta); } /** use a raw C++ function pointer */ @@ -192,15 +193,16 @@ typename internal::FixedSizeMatrix::type numericalDerivative21(Y (*h)(cons * @param x2 n-dimensional second argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X2 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative22(boost::function h, const X1& x1, const X2& x2, double delta = 1e-5) { // BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), // "Template argument X1 must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X2 must be a manifold type."); - return numericalDerivative11(boost::bind(h, boost::cref(x1), _1), x2, delta); + return numericalDerivative11(boost::bind(h, boost::cref(x1), _1), x2, delta); } /** use a raw C++ function pointer */ @@ -219,8 +221,9 @@ typename internal::FixedSizeMatrix::type numericalDerivative22(Y (*h)(cons * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing * All classes Y,X1,X2,X3 need dim, expmap, logmap + * @tparam int N is the dimension of the X1 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative31( boost::function h, const X1& x1, const X2& x2, const X3& x3, double delta = 1e-5) { @@ -228,7 +231,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative31( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); - return numericalDerivative11(boost::bind(h, _1, boost::cref(x2), boost::cref(x3)), x1, delta); + 
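A sketch of how the gtsam::make_shared dispatch above behaves. AlignedThing and PlainThing are hypothetical, and the opt-in alias is normally added by the GTSAM_MAKE_ALIGNED_OPERATOR_NEW macro rather than written by hand.

```cpp
#include <gtsam/base/make_shared.h>
#include <Eigen/Core>

// Opted in: fixed-size Eigen member plus the trait alias the dispatch keys on.
struct AlignedThing {
  Eigen::Matrix4d M;
  using _eigen_aligned_allocator_trait = void;  // normally provided by the macro
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};

// Not opted in: no alias, no special alignment needs.
struct PlainThing { int x = 0; };

void demo() {
  auto a = gtsam::make_shared<AlignedThing>();  // boost::allocate_shared + Eigen::aligned_allocator
  auto p = gtsam::make_shared<PlainThing>();    // falls through to boost::make_shared
}
```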
return numericalDerivative11(boost::bind(h, _1, boost::cref(x2), boost::cref(x3)), x1, delta); } template @@ -247,8 +250,9 @@ typename internal::FixedSizeMatrix::type numericalDerivative31(Y (*h)(cons * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing * All classes Y,X1,X2,X3 need dim, expmap, logmap + * @tparam int N is the dimension of the X2 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative32( boost::function h, const X1& x1, const X2& x2, const X3& x3, double delta = 1e-5) { @@ -256,7 +260,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative32( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X2 must be a manifold type."); - return numericalDerivative11(boost::bind(h, boost::cref(x1), _1, boost::cref(x3)), x2, delta); + return numericalDerivative11(boost::bind(h, boost::cref(x1), _1, boost::cref(x3)), x2, delta); } template @@ -275,8 +279,9 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative32(Y (* * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing * All classes Y,X1,X2,X3 need dim, expmap, logmap + * @tparam int N is the dimension of the X3 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative33( boost::function h, const X1& x1, const X2& x2, const X3& x3, double delta = 1e-5) { @@ -284,7 +289,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative33( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X3 must be a manifold type."); - return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), _1), x3, delta); + return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), _1), x3, delta); } template @@ -303,8 +308,9 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative33(Y (* * @param x4 fourth argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X1 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative41( boost::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, double delta = 1e-5) { @@ -312,7 +318,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative41( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); - return numericalDerivative11(boost::bind(h, _1, boost::cref(x2), boost::cref(x3), boost::cref(x4)), x1, delta); + return numericalDerivative11(boost::bind(h, _1, boost::cref(x2), boost::cref(x3), boost::cref(x4)), x1, delta); } template @@ -330,8 +336,9 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative41(Y (* * @param x4 fourth argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X2 input value if variable dimension type but known at test time */ -template +template::dimension> typename 
internal::FixedSizeMatrix::type numericalDerivative42( boost::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, double delta = 1e-5) { @@ -339,7 +346,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative42( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X2 must be a manifold type."); - return numericalDerivative11(boost::bind(h, boost::cref(x1), _1, boost::cref(x3), boost::cref(x4)), x2, delta); + return numericalDerivative11(boost::bind(h, boost::cref(x1), _1, boost::cref(x3), boost::cref(x4)), x2, delta); } template @@ -357,8 +364,9 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative42(Y (* * @param x4 fourth argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X3 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative43( boost::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, double delta = 1e-5) { @@ -366,7 +374,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative43( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X3 must be a manifold type."); - return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), _1, boost::cref(x4)), x3, delta); + return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), _1, boost::cref(x4)), x3, delta); } template @@ -384,8 +392,9 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative43(Y (* * @param x4 n-dimensional fourth argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X4 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative44( boost::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, double delta = 1e-5) { @@ -393,7 +402,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative44( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X4 must be a manifold type."); - return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), boost::cref(x3), _1), x4, delta); + return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), boost::cref(x3), _1), x4, delta); } template @@ -412,8 +421,9 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative44(Y (* * @param x5 fifth argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X1 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative51( boost::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, double delta = 1e-5) { @@ -421,7 +431,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative51( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); - return 
numericalDerivative11(boost::bind(h, _1, boost::cref(x2), boost::cref(x3), boost::cref(x4), boost::cref(x5)), x1, delta); + return numericalDerivative11(boost::bind(h, _1, boost::cref(x2), boost::cref(x3), boost::cref(x4), boost::cref(x5)), x1, delta); } template @@ -440,8 +450,9 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative51(Y (* * @param x5 fifth argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X2 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative52( boost::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, double delta = 1e-5) { @@ -449,7 +460,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative52( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); - return numericalDerivative11(boost::bind(h, boost::cref(x1), _1, boost::cref(x3), boost::cref(x4), boost::cref(x5)), x2, delta); + return numericalDerivative11(boost::bind(h, boost::cref(x1), _1, boost::cref(x3), boost::cref(x4), boost::cref(x5)), x2, delta); } template @@ -468,8 +479,9 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative52(Y (* * @param x5 fifth argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X3 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative53( boost::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, double delta = 1e-5) { @@ -477,7 +489,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative53( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); - return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), _1, boost::cref(x4), boost::cref(x5)), x3, delta); + return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), _1, boost::cref(x4), boost::cref(x5)), x3, delta); } template @@ -496,8 +508,9 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative53(Y (* * @param x5 fifth argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X4 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative54( boost::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, double delta = 1e-5) { @@ -505,7 +518,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative54( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); - return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), boost::cref(x3), _1, boost::cref(x5)), x4, delta); + return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), boost::cref(x3), _1, boost::cref(x5)), x4, delta); } template @@ -524,8 +537,9 @@ inline typename 
internal::FixedSizeMatrix::type numericalDerivative54(Y (* * @param x5 fifth argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X5 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative55( boost::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, double delta = 1e-5) { @@ -533,7 +547,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative55( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); - return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), boost::cref(x3), boost::cref(x4), _1), x5, delta); + return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), boost::cref(x3), boost::cref(x4), _1), x5, delta); } template @@ -553,8 +567,9 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative55(Y (* * @param x6 sixth argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X1 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative61( boost::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) { @@ -562,7 +577,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative61( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); - return numericalDerivative11(boost::bind(h, _1, boost::cref(x2), boost::cref(x3), boost::cref(x4), boost::cref(x5), boost::cref(x6)), x1, delta); + return numericalDerivative11(boost::bind(h, _1, boost::cref(x2), boost::cref(x3), boost::cref(x4), boost::cref(x5), boost::cref(x6)), x1, delta); } template @@ -582,8 +597,9 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative61(Y (* * @param x6 sixth argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X2 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative62( boost::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) { @@ -591,7 +607,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative62( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); - return numericalDerivative11(boost::bind(h, boost::cref(x1), _1, boost::cref(x3), boost::cref(x4), boost::cref(x5), boost::cref(x6)), x2, delta); + return numericalDerivative11(boost::bind(h, boost::cref(x1), _1, boost::cref(x3), boost::cref(x4), boost::cref(x5), boost::cref(x6)), x2, delta); } template @@ -608,11 +624,12 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative62(Y (* * @param x3 third argument value * @param x4 fourth argument value * @param x5 fifth argument value - * @param x6 sixth argument value + * @param x6 
sixth argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X3 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative63( boost::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) { @@ -620,7 +637,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative63( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); - return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), _1, boost::cref(x4), boost::cref(x5), boost::cref(x6)), x3, delta); + return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), _1, boost::cref(x4), boost::cref(x5), boost::cref(x6)), x3, delta); } template @@ -640,8 +657,9 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative63(Y (* * @param x6 sixth argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X4 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative64( boost::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) { @@ -649,7 +667,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative64( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); - return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), boost::cref(x3), _1, boost::cref(x5), boost::cref(x6)), x4, delta); + return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), boost::cref(x3), _1, boost::cref(x5), boost::cref(x6)), x4, delta); } template @@ -669,8 +687,9 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative64(Y (* * @param x6 sixth argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the dimension of the X5 input value if variable dimension type but known at test time */ -template +template::dimension> typename internal::FixedSizeMatrix::type numericalDerivative65( boost::function h, const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) { @@ -678,7 +697,7 @@ typename internal::FixedSizeMatrix::type numericalDerivative65( "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); - return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), boost::cref(x3), boost::cref(x4), _1, boost::cref(x6)), x5, delta); + return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), boost::cref(x3), boost::cref(x4), _1, boost::cref(x6)), x5, delta); } template @@ -698,16 +717,18 @@ inline typename internal::FixedSizeMatrix::type numericalDerivative65(Y (* * @param x6 sixth argument value * @param delta increment for numerical derivative * @return m*n Jacobian computed via central differencing + * @tparam int N is the 
dimension of the X6 input value if variable dimension type but known at test time */ -template -typename internal::FixedSizeMatrix::type numericalDerivative66( - boost::function h, const X1& x1, - const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) { +template::dimension> +typename internal::FixedSizeMatrix::type numericalDerivative66( + boost::function h, + const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, + double delta = 1e-5) { BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument Y must be a manifold type."); BOOST_STATIC_ASSERT_MSG( (boost::is_base_of::structure_category>::value), "Template argument X1 must be a manifold type."); - return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), boost::cref(x3), boost::cref(x4), boost::cref(x5), _1), x6, delta); + return numericalDerivative11(boost::bind(h, boost::cref(x1), boost::cref(x2), boost::cref(x3), boost::cref(x4), boost::cref(x5), _1), x6, delta); } template diff --git a/gtsam/base/serialization.h b/gtsam/base/serialization.h index e475465de..f589ecc5e 100644 --- a/gtsam/base/serialization.h +++ b/gtsam/base/serialization.h @@ -42,123 +42,218 @@ namespace gtsam { -// Serialization directly to strings in compressed format -template -std::string serialize(const T& input) { - std::ostringstream out_archive_stream; +/** @name Standard serialization + * Serialization in default compressed format + */ +///@{ +/// serializes to a stream +template +void serializeToStream(const T& input, std::ostream& out_archive_stream) { boost::archive::text_oarchive out_archive(out_archive_stream); out_archive << input; - return out_archive_stream.str(); } -template -void deserialize(const std::string& serialized, T& output) { - std::istringstream in_archive_stream(serialized); +/// deserializes from a stream +template +void deserializeFromStream(std::istream& in_archive_stream, T& output) { boost::archive::text_iarchive in_archive(in_archive_stream); in_archive >> output; } -template +/// serializes to a string +template +std::string serializeToString(const T& input) { + std::ostringstream out_archive_stream; + serializeToStream(input, out_archive_stream); + return out_archive_stream.str(); +} + +/// deserializes from a string +template +void deserializeFromString(const std::string& serialized, T& output) { + std::istringstream in_archive_stream(serialized); + deserializeFromStream(in_archive_stream, output); +} + +/// serializes to a file +template bool serializeToFile(const T& input, const std::string& filename) { std::ofstream out_archive_stream(filename.c_str()); - if (!out_archive_stream.is_open()) - return false; - boost::archive::text_oarchive out_archive(out_archive_stream); - out_archive << input; + if (!out_archive_stream.is_open()) return false; + serializeToStream(input, out_archive_stream); out_archive_stream.close(); return true; } -template +/// deserializes from a file +template bool deserializeFromFile(const std::string& filename, T& output) { std::ifstream in_archive_stream(filename.c_str()); - if (!in_archive_stream.is_open()) - return false; - boost::archive::text_iarchive in_archive(in_archive_stream); - in_archive >> output; + if (!in_archive_stream.is_open()) return false; + deserializeFromStream(in_archive_stream, output); in_archive_stream.close(); return true; } -// Serialization to XML format with named structures -template -std::string serializeXML(const T& input, const std::string& name="data") 
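The recurring change to the numericalDerivativeXY helpers above adds a trailing `int N = traits<X>::dimension` template parameter so that dynamically-sized arguments can state their dimension at the call site. A sketch under those assumptions (the function h is made up; gtsam's traits treat the dynamic Vector as a vector-space manifold):

```cpp
#include <gtsam/base/numericalDerivative.h>

using namespace gtsam;

// Hypothetical function of a dynamically-sized Vector and a scalar.
double h(const Vector& x, const double& s) { return s * x.squaredNorm(); }

void demo() {
  Vector x = (Vector(3) << 1.0, 2.0, 3.0).finished();
  double s = 0.5;

  // X1 = Vector is dynamically sized, so pass its dimension (3) as the last template argument.
  Matrix H1 = numericalDerivative21<double, Vector, double, 3>(h, x, s);  // 1 x 3 at runtime

  // X2 = double is fixed-size; the default N = traits<double>::dimension = 1 applies.
  Matrix H2 = numericalDerivative22<double, Vector, double>(h, x, s);     // 1 x 1
}
```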
{ +/// serializes to a string +template +std::string serialize(const T& input) { + return serializeToString(input); +} + +/// deserializes from a string +template +void deserialize(const std::string& serialized, T& output) { + deserializeFromString(serialized, output); +} +///@} + +/** @name XML Serialization + * Serialization to XML format with named structures + */ +///@{ +/// serializes to a stream in XML +template +void serializeToXMLStream(const T& input, std::ostream& out_archive_stream, + const std::string& name = "data") { + boost::archive::xml_oarchive out_archive(out_archive_stream); + out_archive << boost::serialization::make_nvp(name.c_str(), input); +} + +/// deserializes from a stream in XML +template +void deserializeFromXMLStream(std::istream& in_archive_stream, T& output, + const std::string& name = "data") { + boost::archive::xml_iarchive in_archive(in_archive_stream); + in_archive >> boost::serialization::make_nvp(name.c_str(), output); +} + +/// serializes to a string in XML +template +std::string serializeToXMLString(const T& input, + const std::string& name = "data") { std::ostringstream out_archive_stream; - // braces to flush out_archive as it goes out of scope before taking str() - // fixes crash with boost 1.66-1.68 - // see https://github.com/boostorg/serialization/issues/82 - { - boost::archive::xml_oarchive out_archive(out_archive_stream); - out_archive << boost::serialization::make_nvp(name.c_str(), input); - } + serializeToXMLStream(input, out_archive_stream, name); return out_archive_stream.str(); } -template -void deserializeXML(const std::string& serialized, T& output, const std::string& name="data") { +/// deserializes from a string in XML +template +void deserializeFromXMLString(const std::string& serialized, T& output, + const std::string& name = "data") { std::istringstream in_archive_stream(serialized); - boost::archive::xml_iarchive in_archive(in_archive_stream); - in_archive >> boost::serialization::make_nvp(name.c_str(), output); + deserializeFromXMLStream(in_archive_stream, output, name); } -template -bool serializeToXMLFile(const T& input, const std::string& filename, const std::string& name="data") { +/// serializes to an XML file +template +bool serializeToXMLFile(const T& input, const std::string& filename, + const std::string& name = "data") { std::ofstream out_archive_stream(filename.c_str()); - if (!out_archive_stream.is_open()) - return false; - boost::archive::xml_oarchive out_archive(out_archive_stream); - out_archive << boost::serialization::make_nvp(name.c_str(), input);; + if (!out_archive_stream.is_open()) return false; + serializeToXMLStream(input, out_archive_stream, name); out_archive_stream.close(); return true; } -template -bool deserializeFromXMLFile(const std::string& filename, T& output, const std::string& name="data") { +/// deserializes from an XML file +template +bool deserializeFromXMLFile(const std::string& filename, T& output, + const std::string& name = "data") { std::ifstream in_archive_stream(filename.c_str()); - if (!in_archive_stream.is_open()) - return false; - boost::archive::xml_iarchive in_archive(in_archive_stream); - in_archive >> boost::serialization::make_nvp(name.c_str(), output); + if (!in_archive_stream.is_open()) return false; + deserializeFromXMLStream(in_archive_stream, output, name); in_archive_stream.close(); return true; } -// Serialization to binary format with named structures -template -std::string serializeBinary(const T& input, const std::string& name="data") { - std::ostringstream 
out_archive_stream; +/// serializes to a string in XML +template +std::string serializeXML(const T& input, + const std::string& name = "data") { + return serializeToXMLString(input, name); +} + +/// deserializes from a string in XML +template +void deserializeXML(const std::string& serialized, T& output, + const std::string& name = "data") { + deserializeFromXMLString(serialized, output, name); +} +///@} + +/** @name Binary Serialization + * Serialization to binary format with named structures + */ +///@{ +/// serializes to a stream in binary +template +void serializeToBinaryStream(const T& input, std::ostream& out_archive_stream, + const std::string& name = "data") { boost::archive::binary_oarchive out_archive(out_archive_stream); out_archive << boost::serialization::make_nvp(name.c_str(), input); - return out_archive_stream.str(); } -template -void deserializeBinary(const std::string& serialized, T& output, const std::string& name="data") { - std::istringstream in_archive_stream(serialized); +/// deserializes from a stream in binary +template +void deserializeFromBinaryStream(std::istream& in_archive_stream, T& output, + const std::string& name = "data") { boost::archive::binary_iarchive in_archive(in_archive_stream); in_archive >> boost::serialization::make_nvp(name.c_str(), output); } -template -bool serializeToBinaryFile(const T& input, const std::string& filename, const std::string& name="data") { +/// serializes to a string in binary +template +std::string serializeToBinaryString(const T& input, + const std::string& name = "data") { + std::ostringstream out_archive_stream; + serializeToBinaryStream(input, out_archive_stream, name); + return out_archive_stream.str(); +} + +/// deserializes from a string in binary +template +void deserializeFromBinaryString(const std::string& serialized, T& output, + const std::string& name = "data") { + std::istringstream in_archive_stream(serialized); + deserializeFromBinaryStream(in_archive_stream, output, name); +} + +/// serializes to a binary file +template +bool serializeToBinaryFile(const T& input, const std::string& filename, + const std::string& name = "data") { std::ofstream out_archive_stream(filename.c_str()); - if (!out_archive_stream.is_open()) - return false; - boost::archive::binary_oarchive out_archive(out_archive_stream); - out_archive << boost::serialization::make_nvp(name.c_str(), input); + if (!out_archive_stream.is_open()) return false; + serializeToBinaryStream(input, out_archive_stream, name); out_archive_stream.close(); return true; } -template -bool deserializeFromBinaryFile(const std::string& filename, T& output, const std::string& name="data") { +/// deserializes from a binary file +template +bool deserializeFromBinaryFile(const std::string& filename, T& output, + const std::string& name = "data") { std::ifstream in_archive_stream(filename.c_str()); - if (!in_archive_stream.is_open()) - return false; - boost::archive::binary_iarchive in_archive(in_archive_stream); - in_archive >> boost::serialization::make_nvp(name.c_str(), output); + if (!in_archive_stream.is_open()) return false; + deserializeFromBinaryStream(in_archive_stream, output, name); in_archive_stream.close(); return true; } -} // \namespace gtsam +/// serializes to a string in binary +template +std::string serializeBinary(const T& input, + const std::string& name = "data") { + return serializeToBinaryString(input, name); +} + +/// deserializes from a string in binary +template +void deserializeBinary(const std::string& serialized, T& output, + const 
std::string& name = "data") { + deserializeFromBinaryString(serialized, output, name); +} +///@} + +} // namespace gtsam diff --git a/gtsam/base/serializationTestHelpers.h b/gtsam/base/serializationTestHelpers.h index 06b3462e9..5994a5e51 100644 --- a/gtsam/base/serializationTestHelpers.h +++ b/gtsam/base/serializationTestHelpers.h @@ -26,6 +26,7 @@ #include #include +#include // whether to print the serialized text to stdout @@ -40,22 +41,37 @@ T create() { return T(); } +// Creates or empties a folder in the build folder and returns the relative path +boost::filesystem::path resetFilesystem( + boost::filesystem::path folder = "actual") { + boost::filesystem::remove_all(folder); + boost::filesystem::create_directory(folder); + return folder; +} + // Templated round-trip serialization template void roundtrip(const T& input, T& output) { - // Serialize std::string serialized = serialize(input); if (verbose) std::cout << serialized << std::endl << std::endl; - deserialize(serialized, output); } -// This version requires equality operator +// Templated round-trip serialization using a file +template +void roundtripFile(const T& input, T& output) { + boost::filesystem::path path = resetFilesystem()/"graph.dat"; + serializeToFile(input, path.string()); + deserializeFromFile(path.string(), output); +} + +// This version requires equality operator and uses string and file round-trips template bool equality(const T& input = T()) { - T output = create(); + T output = create(), outputf = create(); roundtrip(input,output); - return input==output; + roundtripFile(input,outputf); + return (input==output) && (input==outputf); } // This version requires Testable @@ -77,20 +93,26 @@ bool equalsDereferenced(const T& input) { // Templated round-trip serialization using XML template void roundtripXML(const T& input, T& output) { - // Serialize std::string serialized = serializeXML(input); if (verbose) std::cout << serialized << std::endl << std::endl; - - // De-serialize deserializeXML(serialized, output); } +// Templated round-trip serialization using XML File +template +void roundtripXMLFile(const T& input, T& output) { + boost::filesystem::path path = resetFilesystem()/"graph.xml"; + serializeToXMLFile(input, path.string()); + deserializeFromXMLFile(path.string(), output); +} + // This version requires equality operator template bool equalityXML(const T& input = T()) { - T output = create(); + T output = create(), outputf = create(); roundtripXML(input,output); - return input==output; + roundtripXMLFile(input,outputf); + return (input==output) && (input==outputf); } // This version requires Testable @@ -112,20 +134,26 @@ bool equalsDereferencedXML(const T& input = T()) { // Templated round-trip serialization using XML template void roundtripBinary(const T& input, T& output) { - // Serialize std::string serialized = serializeBinary(input); if (verbose) std::cout << serialized << std::endl << std::endl; - - // De-serialize deserializeBinary(serialized, output); } +// Templated round-trip serialization using Binary file +template +void roundtripBinaryFile(const T& input, T& output) { + boost::filesystem::path path = resetFilesystem()/"graph.bin"; + serializeToBinaryFile(input, path.string()); + deserializeFromBinaryFile(path.string(), output); +} + // This version requires equality operator template bool equalityBinary(const T& input = T()) { - T output = create(); + T output = create(), outputf = create(); roundtripBinary(input,output); - return input==output; + roundtripBinaryFile(input,outputf); + return 
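With the renamed serialization helpers above, a round trip in each archive flavor looks like this (Pose2 is just an illustrative, boost-serializable payload; file names are made up):

```cpp
#include <gtsam/base/serialization.h>
#include <gtsam/geometry/Pose2.h>
#include <string>

void demo() {
  gtsam::Pose2 input(1.0, 2.0, 0.3), output;

  // Default compressed text archive, string round trip.
  std::string txt = gtsam::serializeToString(input);
  gtsam::deserializeFromString(txt, output);

  // XML archive, file round trip with a named top-level element.
  gtsam::serializeToXMLFile(input, "pose.xml", "pose");
  gtsam::deserializeFromXMLFile("pose.xml", output, "pose");

  // The old entry points (serialize, deserializeXML, serializeBinary, ...) remain as thin wrappers.
}
```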
(input==output) && (input==outputf); } // This version requires Testable diff --git a/gtsam/base/tests/testMatrix.cpp b/gtsam/base/tests/testMatrix.cpp index 468e842f2..a7c218705 100644 --- a/gtsam/base/tests/testMatrix.cpp +++ b/gtsam/base/tests/testMatrix.cpp @@ -1163,6 +1163,19 @@ TEST(Matrix , IsVectorSpace) { BOOST_CONCEPT_ASSERT((IsVectorSpace)); } +TEST(Matrix, AbsoluteError) { + double a = 2000, b = 1997, tol = 1e-1; + bool isEqual; + + // Test only absolute error + isEqual = fpEqual(a, b, tol, false); + EXPECT(!isEqual); + + // Test relative error as well + isEqual = fpEqual(a, b, tol); + EXPECT(isEqual); +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/base/tests/testNumericalDerivative.cpp b/gtsam/base/tests/testNumericalDerivative.cpp index d27e43040..6d6f41fdd 100644 --- a/gtsam/base/tests/testNumericalDerivative.cpp +++ b/gtsam/base/tests/testNumericalDerivative.cpp @@ -143,6 +143,13 @@ Vector6 f6(const double x1, const double x2, const double x3, const double x4, return result; } +Vector g6(const double x1, const double x2, const double x3, const double x4, + const double x5, const double x6) { + Vector result(6); + result << sin(x1), cos(x2), x3 * x3, x4 * x4 * x4, sqrt(x5), sin(x6) - cos(x6); + return result; +} + /* ************************************************************************* */ // TEST(testNumericalDerivative, numeriDerivative61) { @@ -153,6 +160,14 @@ TEST(testNumericalDerivative, numeriDerivative61) { double, double, double, double>(f6, x1, x2, x3, x4, x5, x6); EXPECT(assert_equal(expected61, actual61, 1e-5)); + + Matrix expected61Dynamic = Matrix::Zero(6, 1); + expected61Dynamic(0, 0) = cos(x1); + Matrix actual61Dynamic = + numericalDerivative61(g6, x1, x2, x3, x4, x5, x6); + + EXPECT(assert_equal(expected61Dynamic, actual61Dynamic, 1e-5)); } /* ************************************************************************* */ @@ -165,6 +180,13 @@ TEST(testNumericalDerivative, numeriDerivative62) { double, double, double>(f6, x1, x2, x3, x4, x5, x6); EXPECT(assert_equal(expected62, actual62, 1e-5)); + + Matrix expected62Dynamic = Matrix::Zero(6, 1); + expected62Dynamic(1, 0) = -sin(x2); + Matrix61 actual62Dynamic = numericalDerivative62(f6, x1, x2, x3, x4, x5, x6); + + EXPECT(assert_equal(expected62Dynamic, actual62Dynamic, 1e-5)); } /* ************************************************************************* */ @@ -177,6 +199,14 @@ TEST(testNumericalDerivative, numeriDerivative63) { double, double, double>(f6, x1, x2, x3, x4, x5, x6); EXPECT(assert_equal(expected63, actual63, 1e-5)); + + Matrix expected63Dynamic = Matrix::Zero(6, 1); + expected63Dynamic(2, 0) = 2 * x3; + Matrix61 actual63Dynamic = + numericalDerivative63(f6, x1, x2, x3, x4, x5, x6); + + EXPECT(assert_equal(expected63Dynamic, actual63Dynamic, 1e-5)); } /* ************************************************************************* */ @@ -189,6 +219,14 @@ TEST(testNumericalDerivative, numeriDerivative64) { double, double, double>(f6, x1, x2, x3, x4, x5, x6); EXPECT(assert_equal(expected64, actual64, 1e-5)); + + Matrix expected64Dynamic = Matrix::Zero(6, 1); + expected64Dynamic(3, 0) = 3 * x4 * x4; + Matrix61 actual64Dynamic = + numericalDerivative64(f6, x1, x2, x3, x4, x5, x6); + + EXPECT(assert_equal(expected64Dynamic, actual64Dynamic, 1e-5)); } /* ************************************************************************* */ @@ -201,6 +239,14 @@ TEST(testNumericalDerivative, numeriDerivative65) { double, double, 
double>(f6, x1, x2, x3, x4, x5, x6); EXPECT(assert_equal(expected65, actual65, 1e-5)); + + Matrix expected65Dynamic = Matrix::Zero(6, 1); + expected65Dynamic(4, 0) = 0.5 / sqrt(x5); + Matrix61 actual65Dynamic = + numericalDerivative65(f6, x1, x2, x3, x4, x5, x6); + + EXPECT(assert_equal(expected65Dynamic, actual65Dynamic, 1e-5)); } /* ************************************************************************* */ @@ -213,6 +259,14 @@ TEST(testNumericalDerivative, numeriDerivative66) { double, double, double>(f6, x1, x2, x3, x4, x5, x6); EXPECT(assert_equal(expected66, actual66, 1e-5)); + + Matrix expected66Dynamic = Matrix::Zero(6, 1); + expected66Dynamic(5, 0) = cos(x6) + sin(x6); + Matrix61 actual66Dynamic = + numericalDerivative66(f6, x1, x2, x3, x4, x5, x6); + + EXPECT(assert_equal(expected66Dynamic, actual66Dynamic, 1e-5)); } /* ************************************************************************* */ diff --git a/gtsam/base/timing.cpp b/gtsam/base/timing.cpp index b33f06045..4b41ea937 100644 --- a/gtsam/base/timing.cpp +++ b/gtsam/base/timing.cpp @@ -89,7 +89,7 @@ void TimingOutline::print(const std::string& outline) const { childOrder[child.second->myOrder_] = child.second; } // Print children - for(const ChildOrder::value_type order_child: childOrder) { + for(const ChildOrder::value_type& order_child: childOrder) { std::string childOutline(outline); childOutline += "| "; order_child.second->print(childOutline); diff --git a/gtsam/base/treeTraversal/parallelTraversalTasks.h b/gtsam/base/treeTraversal/parallelTraversalTasks.h index b52d44e2a..87d5b0d4c 100644 --- a/gtsam/base/treeTraversal/parallelTraversalTasks.h +++ b/gtsam/base/treeTraversal/parallelTraversalTasks.h @@ -57,7 +57,7 @@ namespace gtsam { makeNewTasks(makeNewTasks), isPostOrderPhase(false) {} - tbb::task* execute() + tbb::task* execute() override { if(isPostOrderPhase) { @@ -144,7 +144,7 @@ namespace gtsam { roots(roots), myData(myData), visitorPre(visitorPre), visitorPost(visitorPost), problemSizeThreshold(problemSizeThreshold) {} - tbb::task* execute() + tbb::task* execute() override { typedef PreOrderTask PreOrderTask; // Create data and tasks for our children diff --git a/gtsam/base/types.h b/gtsam/base/types.h index 2fa6eebb6..aaada3cee 100644 --- a/gtsam/base/types.h +++ b/gtsam/base/types.h @@ -28,6 +28,7 @@ #include #include +#include #ifdef GTSAM_USE_TBB #include @@ -54,7 +55,7 @@ namespace gtsam { /// Function to demangle type name of variable, e.g. demangle(typeid(x).name()) - std::string demangle(const char* name); + std::string GTSAM_EXPORT demangle(const char* name); /// Integer nonlinear key type typedef std::uint64_t Key; @@ -230,3 +231,50 @@ namespace std { #ifdef ERROR #undef ERROR #endif + +namespace gtsam { + + /// Convenience void_t as we assume C++11, it will not conflict the std one in C++17 as this is in `gtsam::` + template using void_t = void; + + /** + * A SFINAE trait to mark classes that need special alignment. + * + * This is required to make boost::make_shared and etc respect alignment, which is essential for the Python + * wrappers to work properly. + * + * Explanation + * ============= + * When a GTSAM type is not declared with the type alias `_eigen_aligned_allocator_trait = void`, the first template + * will be taken so `needs_eigen_aligned_allocator` will be resolved to `std::false_type`. + * + * Otherwise, it will resolve to the second template, which will be resolved to `std::true_type`. + * + * Please refer to `gtsam/base/make_shared.h` for an example. 
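To make the trait mechanics described above concrete, here is a small sketch: a type that invokes the GTSAM_MAKE_ALIGNED_OPERATOR_NEW macro (defined further below in this same header) picks up the `_eigen_aligned_allocator_trait = void` alias, so the second partial specialization fires and gtsam::make_shared (see gtsam/base/make_shared.h) is expected to allocate it with Eigen's aligned allocator. MyAlignedFactor is a hypothetical name used only for illustration:

    #include <Eigen/Core>
    #include <gtsam/base/make_shared.h>
    #include <gtsam/base/types.h>

    struct MyAlignedFactor {
      GTSAM_MAKE_ALIGNED_OPERATOR_NEW  // EIGEN_MAKE_ALIGNED_OPERATOR_NEW + the trait alias
      Eigen::Matrix4d pose;            // fixed-size member that wants 16-byte alignment
    };

    static_assert(gtsam::needs_eigen_aligned_allocator<MyAlignedFactor>::value,
                  "opted-in types resolve to std::true_type");
    static_assert(!gtsam::needs_eigen_aligned_allocator<int>::value,
                  "all other types keep the std::false_type default");

    void allocateAligned() {
      // expected to use the Eigen aligned allocator for the shared_ptr payload
      auto p = gtsam::make_shared<MyAlignedFactor>();
      (void)p;
    }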
+ */ + template> + struct needs_eigen_aligned_allocator : std::false_type { + }; + template + struct needs_eigen_aligned_allocator> : std::true_type { + }; + +} + +/** + * This marks a GTSAM object to require alignment. With this macro an object will automatically be allocated in aligned + * memory when one uses `gtsam::make_shared`. It reduces future misalignment problems that is hard to debug. + * See https://eigen.tuxfamily.org/dox/group__DenseMatrixManipulation__Alignement.html for detailed explanation. + */ +#define GTSAM_MAKE_ALIGNED_OPERATOR_NEW \ + EIGEN_MAKE_ALIGNED_OPERATOR_NEW \ + using _eigen_aligned_allocator_trait = void; + +/** + * This marks a GTSAM object to require alignment. With this macro an object will automatically be allocated in aligned + * memory when one uses `gtsam::make_shared`. It reduces future misalignment problems that is hard to debug. + * See https://eigen.tuxfamily.org/dox/group__DenseMatrixManipulation__Alignement.html for detailed explanation. + */ +#define GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \ + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \ + using _eigen_aligned_allocator_trait = void; diff --git a/gtsam/config.h.in b/gtsam/config.h.in index b480996ec..9d1bd4ebd 100644 --- a/gtsam/config.h.in +++ b/gtsam/config.h.in @@ -70,10 +70,7 @@ #cmakedefine GTSAM_THROW_CHEIRALITY_EXCEPTION // Make sure dependent projects that want it can see deprecated functions -#cmakedefine GTSAM_ALLOW_DEPRECATED_SINCE_V4 - -// Publish flag about Eigen typedef -#cmakedefine GTSAM_TYPEDEF_POINTS_TO_VECTORS +#cmakedefine GTSAM_ALLOW_DEPRECATED_SINCE_V41 // Support Metis-based nested dissection #cmakedefine GTSAM_SUPPORT_NESTED_DISSECTION diff --git a/gtsam/discrete/DecisionTree-inl.h b/gtsam/discrete/DecisionTree-inl.h index 2efd069cc..dd10500e6 100644 --- a/gtsam/discrete/DecisionTree-inl.h +++ b/gtsam/discrete/DecisionTree-inl.h @@ -66,42 +66,42 @@ namespace gtsam { } /// Leaf-Leaf equality - bool sameLeaf(const Leaf& q) const { + bool sameLeaf(const Leaf& q) const override { return constant_ == q.constant_; } /// polymorphic equality: is q is a leaf, could be - bool sameLeaf(const Node& q) const { + bool sameLeaf(const Node& q) const override { return (q.isLeaf() && q.sameLeaf(*this)); } /** equality up to tolerance */ - bool equals(const Node& q, double tol) const { + bool equals(const Node& q, double tol) const override { const Leaf* other = dynamic_cast (&q); if (!other) return false; return std::abs(double(this->constant_ - other->constant_)) < tol; } /** print */ - void print(const std::string& s) const { + void print(const std::string& s) const override { bool showZero = true; if (showZero || constant_) std::cout << s << " Leaf " << constant_ << std::endl; } /** to graphviz file */ - void dot(std::ostream& os, bool showZero) const { + void dot(std::ostream& os, bool showZero) const override { if (showZero || constant_) os << "\"" << this->id() << "\" [label=\"" << boost::format("%4.2g") % constant_ << "\", shape=box, rank=sink, height=0.35, fixedsize=true]\n"; // width=0.55, } /** evaluate */ - const Y& operator()(const Assignment& x) const { + const Y& operator()(const Assignment& x) const override { return constant_; } /** apply unary operator */ - NodePtr apply(const Unary& op) const { + NodePtr apply(const Unary& op) const override { NodePtr f(new Leaf(op(constant_))); return f; } @@ -111,27 +111,27 @@ namespace gtsam { // Simply calls apply on argument to call correct virtual method: // fL.apply_f_op_g(gL) -> gL.apply_g_op_fL(fL) (below) // 
fL.apply_f_op_g(gC) -> gC.apply_g_op_fL(fL) (Choice) - NodePtr apply_f_op_g(const Node& g, const Binary& op) const { + NodePtr apply_f_op_g(const Node& g, const Binary& op) const override { return g.apply_g_op_fL(*this, op); } // Applying binary operator to two leaves results in a leaf - NodePtr apply_g_op_fL(const Leaf& fL, const Binary& op) const { + NodePtr apply_g_op_fL(const Leaf& fL, const Binary& op) const override { NodePtr h(new Leaf(op(fL.constant_, constant_))); // fL op gL return h; } // If second argument is a Choice node, call it's apply with leaf as second - NodePtr apply_g_op_fC(const Choice& fC, const Binary& op) const { + NodePtr apply_g_op_fC(const Choice& fC, const Binary& op) const override { return fC.apply_fC_op_gL(*this, op); // operand order back to normal } /** choose a branch, create new memory ! */ - NodePtr choose(const L& label, size_t index) const { + NodePtr choose(const L& label, size_t index) const override { return NodePtr(new Leaf(constant())); } - bool isLeaf() const { return true; } + bool isLeaf() const override { return true; } }; // Leaf @@ -175,7 +175,7 @@ namespace gtsam { return f; } - bool isLeaf() const { return false; } + bool isLeaf() const override { return false; } /** Constructor, given choice label and mandatory expected branch count */ Choice(const L& label, size_t count) : @@ -236,7 +236,7 @@ namespace gtsam { } /** print (as a tree) */ - void print(const std::string& s) const { + void print(const std::string& s) const override { std::cout << s << " Choice("; // std::cout << this << ","; std::cout << label_ << ") " << std::endl; @@ -245,7 +245,7 @@ namespace gtsam { } /** output to graphviz (as a a graph) */ - void dot(std::ostream& os, bool showZero) const { + void dot(std::ostream& os, bool showZero) const override { os << "\"" << this->id() << "\" [shape=circle, label=\"" << label_ << "\"]\n"; for (size_t i = 0; i < branches_.size(); i++) { @@ -266,17 +266,17 @@ namespace gtsam { } /// Choice-Leaf equality: always false - bool sameLeaf(const Leaf& q) const { + bool sameLeaf(const Leaf& q) const override { return false; } /// polymorphic equality: if q is a leaf, could be... 
- bool sameLeaf(const Node& q) const { + bool sameLeaf(const Node& q) const override { return (q.isLeaf() && q.sameLeaf(*this)); } /** equality up to tolerance */ - bool equals(const Node& q, double tol) const { + bool equals(const Node& q, double tol) const override { const Choice* other = dynamic_cast (&q); if (!other) return false; if (this->label_ != other->label_) return false; @@ -288,7 +288,7 @@ namespace gtsam { } /** evaluate */ - const Y& operator()(const Assignment& x) const { + const Y& operator()(const Assignment& x) const override { #ifndef NDEBUG typename Assignment::const_iterator it = x.find(label_); if (it == x.end()) { @@ -314,7 +314,7 @@ namespace gtsam { } /** apply unary operator */ - NodePtr apply(const Unary& op) const { + NodePtr apply(const Unary& op) const override { boost::shared_ptr r(new Choice(label_, *this, op)); return Unique(r); } @@ -324,12 +324,12 @@ namespace gtsam { // Simply calls apply on argument to call correct virtual method: // fC.apply_f_op_g(gL) -> gL.apply_g_op_fC(fC) -> (Leaf) // fC.apply_f_op_g(gC) -> gC.apply_g_op_fC(fC) -> (below) - NodePtr apply_f_op_g(const Node& g, const Binary& op) const { + NodePtr apply_f_op_g(const Node& g, const Binary& op) const override { return g.apply_g_op_fC(*this, op); } // If second argument of binary op is Leaf node, recurse on branches - NodePtr apply_g_op_fL(const Leaf& fL, const Binary& op) const { + NodePtr apply_g_op_fL(const Leaf& fL, const Binary& op) const override { boost::shared_ptr h(new Choice(label(), nrChoices())); for(NodePtr branch: branches_) h->push_back(fL.apply_f_op_g(*branch, op)); @@ -337,7 +337,7 @@ namespace gtsam { } // If second argument of binary op is Choice, call constructor - NodePtr apply_g_op_fC(const Choice& fC, const Binary& op) const { + NodePtr apply_g_op_fC(const Choice& fC, const Binary& op) const override { boost::shared_ptr h(new Choice(fC, *this, op)); return Unique(h); } @@ -352,7 +352,7 @@ namespace gtsam { } /** choose a branch, recursively */ - NodePtr choose(const L& label, size_t index) const { + NodePtr choose(const L& label, size_t index) const override { if (label_ == label) return branches_[index]; // choose branch diff --git a/gtsam/discrete/DecisionTreeFactor.cpp b/gtsam/discrete/DecisionTreeFactor.cpp index f83349436..b7b9d7034 100644 --- a/gtsam/discrete/DecisionTreeFactor.cpp +++ b/gtsam/discrete/DecisionTreeFactor.cpp @@ -69,7 +69,7 @@ namespace gtsam { for(Key j: f.keys()) cs[j] = f.cardinality(j); // Convert map into keys DiscreteKeys keys; - for(const DiscreteKey& key: cs) + for(const std::pair& key: cs) keys.push_back(key); // apply operand ADT result = ADT::apply(f, op); diff --git a/gtsam/discrete/DecisionTreeFactor.h b/gtsam/discrete/DecisionTreeFactor.h index 54cce56be..d1696a281 100644 --- a/gtsam/discrete/DecisionTreeFactor.h +++ b/gtsam/discrete/DecisionTreeFactor.h @@ -69,23 +69,23 @@ namespace gtsam { /// @{ /// equality - bool equals(const DiscreteFactor& other, double tol = 1e-9) const; + bool equals(const DiscreteFactor& other, double tol = 1e-9) const override; // print - virtual void print(const std::string& s = "DecisionTreeFactor:\n", - const KeyFormatter& formatter = DefaultKeyFormatter) const; + void print(const std::string& s = "DecisionTreeFactor:\n", + const KeyFormatter& formatter = DefaultKeyFormatter) const override; /// @} /// @name Standard Interface /// @{ /// Value is just look up in AlgebraicDecisonTree - virtual double operator()(const Values& values) const { + double operator()(const Values& values) const override { 
return Potentials::operator()(values); } /// multiply two factors - DecisionTreeFactor operator*(const DecisionTreeFactor& f) const { + DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override { return apply(f, ADT::Ring::mul); } @@ -95,7 +95,7 @@ namespace gtsam { } /// Convert into a decisiontree - virtual DecisionTreeFactor toDecisionTreeFactor() const { + DecisionTreeFactor toDecisionTreeFactor() const override { return *this; } diff --git a/gtsam/discrete/DiscreteBayesNet.h b/gtsam/discrete/DiscreteBayesNet.h index dcc336f89..237caf745 100644 --- a/gtsam/discrete/DiscreteBayesNet.h +++ b/gtsam/discrete/DiscreteBayesNet.h @@ -20,13 +20,14 @@ #include #include #include +#include #include #include namespace gtsam { /** A Bayes net made from linear-Discrete densities */ - class GTSAM_EXPORT DiscreteBayesNet: public FactorGraph + class GTSAM_EXPORT DiscreteBayesNet: public BayesNet { public: diff --git a/gtsam/discrete/DiscreteBayesTree.cpp b/gtsam/discrete/DiscreteBayesTree.cpp index bed50a470..990d10dbe 100644 --- a/gtsam/discrete/DiscreteBayesTree.cpp +++ b/gtsam/discrete/DiscreteBayesTree.cpp @@ -29,13 +29,32 @@ namespace gtsam { template class BayesTreeCliqueBase; template class BayesTree; + /* ************************************************************************* */ + double DiscreteBayesTreeClique::evaluate( + const DiscreteConditional::Values& values) const { + // evaluate all conditionals and multiply + double result = (*conditional_)(values); + for (const auto& child : children) { + result *= child->evaluate(values); + } + return result; + } /* ************************************************************************* */ - bool DiscreteBayesTree::equals(const This& other, double tol) const - { + bool DiscreteBayesTree::equals(const This& other, double tol) const { return Base::equals(other, tol); } + /* ************************************************************************* */ + double DiscreteBayesTree::evaluate( + const DiscreteConditional::Values& values) const { + double result = 1.0; + for (const auto& root : roots_) { + result *= root->evaluate(values); + } + return result; + } + } // \namespace gtsam diff --git a/gtsam/discrete/DiscreteBayesTree.h b/gtsam/discrete/DiscreteBayesTree.h index 0df6ab476..29da5817e 100644 --- a/gtsam/discrete/DiscreteBayesTree.h +++ b/gtsam/discrete/DiscreteBayesTree.h @@ -11,7 +11,8 @@ /** * @file DiscreteBayesTree.h - * @brief Discrete Bayes Tree, the result of eliminating a DiscreteJunctionTree + * @brief Discrete Bayes Tree, the result of eliminating a + * DiscreteJunctionTree * @brief DiscreteBayesTree * @author Frank Dellaert * @author Richard Roberts @@ -22,45 +23,63 @@ #include #include #include +#include #include +#include + namespace gtsam { - // Forward declarations - class DiscreteConditional; - class VectorValues; - - /* ************************************************************************* */ - /** A clique in a DiscreteBayesTree */ - class GTSAM_EXPORT DiscreteBayesTreeClique : - public BayesTreeCliqueBase - { - public: - typedef DiscreteBayesTreeClique This; - typedef BayesTreeCliqueBase Base; - typedef boost::shared_ptr shared_ptr; - typedef boost::weak_ptr weak_ptr; - DiscreteBayesTreeClique() {} - DiscreteBayesTreeClique(const boost::shared_ptr& conditional) : Base(conditional) {} - }; - - /* ************************************************************************* */ - /** A Bayes tree representing a Discrete density */ - class GTSAM_EXPORT DiscreteBayesTree : - public BayesTree - { - private: - 
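In equation form, the recursive evaluate() added to DiscreteBayesTree above computes, for a full assignment x, the product of every clique conditional in the tree, which matches the joint encoded by the originating Bayes net (here F_c and S_c denote the frontal and separator variables of clique c):

    P(x) = \prod_{r \in \text{roots}} \; \prod_{c \in \text{subtree}(r)} P(F_c \mid S_c)(x)

This is why the ThinTree test later in this patch can compare bayesTree->evaluate(x) directly against bayesNet.evaluate(x) over all assignments.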
typedef BayesTree Base; - - public: - typedef DiscreteBayesTree This; - typedef boost::shared_ptr shared_ptr; - - /** Default constructor, creates an empty Bayes tree */ - DiscreteBayesTree() {} - - /** Check equality */ - bool equals(const This& other, double tol = 1e-9) const; - }; - -} +// Forward declarations +class DiscreteConditional; +class VectorValues; + +/* ************************************************************************* */ +/** A clique in a DiscreteBayesTree */ +class GTSAM_EXPORT DiscreteBayesTreeClique + : public BayesTreeCliqueBase { + public: + typedef DiscreteBayesTreeClique This; + typedef BayesTreeCliqueBase + Base; + typedef boost::shared_ptr shared_ptr; + typedef boost::weak_ptr weak_ptr; + DiscreteBayesTreeClique() {} + virtual ~DiscreteBayesTreeClique() {} + DiscreteBayesTreeClique( + const boost::shared_ptr& conditional) + : Base(conditional) {} + + /// print index signature only + void printSignature( + const std::string& s = "Clique: ", + const KeyFormatter& formatter = DefaultKeyFormatter) const { + conditional_->printSignature(s, formatter); + } + + //** evaluate conditional probability of subtree for given Values */ + double evaluate(const DiscreteConditional::Values& values) const; +}; + +/* ************************************************************************* */ +/** A Bayes tree representing a Discrete density */ +class GTSAM_EXPORT DiscreteBayesTree + : public BayesTree { + private: + typedef BayesTree Base; + + public: + typedef DiscreteBayesTree This; + typedef boost::shared_ptr shared_ptr; + + /** Default constructor, creates an empty Bayes tree */ + DiscreteBayesTree() {} + + /** Check equality */ + bool equals(const This& other, double tol = 1e-9) const; + + //** evaluate probability for given Values */ + double evaluate(const DiscreteConditional::Values& values) const; +}; + +} // namespace gtsam diff --git a/gtsam/discrete/DiscreteConditional.cpp b/gtsam/discrete/DiscreteConditional.cpp index 2ab3054a8..ac7c58405 100644 --- a/gtsam/discrete/DiscreteConditional.cpp +++ b/gtsam/discrete/DiscreteConditional.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include using namespace std; @@ -61,16 +62,26 @@ DiscreteConditional::DiscreteConditional(const DecisionTreeFactor& joint, } /* ******************************************************************************** */ -DiscreteConditional::DiscreteConditional(const Signature& signature) : - BaseFactor(signature.discreteKeysParentsFirst(), signature.cpt()), BaseConditional( - 1) { -} +DiscreteConditional::DiscreteConditional(const Signature& signature) + : BaseFactor(signature.discreteKeys(), signature.cpt()), + BaseConditional(1) {} /* ******************************************************************************** */ -void DiscreteConditional::print(const std::string& s, - const KeyFormatter& formatter) const { - std::cout << s << std::endl; - Potentials::print(s); +void DiscreteConditional::print(const string& s, + const KeyFormatter& formatter) const { + cout << s << " P( "; + for (const_iterator it = beginFrontals(); it != endFrontals(); ++it) { + cout << formatter(*it) << " "; + } + if (nrParents()) { + cout << "| "; + for (const_iterator it = beginParents(); it != endParents(); ++it) { + cout << formatter(*it) << " "; + } + } + cout << ")"; + Potentials::print(""); + cout << endl; } /* ******************************************************************************** */ @@ -173,55 +184,28 @@ size_t DiscreteConditional::solve(const Values& parentsValues) const { /* 
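The sample() rewrite just below replaces the hand-rolled CDF accumulation and lower_bound lookup with std::discrete_distribution, which normalizes the weights itself and draws an index in one call. A self-contained sketch of that idiom, with hypothetical weights, in case the change is hard to read in diff form:

    #include <random>
    #include <vector>

    size_t sampleIndex(const std::vector<double>& weights, std::mt19937& rng) {
      // Draws index i with probability weights[i] / sum(weights); no explicit CDF needed.
      std::discrete_distribution<size_t> dist(weights.begin(), weights.end());
      return dist(rng);
    }

    // e.g. sampleIndex({0.3, 0.7}, rng) returns 1 roughly 70% of the time.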
******************************************************************************** */ size_t DiscreteConditional::sample(const Values& parentsValues) const { - - static mt19937 rng(2); // random number generator - - bool debug = ISDEBUG("DiscreteConditional::sample"); + static mt19937 rng(2); // random number generator // Get the correct conditional density - ADT pFS = choose(parentsValues); // P(F|S=parentsValues) - if (debug) - GTSAM_PRINT(pFS); + ADT pFS = choose(parentsValues); // P(F|S=parentsValues) - // get cumulative distribution function (cdf) - // TODO, only works for one key now, seems horribly slow this way + // TODO(Duy): only works for one key now, seems horribly slow this way assert(nrFrontals() == 1); - Key j = (firstFrontalKey()); - size_t nj = cardinality(j); - vector cdf(nj); + Key key = firstFrontalKey(); + size_t nj = cardinality(key); + vector p(nj); Values frontals; - double sum = 0; for (size_t value = 0; value < nj; value++) { - frontals[j] = value; - double pValueS = pFS(frontals); // P(F=value|S=parentsValues) - sum += pValueS; // accumulate - if (debug) - cout << sum << " "; - if (pValueS == 1) { - if (debug) - cout << "--> " << value << endl; - return value; // shortcut exit + frontals[key] = value; + p[value] = pFS(frontals); // P(F=value|S=parentsValues) + if (p[value] == 1.0) { + return value; // shortcut exit } - cdf[value] = sum; } - - // inspired by http://www.boost.org/doc/libs/1_46_1/doc/html/boost_random/tutorial.html - uniform_real_distribution dist(0, cdf.back()); - size_t sampled = lower_bound(cdf.begin(), cdf.end(), dist(rng)) - cdf.begin(); - if (debug) - cout << "-> " << sampled << endl; - - return sampled; - - return 0; + std::discrete_distribution distribution(p.begin(), p.end()); + return distribution(rng); } -/* ******************************************************************************** */ -//void DiscreteConditional::permuteWithInverse( -// const Permutation& inversePermutation) { -// IndexConditionalOrdered::permuteWithInverse(inversePermutation); -// Potentials::permuteWithInverse(inversePermutation); -//} /* ******************************************************************************** */ }// namespace diff --git a/gtsam/discrete/DiscreteConditional.h b/gtsam/discrete/DiscreteConditional.h index 3da8d0a82..8299fab2c 100644 --- a/gtsam/discrete/DiscreteConditional.h +++ b/gtsam/discrete/DiscreteConditional.h @@ -24,6 +24,8 @@ #include #include +#include + namespace gtsam { /** @@ -83,17 +85,24 @@ class GTSAM_EXPORT DiscreteConditional: public DecisionTreeFactor, /// GTSAM-style print void print(const std::string& s = "Discrete Conditional: ", - const KeyFormatter& formatter = DefaultKeyFormatter) const; + const KeyFormatter& formatter = DefaultKeyFormatter) const override; /// GTSAM-style equals - bool equals(const DiscreteFactor& other, double tol = 1e-9) const; + bool equals(const DiscreteFactor& other, double tol = 1e-9) const override; /// @} /// @name Standard Interface /// @{ + /// print index signature only + void printSignature( + const std::string& s = "Discrete Conditional: ", + const KeyFormatter& formatter = DefaultKeyFormatter) const { + static_cast(this)->print(s, formatter); + } + /// Evaluate, just look up in AlgebraicDecisonTree - virtual double operator()(const Values& values) const { + double operator()(const Values& values) const override { return Potentials::operator()(values); } diff --git a/gtsam/discrete/Potentials.cpp b/gtsam/discrete/Potentials.cpp index c4cdbe0ef..331a76c13 100644 --- 
a/gtsam/discrete/Potentials.cpp +++ b/gtsam/discrete/Potentials.cpp @@ -15,50 +15,52 @@ * @author Frank Dellaert */ -#include #include +#include + #include +#include + using namespace std; namespace gtsam { - // explicit instantiation - template class DecisionTree ; - template class AlgebraicDecisionTree ; +// explicit instantiation +template class DecisionTree; +template class AlgebraicDecisionTree; - /* ************************************************************************* */ - double Potentials::safe_div(const double& a, const double& b) { - // cout << boost::format("%g / %g = %g\n") % a % b % ((a == 0) ? 0 : (a / b)); - // The use for safe_div is when we divide the product factor by the sum factor. - // If the product or sum is zero, we accord zero probability to the event. - return (a == 0 || b == 0) ? 0 : (a / b); - } - - /* ******************************************************************************** */ - Potentials::Potentials() : - ADT(1.0) { - } - - /* ******************************************************************************** */ - Potentials::Potentials(const DiscreteKeys& keys, const ADT& decisionTree) : - ADT(decisionTree), cardinalities_(keys.cardinalities()) { - } +/* ************************************************************************* */ +double Potentials::safe_div(const double& a, const double& b) { + // cout << boost::format("%g / %g = %g\n") % a % b % ((a == 0) ? 0 : (a / b)); + // The use for safe_div is when we divide the product factor by the sum + // factor. If the product or sum is zero, we accord zero probability to the + // event. + return (a == 0 || b == 0) ? 0 : (a / b); +} - /* ************************************************************************* */ - bool Potentials::equals(const Potentials& other, double tol) const { - return ADT::equals(other, tol); - } +/* ******************************************************************************** + */ +Potentials::Potentials() : ADT(1.0) {} - /* ************************************************************************* */ - void Potentials::print(const string& s, - const KeyFormatter& formatter) const { - cout << s << "\n Cardinalities: "; - for(const DiscreteKey& key: cardinalities_) - cout << formatter(key.first) << "=" << formatter(key.second) << " "; - cout << endl; - ADT::print(" "); - } +/* ******************************************************************************** + */ +Potentials::Potentials(const DiscreteKeys& keys, const ADT& decisionTree) + : ADT(decisionTree), cardinalities_(keys.cardinalities()) {} + +/* ************************************************************************* */ +bool Potentials::equals(const Potentials& other, double tol) const { + return ADT::equals(other, tol); +} + +/* ************************************************************************* */ +void Potentials::print(const string& s, const KeyFormatter& formatter) const { + cout << s << "\n Cardinalities: {"; + for (const std::pair& key : cardinalities_) + cout << formatter(key.first) << ":" << key.second << ", "; + cout << "}" << endl; + ADT::print(" "); +} // // /* ************************************************************************* */ // template @@ -95,4 +97,4 @@ namespace gtsam { /* ************************************************************************* */ -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/discrete/Signature.cpp b/gtsam/discrete/Signature.cpp index 89e763703..94b160a29 100644 --- a/gtsam/discrete/Signature.cpp +++ b/gtsam/discrete/Signature.cpp @@ -122,28 +122,30 @@ 
namespace gtsam { key_(key) { } - DiscreteKeys Signature::discreteKeysParentsFirst() const { + DiscreteKeys Signature::discreteKeys() const { DiscreteKeys keys; - for(const DiscreteKey& key: parents_) - keys.push_back(key); keys.push_back(key_); + for (const DiscreteKey& key : parents_) keys.push_back(key); return keys; } KeyVector Signature::indices() const { KeyVector js; js.push_back(key_.first); - for(const DiscreteKey& key: parents_) - js.push_back(key.first); + for (const DiscreteKey& key : parents_) js.push_back(key.first); return js; } vector Signature::cpt() const { vector cpt; if (table_) { - for(const Row& row: *table_) - for(const double& x: row) - cpt.push_back(x); + const size_t nrStates = table_->at(0).size(); + for (size_t j = 0; j < nrStates; j++) { + for (const Row& row : *table_) { + assert(row.size() == nrStates); + cpt.push_back(row[j]); + } + } } return cpt; } diff --git a/gtsam/discrete/Signature.h b/gtsam/discrete/Signature.h index 587cd6b30..6c59b5bff 100644 --- a/gtsam/discrete/Signature.h +++ b/gtsam/discrete/Signature.h @@ -86,8 +86,8 @@ namespace gtsam { return parents_; } - /** All keys, with variable key last */ - DiscreteKeys discreteKeysParentsFirst() const; + /** All keys, with variable key first */ + DiscreteKeys discreteKeys() const; /** All key indices, with variable key first */ KeyVector indices() const; diff --git a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp index 753af16d8..0261ef833 100644 --- a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp +++ b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp @@ -132,7 +132,7 @@ TEST(ADT, example3) /** Convert Signature into CPT */ ADT create(const Signature& signature) { - ADT p(signature.discreteKeysParentsFirst(), signature.cpt()); + ADT p(signature.discreteKeys(), signature.cpt()); static size_t count = 0; const DiscreteKey& key = signature.key(); string dotfile = (boost::format("CPT-%03d-%d") % ++count % key.first).str(); @@ -181,19 +181,20 @@ TEST(ADT, joint) dot(joint, "Asia-ASTLBEX"); joint = apply(joint, pD, &mul); dot(joint, "Asia-ASTLBEXD"); - EXPECT_LONGS_EQUAL(346, (long)muls); + EXPECT_LONGS_EQUAL(346, muls); gttoc_(asiaJoint); tictoc_getNode(asiaJointNode, asiaJoint); elapsed = asiaJointNode->secs() + asiaJointNode->wall(); tictoc_reset_(); printCounts("Asia joint"); + // Form P(A,S,T,L) = P(A) P(S) P(T|A) P(L|S) ADT pASTL = pA; pASTL = apply(pASTL, pS, &mul); pASTL = apply(pASTL, pT, &mul); pASTL = apply(pASTL, pL, &mul); - // test combine + // test combine to check that P(A) = \sum_{S,T,L} P(A,S,T,L) ADT fAa = pASTL.combine(L, &add_).combine(T, &add_).combine(S, &add_); EXPECT(assert_equal(pA, fAa)); ADT fAb = pASTL.combine(S, &add_).combine(T, &add_).combine(L, &add_); diff --git a/gtsam/discrete/tests/testDiscreteBayesNet.cpp b/gtsam/discrete/tests/testDiscreteBayesNet.cpp index 5ed662332..2b440e5a0 100644 --- a/gtsam/discrete/tests/testDiscreteBayesNet.cpp +++ b/gtsam/discrete/tests/testDiscreteBayesNet.cpp @@ -18,110 +18,135 @@ #include #include -#include +#include #include +#include +#include #include -#include + #include +#include using namespace boost::assign; #include +#include +#include using namespace std; using namespace gtsam; /* ************************************************************************* */ -TEST(DiscreteBayesNet, Asia) -{ +TEST(DiscreteBayesNet, bayesNet) { + DiscreteBayesNet bayesNet; + DiscreteKey Parent(0, 2), Child(1, 2); + + auto prior = boost::make_shared(Parent % "6/4"); + 
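As a worked check of the reordered CPT above: Signature::discreteKeys() now lists the frontal key first, and cpt() emits the table column by column to match, so (Child | Parent) = "7/3 8/2" flattens to {0.7, 0.8, 0.3, 0.2} over keys (Child, Parent), with the parent index varying fastest. That is exactly what the surrounding TEST(DiscreteBayesNet, bayesNet) case asserts, and its expected marginal follows from the same numbers: with a 6/4 prior on Parent, P(Parent = 1) = 0.4 and P(Child = 1) = 0.6 * 0.3 + 0.4 * 0.2 = 0.26.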
CHECK(assert_equal(Potentials::ADT({Parent}, "0.6 0.4"), + (Potentials::ADT)*prior)); + bayesNet.push_back(prior); + + auto conditional = + boost::make_shared(Child | Parent = "7/3 8/2"); + EXPECT_LONGS_EQUAL(1, *(conditional->beginFrontals())); + Potentials::ADT expected(Child & Parent, "0.7 0.8 0.3 0.2"); + CHECK(assert_equal(expected, (Potentials::ADT)*conditional)); + bayesNet.push_back(conditional); + + DiscreteFactorGraph fg(bayesNet); + LONGS_EQUAL(2, fg.back()->size()); + + // Check the marginals + const double expectedMarginal[2]{0.4, 0.6 * 0.3 + 0.4 * 0.2}; + DiscreteMarginals marginals(fg); + for (size_t j = 0; j < 2; j++) { + Vector FT = marginals.marginalProbabilities(DiscreteKey(j, 2)); + EXPECT_DOUBLES_EQUAL(expectedMarginal[j], FT[1], 1e-3); + EXPECT_DOUBLES_EQUAL(FT[0], 1.0 - FT[1], 1e-9); + } +} + +/* ************************************************************************* */ +TEST(DiscreteBayesNet, Asia) { DiscreteBayesNet asia; -// DiscreteKey A("Asia"), S("Smoking"), T("Tuberculosis"), L("LungCancer"), B( -// "Bronchitis"), E("Either"), X("XRay"), D("Dyspnoea"); - DiscreteKey A(0,2), S(4,2), T(3,2), L(6,2), B(7,2), E(5,2), X(2,2), D(1,2); + DiscreteKey Asia(0, 2), Smoking(4, 2), Tuberculosis(3, 2), LungCancer(6, 2), + Bronchitis(7, 2), Either(5, 2), XRay(2, 2), Dyspnea(1, 2); - // TODO: make a version that doesn't use the parser - asia.add(A % "99/1"); - asia.add(S % "50/50"); + asia.add(Asia % "99/1"); + asia.add(Smoking % "50/50"); - asia.add(T | A = "99/1 95/5"); - asia.add(L | S = "99/1 90/10"); - asia.add(B | S = "70/30 40/60"); + asia.add(Tuberculosis | Asia = "99/1 95/5"); + asia.add(LungCancer | Smoking = "99/1 90/10"); + asia.add(Bronchitis | Smoking = "70/30 40/60"); - asia.add((E | T, L) = "F T T T"); + asia.add((Either | Tuberculosis, LungCancer) = "F T T T"); - asia.add(X | E = "95/5 2/98"); - // next lines are same as asia.add((D | E, B) = "9/1 2/8 3/7 1/9"); - DiscreteConditional::shared_ptr actual = - boost::make_shared((D | E, B) = "9/1 2/8 3/7 1/9"); - asia.push_back(actual); - // GTSAM_PRINT(asia); + asia.add(XRay | Either = "95/5 2/98"); + asia.add((Dyspnea | Either, Bronchitis) = "9/1 2/8 3/7 1/9"); // Convert to factor graph DiscreteFactorGraph fg(asia); -// GTSAM_PRINT(fg); - LONGS_EQUAL(3,fg.back()->size()); - Potentials::ADT expected(B & D & E, "0.9 0.3 0.1 0.7 0.2 0.1 0.8 0.9"); - CHECK(assert_equal(expected,(Potentials::ADT)*actual)); + LONGS_EQUAL(3, fg.back()->size()); + + // Check the marginals we know (of the parent-less nodes) + DiscreteMarginals marginals(fg); + Vector2 va(0.99, 0.01), vs(0.5, 0.5); + EXPECT(assert_equal(va, marginals.marginalProbabilities(Asia))); + EXPECT(assert_equal(vs, marginals.marginalProbabilities(Smoking))); // Create solver and eliminate Ordering ordering; - ordering += Key(0),Key(1),Key(2),Key(3),Key(4),Key(5),Key(6),Key(7); + ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7); DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering); -// GTSAM_PRINT(*chordal); - DiscreteConditional expected2(B % "11/9"); - CHECK(assert_equal(expected2,*chordal->back())); + DiscreteConditional expected2(Bronchitis % "11/9"); + EXPECT(assert_equal(expected2, *chordal->back())); // solve DiscreteFactor::sharedValues actualMPE = chordal->optimize(); DiscreteFactor::Values expectedMPE; - insert(expectedMPE)(A.first, 0)(D.first, 0)(X.first, 0)(T.first, 0)(S.first, - 0)(E.first, 0)(L.first, 0)(B.first, 0); + insert(expectedMPE)(Asia.first, 0)(Dyspnea.first, 0)(XRay.first, 0)( + 
Tuberculosis.first, 0)(Smoking.first, 0)(Either.first, 0)( + LungCancer.first, 0)(Bronchitis.first, 0); EXPECT(assert_equal(expectedMPE, *actualMPE)); - // add evidence, we were in Asia and we have Dispnoea - fg.add(A, "0 1"); - fg.add(D, "0 1"); -// fg.product().dot("fg"); + // add evidence, we were in Asia and we have dyspnea + fg.add(Asia, "0 1"); + fg.add(Dyspnea, "0 1"); // solve again, now with evidence DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering); -// GTSAM_PRINT(*chordal2); DiscreteFactor::sharedValues actualMPE2 = chordal2->optimize(); DiscreteFactor::Values expectedMPE2; - insert(expectedMPE2)(A.first, 1)(D.first, 1)(X.first, 0)(T.first, 0)(S.first, - 1)(E.first, 0)(L.first, 0)(B.first, 1); + insert(expectedMPE2)(Asia.first, 1)(Dyspnea.first, 1)(XRay.first, 0)( + Tuberculosis.first, 0)(Smoking.first, 1)(Either.first, 0)( + LungCancer.first, 0)(Bronchitis.first, 1); EXPECT(assert_equal(expectedMPE2, *actualMPE2)); // now sample from it DiscreteFactor::Values expectedSample; SETDEBUG("DiscreteConditional::sample", false); - insert(expectedSample)(A.first, 1)(D.first, 1)(X.first, 1)(T.first, 0)( - S.first, 1)(E.first, 1)(L.first, 1)(B.first, 0); + insert(expectedSample)(Asia.first, 1)(Dyspnea.first, 1)(XRay.first, 1)( + Tuberculosis.first, 0)(Smoking.first, 1)(Either.first, 1)( + LungCancer.first, 1)(Bronchitis.first, 0); DiscreteFactor::sharedValues actualSample = chordal2->sample(); EXPECT(assert_equal(expectedSample, *actualSample)); } /* ************************************************************************* */ -TEST_UNSAFE(DiscreteBayesNet, Sugar) -{ - DiscreteKey T(0,2), L(1,2), E(2,2), D(3,2), C(8,3), S(7,2); +TEST_UNSAFE(DiscreteBayesNet, Sugar) { + DiscreteKey T(0, 2), L(1, 2), E(2, 2), C(8, 3), S(7, 2); DiscreteBayesNet bn; - // test some mistakes - // add(bn, D); - // add(bn, D | E); - // add(bn, D | E = "blah"); - // try logic bn.add((E | T, L) = "OR"); bn.add((E | T, L) = "AND"); - // // try multivalued - bn.add(C % "1/1/2"); - bn.add(C | S = "1/1/2 5/2/3"); + // try multivalued + bn.add(C % "1/1/2"); + bn.add(C | S = "1/1/2 5/2/3"); } /* ************************************************************************* */ @@ -130,4 +155,3 @@ int main() { return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ - diff --git a/gtsam/discrete/tests/testDiscreteBayesTree.cpp b/gtsam/discrete/tests/testDiscreteBayesTree.cpp index 93126f642..ecf485036 100644 --- a/gtsam/discrete/tests/testDiscreteBayesTree.cpp +++ b/gtsam/discrete/tests/testDiscreteBayesTree.cpp @@ -1,261 +1,228 @@ -///* ---------------------------------------------------------------------------- -// -// * GTSAM Copyright 2010, Georgia Tech Research Corporation, -// * Atlanta, Georgia 30332-0415 -// * All Rights Reserved -// * Authors: Frank Dellaert, et al. (see THANKS for the full author list) -// -// * See LICENSE for the license information -// -// * -------------------------------------------------------------------------- */ -// -///* -// * @file testDiscreteBayesTree.cpp -// * @date sept 15, 2012 -// * @author Frank Dellaert -// */ -// -//#include -//#include -//#include -// -//#include -//using namespace boost::assign; -// +/* ---------------------------------------------------------------------------- + +* GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, +* Atlanta, Georgia 30332-0415 +* All Rights Reserved +* Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + +* See LICENSE for the license information + +* -------------------------------------------------------------------------- */ + +/* + * @file testDiscreteBayesTree.cpp + * @date sept 15, 2012 + * @author Frank Dellaert + */ + +#include +#include +#include +#include +#include + +#include +using namespace boost::assign; + #include -// -//using namespace std; -//using namespace gtsam; -// -//static bool debug = false; -// -///** -// * Custom clique class to debug shortcuts -// */ -////class Clique: public BayesTreeCliqueBaseOrdered { -//// -////protected: -//// -////public: -//// -//// typedef BayesTreeCliqueBaseOrdered Base; -//// typedef boost::shared_ptr shared_ptr; -//// -//// // Constructors -//// Clique() { -//// } -//// Clique(const DiscreteConditional::shared_ptr& conditional) : -//// Base(conditional) { -//// } -//// Clique( -//// const std::pair& result) : -//// Base(result) { -//// } -//// -//// /// print index signature only -//// void printSignature(const std::string& s = "Clique: ", -//// const KeyFormatter& indexFormatter = DefaultKeyFormatter) const { -//// ((IndexConditionalOrdered::shared_ptr) conditional_)->print(s, indexFormatter); -//// } -//// -//// /// evaluate value of sub-tree -//// double evaluate(const DiscreteConditional::Values & values) { -//// double result = (*(this->conditional_))(values); -//// // evaluate all children and multiply into result -//// for(boost::shared_ptr c: children_) -//// result *= c->evaluate(values); -//// return result; -//// } -//// -////}; -// -////typedef BayesTreeOrdered DiscreteBayesTree; -//// -/////* ************************************************************************* */ -////double evaluate(const DiscreteBayesTree& tree, -//// const DiscreteConditional::Values & values) { -//// return tree.root()->evaluate(values); -////} -// -///* ************************************************************************* */ -// -//TEST_UNSAFE( DiscreteBayesTree, thinTree ) { -// -// const int nrNodes = 15; -// const size_t nrStates = 2; -// -// // define variables -// vector key; -// for (int i = 0; i < nrNodes; i++) { -// DiscreteKey key_i(i, nrStates); -// key.push_back(key_i); -// } -// -// // create a thin-tree Bayesnet, a la Jean-Guillaume -// DiscreteBayesNet bayesNet; -// bayesNet.add(key[14] % "1/3"); -// -// bayesNet.add(key[13] | key[14] = "1/3 3/1"); -// bayesNet.add(key[12] | key[14] = "3/1 3/1"); -// -// bayesNet.add((key[11] | key[13], key[14]) = "1/4 2/3 3/2 4/1"); -// bayesNet.add((key[10] | key[13], key[14]) = "1/4 3/2 2/3 4/1"); -// bayesNet.add((key[9] | key[12], key[14]) = "4/1 2/3 F 1/4"); -// bayesNet.add((key[8] | key[12], key[14]) = "T 1/4 3/2 4/1"); -// -// bayesNet.add((key[7] | key[11], key[13]) = "1/4 2/3 3/2 4/1"); -// bayesNet.add((key[6] | key[11], key[13]) = "1/4 3/2 2/3 4/1"); -// bayesNet.add((key[5] | key[10], key[13]) = "4/1 2/3 3/2 1/4"); -// bayesNet.add((key[4] | key[10], key[13]) = "2/3 1/4 3/2 4/1"); -// -// bayesNet.add((key[3] | key[9], key[12]) = "1/4 2/3 3/2 4/1"); -// bayesNet.add((key[2] | key[9], key[12]) = "1/4 8/2 2/3 4/1"); -// bayesNet.add((key[1] | key[8], key[12]) = "4/1 2/3 3/2 1/4"); -// bayesNet.add((key[0] | key[8], key[12]) = "2/3 1/4 3/2 4/1"); -// -//// if (debug) { -//// GTSAM_PRINT(bayesNet); -//// bayesNet.saveGraph("/tmp/discreteBayesNet.dot"); -//// } -// -// // create a BayesTree out of a Bayes net -// DiscreteBayesTree bayesTree(bayesNet); -// if (debug) { -// GTSAM_PRINT(bayesTree); -// 
bayesTree.saveGraph("/tmp/discreteBayesTree.dot"); -// } -// -// // Check whether BN and BT give the same answer on all configurations -// // Also calculate all some marginals -// Vector marginals = zero(15); -// double joint_12_14 = 0, joint_9_12_14 = 0, joint_8_12_14 = 0, joint_8_12 = 0, -// joint82 = 0, joint12 = 0, joint24 = 0, joint45 = 0, joint46 = 0, -// joint_4_11 = 0; -// vector allPosbValues = cartesianProduct( -// key[0] & key[1] & key[2] & key[3] & key[4] & key[5] & key[6] & key[7] -// & key[8] & key[9] & key[10] & key[11] & key[12] & key[13] & key[14]); -// for (size_t i = 0; i < allPosbValues.size(); ++i) { -// DiscreteFactor::Values x = allPosbValues[i]; -// double expected = evaluate(bayesNet, x); -// double actual = evaluate(bayesTree, x); -// DOUBLES_EQUAL(expected, actual, 1e-9); -// // collect marginals -// for (size_t i = 0; i < 15; i++) -// if (x[i]) -// marginals[i] += actual; -// // calculate shortcut 8 and 0 -// if (x[12] && x[14]) -// joint_12_14 += actual; -// if (x[9] && x[12] & x[14]) -// joint_9_12_14 += actual; -// if (x[8] && x[12] & x[14]) -// joint_8_12_14 += actual; -// if (x[8] && x[12]) -// joint_8_12 += actual; -// if (x[8] && x[2]) -// joint82 += actual; -// if (x[1] && x[2]) -// joint12 += actual; -// if (x[2] && x[4]) -// joint24 += actual; -// if (x[4] && x[5]) -// joint45 += actual; -// if (x[4] && x[6]) -// joint46 += actual; -// if (x[4] && x[11]) -// joint_4_11 += actual; -// } -// DiscreteFactor::Values all1 = allPosbValues.back(); -// -// Clique::shared_ptr R = bayesTree.root(); -// -// // check separator marginal P(S0) -// Clique::shared_ptr c = bayesTree[0]; -// DiscreteFactorGraph separatorMarginal0 = c->separatorMarginal(R, -// EliminateDiscrete); -// EXPECT_DOUBLES_EQUAL(joint_8_12, separatorMarginal0(all1), 1e-9); -// -// // check separator marginal P(S9), should be P(14) -// c = bayesTree[9]; -// DiscreteFactorGraph separatorMarginal9 = c->separatorMarginal(R, -// EliminateDiscrete); -// EXPECT_DOUBLES_EQUAL(marginals[14], separatorMarginal9(all1), 1e-9); -// -// // check separator marginal of root, should be empty -// c = bayesTree[11]; -// DiscreteFactorGraph separatorMarginal11 = c->separatorMarginal(R, -// EliminateDiscrete); -// EXPECT_LONGS_EQUAL(0, separatorMarginal11.size()); -// -// // check shortcut P(S9||R) to root -// c = bayesTree[9]; -// DiscreteBayesNet shortcut = c->shortcut(R, EliminateDiscrete); -// EXPECT_LONGS_EQUAL(0, shortcut.size()); -// -// // check shortcut P(S8||R) to root -// c = bayesTree[8]; -// shortcut = c->shortcut(R, EliminateDiscrete); -// EXPECT_DOUBLES_EQUAL(joint_12_14/marginals[14], evaluate(shortcut,all1), -// 1e-9); -// -// // check shortcut P(S2||R) to root -// c = bayesTree[2]; -// shortcut = c->shortcut(R, EliminateDiscrete); -// EXPECT_DOUBLES_EQUAL(joint_9_12_14/marginals[14], evaluate(shortcut,all1), -// 1e-9); -// -// // check shortcut P(S0||R) to root -// c = bayesTree[0]; -// shortcut = c->shortcut(R, EliminateDiscrete); -// EXPECT_DOUBLES_EQUAL(joint_8_12_14/marginals[14], evaluate(shortcut,all1), -// 1e-9); -// -// // calculate all shortcuts to root -// DiscreteBayesTree::Nodes cliques = bayesTree.nodes(); -// for(Clique::shared_ptr c: cliques) { -// DiscreteBayesNet shortcut = c->shortcut(R, EliminateDiscrete); -// if (debug) { -// c->printSignature(); -// shortcut.print("shortcut:"); -// } -// } -// -// // Check all marginals -// DiscreteFactor::shared_ptr marginalFactor; -// for (size_t i = 0; i < 15; i++) { -// marginalFactor = bayesTree.marginalFactor(i, EliminateDiscrete); -// 
double actual = (*marginalFactor)(all1); -// EXPECT_DOUBLES_EQUAL(marginals[i], actual, 1e-9); -// } -// -// DiscreteBayesNet::shared_ptr actualJoint; -// -// // Check joint P(8,2) TODO: not disjoint ! -//// actualJoint = bayesTree.jointBayesNet(8, 2, EliminateDiscrete); -//// EXPECT_DOUBLES_EQUAL(joint82, evaluate(*actualJoint,all1), 1e-9); -// -// // Check joint P(1,2) TODO: not disjoint ! -//// actualJoint = bayesTree.jointBayesNet(1, 2, EliminateDiscrete); -//// EXPECT_DOUBLES_EQUAL(joint12, evaluate(*actualJoint,all1), 1e-9); -// -// // Check joint P(2,4) -// actualJoint = bayesTree.jointBayesNet(2, 4, EliminateDiscrete); -// EXPECT_DOUBLES_EQUAL(joint24, evaluate(*actualJoint,all1), 1e-9); -// -// // Check joint P(4,5) TODO: not disjoint ! -//// actualJoint = bayesTree.jointBayesNet(4, 5, EliminateDiscrete); -//// EXPECT_DOUBLES_EQUAL(joint46, evaluate(*actualJoint,all1), 1e-9); -// -// // Check joint P(4,6) TODO: not disjoint ! -//// actualJoint = bayesTree.jointBayesNet(4, 6, EliminateDiscrete); -//// EXPECT_DOUBLES_EQUAL(joint46, evaluate(*actualJoint,all1), 1e-9); -// -// // Check joint P(4,11) -// actualJoint = bayesTree.jointBayesNet(4, 11, EliminateDiscrete); -// EXPECT_DOUBLES_EQUAL(joint_4_11, evaluate(*actualJoint,all1), 1e-9); -// -//} + +#include + +using namespace std; +using namespace gtsam; + +static bool debug = false; + +/* ************************************************************************* */ + +TEST_UNSAFE(DiscreteBayesTree, ThinTree) { + const int nrNodes = 15; + const size_t nrStates = 2; + + // define variables + vector key; + for (int i = 0; i < nrNodes; i++) { + DiscreteKey key_i(i, nrStates); + key.push_back(key_i); + } + + // create a thin-tree Bayesnet, a la Jean-Guillaume + DiscreteBayesNet bayesNet; + bayesNet.add(key[14] % "1/3"); + + bayesNet.add(key[13] | key[14] = "1/3 3/1"); + bayesNet.add(key[12] | key[14] = "3/1 3/1"); + + bayesNet.add((key[11] | key[13], key[14]) = "1/4 2/3 3/2 4/1"); + bayesNet.add((key[10] | key[13], key[14]) = "1/4 3/2 2/3 4/1"); + bayesNet.add((key[9] | key[12], key[14]) = "4/1 2/3 F 1/4"); + bayesNet.add((key[8] | key[12], key[14]) = "T 1/4 3/2 4/1"); + + bayesNet.add((key[7] | key[11], key[13]) = "1/4 2/3 3/2 4/1"); + bayesNet.add((key[6] | key[11], key[13]) = "1/4 3/2 2/3 4/1"); + bayesNet.add((key[5] | key[10], key[13]) = "4/1 2/3 3/2 1/4"); + bayesNet.add((key[4] | key[10], key[13]) = "2/3 1/4 3/2 4/1"); + + bayesNet.add((key[3] | key[9], key[12]) = "1/4 2/3 3/2 4/1"); + bayesNet.add((key[2] | key[9], key[12]) = "1/4 8/2 2/3 4/1"); + bayesNet.add((key[1] | key[8], key[12]) = "4/1 2/3 3/2 1/4"); + bayesNet.add((key[0] | key[8], key[12]) = "2/3 1/4 3/2 4/1"); + + if (debug) { + GTSAM_PRINT(bayesNet); + bayesNet.saveGraph("/tmp/discreteBayesNet.dot"); + } + + // create a BayesTree out of a Bayes net + auto bayesTree = DiscreteFactorGraph(bayesNet).eliminateMultifrontal(); + if (debug) { + GTSAM_PRINT(*bayesTree); + bayesTree->saveGraph("/tmp/discreteBayesTree.dot"); + } + + // Check frontals and parents + for (size_t i : {13, 14, 9, 3, 2, 8, 1, 0, 10, 5, 4}) { + auto clique_i = (*bayesTree)[i]; + EXPECT_LONGS_EQUAL(i, *(clique_i->conditional_->beginFrontals())); + } + + auto R = bayesTree->roots().front(); + + // Check whether BN and BT give the same answer on all configurations + vector allPosbValues = cartesianProduct( + key[0] & key[1] & key[2] & key[3] & key[4] & key[5] & key[6] & key[7] & + key[8] & key[9] & key[10] & key[11] & key[12] & key[13] & key[14]); + for (size_t i = 0; i < allPosbValues.size(); ++i) { + 
DiscreteFactor::Values x = allPosbValues[i]; + double expected = bayesNet.evaluate(x); + double actual = bayesTree->evaluate(x); + DOUBLES_EQUAL(expected, actual, 1e-9); + } + + // Calculate all some marginals for Values==all1 + Vector marginals = Vector::Zero(15); + double joint_12_14 = 0, joint_9_12_14 = 0, joint_8_12_14 = 0, joint_8_12 = 0, + joint82 = 0, joint12 = 0, joint24 = 0, joint45 = 0, joint46 = 0, + joint_4_11 = 0, joint_11_13 = 0, joint_11_13_14 = 0, + joint_11_12_13_14 = 0, joint_9_11_12_13 = 0, joint_8_11_12_13 = 0; + for (size_t i = 0; i < allPosbValues.size(); ++i) { + DiscreteFactor::Values x = allPosbValues[i]; + double px = bayesTree->evaluate(x); + for (size_t i = 0; i < 15; i++) + if (x[i]) marginals[i] += px; + if (x[12] && x[14]) { + joint_12_14 += px; + if (x[9]) joint_9_12_14 += px; + if (x[8]) joint_8_12_14 += px; + } + if (x[8] && x[12]) joint_8_12 += px; + if (x[2]) { + if (x[8]) joint82 += px; + if (x[1]) joint12 += px; + } + if (x[4]) { + if (x[2]) joint24 += px; + if (x[5]) joint45 += px; + if (x[6]) joint46 += px; + if (x[11]) joint_4_11 += px; + } + if (x[11] && x[13]) { + joint_11_13 += px; + if (x[8] && x[12]) joint_8_11_12_13 += px; + if (x[9] && x[12]) joint_9_11_12_13 += px; + if (x[14]) { + joint_11_13_14 += px; + if (x[12]) { + joint_11_12_13_14 += px; + } + } + } + } + DiscreteFactor::Values all1 = allPosbValues.back(); + + // check separator marginal P(S0) + auto clique = (*bayesTree)[0]; + DiscreteFactorGraph separatorMarginal0 = + clique->separatorMarginal(EliminateDiscrete); + DOUBLES_EQUAL(joint_8_12, separatorMarginal0(all1), 1e-9); + + // check separator marginal P(S9), should be P(14) + clique = (*bayesTree)[9]; + DiscreteFactorGraph separatorMarginal9 = + clique->separatorMarginal(EliminateDiscrete); + DOUBLES_EQUAL(marginals[14], separatorMarginal9(all1), 1e-9); + + // check separator marginal of root, should be empty + clique = (*bayesTree)[11]; + DiscreteFactorGraph separatorMarginal11 = + clique->separatorMarginal(EliminateDiscrete); + LONGS_EQUAL(0, separatorMarginal11.size()); + + // check shortcut P(S9||R) to root + clique = (*bayesTree)[9]; + DiscreteBayesNet shortcut = clique->shortcut(R, EliminateDiscrete); + LONGS_EQUAL(1, shortcut.size()); + DOUBLES_EQUAL(joint_11_13_14 / joint_11_13, shortcut.evaluate(all1), 1e-9); + + // check shortcut P(S8||R) to root + clique = (*bayesTree)[8]; + shortcut = clique->shortcut(R, EliminateDiscrete); + DOUBLES_EQUAL(joint_11_12_13_14 / joint_11_13, shortcut.evaluate(all1), 1e-9); + + // check shortcut P(S2||R) to root + clique = (*bayesTree)[2]; + shortcut = clique->shortcut(R, EliminateDiscrete); + DOUBLES_EQUAL(joint_9_11_12_13 / joint_11_13, shortcut.evaluate(all1), 1e-9); + + // check shortcut P(S0||R) to root + clique = (*bayesTree)[0]; + shortcut = clique->shortcut(R, EliminateDiscrete); + DOUBLES_EQUAL(joint_8_11_12_13 / joint_11_13, shortcut.evaluate(all1), 1e-9); + + // calculate all shortcuts to root + DiscreteBayesTree::Nodes cliques = bayesTree->nodes(); + for (auto clique : cliques) { + DiscreteBayesNet shortcut = clique.second->shortcut(R, EliminateDiscrete); + if (debug) { + clique.second->conditional_->printSignature(); + shortcut.print("shortcut:"); + } + } + + // Check all marginals + DiscreteFactor::shared_ptr marginalFactor; + for (size_t i = 0; i < 15; i++) { + marginalFactor = bayesTree->marginalFactor(i, EliminateDiscrete); + double actual = (*marginalFactor)(all1); + DOUBLES_EQUAL(marginals[i], actual, 1e-9); + } + + DiscreteBayesNet::shared_ptr actualJoint; + + // Check 
joint P(8, 2) + actualJoint = bayesTree->jointBayesNet(8, 2, EliminateDiscrete); + DOUBLES_EQUAL(joint82, actualJoint->evaluate(all1), 1e-9); + + // Check joint P(1, 2) + actualJoint = bayesTree->jointBayesNet(1, 2, EliminateDiscrete); + DOUBLES_EQUAL(joint12, actualJoint->evaluate(all1), 1e-9); + + // Check joint P(2, 4) + actualJoint = bayesTree->jointBayesNet(2, 4, EliminateDiscrete); + DOUBLES_EQUAL(joint24, actualJoint->evaluate(all1), 1e-9); + + // Check joint P(4, 5) + actualJoint = bayesTree->jointBayesNet(4, 5, EliminateDiscrete); + DOUBLES_EQUAL(joint45, actualJoint->evaluate(all1), 1e-9); + + // Check joint P(4, 6) + actualJoint = bayesTree->jointBayesNet(4, 6, EliminateDiscrete); + DOUBLES_EQUAL(joint46, actualJoint->evaluate(all1), 1e-9); + + // Check joint P(4, 11) + actualJoint = bayesTree->jointBayesNet(4, 11, EliminateDiscrete); + DOUBLES_EQUAL(joint_4_11, actualJoint->evaluate(all1), 1e-9); +} /* ************************************************************************* */ int main() { @@ -263,4 +230,3 @@ int main() { return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ - diff --git a/gtsam/discrete/tests/testDiscreteBayesTree.pdf b/gtsam/discrete/tests/testDiscreteBayesTree.pdf new file mode 100644 index 000000000..e8167d455 Binary files /dev/null and b/gtsam/discrete/tests/testDiscreteBayesTree.pdf differ diff --git a/gtsam/discrete/tests/testDiscreteConditional.cpp b/gtsam/discrete/tests/testDiscreteConditional.cpp index 888bf76df..3ac3ffc9e 100644 --- a/gtsam/discrete/tests/testDiscreteConditional.cpp +++ b/gtsam/discrete/tests/testDiscreteConditional.cpp @@ -16,9 +16,9 @@ * @date Feb 14, 2011 */ -#include #include #include +#include using namespace boost::assign; #include @@ -36,6 +36,11 @@ TEST( DiscreteConditional, constructors) DiscreteConditional::shared_ptr expected1 = // boost::make_shared(X | Y = "1/1 2/3 1/4"); EXPECT(expected1); + EXPECT_LONGS_EQUAL(0, *(expected1->beginFrontals())); + EXPECT_LONGS_EQUAL(2, *(expected1->beginParents())); + EXPECT(expected1->endParents() == expected1->end()); + EXPECT(expected1->endFrontals() == expected1->beginParents()); + DecisionTreeFactor f1(X & Y, "0.5 0.4 0.2 0.5 0.6 0.8"); DiscreteConditional actual1(1, f1); EXPECT(assert_equal(*expected1, actual1, 1e-9)); @@ -43,71 +48,68 @@ TEST( DiscreteConditional, constructors) DecisionTreeFactor f2(X & Y & Z, "0.2 0.5 0.3 0.6 0.4 0.7 0.25 0.55 0.35 0.65 0.45 0.75"); DiscreteConditional actual2(1, f2); - DecisionTreeFactor::shared_ptr actual2factor = actual2.toFactor(); -// EXPECT(assert_equal(f2, *actual2factor, 1e-9)); + EXPECT(assert_equal(f2 / *f2.sum(1), *actual2.toFactor(), 1e-9)); } /* ************************************************************************* */ -TEST( DiscreteConditional, constructors_alt_interface) -{ - DiscreteKey X(0, 2), Y(2, 3), Z(1, 2); // watch ordering ! +TEST(DiscreteConditional, constructors_alt_interface) { + DiscreteKey X(0, 2), Y(2, 3), Z(1, 2); // watch ordering ! 
Signature::Table table; Signature::Row r1, r2, r3; - r1 += 1.0, 1.0; r2 += 2.0, 3.0; r3 += 1.0, 4.0; + r1 += 1.0, 1.0; + r2 += 2.0, 3.0; + r3 += 1.0, 4.0; table += r1, r2, r3; - DiscreteConditional::shared_ptr expected1 = // - boost::make_shared(X | Y = table); - EXPECT(expected1); + auto actual1 = boost::make_shared(X | Y = table); + EXPECT(actual1); DecisionTreeFactor f1(X & Y, "0.5 0.4 0.2 0.5 0.6 0.8"); - DiscreteConditional actual1(1, f1); - EXPECT(assert_equal(*expected1, actual1, 1e-9)); + DiscreteConditional expected1(1, f1); + EXPECT(assert_equal(expected1, *actual1, 1e-9)); - DecisionTreeFactor f2(X & Y & Z, - "0.2 0.5 0.3 0.6 0.4 0.7 0.25 0.55 0.35 0.65 0.45 0.75"); + DecisionTreeFactor f2( + X & Y & Z, "0.2 0.5 0.3 0.6 0.4 0.7 0.25 0.55 0.35 0.65 0.45 0.75"); DiscreteConditional actual2(1, f2); - DecisionTreeFactor::shared_ptr actual2factor = actual2.toFactor(); -// EXPECT(assert_equal(f2, *actual2factor, 1e-9)); + EXPECT(assert_equal(f2 / *f2.sum(1), *actual2.toFactor(), 1e-9)); } /* ************************************************************************* */ -TEST( DiscreteConditional, constructors2) -{ +TEST(DiscreteConditional, constructors2) { // Declare keys and ordering - DiscreteKey C(0,2), B(1,2); - DecisionTreeFactor expected(C & B, "0.8 0.75 0.2 0.25"); + DiscreteKey C(0, 2), B(1, 2); + DecisionTreeFactor actual(C & B, "0.8 0.75 0.2 0.25"); Signature signature((C | B) = "4/1 3/1"); - DiscreteConditional actual(signature); - DecisionTreeFactor::shared_ptr actualFactor = actual.toFactor(); - EXPECT(assert_equal(expected, *actualFactor)); + DiscreteConditional expected(signature); + DecisionTreeFactor::shared_ptr expectedFactor = expected.toFactor(); + EXPECT(assert_equal(*expectedFactor, actual)); } /* ************************************************************************* */ -TEST( DiscreteConditional, constructors3) -{ +TEST(DiscreteConditional, constructors3) { // Declare keys and ordering - DiscreteKey C(0,2), B(1,2), A(2,2); - DecisionTreeFactor expected(C & B & A, "0.8 0.5 0.5 0.2 0.2 0.5 0.5 0.8"); + DiscreteKey C(0, 2), B(1, 2), A(2, 2); + DecisionTreeFactor actual(C & B & A, "0.8 0.5 0.5 0.2 0.2 0.5 0.5 0.8"); Signature signature((C | B, A) = "4/1 1/1 1/1 1/4"); - DiscreteConditional actual(signature); - DecisionTreeFactor::shared_ptr actualFactor = actual.toFactor(); - EXPECT(assert_equal(expected, *actualFactor)); + DiscreteConditional expected(signature); + DecisionTreeFactor::shared_ptr expectedFactor = expected.toFactor(); + EXPECT(assert_equal(*expectedFactor, actual)); } /* ************************************************************************* */ -TEST( DiscreteConditional, Combine) { +TEST(DiscreteConditional, Combine) { DiscreteKey A(0, 2), B(1, 2); vector c; c.push_back(boost::make_shared(A | B = "1/2 2/1")); c.push_back(boost::make_shared(B % "1/2")); DecisionTreeFactor factor(A & B, "0.111111 0.444444 0.222222 0.222222"); - DiscreteConditional expected(2, factor); - DiscreteConditional::shared_ptr actual = DiscreteConditional::Combine( - c.begin(), c.end()); - EXPECT(assert_equal(expected, *actual,1e-5)); + DiscreteConditional actual(2, factor); + auto expected = DiscreteConditional::Combine(c.begin(), c.end()); + EXPECT(assert_equal(*expected, actual, 1e-5)); } /* ************************************************************************* */ -int main() { TestResult tr; return TestRegistry::runAllTests(tr); } +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} /* 
************************************************************************* */ - diff --git a/gtsam/discrete/tests/testDiscreteFactorGraph.cpp b/gtsam/discrete/tests/testDiscreteFactorGraph.cpp index 0fbf44097..1defd5acf 100644 --- a/gtsam/discrete/tests/testDiscreteFactorGraph.cpp +++ b/gtsam/discrete/tests/testDiscreteFactorGraph.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include diff --git a/gtsam/discrete/tests/testDiscreteMarginals.cpp b/gtsam/discrete/tests/testDiscreteMarginals.cpp index 4e9f956b6..e1eb92af3 100644 --- a/gtsam/discrete/tests/testDiscreteMarginals.cpp +++ b/gtsam/discrete/tests/testDiscreteMarginals.cpp @@ -146,8 +146,7 @@ TEST_UNSAFE( DiscreteMarginals, truss ) { /* ************************************************************************* */ // Second truss example with non-trivial factors -TEST_UNSAFE( DiscreteMarginals, truss2 ) { - +TEST_UNSAFE(DiscreteMarginals, truss2) { const int nrNodes = 5; const size_t nrStates = 2; @@ -160,40 +159,39 @@ TEST_UNSAFE( DiscreteMarginals, truss2 ) { // create graph and add three truss potentials DiscreteFactorGraph graph; - graph.add(key[0] & key[2] & key[4],"1 2 3 4 5 6 7 8"); - graph.add(key[1] & key[3] & key[4],"1 2 3 4 5 6 7 8"); - graph.add(key[2] & key[3] & key[4],"1 2 3 4 5 6 7 8"); + graph.add(key[0] & key[2] & key[4], "1 2 3 4 5 6 7 8"); + graph.add(key[1] & key[3] & key[4], "1 2 3 4 5 6 7 8"); + graph.add(key[2] & key[3] & key[4], "1 2 3 4 5 6 7 8"); // Calculate the marginals by brute force - vector allPosbValues = cartesianProduct( - key[0] & key[1] & key[2] & key[3] & key[4]); + vector allPosbValues = + cartesianProduct(key[0] & key[1] & key[2] & key[3] & key[4]); Vector T = Z_5x1, F = Z_5x1; for (size_t i = 0; i < allPosbValues.size(); ++i) { DiscreteFactor::Values x = allPosbValues[i]; double px = graph(x); - for (size_t j=0;j<5;j++) - if (x[j]) T[j]+=px; else F[j]+=px; - // cout << x[0] << " " << x[1] << " "<< x[2] << " " << x[3] << " " << x[4] << " :\t" << px << endl; + for (size_t j = 0; j < 5; j++) + if (x[j]) + T[j] += px; + else + F[j] += px; } // Check all marginals given by a sequential solver and Marginals -// DiscreteSequentialSolver solver(graph); + // DiscreteSequentialSolver solver(graph); DiscreteMarginals marginals(graph); - for (size_t j=0;j<5;j++) { - double sum = T[j]+F[j]; - T[j]/=sum; - F[j]/=sum; - -// // solver -// Vector actualV = solver.marginalProbabilities(key[j]); -// EXPECT(assert_equal((Vector(2) << F[j], T[j]), actualV)); + for (size_t j = 0; j < 5; j++) { + double sum = T[j] + F[j]; + T[j] /= sum; + F[j] /= sum; // Marginals vector table; - table += F[j],T[j]; - DecisionTreeFactor expectedM(key[j],table); + table += F[j], T[j]; + DecisionTreeFactor expectedM(key[j], table); DiscreteFactor::shared_ptr actualM = marginals(j); - EXPECT(assert_equal(expectedM, *boost::dynamic_pointer_cast(actualM))); + EXPECT(assert_equal( + expectedM, *boost::dynamic_pointer_cast(actualM))); } } diff --git a/gtsam/discrete/tests/testSignature.cpp b/gtsam/discrete/tests/testSignature.cpp index de47a00f3..049c455f7 100644 --- a/gtsam/discrete/tests/testSignature.cpp +++ b/gtsam/discrete/tests/testSignature.cpp @@ -11,36 +11,43 @@ /** * @file testSignature - * @brief Tests focusing on the details of Signatures to evaluate boost compliance + * @brief Tests focusing on the details of Signatures to evaluate boost + * compliance * @author Alex Cunningham * @date Sept 19th 2011 */ -#include #include - #include #include +#include +#include + using namespace std; using namespace gtsam; using 
namespace boost::assign; -DiscreteKey X(0,2), Y(1,3), Z(2,2); +DiscreteKey X(0, 2), Y(1, 3), Z(2, 2); /* ************************************************************************* */ TEST(testSignature, simple_conditional) { Signature sig(X | Y = "1/1 2/3 1/4"); + Signature::Table table = *sig.table(); + vector row[3]{{0.5, 0.5}, {0.4, 0.6}, {0.2, 0.8}}; + CHECK(row[0] == table[0]); + CHECK(row[1] == table[1]); + CHECK(row[2] == table[2]); DiscreteKey actKey = sig.key(); - LONGS_EQUAL((long)X.first, (long)actKey.first); + LONGS_EQUAL(X.first, actKey.first); - DiscreteKeys actKeys = sig.discreteKeysParentsFirst(); - LONGS_EQUAL(2, (long)actKeys.size()); - LONGS_EQUAL((long)Y.first, (long)actKeys.front().first); - LONGS_EQUAL((long)X.first, (long)actKeys.back().first); + DiscreteKeys actKeys = sig.discreteKeys(); + LONGS_EQUAL(2, actKeys.size()); + LONGS_EQUAL(X.first, actKeys.front().first); + LONGS_EQUAL(Y.first, actKeys.back().first); vector actCpt = sig.cpt(); - EXPECT_LONGS_EQUAL(6, (long)actCpt.size()); + EXPECT_LONGS_EQUAL(6, actCpt.size()); } /* ************************************************************************* */ @@ -54,17 +61,20 @@ TEST(testSignature, simple_conditional_nonparser) { Signature sig(X | Y = table); DiscreteKey actKey = sig.key(); - EXPECT_LONGS_EQUAL((long)X.first, (long)actKey.first); + EXPECT_LONGS_EQUAL(X.first, actKey.first); - DiscreteKeys actKeys = sig.discreteKeysParentsFirst(); - LONGS_EQUAL(2, (long)actKeys.size()); - LONGS_EQUAL((long)Y.first, (long)actKeys.front().first); - LONGS_EQUAL((long)X.first, (long)actKeys.back().first); + DiscreteKeys actKeys = sig.discreteKeys(); + LONGS_EQUAL(2, actKeys.size()); + LONGS_EQUAL(X.first, actKeys.front().first); + LONGS_EQUAL(Y.first, actKeys.back().first); vector actCpt = sig.cpt(); - EXPECT_LONGS_EQUAL(6, (long)actCpt.size()); + EXPECT_LONGS_EQUAL(6, actCpt.size()); } /* ************************************************************************* */ -int main() { TestResult tr; return TestRegistry::runAllTests(tr); } +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} /* ************************************************************************* */ diff --git a/gtsam/geometry/BearingRange.h b/gtsam/geometry/BearingRange.h index 7c73f3cbd..8db7abffe 100644 --- a/gtsam/geometry/BearingRange.h +++ b/gtsam/geometry/BearingRange.h @@ -162,7 +162,7 @@ struct BearingRange { NeedsToAlign = (sizeof(B) % 16) == 0 || (sizeof(R) % 16) == 0 }; public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) + GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) }; // Declare this to be both Testable and a Manifold diff --git a/gtsam/geometry/Cal3.cpp b/gtsam/geometry/Cal3.cpp new file mode 100644 index 000000000..41de47f46 --- /dev/null +++ b/gtsam/geometry/Cal3.cpp @@ -0,0 +1,75 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file Cal3.cpp + * @brief Common code for all calibration models. 
+ * @author Frank Dellaert + */ + +#include + +#include +#include +#include + +namespace gtsam { + +/* ************************************************************************* */ +Cal3::Cal3(double fov, int w, int h) + : s_(0), u0_((double)w / 2.0), v0_((double)h / 2.0) { + double a = fov * M_PI / 360.0; // fov/2 in radians + fx_ = double(w) / (2.0 * tan(a)); + fy_ = fx_; +} + +/* ************************************************************************* */ +Cal3::Cal3(const std::string& path) { + const auto buffer = path + std::string("/calibration_info.txt"); + std::ifstream infile(buffer, std::ios::in); + + if (infile && !infile.eof()) { + infile >> fx_ >> fy_ >> s_ >> u0_ >> v0_; + } else { + throw std::runtime_error("Cal3: Unable to load the calibration"); + } + + infile.close(); +} + +/* ************************************************************************* */ +std::ostream& operator<<(std::ostream& os, const Cal3& cal) { + os << "fx: " << cal.fx() << ", fy: " << cal.fy() << ", s: " << cal.skew() + << ", px: " << cal.px() << ", py: " << cal.py(); + return os; +} + +/* ************************************************************************* */ +void Cal3::print(const std::string& s) const { gtsam::print((Matrix)K(), s); } + +/* ************************************************************************* */ +bool Cal3::equals(const Cal3& K, double tol) const { + return (std::fabs(fx_ - K.fx_) < tol && std::fabs(fy_ - K.fy_) < tol && + std::fabs(s_ - K.s_) < tol && std::fabs(u0_ - K.u0_) < tol && + std::fabs(v0_ - K.v0_) < tol); +} + +Matrix3 Cal3::inverse() const { + const double fxy = fx_ * fy_, sv0 = s_ * v0_, fyu0 = fy_ * u0_; + Matrix3 K_inverse; + K_inverse << 1.0 / fx_, -s_ / fxy, (sv0 - fyu0) / fxy, 0.0, 1.0 / fy_, + -v0_ / fy_, 0.0, 0.0, 1.0; + return K_inverse; +} + +/* ************************************************************************* */ + +} // \ namespace gtsam diff --git a/gtsam/geometry/Cal3.h b/gtsam/geometry/Cal3.h new file mode 100644 index 000000000..08ce4c1e6 --- /dev/null +++ b/gtsam/geometry/Cal3.h @@ -0,0 +1,206 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file Cal3.h + * @brief Common code for all Calibration models. + * @author Varun Agrawal + */ + +/** + * @addtogroup geometry + */ + +#pragma once + +#include + +namespace gtsam { + +/** + * Function which makes use of the Implicit Function Theorem to compute the + * Jacobians of `calibrate` using `uncalibrate`. + * This is useful when there are iterative operations in the `calibrate` + * function which make computing jacobians difficult. + * + * Given f(pi, pn) = uncalibrate(pn) - pi, and g(pi) = calibrate, we can + * easily compute the Jacobians: + * df/pi = -I (pn and pi are independent args) + * Dp = -inv(H_uncal_pn) * df/pi = -inv(H_uncal_pn) * (-I) = inv(H_uncal_pn) + * Dcal = -inv(H_uncal_pn) * df/K = -inv(H_uncal_pn) * H_uncal_K + * + * @tparam Cal Calibration model. + * @tparam Dim The number of parameters in the calibration model. + * @param p Calibrated point. + * @param Dcal optional 2*p Jacobian wrpt `p` Cal3DS2 parameters. + * @param Dp optional 2*2 Jacobian wrpt intrinsic coordinates. 
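 *
 * Editor's note (illustrative derivation, not part of this patch): the same
 * result follows by differentiating the identity
 *   uncalibrate(calibrate(pi; K); K) = pi
 * with respect to pi and to the calibration parameters K:
 *   H_uncal_pn * Dp = I                =>  Dp   =  inv(H_uncal_pn)
 *   H_uncal_pn * Dcal + H_uncal_K = 0  =>  Dcal = -inv(H_uncal_pn) * H_uncal_K
 * which is exactly what this helper computes from the uncalibrate Jacobians.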
+ */ +template +void calibrateJacobians(const Cal& calibration, const Point2& pn, + OptionalJacobian<2, Dim> Dcal = boost::none, + OptionalJacobian<2, 2> Dp = boost::none) { + if (Dcal || Dp) { + Eigen::Matrix H_uncal_K; + Matrix22 H_uncal_pn, H_uncal_pn_inv; + + // Compute uncalibrate Jacobians + calibration.uncalibrate(pn, Dcal ? &H_uncal_K : nullptr, H_uncal_pn); + + H_uncal_pn_inv = H_uncal_pn.inverse(); + + if (Dp) *Dp = H_uncal_pn_inv; + if (Dcal) *Dcal = -H_uncal_pn_inv * H_uncal_K; + } +} + +/** + * @brief Common base class for all calibration models. + * @addtogroup geometry + * \nosubgrouping + */ +class GTSAM_EXPORT Cal3 { + protected: + double fx_ = 1.0f, fy_ = 1.0f; ///< focal length + double s_ = 0.0f; ///< skew + double u0_ = 0.0f, v0_ = 0.0f; ///< principal point + + public: + enum { dimension = 5 }; + ///< shared pointer to calibration object + using shared_ptr = boost::shared_ptr; + + /// @name Standard Constructors + /// @{ + + /// Create a default calibration that leaves coordinates unchanged + Cal3() = default; + + /// constructor from doubles + Cal3(double fx, double fy, double s, double u0, double v0) + : fx_(fx), fy_(fy), s_(s), u0_(u0), v0_(v0) {} + + /// constructor from vector + Cal3(const Vector5& d) + : fx_(d(0)), fy_(d(1)), s_(d(2)), u0_(d(3)), v0_(d(4)) {} + + /** + * Easy constructor, takes fov in degrees, asssumes zero skew, unit aspect + * @param fov field of view in degrees + * @param w image width + * @param h image height + */ + Cal3(double fov, int w, int h); + + /// Virtual destructor + virtual ~Cal3() {} + + /// @} + /// @name Advanced Constructors + /// @{ + + /** + * Load calibration parameters from `calibration_info.txt` file located in + * `path` directory. + * + * The contents of calibration file should be the 5 parameters in order: + * `fx, fy, s, u0, v0` + * + * @param path path to directory containing `calibration_info.txt`. 
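 *
 * Editor's note (hypothetical example, not part of this patch): the file is
 * expected to hold the five whitespace-separated values read by the
 * implementation, e.g. a line such as
 *   500.0 500.0 0.0 320.0 240.0
 * would yield fx = fy = 500, zero skew, and principal point (320, 240).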
+ */ + Cal3(const std::string& path); + + /// @} + /// @name Testable + /// @{ + + /// Output stream operator + GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Cal3& cal); + + /// print with optional string + virtual void print(const std::string& s = "") const; + + /// Check if equal up to specified tolerance + bool equals(const Cal3& K, double tol = 10e-9) const; + + /// @} + /// @name Standard Interface + /// @{ + + /// focal length x + inline double fx() const { return fx_; } + + /// focal length y + inline double fy() const { return fy_; } + + /// aspect ratio + inline double aspectRatio() const { return fx_ / fy_; } + + /// skew + inline double skew() const { return s_; } + + /// image center in x + inline double px() const { return u0_; } + + /// image center in y + inline double py() const { return v0_; } + + /// return the principal point + Point2 principalPoint() const { return Point2(u0_, v0_); } + + /// vectorized form (column-wise) + Vector5 vector() const { + Vector5 v; + v << fx_, fy_, s_, u0_, v0_; + return v; + } + + /// return calibration matrix K + virtual Matrix3 K() const { + Matrix3 K; + K << fx_, s_, u0_, 0.0, fy_, v0_, 0.0, 0.0, 1.0; + return K; + } + +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 + /** @deprecated The following function has been deprecated, use K above */ + Matrix3 matrix() const { return K(); } +#endif + + /// Return inverted calibration matrix inv(K) + Matrix3 inverse() const; + + /// return DOF, dimensionality of tangent space + inline virtual size_t dim() const { return Dim(); } + + /// return DOF, dimensionality of tangent space + inline static size_t Dim() { return dimension; } + + /// @} + /// @name Advanced Interface + /// @{ + + private: + /// Serialization function + friend class boost::serialization::access; + template + void serialize(Archive& ar, const unsigned int /*version*/) { + ar& BOOST_SERIALIZATION_NVP(fx_); + ar& BOOST_SERIALIZATION_NVP(fy_); + ar& BOOST_SERIALIZATION_NVP(s_); + ar& BOOST_SERIALIZATION_NVP(u0_); + ar& BOOST_SERIALIZATION_NVP(v0_); + } + + /// @} +}; + +} // \ namespace gtsam diff --git a/gtsam/geometry/Cal3Bundler.cpp b/gtsam/geometry/Cal3Bundler.cpp index 0d64d0184..e03562452 100644 --- a/gtsam/geometry/Cal3Bundler.cpp +++ b/gtsam/geometry/Cal3Bundler.cpp @@ -15,28 +15,19 @@ * @author ydjian */ -#include #include +#include +#include #include #include -#include namespace gtsam { -/* ************************************************************************* */ -Cal3Bundler::Cal3Bundler() : - f_(1), k1_(0), k2_(0), u0_(0), v0_(0) { -} - -/* ************************************************************************* */ -Cal3Bundler::Cal3Bundler(double f, double k1, double k2, double u0, double v0) : - f_(f), k1_(k1), k2_(k2), u0_(u0), v0_(v0) { -} - /* ************************************************************************* */ Matrix3 Cal3Bundler::K() const { + // This function is needed to ensure skew = 0; Matrix3 K; - K << f_, 0, u0_, 0, f_, v0_, 0, 0, 1; + K << fx_, 0, u0_, 0, fy_, v0_, 0, 0, 1.0; return K; } @@ -48,35 +39,42 @@ Vector4 Cal3Bundler::k() const { } /* ************************************************************************* */ -Vector3 Cal3Bundler::vector() const { - return Vector3(f_, k1_, k2_); +Vector3 Cal3Bundler::vector() const { return Vector3(fx_, k1_, k2_); } + +/* ************************************************************************* */ +std::ostream& operator<<(std::ostream& os, const Cal3Bundler& cal) { + os << "f: " << cal.fx() << ", k1: " << cal.k1() << ", k2: " << 
cal.k2() + << ", px: " << cal.px() << ", py: " << cal.py(); + return os; } /* ************************************************************************* */ void Cal3Bundler::print(const std::string& s) const { - gtsam::print((Vector)(Vector(5) << f_, k1_, k2_, u0_, v0_).finished(), s + ".K"); + gtsam::print((Vector)(Vector(5) << fx_, k1_, k2_, u0_, v0_).finished(), + s + ".K"); } /* ************************************************************************* */ bool Cal3Bundler::equals(const Cal3Bundler& K, double tol) const { - if (std::abs(f_ - K.f_) > tol || std::abs(k1_ - K.k1_) > tol - || std::abs(k2_ - K.k2_) > tol || std::abs(u0_ - K.u0_) > tol - || std::abs(v0_ - K.v0_) > tol) - return false; - return true; + const Cal3* base = dynamic_cast(&K); + return (Cal3::equals(*base, tol) && std::fabs(k1_ - K.k1_) < tol && + std::fabs(k2_ - K.k2_) < tol && std::fabs(u0_ - K.u0_) < tol && + std::fabs(v0_ - K.v0_) < tol); } /* ************************************************************************* */ -Point2 Cal3Bundler::uncalibrate(const Point2& p, // - OptionalJacobian<2, 3> Dcal, OptionalJacobian<2, 2> Dp) const { - // r = x^2 + y^2; - // g = (1 + k(1)*r + k(2)*r^2); +Point2 Cal3Bundler::uncalibrate(const Point2& p, OptionalJacobian<2, 3> Dcal, + OptionalJacobian<2, 2> Dp) const { + // r = x² + y²; + // g = (1 + k(1)*r + k(2)*r²); // pi(:,i) = g * pn(:,i) const double x = p.x(), y = p.y(); const double r = x * x + y * y; const double g = 1. + (k1_ + k2_ * r) * r; const double u = g * x, v = g * y; + const double f_ = fx_; + // Derivatives make use of intermediate variables above if (Dcal) { double rx = r * x, ry = r * y; @@ -94,21 +92,23 @@ Point2 Cal3Bundler::uncalibrate(const Point2& p, // } /* ************************************************************************* */ -Point2 Cal3Bundler::calibrate(const Point2& pi, const double tol) const { - // Copied from Cal3DS2 :-( - // but specialized with k1,k2 non-zero only and fx=fy and s=0 - const Point2 invKPi((pi.x() - u0_)/f_, (pi.y() - v0_)/f_); +Point2 Cal3Bundler::calibrate(const Point2& pi, OptionalJacobian<2, 3> Dcal, + OptionalJacobian<2, 2> Dp) const { + // Copied from Cal3DS2 + // but specialized with k1, k2 non-zero only and fx=fy and s=0 + double x = (pi.x() - u0_) / fx_, y = (pi.y() - v0_) / fx_; + const Point2 invKPi(x, y); - // initialize by ignoring the distortion at all, might be problematic for pixels around boundary - Point2 pn = invKPi; + // initialize by ignoring the distortion at all, might be problematic for + // pixels around boundary + Point2 pn(x, y); // iterate until the uncalibrate is close to the actual pixel coordinate const int maxIterations = 10; int iteration; for (iteration = 0; iteration < maxIterations; ++iteration) { - if (distance2(uncalibrate(pn), pi) <= tol) - break; - const double x = pn.x(), y = pn.y(), xx = x * x, yy = y * y; + if (distance2(uncalibrate(pn), pi) <= tol_) break; + const double px = pn.x(), py = pn.y(), xx = px * px, yy = py * py; const double rr = xx + yy; const double g = (1 + k1_ * rr + k2_ * rr * rr); pn = invKPi / g; @@ -116,7 +116,10 @@ Point2 Cal3Bundler::calibrate(const Point2& pi, const double tol) const { if (iteration >= maxIterations) throw std::runtime_error( - "Cal3Bundler::calibrate fails to converge. need a better initialization"); + "Cal3Bundler::calibrate fails to converge. 
need a better " + "initialization"); + + calibrateJacobians(*this, pn, Dcal, Dp); return pn; } @@ -145,14 +148,4 @@ Matrix25 Cal3Bundler::D2d_intrinsic_calibration(const Point2& p) const { return H; } -/* ************************************************************************* */ -Cal3Bundler Cal3Bundler::retract(const Vector& d) const { - return Cal3Bundler(f_ + d(0), k1_ + d(1), k2_ + d(2), u0_, v0_); -} - -/* ************************************************************************* */ -Vector3 Cal3Bundler::localCoordinates(const Cal3Bundler& T2) const { - return T2.vector() - vector(); -} - -} +} // \ namespace gtsam diff --git a/gtsam/geometry/Cal3Bundler.h b/gtsam/geometry/Cal3Bundler.h index 4787f565b..0016ded2d 100644 --- a/gtsam/geometry/Cal3Bundler.h +++ b/gtsam/geometry/Cal3Bundler.h @@ -14,10 +14,12 @@ * @brief Calibration used by Bundler * @date Sep 25, 2010 * @author Yong Dian Jian + * @author Varun Agrawal */ #pragma once +#include #include namespace gtsam { @@ -27,22 +29,23 @@ namespace gtsam { * @addtogroup geometry * \nosubgrouping */ -class GTSAM_EXPORT Cal3Bundler { +class GTSAM_EXPORT Cal3Bundler : public Cal3 { + private: + double k1_ = 0.0f, k2_ = 0.0f; ///< radial distortion + double tol_ = 1e-5; ///< tolerance value when calibrating -private: - double f_; ///< focal length - double k1_, k2_; ///< radial distortion - double u0_, v0_; ///< image center, not a parameter to be optimized but a constant - -public: + // NOTE: We use the base class fx to represent the common focal length. + // Also, image center parameters (u0, v0) are not optimized + // but are treated as constants. + public: enum { dimension = 3 }; /// @name Standard Constructors /// @{ /// Default constructor - Cal3Bundler(); + Cal3Bundler() = default; /** * Constructor @@ -51,8 +54,11 @@ class GTSAM_EXPORT Cal3Bundler { * @param k2 second radial distortion coefficient (quartic) * @param u0 optional image center (default 0), considered a constant * @param v0 optional image center (default 0), considered a constant + * @param tol optional calibration tolerance value */ - Cal3Bundler(double f, double k1, double k2, double u0 = 0, double v0 = 0); + Cal3Bundler(double f, double k1, double k2, double u0 = 0, double v0 = 0, + double tol = 1e-5) + : Cal3(f, f, 0, u0, v0), k1_(k1), k2_(k2), tol_(tol) {} virtual ~Cal3Bundler() {} @@ -60,8 +66,12 @@ class GTSAM_EXPORT Cal3Bundler { /// @name Testable /// @{ + /// Output stream operator + GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Cal3Bundler& cal); + /// print with optional string - void print(const std::string& s = "") const; + void print(const std::string& s = "") const override; /// assert equality up to a tolerance bool equals(const Cal3Bundler& K, double tol = 10e-9) const; @@ -70,55 +80,52 @@ class GTSAM_EXPORT Cal3Bundler { /// @name Standard Interface /// @{ - Matrix3 K() const; ///< Standard 3*3 calibration matrix - Vector4 k() const; ///< Radial distortion parameters (4 of them, 2 0) + /// distorsion parameter k1 + inline double k1() const { return k1_; } - Vector3 vector() const; + /// distorsion parameter k2 + inline double k2() const { return k2_; } - /// focal length x - inline double fx() const { - return f_; - } + /// image center in x + inline double px() const { return u0_; } - /// focal length y - inline double fy() const { - return f_; - } + /// image center in y + inline double py() const { return v0_; } - /// distorsion parameter k1 - inline double k1() const { - return k1_; - } + Matrix3 K() const override; ///< 
Standard 3*3 calibration matrix + Vector4 k() const; ///< Radial distortion parameters (4 of them, 2 0) - /// distorsion parameter k2 - inline double k2() const { - return k2_; - } + Vector3 vector() const; +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 /// get parameter u0 - inline double u0() const { - return u0_; - } + inline double u0() const { return u0_; } /// get parameter v0 - inline double v0() const { - return v0_; - } - + inline double v0() const { return v0_; } +#endif /** * @brief: convert intrinsic coordinates xy to image coordinates uv * Version of uncalibrate with derivatives * @param p point in intrinsic coordinates - * @param Dcal optional 2*3 Jacobian wrpt CalBundler parameters + * @param Dcal optional 2*3 Jacobian wrpt Cal3Bundler parameters * @param Dp optional 2*2 Jacobian wrpt intrinsic coordinates * @return point in image coordinates */ Point2 uncalibrate(const Point2& p, OptionalJacobian<2, 3> Dcal = boost::none, - OptionalJacobian<2, 2> Dp = boost::none) const; + OptionalJacobian<2, 2> Dp = boost::none) const; - /// Convert a pixel coordinate to ideal coordinate - Point2 calibrate(const Point2& pi, const double tol = 1e-5) const; + /** + * Convert a pixel coordinate to ideal coordinate xy + * @param p point in image coordinates + * @param tol optional tolerance threshold value for iterative minimization + * @param Dcal optional 2*3 Jacobian wrpt Cal3Bundler parameters + * @param Dp optional 2*2 Jacobian wrpt intrinsic coordinates + * @return point in intrinsic coordinates + */ + Point2 calibrate(const Point2& pi, OptionalJacobian<2, 3> Dcal = boost::none, + OptionalJacobian<2, 2> Dp = boost::none) const; /// @deprecated might be removed in next release, use uncalibrate Matrix2 D2d_intrinsic(const Point2& p) const; @@ -133,47 +140,45 @@ class GTSAM_EXPORT Cal3Bundler { /// @name Manifold /// @{ - /// Update calibration with tangent space delta - Cal3Bundler retract(const Vector& d) const; + /// return DOF, dimensionality of tangent space + virtual size_t dim() const override { return Dim(); } - /// Calculate local coordinates to another calibration - Vector3 localCoordinates(const Cal3Bundler& T2) const; + /// return DOF, dimensionality of tangent space + inline static size_t Dim() { return dimension; } - /// dimensionality - virtual size_t dim() const { - return 3; + /// Update calibration with tangent space delta + inline Cal3Bundler retract(const Vector& d) const { + return Cal3Bundler(fx_ + d(0), k1_ + d(1), k2_ + d(2), u0_, v0_); } - /// dimensionality - static size_t Dim() { - return 3; + /// Calculate local coordinates to another calibration + Vector3 localCoordinates(const Cal3Bundler& T2) const { + return T2.vector() - vector(); } -private: - + private: /// @} /// @name Advanced Interface /// @{ /** Serialization function */ friend class boost::serialization::access; - template - void serialize(Archive & ar, const unsigned int /*version*/) { - ar & BOOST_SERIALIZATION_NVP(f_); - ar & BOOST_SERIALIZATION_NVP(k1_); - ar & BOOST_SERIALIZATION_NVP(k2_); - ar & BOOST_SERIALIZATION_NVP(u0_); - ar & BOOST_SERIALIZATION_NVP(v0_); + template + void serialize(Archive& ar, const unsigned int /*version*/) { + ar& boost::serialization::make_nvp( + "Cal3Bundler", boost::serialization::base_object(*this)); + ar& BOOST_SERIALIZATION_NVP(k1_); + ar& BOOST_SERIALIZATION_NVP(k2_); + ar& BOOST_SERIALIZATION_NVP(tol_); } /// @} - }; -template<> +template <> struct traits : public internal::Manifold {}; -template<> +template <> struct traits : public internal::Manifold {}; -} // 
namespace gtsam +} // namespace gtsam diff --git a/gtsam/geometry/Cal3DS2.cpp b/gtsam/geometry/Cal3DS2.cpp index 070d16c6c..f93386ea7 100644 --- a/gtsam/geometry/Cal3DS2.cpp +++ b/gtsam/geometry/Cal3DS2.cpp @@ -13,28 +13,30 @@ * @file Cal3DS2.cpp * @date Feb 28, 2010 * @author ydjian + * @author Varun Agrawal */ -#include #include +#include +#include #include #include -#include namespace gtsam { /* ************************************************************************* */ -void Cal3DS2::print(const std::string& s_) const { - Base::print(s_); +std::ostream& operator<<(std::ostream& os, const Cal3DS2& cal) { + os << (Cal3DS2_Base&)cal; + return os; } +/* ************************************************************************* */ +void Cal3DS2::print(const std::string& s_) const { Base::print(s_); } + /* ************************************************************************* */ bool Cal3DS2::equals(const Cal3DS2& K, double tol) const { - if (std::abs(fx_ - K.fx_) > tol || std::abs(fy_ - K.fy_) > tol || std::abs(s_ - K.s_) > tol || - std::abs(u0_ - K.u0_) > tol || std::abs(v0_ - K.v0_) > tol || std::abs(k1_ - K.k1_) > tol || - std::abs(k2_ - K.k2_) > tol || std::abs(p1_ - K.p1_) > tol || std::abs(p2_ - K.p2_) > tol) - return false; - return true; + const Cal3DS2_Base* base = dynamic_cast(&K); + return Cal3DS2_Base::equals(*base, tol); } /* ************************************************************************* */ @@ -46,8 +48,5 @@ Cal3DS2 Cal3DS2::retract(const Vector& d) const { Vector Cal3DS2::localCoordinates(const Cal3DS2& T2) const { return T2.vector() - vector(); } - } /* ************************************************************************* */ - - diff --git a/gtsam/geometry/Cal3DS2.h b/gtsam/geometry/Cal3DS2.h index 3e62f758d..58d35c2ec 100644 --- a/gtsam/geometry/Cal3DS2.h +++ b/gtsam/geometry/Cal3DS2.h @@ -11,9 +11,11 @@ /** * @file Cal3DS2.h - * @brief Calibration of a camera with radial distortion, calculations in base class Cal3DS2_Base + * @brief Calibration of a camera with radial distortion, calculations in base + * class Cal3DS2_Base * @date Feb 28, 2010 * @author ydjian + * @autho Varun Agrawal */ #pragma once @@ -30,22 +32,20 @@ namespace gtsam { * \nosubgrouping */ class GTSAM_EXPORT Cal3DS2 : public Cal3DS2_Base { + using Base = Cal3DS2_Base; - typedef Cal3DS2_Base Base; - -public: - + public: enum { dimension = 9 }; /// @name Standard Constructors /// @{ /// Default Constructor with only unit focal length - Cal3DS2() : Base() {} + Cal3DS2() = default; - Cal3DS2(double fx, double fy, double s, double u0, double v0, - double k1, double k2, double p1 = 0.0, double p2 = 0.0) : - Base(fx, fy, s, u0, v0, k1, k2, p1, p2) {} + Cal3DS2(double fx, double fy, double s, double u0, double v0, double k1, + double k2, double p1 = 0.0, double p2 = 0.0, double tol = 1e-5) + : Base(fx, fy, s, u0, v0, k1, k2, p1, p2, tol) {} virtual ~Cal3DS2() {} @@ -53,14 +53,18 @@ class GTSAM_EXPORT Cal3DS2 : public Cal3DS2_Base { /// @name Advanced Constructors /// @{ - Cal3DS2(const Vector &v) : Base(v) {} + Cal3DS2(const Vector9& v) : Base(v) {} /// @} /// @name Testable /// @{ + /// Output stream operator + GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Cal3DS2& cal); + /// print with optional string - virtual void print(const std::string& s = "") const ; + void print(const std::string& s = "") const override; /// assert equality up to a tolerance bool equals(const Cal3DS2& K, double tol = 10e-9) const; @@ -70,52 +74,46 @@ class GTSAM_EXPORT Cal3DS2 : public 
Cal3DS2_Base { /// @{ /// Given delta vector, update calibration - Cal3DS2 retract(const Vector& d) const ; + Cal3DS2 retract(const Vector& d) const; /// Given a different calibration, calculate update to obtain it - Vector localCoordinates(const Cal3DS2& T2) const ; + Vector localCoordinates(const Cal3DS2& T2) const; /// Return dimensions of calibration manifold object - virtual size_t dim() const { return dimension ; } + virtual size_t dim() const override { return Dim(); } /// Return dimensions of calibration manifold object - static size_t Dim() { return dimension; } + inline static size_t Dim() { return dimension; } /// @} /// @name Clone /// @{ /// @return a deep copy of this object - virtual boost::shared_ptr clone() const { + boost::shared_ptr clone() const override { return boost::shared_ptr(new Cal3DS2(*this)); } /// @} - -private: - + private: /// @name Advanced Interface /// @{ /** Serialization function */ friend class boost::serialization::access; - template - void serialize(Archive & ar, const unsigned int /*version*/) - { - ar & boost::serialization::make_nvp("Cal3DS2", - boost::serialization::base_object(*this)); + template + void serialize(Archive& ar, const unsigned int /*version*/) { + ar& boost::serialization::make_nvp( + "Cal3DS2", boost::serialization::base_object(*this)); } /// @} - }; -template<> +template <> struct traits : public internal::Manifold {}; -template<> +template <> struct traits : public internal::Manifold {}; - } - diff --git a/gtsam/geometry/Cal3DS2_Base.cpp b/gtsam/geometry/Cal3DS2_Base.cpp index 6c03883ce..a3f7026b9 100644 --- a/gtsam/geometry/Cal3DS2_Base.cpp +++ b/gtsam/geometry/Cal3DS2_Base.cpp @@ -13,27 +13,17 @@ * @file Cal3DS2_Base.cpp * @date Feb 28, 2010 * @author ydjian + * @author Varun Agrawal */ -#include #include +#include +#include #include #include -#include namespace gtsam { -/* ************************************************************************* */ -Cal3DS2_Base::Cal3DS2_Base(const Vector &v): - fx_(v[0]), fy_(v[1]), s_(v[2]), u0_(v[3]), v0_(v[4]), k1_(v[5]), k2_(v[6]), p1_(v[7]), p2_(v[8]){} - -/* ************************************************************************* */ -Matrix3 Cal3DS2_Base::K() const { - Matrix3 K; - K << fx_, s_, u0_, 0.0, fy_, v0_, 0.0, 0.0, 1.0; - return K; -} - /* ************************************************************************* */ Vector9 Cal3DS2_Base::vector() const { Vector9 v; @@ -41,6 +31,14 @@ Vector9 Cal3DS2_Base::vector() const { return v; } +/* ************************************************************************* */ +std::ostream& operator<<(std::ostream& os, const Cal3DS2_Base& cal) { + os << (Cal3&)cal; + os << ", k1: " << cal.k1() << ", k2: " << cal.k2() << ", p1: " << cal.p1() + << ", p2: " << cal.p2(); + return os; +} + /* ************************************************************************* */ void Cal3DS2_Base::print(const std::string& s_) const { gtsam::print((Matrix)K(), s_ + ".K"); @@ -49,31 +47,30 @@ void Cal3DS2_Base::print(const std::string& s_) const { /* ************************************************************************* */ bool Cal3DS2_Base::equals(const Cal3DS2_Base& K, double tol) const { - if (std::abs(fx_ - K.fx_) > tol || std::abs(fy_ - K.fy_) > tol || std::abs(s_ - K.s_) > tol || - std::abs(u0_ - K.u0_) > tol || std::abs(v0_ - K.v0_) > tol || std::abs(k1_ - K.k1_) > tol || - std::abs(k2_ - K.k2_) > tol || std::abs(p1_ - K.p1_) > tol || std::abs(p2_ - K.p2_) > tol) - return false; - return true; + const Cal3* base = dynamic_cast(&K); + return 
Cal3::equals(*base, tol) && std::fabs(k1_ - K.k1_) < tol && + std::fabs(k2_ - K.k2_) < tol && std::fabs(p1_ - K.p1_) < tol && + std::fabs(p2_ - K.p2_) < tol; } /* ************************************************************************* */ -static Matrix29 D2dcalibration(double x, double y, double xx, - double yy, double xy, double rr, double r4, double pnx, double pny, - const Matrix2& DK) { +static Matrix29 D2dcalibration(double x, double y, double xx, double yy, + double xy, double rr, double r4, double pnx, + double pny, const Matrix2& DK) { Matrix25 DR1; DR1 << pnx, 0.0, pny, 1.0, 0.0, 0.0, pny, 0.0, 0.0, 1.0; Matrix24 DR2; - DR2 << x * rr, x * r4, 2 * xy, rr + 2 * xx, // - y * rr, y * r4, rr + 2 * yy, 2 * xy; + DR2 << x * rr, x * r4, 2 * xy, rr + 2 * xx, // + y * rr, y * r4, rr + 2 * yy, 2 * xy; Matrix29 D; D << DR1, DK * DR2; return D; } /* ************************************************************************* */ -static Matrix2 D2dintrinsic(double x, double y, double rr, - double g, double k1, double k2, double p1, double p2, - const Matrix2& DK) { +static Matrix2 D2dintrinsic(double x, double y, double rr, double g, double k1, + double k2, double p1, double p2, + const Matrix2& DK) { const double drdx = 2. * x; const double drdy = 2. * y; const double dgdx = k1 * drdx + k2 * 2. * rr * drdx; @@ -87,24 +84,23 @@ static Matrix2 D2dintrinsic(double x, double y, double rr, const double dDydy = 2. * p2 * x + p1 * (drdy + 4. * y); Matrix2 DR; - DR << g + x * dgdx + dDxdx, x * dgdy + dDxdy, // - y * dgdx + dDydx, g + y * dgdy + dDydy; + DR << g + x * dgdx + dDxdx, x * dgdy + dDxdy, // + y * dgdx + dDydx, g + y * dgdy + dDydy; return DK * DR; } /* ************************************************************************* */ -Point2 Cal3DS2_Base::uncalibrate(const Point2& p, - OptionalJacobian<2,9> H1, OptionalJacobian<2,2> H2) const { - - // rr = x^2 + y^2; - // g = (1 + k(1)*rr + k(2)*rr^2); - // dp = [2*k(3)*x*y + k(4)*(rr + 2*x^2); 2*k(4)*x*y + k(3)*(rr + 2*y^2)]; +Point2 Cal3DS2_Base::uncalibrate(const Point2& p, OptionalJacobian<2, 9> Dcal, + OptionalJacobian<2, 2> Dp) const { + // r² = x² + y²; + // g = (1 + k(1)*r² + k(2)*r⁴); + // dp = [2*k(3)*x*y + k(4)*(r² + 2*x²); 2*k(4)*x*y + k(3)*(r² + 2*y²)]; // pi(:,i) = g * pn(:,i) + dp; const double x = p.x(), y = p.y(), xy = x * y, xx = x * x, yy = y * y; const double rr = xx + yy; const double r4 = rr * rr; - const double g = 1. + k1_ * rr + k2_ * r4; // scaling factor + const double g = 1. + k1_ * rr + k2_ * r4; // scaling factor // tangential component const double dx = 2. * p1_ * xy + p2_ * (rr + 2. 
* xx); @@ -115,37 +111,44 @@ Point2 Cal3DS2_Base::uncalibrate(const Point2& p, const double pny = g * y + dy; Matrix2 DK; - if (H1 || H2) DK << fx_, s_, 0.0, fy_; + if (Dcal || Dp) { + DK << fx_, s_, 0.0, fy_; + } // Derivative for calibration - if (H1) - *H1 = D2dcalibration(x, y, xx, yy, xy, rr, r4, pnx, pny, DK); + if (Dcal) { + *Dcal = D2dcalibration(x, y, xx, yy, xy, rr, r4, pnx, pny, DK); + } // Derivative for points - if (H2) - *H2 = D2dintrinsic(x, y, rr, g, k1_, k2_, p1_, p2_, DK); + if (Dp) { + *Dp = D2dintrinsic(x, y, rr, g, k1_, k2_, p1_, p2_, DK); + } // Regular uncalibrate after distortion return Point2(fx_ * pnx + s_ * pny + u0_, fy_ * pny + v0_); } /* ************************************************************************* */ -Point2 Cal3DS2_Base::calibrate(const Point2& pi, const double tol) const { +Point2 Cal3DS2_Base::calibrate(const Point2& pi, OptionalJacobian<2, 9> Dcal, + OptionalJacobian<2, 2> Dp) const { // Use the following fixed point iteration to invert the radial distortion. // pn_{t+1} = (inv(K)*pi - dp(pn_{t})) / g(pn_{t}) - const Point2 invKPi ((1 / fx_) * (pi.x() - u0_ - (s_ / fy_) * (pi.y() - v0_)), - (1 / fy_) * (pi.y() - v0_)); + const Point2 invKPi((1 / fx_) * (pi.x() - u0_ - (s_ / fy_) * (pi.y() - v0_)), + (1 / fy_) * (pi.y() - v0_)); - // initialize by ignoring the distortion at all, might be problematic for pixels around boundary + // initialize by ignoring the distortion at all, might be problematic for + // pixels around boundary Point2 pn = invKPi; // iterate until the uncalibrate is close to the actual pixel coordinate const int maxIterations = 10; int iteration; for (iteration = 0; iteration < maxIterations; ++iteration) { - if (distance2(uncalibrate(pn), pi) <= tol) break; - const double x = pn.x(), y = pn.y(), xy = x * y, xx = x * x, yy = y * y; + if (distance2(uncalibrate(pn), pi) <= tol_) break; + const double px = pn.x(), py = pn.y(), xy = px * py, xx = px * px, + yy = py * py; const double rr = xx + yy; const double g = (1 + k1_ * rr + k2_ * rr * rr); const double dx = 2 * p1_ * xy + p2_ * (rr + 2 * xx); @@ -153,8 +156,11 @@ Point2 Cal3DS2_Base::calibrate(const Point2& pi, const double tol) const { pn = (invKPi - Point2(dx, dy)) / g; } - if ( iteration >= maxIterations ) - throw std::runtime_error("Cal3DS2::calibrate fails to converge. need a better initialization"); + if (iteration >= maxIterations) + throw std::runtime_error( + "Cal3DS2::calibrate fails to converge. need a better initialization"); + + calibrateJacobians(*this, pn, Dcal, Dp); return pn; } @@ -184,8 +190,5 @@ Matrix29 Cal3DS2_Base::D2d_calibration(const Point2& p) const { DK << fx_, s_, 0.0, fy_; return D2dcalibration(x, y, xx, yy, xy, rr, r4, pnx, pny, DK); } - } /* ************************************************************************* */ - - diff --git a/gtsam/geometry/Cal3DS2_Base.h b/gtsam/geometry/Cal3DS2_Base.h index 4da5d1360..23e138838 100644 --- a/gtsam/geometry/Cal3DS2_Base.h +++ b/gtsam/geometry/Cal3DS2_Base.h @@ -14,10 +14,12 @@ * @brief Calibration of a camera with radial distortion * @date Feb 28, 2010 * @author ydjian + * @author Varun Agrawal */ #pragma once +#include #include namespace gtsam { @@ -31,30 +33,34 @@ namespace gtsam { * http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html * but using only k1,k2,p1, and p2 coefficients. 
* K = [ fx s u0 ; 0 fy v0 ; 0 0 1 ] - * rr = Pn.x^2 + Pn.y^2 - * \hat{Pn} = (1 + k1*rr + k2*rr^2 ) Pn + [ 2*p1 Pn.x Pn.y + p2 (rr + 2 Pn.x^2) ; - * p1 (rr + 2 Pn.y^2) + 2*p2 Pn.x Pn.y ] - * pi = K*Pn + * r² = P.x² + P.y² + * P̂ = (1 + k1*r² + k2*r⁴) P + [ (2*p1 P.x P.y) + p2 (r² + 2 Pn.x²) + * p1 (r² + 2 Pn.y²) + (2*p2 Pn.x Pn.y) ] + * pi = K*P̂ */ -class GTSAM_EXPORT Cal3DS2_Base { +class GTSAM_EXPORT Cal3DS2_Base : public Cal3 { + protected: + double k1_ = 0.0f, k2_ = 0.0f; ///< radial 2nd-order and 4th-order + double p1_ = 0.0f, p2_ = 0.0f; ///< tangential distortion + double tol_ = 1e-5; ///< tolerance value when calibrating -protected: - - double fx_, fy_, s_, u0_, v0_ ; // focal length, skew and principal point - double k1_, k2_ ; // radial 2nd-order and 4th-order - double p1_, p2_ ; // tangential distortion - -public: + public: + enum { dimension = 9 }; /// @name Standard Constructors /// @{ /// Default Constructor with only unit focal length - Cal3DS2_Base() : fx_(1), fy_(1), s_(0), u0_(0), v0_(0), k1_(0), k2_(0), p1_(0), p2_(0) {} + Cal3DS2_Base() = default; - Cal3DS2_Base(double fx, double fy, double s, double u0, double v0, - double k1, double k2, double p1 = 0.0, double p2 = 0.0) : - fx_(fx), fy_(fy), s_(s), u0_(u0), v0_(v0), k1_(k1), k2_(k2), p1_(p1), p2_(p2) {} + Cal3DS2_Base(double fx, double fy, double s, double u0, double v0, double k1, + double k2, double p1 = 0.0, double p2 = 0.0, double tol = 1e-5) + : Cal3(fx, fy, s, u0, v0), + k1_(k1), + k2_(k2), + p1_(p1), + p2_(p2), + tol_(tol) {} virtual ~Cal3DS2_Base() {} @@ -62,51 +68,42 @@ class GTSAM_EXPORT Cal3DS2_Base { /// @name Advanced Constructors /// @{ - Cal3DS2_Base(const Vector &v) ; + Cal3DS2_Base(const Vector9& v) + : Cal3(v(0), v(1), v(2), v(3), v(4)), + k1_(v(5)), + k2_(v(6)), + p1_(v(7)), + p2_(v(8)) {} /// @} /// @name Testable /// @{ + /// Output stream operator + GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Cal3DS2_Base& cal); + /// print with optional string - virtual void print(const std::string& s = "") const ; + void print(const std::string& s = "") const override; /// assert equality up to a tolerance - bool equals(const Cal3DS2_Base& K, double tol = 10e-9) const; + bool equals(const Cal3DS2_Base& K, double tol = 1e-8) const; /// @} /// @name Standard Interface /// @{ - /// focal length x - inline double fx() const { return fx_;} - - /// focal length x - inline double fy() const { return fy_;} - - /// skew - inline double skew() const { return s_;} - - /// image center in x - inline double px() const { return u0_;} - - /// image center in y - inline double py() const { return v0_;} - /// First distortion coefficient - inline double k1() const { return k1_;} + inline double k1() const { return k1_; } /// Second distortion coefficient - inline double k2() const { return k2_;} + inline double k2() const { return k2_; } /// First tangential distortion coefficient - inline double p1() const { return p1_;} + inline double p1() const { return p1_; } /// Second tangential distortion coefficient - inline double p2() const { return p2_;} - - /// return calibration matrix -- not really applicable - Matrix3 K() const; + inline double p2() const { return p2_; } /// return distortion parameter vector Vector4 k() const { return Vector4(k1_, k2_, p1_, p2_); } @@ -121,18 +118,24 @@ class GTSAM_EXPORT Cal3DS2_Base { * @param Dp optional 2*2 Jacobian wrpt intrinsic coordinates * @return point in (distorted) image coordinates */ - Point2 uncalibrate(const Point2& p, - OptionalJacobian<2,9> Dcal = 
boost::none, - OptionalJacobian<2,2> Dp = boost::none) const ; + Point2 uncalibrate(const Point2& p, OptionalJacobian<2, 9> Dcal = boost::none, + OptionalJacobian<2, 2> Dp = boost::none) const; /// Convert (distorted) image coordinates uv to intrinsic coordinates xy - Point2 calibrate(const Point2& p, const double tol=1e-5) const; + Point2 calibrate(const Point2& p, OptionalJacobian<2, 9> Dcal = boost::none, + OptionalJacobian<2, 2> Dp = boost::none) const; /// Derivative of uncalibrate wrpt intrinsic coordinates - Matrix2 D2d_intrinsic(const Point2& p) const ; + Matrix2 D2d_intrinsic(const Point2& p) const; /// Derivative of uncalibrate wrpt the calibration parameters - Matrix29 D2d_calibration(const Point2& p) const ; + Matrix29 D2d_calibration(const Point2& p) const; + + /// return DOF, dimensionality of tangent space + virtual size_t dim() const override { return Dim(); } + + /// return DOF, dimensionality of tangent space + inline static size_t Dim() { return dimension; } /// @} /// @name Clone @@ -145,30 +148,23 @@ class GTSAM_EXPORT Cal3DS2_Base { /// @} -private: - + private: /// @name Advanced Interface /// @{ /** Serialization function */ friend class boost::serialization::access; - template - void serialize(Archive & ar, const unsigned int /*version*/) - { - ar & BOOST_SERIALIZATION_NVP(fx_); - ar & BOOST_SERIALIZATION_NVP(fy_); - ar & BOOST_SERIALIZATION_NVP(s_); - ar & BOOST_SERIALIZATION_NVP(u0_); - ar & BOOST_SERIALIZATION_NVP(v0_); - ar & BOOST_SERIALIZATION_NVP(k1_); - ar & BOOST_SERIALIZATION_NVP(k2_); - ar & BOOST_SERIALIZATION_NVP(p1_); - ar & BOOST_SERIALIZATION_NVP(p2_); + template + void serialize(Archive& ar, const unsigned int /*version*/) { + ar& boost::serialization::make_nvp( + "Cal3DS2_Base", boost::serialization::base_object(*this)); + ar& BOOST_SERIALIZATION_NVP(k1_); + ar& BOOST_SERIALIZATION_NVP(k2_); + ar& BOOST_SERIALIZATION_NVP(p1_); + ar& BOOST_SERIALIZATION_NVP(p2_); + ar& BOOST_SERIALIZATION_NVP(tol_); } /// @} - }; - } - diff --git a/gtsam/geometry/Cal3Fisheye.cpp b/gtsam/geometry/Cal3Fisheye.cpp index c6b43004e..b9e60acee 100644 --- a/gtsam/geometry/Cal3Fisheye.cpp +++ b/gtsam/geometry/Cal3Fisheye.cpp @@ -13,6 +13,7 @@ * @file Cal3Fisheye.cpp * @date Apr 8, 2020 * @author ghaggin + * @author Varun Agrawal */ #include @@ -23,18 +24,6 @@ namespace gtsam { -/* ************************************************************************* */ -Cal3Fisheye::Cal3Fisheye(const Vector& v) - : fx_(v[0]), - fy_(v[1]), - s_(v[2]), - u0_(v[3]), - v0_(v[4]), - k1_(v[5]), - k2_(v[6]), - k3_(v[7]), - k4_(v[8]) {} - /* ************************************************************************* */ Vector9 Cal3Fisheye::vector() const { Vector9 v; @@ -43,96 +32,87 @@ Vector9 Cal3Fisheye::vector() const { } /* ************************************************************************* */ -Matrix3 Cal3Fisheye::K() const { - Matrix3 K; - K << fx_, s_, u0_, 0.0, fy_, v0_, 0.0, 0.0, 1.0; - return K; -} - -/* ************************************************************************* */ -static Matrix29 D2dcalibration(const double xd, const double yd, - const double xi, const double yi, - const double t3, const double t5, - const double t7, const double t9, const double r, - Matrix2& DK) { - // order: fx, fy, s, u0, v0 - Matrix25 DR1; - DR1 << xd, 0.0, yd, 1.0, 0.0, 0.0, yd, 0.0, 0.0, 1.0; - - // order: k1, k2, k3, k4 - Matrix24 DR2; - DR2 << t3 * xi, t5 * xi, t7 * xi, t9 * xi, t3 * yi, t5 * yi, t7 * yi, t9 * yi; - DR2 /= r; - Matrix29 D; - D << DR1, DK * DR2; - return D; -} - -/* 
************************************************************************* */ -static Matrix2 D2dintrinsic(const double xi, const double yi, const double r, - const double td, const double t, const double tt, - const double t4, const double t6, const double t8, - const double k1, const double k2, const double k3, - const double k4, const Matrix2& DK) { - const double dr_dxi = xi / sqrt(xi * xi + yi * yi); - const double dr_dyi = yi / sqrt(xi * xi + yi * yi); - const double dt_dr = 1 / (1 + r * r); - const double dtd_dt = - 1 + 3 * k1 * tt + 5 * k2 * t4 + 7 * k3 * t6 + 9 * k4 * t8; - const double dtd_dxi = dtd_dt * dt_dr * dr_dxi; - const double dtd_dyi = dtd_dt * dt_dr * dr_dyi; - - const double rinv = 1 / r; - const double rrinv = 1 / (r * r); - const double dxd_dxi = - dtd_dxi * xi * rinv + td * rinv + td * xi * (-rrinv) * dr_dxi; - const double dxd_dyi = dtd_dyi * xi * rinv - td * xi * rrinv * dr_dyi; - const double dyd_dxi = dtd_dxi * yi * rinv - td * yi * rrinv * dr_dxi; - const double dyd_dyi = - dtd_dyi * yi * rinv + td * rinv + td * yi * (-rrinv) * dr_dyi; - - Matrix2 DR; - DR << dxd_dxi, dxd_dyi, dyd_dxi, dyd_dyi; - - return DK * DR; +double Cal3Fisheye::Scaling(double r) { + static constexpr double threshold = 1e-8; + if (r > threshold || r < -threshold) { + return atan(r) / r; + } else { + // Taylor expansion close to 0 + double r2 = r * r, r4 = r2 * r2; + return 1.0 - r2 / 3 + r4 / 5; + } } /* ************************************************************************* */ Point2 Cal3Fisheye::uncalibrate(const Point2& p, OptionalJacobian<2, 9> H1, OptionalJacobian<2, 2> H2) const { const double xi = p.x(), yi = p.y(); - const double r = sqrt(xi * xi + yi * yi); + const double r2 = xi * xi + yi * yi, r = sqrt(r2); const double t = atan(r); - const double tt = t * t, t4 = tt * tt, t6 = tt * t4, t8 = t4 * t4; - const double td = t * (1 + k1_ * tt + k2_ * t4 + k3_ * t6 + k4_ * t8); - const double td_o_r = r > 1e-8 ? 
td / r : 1; - const double xd = td_o_r * xi, yd = td_o_r * yi; + const double t2 = t * t, t4 = t2 * t2, t6 = t2 * t4, t8 = t4 * t4; + Vector5 K, T; + K << 1, k1_, k2_, k3_, k4_; + T << 1, t2, t4, t6, t8; + const double scaling = Scaling(r); + const double s = scaling * K.dot(T); + const double xd = s * xi, yd = s * yi; Point2 uv(fx_ * xd + s_ * yd + u0_, fy_ * yd + v0_); Matrix2 DK; if (H1 || H2) DK << fx_, s_, 0.0, fy_; // Derivative for calibration parameters (2 by 9) - if (H1) - *H1 = D2dcalibration(xd, yd, xi, yi, t * tt, t * t4, t * t6, t * t8, r, DK); + if (H1) { + Matrix25 DR1; + // order: fx, fy, s, u0, v0 + DR1 << xd, 0.0, yd, 1.0, 0.0, 0.0, yd, 0.0, 0.0, 1.0; + + // order: k1, k2, k3, k4 + Matrix24 DR2; + auto T4 = T.tail<4>().transpose(); + DR2 << xi * T4, yi * T4; + *H1 << DR1, DK * scaling * DR2; + } // Derivative for points in intrinsic coords (2 by 2) - if (H2) - *H2 = - D2dintrinsic(xi, yi, r, td, t, tt, t4, t6, t8, k1_, k2_, k3_, k4_, DK); + if (H2) { + const double dtd_dt = + 1 + 3 * k1_ * t2 + 5 * k2_ * t4 + 7 * k3_ * t6 + 9 * k4_ * t8; + const double dt_dr = 1 / (1 + r2); + const double rinv = 1 / r; + const double dr_dxi = xi * rinv; + const double dr_dyi = yi * rinv; + const double dtd_dxi = dtd_dt * dt_dr * dr_dxi; + const double dtd_dyi = dtd_dt * dt_dr * dr_dyi; + + const double td = t * K.dot(T); + const double rrinv = 1 / r2; + const double dxd_dxi = + dtd_dxi * dr_dxi + td * rinv - td * xi * rrinv * dr_dxi; + const double dxd_dyi = dtd_dyi * dr_dxi - td * xi * rrinv * dr_dyi; + const double dyd_dxi = dtd_dxi * dr_dyi - td * yi * rrinv * dr_dxi; + const double dyd_dyi = + dtd_dyi * dr_dyi + td * rinv - td * yi * rrinv * dr_dyi; + + Matrix2 DR; + DR << dxd_dxi, dxd_dyi, dyd_dxi, dyd_dyi; + + *H2 = DK * DR; + } return uv; } /* ************************************************************************* */ -Point2 Cal3Fisheye::calibrate(const Point2& uv, const double tol) const { +Point2 Cal3Fisheye::calibrate(const Point2& uv, OptionalJacobian<2, 9> Dcal, + OptionalJacobian<2, 2> Dp) const { // initial gues just inverts the pinhole model const double u = uv.x(), v = uv.y(); const double yd = (v - v0_) / fy_; const double xd = (u - s_ * yd - u0_) / fx_; Point2 pi(xd, yd); - // Perform newtons method, break when solution converges past tol, + // Perform newtons method, break when solution converges past tol_, // throw exception if max iterations are reached const int maxIterations = 10; int iteration; @@ -143,7 +123,7 @@ Point2 Cal3Fisheye::calibrate(const Point2& uv, const double tol) const { const Point2 uv_hat = uncalibrate(pi, boost::none, jac); // Test convergence - if ((uv_hat - uv).norm() < tol) break; + if ((uv_hat - uv).norm() < tol_) break; // Newton's method update step pi = pi - jac.inverse() * (uv_hat - uv); @@ -154,64 +134,33 @@ Point2 Cal3Fisheye::calibrate(const Point2& uv, const double tol) const { "Cal3Fisheye::calibrate fails to converge. 
need a better " "initialization"); - return pi; -} + calibrateJacobians(*this, pi, Dcal, Dp); -/* ************************************************************************* */ -Matrix2 Cal3Fisheye::D2d_intrinsic(const Point2& p) const { - const double xi = p.x(), yi = p.y(); - const double r = sqrt(xi * xi + yi * yi); - const double t = atan(r); - const double tt = t * t, t4 = tt * tt, t6 = t4 * tt, t8 = t4 * t4; - const double td = t * (1 + k1_ * tt + k2_ * t4 + k3_ * t6 + k4_ * t8); - - Matrix2 DK; - DK << fx_, s_, 0.0, fy_; - - return D2dintrinsic(xi, yi, r, td, t, tt, t4, t6, t8, k1_, k2_, k3_, k4_, DK); + return pi; } /* ************************************************************************* */ -Matrix29 Cal3Fisheye::D2d_calibration(const Point2& p) const { - const double xi = p.x(), yi = p.y(); - const double r = sqrt(xi * xi + yi * yi); - const double t = atan(r); - const double tt = t * t, t4 = tt * tt, t6 = tt * t4, t8 = t4 * t4; - const double td = t * (1 + k1_ * tt + k2_ * t4 + k3_ * t6 + k4_ * t8); - const double xd = td / r * xi, yd = td / r * yi; - - Matrix2 DK; - DK << fx_, s_, 0.0, fy_; - return D2dcalibration(xd, yd, xi, yi, t * tt, t * t4, t * t6, t * t8, r, DK); +std::ostream& operator<<(std::ostream& os, const Cal3Fisheye& cal) { + os << (Cal3&)cal; + os << ", k1: " << cal.k1() << ", k2: " << cal.k2() << ", k3: " << cal.k3() + << ", k4: " << cal.k4(); + return os; } /* ************************************************************************* */ void Cal3Fisheye::print(const std::string& s_) const { gtsam::print((Matrix)K(), s_ + ".K"); gtsam::print(Vector(k()), s_ + ".k"); - ; } /* ************************************************************************* */ bool Cal3Fisheye::equals(const Cal3Fisheye& K, double tol) const { - if (std::abs(fx_ - K.fx_) > tol || std::abs(fy_ - K.fy_) > tol || - std::abs(s_ - K.s_) > tol || std::abs(u0_ - K.u0_) > tol || - std::abs(v0_ - K.v0_) > tol || std::abs(k1_ - K.k1_) > tol || - std::abs(k2_ - K.k2_) > tol || std::abs(k3_ - K.k3_) > tol || - std::abs(k4_ - K.k4_) > tol) - return false; - return true; + const Cal3* base = dynamic_cast(&K); + return Cal3::equals(*base, tol) && std::fabs(k1_ - K.k1_) < tol && + std::fabs(k2_ - K.k2_) < tol && std::fabs(k3_ - K.k3_) < tol && + std::fabs(k4_ - K.k4_) < tol; } /* ************************************************************************* */ -Cal3Fisheye Cal3Fisheye::retract(const Vector& d) const { - return Cal3Fisheye(vector() + d); -} -/* ************************************************************************* */ -Vector Cal3Fisheye::localCoordinates(const Cal3Fisheye& T2) const { - return T2.vector() - vector(); -} - -} // namespace gtsam -/* ************************************************************************* */ +} // \ namespace gtsam diff --git a/gtsam/geometry/Cal3Fisheye.h b/gtsam/geometry/Cal3Fisheye.h index 5fb196047..a394d2000 100644 --- a/gtsam/geometry/Cal3Fisheye.h +++ b/gtsam/geometry/Cal3Fisheye.h @@ -14,12 +14,16 @@ * @brief Calibration of a fisheye camera * @date Apr 8, 2020 * @author ghaggin + * @author Varun Agrawal */ #pragma once +#include #include +#include + namespace gtsam { /** @@ -34,43 +38,40 @@ namespace gtsam { * Intrinsic coordinates: * [x_i;y_i] = [x/z; y/z] * Distorted coordinates: - * r^2 = (x_i)^2 + (y_i)^2 + * r² = (x_i)² + (y_i)² * th = atan(r) - * th_d = th(1 + k1*th^2 + k2*th^4 + k3*th^6 + k4*th^8) + * th_d = th(1 + k1*th² + k2*th⁴ + k3*th⁶ + k4*th⁸) * [x_d; y_d] = (th_d / r)*[x_i; y_i] * Pixel coordinates: * K = [fx s u0; 0 fy v0 ;0 0 1] * [u; 
v; 1] = K*[x_d; y_d; 1] */ -class GTSAM_EXPORT Cal3Fisheye { - protected: - double fx_, fy_, s_, u0_, v0_; // focal length, skew and principal point - double k1_, k2_, k3_, k4_; // fisheye distortion coefficients +class GTSAM_EXPORT Cal3Fisheye : public Cal3 { + private: + double k1_ = 0.0f, k2_ = 0.0f; ///< fisheye distortion coefficients + double k3_ = 0.0f, k4_ = 0.0f; ///< fisheye distortion coefficients + double tol_ = 1e-5; ///< tolerance value when calibrating public: enum { dimension = 9 }; - typedef boost::shared_ptr - shared_ptr; ///< shared pointer to fisheye calibration object + ///< shared pointer to fisheye calibration object + using shared_ptr = boost::shared_ptr; /// @name Standard Constructors /// @{ /// Default Constructor with only unit focal length - Cal3Fisheye() - : fx_(1), fy_(1), s_(0), u0_(0), v0_(0), k1_(0), k2_(0), k3_(0), k4_(0) {} + Cal3Fisheye() = default; Cal3Fisheye(const double fx, const double fy, const double s, const double u0, const double v0, const double k1, const double k2, - const double k3, const double k4) - : fx_(fx), - fy_(fy), - s_(s), - u0_(u0), - v0_(v0), + const double k3, const double k4, double tol = 1e-5) + : Cal3(fx, fy, s, u0, v0), k1_(k1), k2_(k2), k3_(k3), - k4_(k4) {} + k4_(k4), + tol_(tol) {} virtual ~Cal3Fisheye() {} @@ -78,27 +79,17 @@ class GTSAM_EXPORT Cal3Fisheye { /// @name Advanced Constructors /// @{ - Cal3Fisheye(const Vector& v); + explicit Cal3Fisheye(const Vector9& v) + : Cal3(v(0), v(1), v(2), v(3), v(4)), + k1_(v(5)), + k2_(v(6)), + k3_(v(7)), + k4_(v(8)) {} /// @} /// @name Standard Interface /// @{ - /// focal length x - inline double fx() const { return fx_; } - - /// focal length x - inline double fy() const { return fy_; } - - /// skew - inline double skew() const { return s_; } - - /// image center in x - inline double px() const { return u0_; } - - /// image center in y - inline double py() const { return v0_; } - /// First distortion coefficient inline double k1() const { return k1_; } @@ -111,44 +102,47 @@ class GTSAM_EXPORT Cal3Fisheye { /// Second tangential distortion coefficient inline double k4() const { return k4_; } - /// return calibration matrix - Matrix3 K() const; - /// return distortion parameter vector Vector4 k() const { return Vector4(k1_, k2_, k3_, k4_); } /// Return all parameters as a vector Vector9 vector() const; + /// Helper function that calculates atan(r)/r + static double Scaling(double r); + /** * @brief convert intrinsic coordinates [x_i; y_i] to (distorted) image * coordinates [u; v] * @param p point in intrinsic coordinates - * @param Dcal optional 2*9 Jacobian wrpt intrinsic parameters (fx, fy, ..., - * k4) + * @param Dcal optional 2*9 Jacobian wrpt intrinsic parameters * @param Dp optional 2*2 Jacobian wrpt intrinsic coordinates (xi, yi) * @return point in (distorted) image coordinates */ Point2 uncalibrate(const Point2& p, OptionalJacobian<2, 9> Dcal = boost::none, OptionalJacobian<2, 2> Dp = boost::none) const; - /// Convert (distorted) image coordinates [u;v] to intrinsic coordinates [x_i, - /// y_i] - Point2 calibrate(const Point2& p, const double tol = 1e-5) const; - - /// Derivative of uncalibrate wrpt intrinsic coordinates [x_i; y_i] - Matrix2 D2d_intrinsic(const Point2& p) const; - - /// Derivative of uncalibrate wrpt the calibration parameters - /// [fx, fy, s, u0, v0, k1, k2, k3, k4] - Matrix29 D2d_calibration(const Point2& p) const; + /** + * Convert (distorted) image coordinates [u;v] to intrinsic coordinates [x_i, + * y_i] + * @param p point in image coordinates + * 
@param Dcal optional 2*9 Jacobian wrpt intrinsic parameters + * @param Dp optional 2*2 Jacobian wrpt intrinsic coordinates (xi, yi) + * @return point in intrinsic coordinates + */ + Point2 calibrate(const Point2& p, OptionalJacobian<2, 9> Dcal = boost::none, + OptionalJacobian<2, 2> Dp = boost::none) const; /// @} /// @name Testable /// @{ + /// Output stream operator + GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Cal3Fisheye& cal); + /// print with optional string - virtual void print(const std::string& s = "") const; + virtual void print(const std::string& s = "") const override; /// assert equality up to a tolerance bool equals(const Cal3Fisheye& K, double tol = 10e-9) const; @@ -157,17 +151,21 @@ class GTSAM_EXPORT Cal3Fisheye { /// @name Manifold /// @{ - /// Given delta vector, update calibration - Cal3Fisheye retract(const Vector& d) const; - - /// Given a different calibration, calculate update to obtain it - Vector localCoordinates(const Cal3Fisheye& T2) const; - /// Return dimensions of calibration manifold object - virtual size_t dim() const { return 9; } + virtual size_t dim() const override { return Dim(); } /// Return dimensions of calibration manifold object - static size_t Dim() { return 9; } + inline static size_t Dim() { return dimension; } + + /// Given delta vector, update calibration + inline Cal3Fisheye retract(const Vector& d) const { + return Cal3Fisheye(vector() + d); + } + + /// Given a different calibration, calculate update to obtain it + Vector localCoordinates(const Cal3Fisheye& T2) const { + return T2.vector() - vector(); + } /// @} /// @name Clone @@ -188,11 +186,8 @@ class GTSAM_EXPORT Cal3Fisheye { friend class boost::serialization::access; template void serialize(Archive& ar, const unsigned int /*version*/) { - ar& BOOST_SERIALIZATION_NVP(fx_); - ar& BOOST_SERIALIZATION_NVP(fy_); - ar& BOOST_SERIALIZATION_NVP(s_); - ar& BOOST_SERIALIZATION_NVP(u0_); - ar& BOOST_SERIALIZATION_NVP(v0_); + ar& boost::serialization::make_nvp( + "Cal3Fisheye", boost::serialization::base_object(*this)); ar& BOOST_SERIALIZATION_NVP(k1_); ar& BOOST_SERIALIZATION_NVP(k2_); ar& BOOST_SERIALIZATION_NVP(k3_); diff --git a/gtsam/geometry/Cal3Unified.cpp b/gtsam/geometry/Cal3Unified.cpp index b1b9c3722..11aabcaa7 100644 --- a/gtsam/geometry/Cal3Unified.cpp +++ b/gtsam/geometry/Cal3Unified.cpp @@ -13,21 +13,18 @@ * @file Cal3Unified.cpp * @date Mar 8, 2014 * @author Jing Dong + * @author Varun Agrawal */ -#include #include -#include +#include #include +#include #include namespace gtsam { -/* ************************************************************************* */ -Cal3Unified::Cal3Unified(const Vector &v): - Base(v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7], v[8]), xi_(v[9]) {} - /* ************************************************************************* */ Vector10 Cal3Unified::vector() const { Vector10 v; @@ -35,6 +32,13 @@ Vector10 Cal3Unified::vector() const { return v; } +/* ************************************************************************* */ +std::ostream& operator<<(std::ostream& os, const Cal3Unified& cal) { + os << (Cal3DS2_Base&)cal; + os << ", xi: " << cal.xi(); + return os; +} + /* ************************************************************************* */ void Cal3Unified::print(const std::string& s) const { Base::print(s); @@ -43,20 +47,14 @@ void Cal3Unified::print(const std::string& s) const { /* ************************************************************************* */ bool Cal3Unified::equals(const Cal3Unified& K, double tol) 
const { - if (std::abs(fx_ - K.fx_) > tol || std::abs(fy_ - K.fy_) > tol || std::abs(s_ - K.s_) > tol || - std::abs(u0_ - K.u0_) > tol || std::abs(v0_ - K.v0_) > tol || std::abs(k1_ - K.k1_) > tol || - std::abs(k2_ - K.k2_) > tol || std::abs(p1_ - K.p1_) > tol || std::abs(p2_ - K.p2_) > tol || - std::abs(xi_ - K.xi_) > tol) - return false; - return true; + const Cal3DS2_Base* base = dynamic_cast(&K); + return Cal3DS2_Base::equals(*base, tol) && std::fabs(xi_ - K.xi_) < tol; } /* ************************************************************************* */ // todo: make a fixed sized jacobian version of this -Point2 Cal3Unified::uncalibrate(const Point2& p, - OptionalJacobian<2,10> H1, - OptionalJacobian<2,2> H2) const { - +Point2 Cal3Unified::uncalibrate(const Point2& p, OptionalJacobian<2, 10> Dcal, + OptionalJacobian<2, 2> Dp) const { // this part of code is modified from Cal3DS2, // since the second part of this model (after project to normalized plane) // is same as Cal3DS2 @@ -69,50 +67,53 @@ Point2 Cal3Unified::uncalibrate(const Point2& p, const double sqrt_nx = sqrt(xs * xs + ys * ys + 1.0); const double xi_sqrt_nx = 1.0 / (1 + xi * sqrt_nx); const double xi_sqrt_nx2 = xi_sqrt_nx * xi_sqrt_nx; - const double x = xs * xi_sqrt_nx, y = ys * xi_sqrt_nx; // points on NPlane + const double x = xs * xi_sqrt_nx, y = ys * xi_sqrt_nx; // points on NPlane // Part2: project NPlane point to pixel plane: use Cal3DS2 - Point2 m(x,y); + Point2 m(x, y); Matrix29 H1base; - Matrix2 H2base; // jacobians from Base class + Matrix2 H2base; // jacobians from Base class Point2 puncalib = Base::uncalibrate(m, H1base, H2base); // Inlined derivative for calibration - if (H1) { + if (Dcal) { // part1 Vector2 DU; - DU << -xs * sqrt_nx * xi_sqrt_nx2, // + DU << -xs * sqrt_nx * xi_sqrt_nx2, // -ys * sqrt_nx * xi_sqrt_nx2; - *H1 << H1base, H2base * DU; + *Dcal << H1base, H2base * DU; } // Inlined derivative for points - if (H2) { + if (Dp) { // part1 const double denom = 1.0 * xi_sqrt_nx2 / sqrt_nx; - const double mid = -(xi * xs*ys) * denom; + const double mid = -(xi * xs * ys) * denom; Matrix2 DU; - DU << (sqrt_nx + xi*(ys*ys + 1)) * denom, mid, // - mid, (sqrt_nx + xi*(xs*xs + 1)) * denom; + DU << (sqrt_nx + xi * (ys * ys + 1)) * denom, mid, // + mid, (sqrt_nx + xi * (xs * xs + 1)) * denom; - *H2 << H2base * DU; + *Dp << H2base * DU; } return puncalib; } /* ************************************************************************* */ -Point2 Cal3Unified::calibrate(const Point2& pi, const double tol) const { - +Point2 Cal3Unified::calibrate(const Point2& pi, OptionalJacobian<2, 10> Dcal, + OptionalJacobian<2, 2> Dp) const { // calibrate point to Nplane use base class::calibrate() - Point2 pnplane = Base::calibrate(pi, tol); + Point2 pnplane = Base::calibrate(pi); // call nplane to space - return this->nPlaneToSpace(pnplane); + Point2 pn = this->nPlaneToSpace(pnplane); + + calibrateJacobians(*this, pn, Dcal, Dp); + + return pn; } /* ************************************************************************* */ Point2 Cal3Unified::nPlaneToSpace(const Point2& p) const { - const double x = p.x(), y = p.y(); const double xy2 = x * x + y * y; const double sq_xy = (xi_ + sqrt(1 + (1 - xi_ * xi_) * xy2)) / (xy2 + 1); @@ -122,7 +123,6 @@ Point2 Cal3Unified::nPlaneToSpace(const Point2& p) const { /* ************************************************************************* */ Point2 Cal3Unified::spaceToNPlane(const Point2& p) const { - const double x = p.x(), y = p.y(); const double sq_xy = 1 + xi_ * sqrt(x * x + y * y + 1); @@ 
-135,11 +135,10 @@ Cal3Unified Cal3Unified::retract(const Vector& d) const { } /* ************************************************************************* */ -Vector10 Cal3Unified::localCoordinates(const Cal3Unified& T2) const { +Vector Cal3Unified::localCoordinates(const Cal3Unified& T2) const { return T2.vector() - vector(); } -} /* ************************************************************************* */ - +} // \ namespace gtsam diff --git a/gtsam/geometry/Cal3Unified.h b/gtsam/geometry/Cal3Unified.h index ae75c3916..ee388c8c1 100644 --- a/gtsam/geometry/Cal3Unified.h +++ b/gtsam/geometry/Cal3Unified.h @@ -14,6 +14,7 @@ * @brief Unified Calibration Model, see Mei07icra for details * @date Mar 8, 2014 * @author Jing Dong + * @author Varun Agrawal */ /** @@ -27,40 +28,39 @@ namespace gtsam { /** - * @brief Calibration of a omni-directional camera with mirror + lens radial distortion + * @brief Calibration of a omni-directional camera with mirror + lens radial + * distortion * @addtogroup geometry * \nosubgrouping * * Similar to Cal3DS2, does distortion but has additional mirror parameter xi * K = [ fx s u0 ; 0 fy v0 ; 0 0 1 ] - * Pn = [ P.x / (1 + xi * \sqrt{P.x^2 + P.y^2 + 1}), P.y / (1 + xi * \sqrt{P.x^2 + P.y^2 + 1})] - * rr = Pn.x^2 + Pn.y^2 - * \hat{pn} = (1 + k1*rr + k2*rr^2 ) pn + [ 2*k3 pn.x pn.y + k4 (rr + 2 Pn.x^2) ; - * k3 (rr + 2 Pn.y^2) + 2*k4 pn.x pn.y ] + * Pn = [ P.x / (1 + xi * \sqrt{P.x² + P.y² + 1}), P.y / (1 + xi * \sqrt{P.x² + + * P.y² + 1})] + * r² = Pn.x² + Pn.y² + * \hat{pn} = (1 + k1*r² + k2*r⁴ ) pn + [ 2*k3 pn.x pn.y + k4 (r² + 2 Pn.x²) ; + * k3 (rr + 2 Pn.y²) + 2*k4 pn.x pn.y ] * pi = K*pn */ class GTSAM_EXPORT Cal3Unified : public Cal3DS2_Base { + using This = Cal3Unified; + using Base = Cal3DS2_Base; - typedef Cal3Unified This; - typedef Cal3DS2_Base Base; - -private: - - double xi_; // mirror parameter - -public: + private: + double xi_ = 0.0f; ///< mirror parameter + public: enum { dimension = 10 }; /// @name Standard Constructors /// @{ /// Default Constructor with only unit focal length - Cal3Unified() : Base(), xi_(0) {} + Cal3Unified() = default; - Cal3Unified(double fx, double fy, double s, double u0, double v0, - double k1, double k2, double p1 = 0.0, double p2 = 0.0, double xi = 0.0) : - Base(fx, fy, s, u0, v0, k1, k2, p1, p2), xi_(xi) {} + Cal3Unified(double fx, double fy, double s, double u0, double v0, double k1, + double k2, double p1 = 0.0, double p2 = 0.0, double xi = 0.0) + : Base(fx, fy, s, u0, v0, k1, k2, p1, p2), xi_(xi) {} virtual ~Cal3Unified() {} @@ -68,14 +68,19 @@ class GTSAM_EXPORT Cal3Unified : public Cal3DS2_Base { /// @name Advanced Constructors /// @{ - Cal3Unified(const Vector &v) ; + Cal3Unified(const Vector10& v) + : Base(v(0), v(1), v(2), v(3), v(4), v(5), v(6), v(7), v(8)), xi_(v(9)) {} /// @} /// @name Testable /// @{ + /// Output stream operator + GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Cal3Unified& cal); + /// print with optional string - virtual void print(const std::string& s = "") const ; + void print(const std::string& s = "") const override; /// assert equality up to a tolerance bool equals(const Cal3Unified& K, double tol = 10e-9) const; @@ -85,7 +90,10 @@ class GTSAM_EXPORT Cal3Unified : public Cal3DS2_Base { /// @{ /// mirror parameter - inline double xi() const { return xi_;} + inline double xi() const { return xi_; } + + /// Return all parameters as a vector + Vector10 vector() const; /** * convert intrinsic coordinates xy to image coordinates uv @@ -95,11 +103,12 @@ class 
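The unified (Mei) model documented here first pulls the ray onto the normalized image plane using the mirror parameter xi and only then applies the Cal3DS2 radial/tangential distortion; calibrate runs the two stages in reverse (Base::calibrate, then nPlaneToSpace). A minimal sketch of the mirror stage, following the Pn formula in the class comment (xi and the point are illustrative):

    #include <cmath>
    #include <cstdio>

    int main() {
      const double xi = 1.0;          // illustrative mirror parameter
      const double x = 1.0, y = 0.0;  // point on the space plane
      const double d = 1.0 + xi * std::sqrt(x * x + y * y + 1.0);
      std::printf("normalized-plane point: (%f, %f)\n", x / d, y / d);  // ~(0.4142, 0)
      return 0;
    }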
GTSAM_EXPORT Cal3Unified : public Cal3DS2_Base { * @return point in image coordinates */ Point2 uncalibrate(const Point2& p, - OptionalJacobian<2,10> Dcal = boost::none, - OptionalJacobian<2,2> Dp = boost::none) const ; + OptionalJacobian<2, 10> Dcal = boost::none, + OptionalJacobian<2, 2> Dp = boost::none) const; /// Conver a pixel coordinate to ideal coordinate - Point2 calibrate(const Point2& p, const double tol=1e-5) const; + Point2 calibrate(const Point2& p, OptionalJacobian<2, 10> Dcal = boost::none, + OptionalJacobian<2, 2> Dp = boost::none) const; /// Convert a 3D point to normalized unit plane Point2 spaceToNPlane(const Point2& p) const; @@ -112,41 +121,33 @@ class GTSAM_EXPORT Cal3Unified : public Cal3DS2_Base { /// @{ /// Given delta vector, update calibration - Cal3Unified retract(const Vector& d) const ; + Cal3Unified retract(const Vector& d) const; /// Given a different calibration, calculate update to obtain it - Vector10 localCoordinates(const Cal3Unified& T2) const ; + Vector localCoordinates(const Cal3Unified& T2) const; /// Return dimensions of calibration manifold object - virtual size_t dim() const { return dimension ; } + virtual size_t dim() const override { return Dim(); } /// Return dimensions of calibration manifold object - static size_t Dim() { return dimension; } - - /// Return all parameters as a vector - Vector10 vector() const ; + inline static size_t Dim() { return dimension; } /// @} -private: - + private: /** Serialization function */ friend class boost::serialization::access; - template - void serialize(Archive & ar, const unsigned int /*version*/) - { - ar & boost::serialization::make_nvp("Cal3Unified", - boost::serialization::base_object(*this)); - ar & BOOST_SERIALIZATION_NVP(xi_); + template + void serialize(Archive& ar, const unsigned int /*version*/) { + ar& boost::serialization::make_nvp( + "Cal3Unified", boost::serialization::base_object(*this)); + ar& BOOST_SERIALIZATION_NVP(xi_); } - }; -template<> +template <> struct traits : public internal::Manifold {}; -template<> +template <> struct traits : public internal::Manifold {}; - } - diff --git a/gtsam/geometry/Cal3_S2.cpp b/gtsam/geometry/Cal3_S2.cpp index b3d1be4b6..1a76c3f6f 100644 --- a/gtsam/geometry/Cal3_S2.cpp +++ b/gtsam/geometry/Cal3_S2.cpp @@ -22,95 +22,55 @@ #include namespace gtsam { -using namespace std; /* ************************************************************************* */ -Cal3_S2::Cal3_S2(double fov, int w, int h) : - s_(0), u0_((double) w / 2.0), v0_((double) h / 2.0) { - double a = fov * M_PI / 360.0; // fov/2 in radians - fx_ = (double) w / (2.0 * tan(a)); // old formula: fx_ = (double) w * tan(a); - fy_ = fx_; -} - -/* ************************************************************************* */ -Cal3_S2::Cal3_S2(const std::string &path) : - fx_(320), fy_(320), s_(0), u0_(320), v0_(140) { - - char buffer[200]; - buffer[0] = 0; - sprintf(buffer, "%s/calibration_info.txt", path.c_str()); - std::ifstream infile(buffer, std::ios::in); - - if (infile) - infile >> fx_ >> fy_ >> s_ >> u0_ >> v0_; - else { - throw std::runtime_error("Cal3_S2: Unable to load the calibration"); - } - - infile.close(); -} - -/* ************************************************************************* */ -ostream& operator<<(ostream& os, const Cal3_S2& cal) { - os << "{fx: " << cal.fx() << ", fy: " << cal.fy() << ", s:" << cal.skew() << ", px:" << cal.px() - << ", py:" << cal.py() << "}"; +std::ostream& operator<<(std::ostream& os, const Cal3_S2& cal) { + // Use the base class version since it is 
identical. + os << (Cal3&)cal; return os; } /* ************************************************************************* */ void Cal3_S2::print(const std::string& s) const { - gtsam::print((Matrix)matrix(), s); + gtsam::print((Matrix)K(), s); } /* ************************************************************************* */ bool Cal3_S2::equals(const Cal3_S2& K, double tol) const { - if (std::abs(fx_ - K.fx_) > tol) - return false; - if (std::abs(fy_ - K.fy_) > tol) - return false; - if (std::abs(s_ - K.s_) > tol) - return false; - if (std::abs(u0_ - K.u0_) > tol) - return false; - if (std::abs(v0_ - K.v0_) > tol) - return false; - return true; + return Cal3::equals(K, tol); } /* ************************************************************************* */ Point2 Cal3_S2::uncalibrate(const Point2& p, OptionalJacobian<2, 5> Dcal, - OptionalJacobian<2, 2> Dp) const { + OptionalJacobian<2, 2> Dp) const { const double x = p.x(), y = p.y(); - if (Dcal) - *Dcal << x, 0.0, y, 1.0, 0.0, 0.0, y, 0.0, 0.0, 1.0; - if (Dp) - *Dp << fx_, s_, 0.0, fy_; + if (Dcal) *Dcal << x, 0.0, y, 1.0, 0.0, 0.0, y, 0.0, 0.0, 1.0; + if (Dp) *Dp << fx_, s_, 0.0, fy_; return Point2(fx_ * x + s_ * y + u0_, fy_ * y + v0_); } /* ************************************************************************* */ -Point2 Cal3_S2::calibrate(const Point2& p, OptionalJacobian<2,5> Dcal, - OptionalJacobian<2,2> Dp) const { - const double u = p.x(), v = p.y(); - double delta_u = u - u0_, delta_v = v - v0_; - double inv_fx = 1/ fx_, inv_fy = 1/fy_; - double inv_fy_delta_v = inv_fy * delta_v, inv_fx_s_inv_fy = inv_fx * s_ * inv_fy; - Point2 point(inv_fx * (delta_u - s_ * inv_fy_delta_v), - inv_fy_delta_v); - if(Dcal) - *Dcal << - inv_fx * point.x(), inv_fx * s_ * inv_fy * inv_fy_delta_v, -inv_fx * point.y(), - -inv_fx, inv_fx_s_inv_fy, - 0, -inv_fy * point.y(), 0, 0, -inv_fy; - if(Dp) - *Dp << inv_fx, -inv_fx_s_inv_fy, 0, inv_fy; - return point; +Point2 Cal3_S2::calibrate(const Point2& p, OptionalJacobian<2, 5> Dcal, + OptionalJacobian<2, 2> Dp) const { + const double u = p.x(), v = p.y(); + double delta_u = u - u0_, delta_v = v - v0_; + double inv_fx = 1 / fx_, inv_fy = 1 / fy_; + double inv_fy_delta_v = inv_fy * delta_v; + double inv_fx_s_inv_fy = inv_fx * s_ * inv_fy; + + Point2 point(inv_fx * (delta_u - s_ * inv_fy_delta_v), inv_fy_delta_v); + if (Dcal) { + *Dcal << -inv_fx * point.x(), inv_fx * s_ * inv_fy * inv_fy_delta_v, + -inv_fx * point.y(), -inv_fx, inv_fx_s_inv_fy, 0, -inv_fy * point.y(), + 0, 0, -inv_fy; + } + if (Dp) *Dp << inv_fx, -inv_fx_s_inv_fy, 0, inv_fy; + return point; } /* ************************************************************************* */ -Vector3 Cal3_S2::calibrate(const Vector3& p) const { - return matrix_inverse() * p; -} +Vector3 Cal3_S2::calibrate(const Vector3& p) const { return inverse() * p; } /* ************************************************************************* */ -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/geometry/Cal3_S2.h b/gtsam/geometry/Cal3_S2.h index f2848d0a3..93b98a7e1 100644 --- a/gtsam/geometry/Cal3_S2.h +++ b/gtsam/geometry/Cal3_S2.h @@ -21,6 +21,7 @@ #pragma once +#include #include namespace gtsam { @@ -30,31 +31,25 @@ namespace gtsam { * @addtogroup geometry * \nosubgrouping */ -class GTSAM_EXPORT Cal3_S2 { -private: - double fx_, fy_, s_, u0_, v0_; - -public: +class GTSAM_EXPORT Cal3_S2 : public Cal3 { + public: enum { dimension = 5 }; - typedef boost::shared_ptr shared_ptr; ///< shared pointer to calibration object + + ///< shared pointer to calibration 
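The closed-form calibrate above is simply the inverse of the pinhole map u = fx·x + s·y + u0, v = fy·y + v0: recover y from v first, then remove the skew term before dividing by fx. A small numeric check with made-up intrinsics:

    #include <cstdio>

    int main() {
      const double fx = 500, fy = 500, s = 0, u0 = 320, v0 = 240;  // illustrative intrinsics
      const double u = 420, v = 190;                                // pixel measurement
      const double y = (v - v0) / fy;                               // -0.1
      const double x = (u - u0 - s * y) / fx;                       // 0.2
      std::printf("intrinsic point: (%f, %f)\n", x, y);
      return 0;
    }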
object + using shared_ptr = boost::shared_ptr; /// @name Standard Constructors /// @{ /// Create a default calibration that leaves coordinates unchanged - Cal3_S2() : - fx_(1), fy_(1), s_(0), u0_(0), v0_(0) { - } + Cal3_S2() = default; /// constructor from doubles - Cal3_S2(double fx, double fy, double s, double u0, double v0) : - fx_(fx), fy_(fy), s_(s), u0_(u0), v0_(v0) { - } + Cal3_S2(double fx, double fy, double s, double u0, double v0) + : Cal3(fx, fy, s, u0, v0) {} /// constructor from vector - Cal3_S2(const Vector &d) : - fx_(d(0)), fy_(d(1)), s_(d(2)), u0_(d(3)), v0_(d(4)) { - } + Cal3_S2(const Vector5& d) : Cal3(d) {} /** * Easy constructor, takes fov in degrees, asssumes zero skew, unit aspect @@ -62,141 +57,65 @@ class GTSAM_EXPORT Cal3_S2 { * @param w image width * @param h image height */ - Cal3_S2(double fov, int w, int h); - - /// @} - /// @name Advanced Constructors - /// @{ - - /// load calibration from location (default name is calibration_info.txt) - Cal3_S2(const std::string &path); - - /// @} - /// @name Testable - /// @{ - - /// Output stream operator - GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Cal3_S2& cal); - - /// print with optional string - void print(const std::string& s = "Cal3_S2") const; - - /// Check if equal up to specified tolerance - bool equals(const Cal3_S2& K, double tol = 10e-9) const; - - /// @} - /// @name Standard Interface - /// @{ - - /// focal length x - inline double fx() const { - return fx_; - } - - /// focal length y - inline double fy() const { - return fy_; - } - - /// aspect ratio - inline double aspectRatio() const { - return fx_/fy_; - } - - /// skew - inline double skew() const { - return s_; - } - - /// image center in x - inline double px() const { - return u0_; - } - - /// image center in y - inline double py() const { - return v0_; - } - - /// return the principal point - Point2 principalPoint() const { - return Point2(u0_, v0_); - } - - /// vectorized form (column-wise) - Vector5 vector() const { - Vector5 v; - v << fx_, fy_, s_, u0_, v0_; - return v; - } - - /// return calibration matrix K - Matrix3 K() const { - Matrix3 K; - K << fx_, s_, u0_, 0.0, fy_, v0_, 0.0, 0.0, 1.0; - return K; - } - - /** @deprecated The following function has been deprecated, use K above */ - Matrix3 matrix() const { - return K(); - } - - /// return inverted calibration matrix inv(K) - Matrix3 matrix_inverse() const { - const double fxy = fx_ * fy_, sv0 = s_ * v0_, fyu0 = fy_ * u0_; - Matrix3 K_inverse; - K_inverse << 1.0 / fx_, -s_ / fxy, (sv0 - fyu0) / fxy, 0.0, - 1.0 / fy_, -v0_ / fy_, 0.0, 0.0, 1.0; - return K_inverse; - } + Cal3_S2(double fov, int w, int h) : Cal3(fov, w, h) {} /** - * convert intrinsic coordinates xy to image coordinates uv, fixed derivaitves + * Convert intrinsic coordinates xy to image coordinates uv, fixed derivaitves * @param p point in intrinsic coordinates - * @param Dcal optional 2*5 Jacobian wrpt Cal3_S2 parameters + * @param Dcal optional 2*5 Jacobian wrpt Cal3 parameters * @param Dp optional 2*2 Jacobian wrpt intrinsic coordinates * @return point in image coordinates */ - Point2 uncalibrate(const Point2& p, OptionalJacobian<2,5> Dcal = boost::none, - OptionalJacobian<2,2> Dp = boost::none) const; + Point2 uncalibrate(const Point2& p, OptionalJacobian<2, 5> Dcal = boost::none, + OptionalJacobian<2, 2> Dp = boost::none) const; /** - * convert image coordinates uv to intrinsic coordinates xy + * Convert image coordinates uv to intrinsic coordinates xy * @param p point in image coordinates - * 
@param Dcal optional 2*5 Jacobian wrpt Cal3_S2 parameters + * @param Dcal optional 2*5 Jacobian wrpt Cal3 parameters * @param Dp optional 2*2 Jacobian wrpt intrinsic coordinates * @return point in intrinsic coordinates */ - Point2 calibrate(const Point2& p, OptionalJacobian<2,5> Dcal = boost::none, - OptionalJacobian<2,2> Dp = boost::none) const; + Point2 calibrate(const Point2& p, OptionalJacobian<2, 5> Dcal = boost::none, + OptionalJacobian<2, 2> Dp = boost::none) const; /** - * convert homogeneous image coordinates to intrinsic coordinates + * Convert homogeneous image coordinates to intrinsic coordinates * @param p point in image coordinates * @return point in intrinsic coordinates */ Vector3 calibrate(const Vector3& p) const; + /// @} + /// @name Testable + /// @{ + + /// Output stream operator + GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Cal3_S2& cal); + + /// print with optional string + void print(const std::string& s = "Cal3_S2") const override; + + /// Check if equal up to specified tolerance + bool equals(const Cal3_S2& K, double tol = 10e-9) const; + /// "Between", subtracts calibrations. between(p,q) == compose(inverse(p),q) inline Cal3_S2 between(const Cal3_S2& q, - OptionalJacobian<5,5> H1=boost::none, - OptionalJacobian<5,5> H2=boost::none) const { - if(H1) *H1 = -I_5x5; - if(H2) *H2 = I_5x5; - return Cal3_S2(q.fx_-fx_, q.fy_-fy_, q.s_-s_, q.u0_-u0_, q.v0_-v0_); + OptionalJacobian<5, 5> H1 = boost::none, + OptionalJacobian<5, 5> H2 = boost::none) const { + if (H1) *H1 = -I_5x5; + if (H2) *H2 = I_5x5; + return Cal3_S2(q.fx_ - fx_, q.fy_ - fy_, q.s_ - s_, q.u0_ - u0_, + q.v0_ - v0_); } - /// @} /// @name Manifold /// @{ /// return DOF, dimensionality of tangent space - inline size_t dim() const { return dimension; } - - /// return DOF, dimensionality of tangent space - static size_t Dim() { return dimension; } + inline static size_t Dim() { return dimension; } /// Given 5-dim tangent vector, create new calibration inline Cal3_S2 retract(const Vector& d) const { @@ -212,27 +131,22 @@ class GTSAM_EXPORT Cal3_S2 { /// @name Advanced Interface /// @{ -private: - + private: /// Serialization function friend class boost::serialization::access; - template - void serialize(Archive & ar, const unsigned int /*version*/) { - ar & BOOST_SERIALIZATION_NVP(fx_); - ar & BOOST_SERIALIZATION_NVP(fy_); - ar & BOOST_SERIALIZATION_NVP(s_); - ar & BOOST_SERIALIZATION_NVP(u0_); - ar & BOOST_SERIALIZATION_NVP(v0_); + template + void serialize(Archive& ar, const unsigned int /*version*/) { + ar& boost::serialization::make_nvp( + "Cal3_S2", boost::serialization::base_object(*this)); } /// @} - }; -template<> +template <> struct traits : public internal::Manifold {}; -template<> +template <> struct traits : public internal::Manifold {}; -} // \ namespace gtsam +} // \ namespace gtsam diff --git a/gtsam/geometry/Cal3_S2Stereo.cpp b/gtsam/geometry/Cal3_S2Stereo.cpp index 9b5aea4ed..9ef8c83a3 100644 --- a/gtsam/geometry/Cal3_S2Stereo.cpp +++ b/gtsam/geometry/Cal3_S2Stereo.cpp @@ -20,20 +20,56 @@ #include namespace gtsam { -using namespace std; + +/* ************************************************************************* */ +std::ostream& operator<<(std::ostream& os, const Cal3_S2Stereo& cal) { + os << (Cal3_S2&)cal; + os << ", b: " << cal.baseline(); + return os; +} /* ************************************************************************* */ void Cal3_S2Stereo::print(const std::string& s) const { - K_.print(s+"K: "); - std::cout << s << "Baseline: " << b_ << std::endl; - 
} + std::cout << s << (s != "" ? " " : ""); + std::cout << "K: " << (Matrix)K() << std::endl; + std::cout << "Baseline: " << b_ << std::endl; +} /* ************************************************************************* */ bool Cal3_S2Stereo::equals(const Cal3_S2Stereo& other, double tol) const { - if (std::abs(b_ - other.b_) > tol) return false; - return K_.equals(other.K_,tol); + const Cal3_S2* base = dynamic_cast(&other); + return (Cal3_S2::equals(*base, tol) && + std::fabs(b_ - other.baseline()) < tol); +} + +/* ************************************************************************* */ +Point2 Cal3_S2Stereo::uncalibrate(const Point2& p, OptionalJacobian<2, 6> Dcal, + OptionalJacobian<2, 2> Dp) const { + const double x = p.x(), y = p.y(); + if (Dcal) *Dcal << x, 0.0, y, 1.0, 0.0, 0.0, 0.0, y, 0.0, 0.0, 1.0, 0.0; + if (Dp) *Dp << fx_, s_, 0.0, fy_; + return Point2(fx_ * x + s_ * y + u0_, fy_ * y + v0_); +} + +/* ************************************************************************* */ +Point2 Cal3_S2Stereo::calibrate(const Point2& p, OptionalJacobian<2, 6> Dcal, + OptionalJacobian<2, 2> Dp) const { + const double u = p.x(), v = p.y(); + double delta_u = u - u0_, delta_v = v - v0_; + double inv_fx = 1 / fx_, inv_fy = 1 / fy_; + double inv_fy_delta_v = inv_fy * delta_v; + double inv_fx_s_inv_fy = inv_fx * s_ * inv_fy; + + Point2 point(inv_fx * (delta_u - s_ * inv_fy_delta_v), inv_fy_delta_v); + if (Dcal) { + *Dcal << -inv_fx * point.x(), inv_fx * s_ * inv_fy * inv_fy_delta_v, + -inv_fx * point.y(), -inv_fx, inv_fx_s_inv_fy, 0, 0, + -inv_fy * point.y(), 0, 0, -inv_fy, 0; + } + if (Dp) *Dp << inv_fx, -inv_fx_s_inv_fy, 0, inv_fy; + return point; } /* ************************************************************************* */ -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/geometry/Cal3_S2Stereo.h b/gtsam/geometry/Cal3_S2Stereo.h index a6eb41b60..ae0052fd5 100644 --- a/gtsam/geometry/Cal3_S2Stereo.h +++ b/gtsam/geometry/Cal3_S2Stereo.h @@ -22,135 +22,143 @@ namespace gtsam { - /** - * @brief The most common 5DOF 3D->2D calibration, stereo version - * @addtogroup geometry - * \nosubgrouping - */ - class GTSAM_EXPORT Cal3_S2Stereo { - private: - - Cal3_S2 K_; - double b_; - - public: - - enum { dimension = 6 }; - typedef boost::shared_ptr shared_ptr; ///< shared pointer to stereo calibration object - - /// @name Standard Constructors - /// @ - - /// default calibration leaves coordinates unchanged - Cal3_S2Stereo() : - K_(1, 1, 0, 0, 0), b_(1.0) { - } - - /// constructor from doubles - Cal3_S2Stereo(double fx, double fy, double s, double u0, double v0, double b) : - K_(fx, fy, s, u0, v0), b_(b) { - } - - /// constructor from vector - Cal3_S2Stereo(const Vector &d): K_(d(0), d(1), d(2), d(3), d(4)), b_(d(5)){} - - /// easy constructor; field-of-view in degrees, assumes zero skew - Cal3_S2Stereo(double fov, int w, int h, double b) : - K_(fov, w, h), b_(b) { - } - - /// @} - /// @name Testable - /// @{ - - void print(const std::string& s = "") const; - - /// Check if equal up to specified tolerance - bool equals(const Cal3_S2Stereo& other, double tol = 10e-9) const; - - /// @} - /// @name Standard Interface - /// @{ - - /// return calibration, same for left and right - const Cal3_S2& calibration() const { return K_;} - - /// return calibration matrix K, same for left and right - Matrix matrix() const { return K_.matrix();} - - /// focal length x - inline double fx() const { return K_.fx();} - - /// focal length x - inline double fy() const { return K_.fy();} - - /// skew - 
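The stereo uncalibrate/calibrate above mirror the Cal3_S2 versions but expose 2x6 calibration Jacobians; the last column is zero because the pinhole projection does not depend on the baseline. A hedged usage sketch (intrinsics, baseline, and the point are illustrative):

    #include <gtsam/geometry/Cal3_S2Stereo.h>

    int main() {
      gtsam::Cal3_S2Stereo K(500, 500, 0, 320, 240, 0.1);
      Eigen::Matrix<double, 2, 6> Dcal;
      const gtsam::Point2 uv = K.uncalibrate(gtsam::Point2(0.2, -0.1), Dcal);
      // uv is expected to be about (420, 190); Dcal.col(5) should be zero.
      return 0;
    }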
inline double skew() const { return K_.skew();} - - /// image center in x - inline double px() const { return K_.px();} - - /// image center in y - inline double py() const { return K_.py();} - - /// return the principal point - Point2 principalPoint() const { return K_.principalPoint();} - - /// return baseline - inline double baseline() const { return b_; } - - /// vectorized form (column-wise) - Vector6 vector() const { - Vector6 v; - v << K_.vector(), b_; - return v; - } - - /// @} - /// @name Manifold - /// @{ - - /// return DOF, dimensionality of tangent space - inline size_t dim() const { return dimension; } +/** + * @brief The most common 5DOF 3D->2D calibration, stereo version + * @addtogroup geometry + * \nosubgrouping + */ +class GTSAM_EXPORT Cal3_S2Stereo : public Cal3_S2 { + private: + double b_ = 1.0f; ///< Stereo baseline. - /// return DOF, dimensionality of tangent space - static size_t Dim() { return dimension; } + public: + enum { dimension = 6 }; - /// Given 6-dim tangent vector, create new calibration - inline Cal3_S2Stereo retract(const Vector& d) const { - return Cal3_S2Stereo(K_.fx() + d(0), K_.fy() + d(1), K_.skew() + d(2), K_.px() + d(3), K_.py() + d(4), b_ + d(5)); - } + ///< shared pointer to stereo calibration object + using shared_ptr = boost::shared_ptr; - /// Unretraction for the calibration - Vector6 localCoordinates(const Cal3_S2Stereo& T2) const { - return T2.vector() - vector(); - } + /// @name Standard Constructors + /// @ + /// default calibration leaves coordinates unchanged + Cal3_S2Stereo() = default; - /// @} - /// @name Advanced Interface - /// @{ + /// constructor from doubles + Cal3_S2Stereo(double fx, double fy, double s, double u0, double v0, double b) + : Cal3_S2(fx, fy, s, u0, v0), b_(b) {} - private: - /** Serialization function */ - friend class boost::serialization::access; - template - void serialize(Archive & ar, const unsigned int /*version*/) - { - ar & BOOST_SERIALIZATION_NVP(K_); - ar & BOOST_SERIALIZATION_NVP(b_); - } - /// @} + /// constructor from vector + Cal3_S2Stereo(const Vector6& d) + : Cal3_S2(d(0), d(1), d(2), d(3), d(4)), b_(d(5)) {} - }; + /// easy constructor; field-of-view in degrees, assumes zero skew + Cal3_S2Stereo(double fov, int w, int h, double b) + : Cal3_S2(fov, w, h), b_(b) {} - // Define GTSAM traits - template<> - struct traits : public internal::Manifold { - }; + /** + * Convert intrinsic coordinates xy to image coordinates uv, fixed derivaitves + * @param p point in intrinsic coordinates + * @param Dcal optional 2*6 Jacobian wrpt Cal3_S2Stereo parameters + * @param Dp optional 2*2 Jacobian wrpt intrinsic coordinates + * @return point in image coordinates + */ + Point2 uncalibrate(const Point2& p, OptionalJacobian<2, 6> Dcal = boost::none, + OptionalJacobian<2, 2> Dp = boost::none) const; - template<> - struct traits : public internal::Manifold { - }; + /** + * Convert image coordinates uv to intrinsic coordinates xy + * @param p point in image coordinates + * @param Dcal optional 2*6 Jacobian wrpt Cal3_S2Stereo parameters + * @param Dp optional 2*2 Jacobian wrpt intrinsic coordinates + * @return point in intrinsic coordinates + */ + Point2 calibrate(const Point2& p, OptionalJacobian<2, 6> Dcal = boost::none, + OptionalJacobian<2, 2> Dp = boost::none) const; -} // \ namespace gtsam + /** + * Convert homogeneous image coordinates to intrinsic coordinates + * @param p point in image coordinates + * @return point in intrinsic coordinates + */ + Vector3 calibrate(const Vector3& p) const { return 
Cal3_S2::calibrate(p); } + + /// @} + /// @name Testable + /// @{ + + /// Output stream operator + GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, + const Cal3_S2Stereo& cal); + + /// print with optional string + void print(const std::string& s = "") const override; + + /// Check if equal up to specified tolerance + bool equals(const Cal3_S2Stereo& other, double tol = 10e-9) const; + + /// @} + /// @name Standard Interface + /// @{ + + /// return calibration, same for left and right + const Cal3_S2& calibration() const { return *this; } + + /// return calibration matrix K, same for left and right + Matrix3 K() const override { return Cal3_S2::K(); } + + /// return baseline + inline double baseline() const { return b_; } + + /// vectorized form (column-wise) + Vector6 vector() const { + Vector6 v; + v << Cal3_S2::vector(), b_; + return v; + } + + /// @} + /// @name Manifold + /// @{ + + /// return DOF, dimensionality of tangent space + inline size_t dim() const override { return Dim(); } + + /// return DOF, dimensionality of tangent space + inline static size_t Dim() { return dimension; } + + /// Given 6-dim tangent vector, create new calibration + inline Cal3_S2Stereo retract(const Vector& d) const { + return Cal3_S2Stereo(fx() + d(0), fy() + d(1), skew() + d(2), px() + d(3), + py() + d(4), b_ + d(5)); + } + + /// Unretraction for the calibration + Vector6 localCoordinates(const Cal3_S2Stereo& T2) const { + return T2.vector() - vector(); + } + + /// @} + /// @name Advanced Interface + /// @{ + + private: + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(Archive& ar, const unsigned int /*version*/) { + ar& boost::serialization::make_nvp( + "Cal3_S2", boost::serialization::base_object(*this)); + ar& BOOST_SERIALIZATION_NVP(b_); + } + /// @} +}; + +// Define GTSAM traits +template <> +struct traits : public internal::Manifold {}; + +template <> +struct traits : public internal::Manifold { +}; + +} // \ namespace gtsam diff --git a/gtsam/geometry/CameraSet.h b/gtsam/geometry/CameraSet.h index ecf9a572d..feab8e0fa 100644 --- a/gtsam/geometry/CameraSet.h +++ b/gtsam/geometry/CameraSet.h @@ -319,7 +319,7 @@ class CameraSet : public std::vector > } public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; template diff --git a/gtsam/geometry/EssentialMatrix.h b/gtsam/geometry/EssentialMatrix.h index 3235fdedd..909576aa0 100644 --- a/gtsam/geometry/EssentialMatrix.h +++ b/gtsam/geometry/EssentialMatrix.h @@ -176,17 +176,6 @@ class EssentialMatrix { /// @} -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - Point3 transform_to(const Point3& p, - OptionalJacobian<3, 5> DE = boost::none, - OptionalJacobian<3, 3> Dpoint = boost::none) const { - return transformTo(p, DE, Dpoint); - }; - /// @} -#endif - private: /// @name Advanced Interface /// @{ @@ -212,7 +201,7 @@ class EssentialMatrix { /// @} public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; template<> diff --git a/gtsam/geometry/PinholeCamera.h b/gtsam/geometry/PinholeCamera.h index c52127a45..cf449ce8c 100644 --- a/gtsam/geometry/PinholeCamera.h +++ b/gtsam/geometry/PinholeCamera.h @@ -175,7 +175,7 @@ class PinholeCamera: public PinholeBaseK { } /// return calibration - const Calibration& calibration() const { + const Calibration& calibration() const override { return K_; } @@ -325,7 +325,7 @@ class PinholeCamera: public PinholeBaseK { } public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + 
GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; // manifold traits diff --git a/gtsam/geometry/PinholePose.h b/gtsam/geometry/PinholePose.h index 935962423..3bf96c08b 100644 --- a/gtsam/geometry/PinholePose.h +++ b/gtsam/geometry/PinholePose.h @@ -107,9 +107,9 @@ class PinholeBaseK: public PinholeBase { // If needed, apply chain rule if (Dpose) - *Dpose = Dpi_pn * *Dpose; + *Dpose = Dpi_pn * *Dpose; if (Dpoint) - *Dpoint = Dpi_pn * *Dpoint; + *Dpoint = Dpi_pn * *Dpoint; return pi; } @@ -222,7 +222,7 @@ class PinholeBaseK: public PinholeBase { } public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; // end of class PinholeBaseK @@ -361,7 +361,7 @@ class PinholePose: public PinholeBaseK { } /// return calibration - virtual const CALIBRATION& calibration() const { + const CALIBRATION& calibration() const override { return *K_; } @@ -425,7 +425,7 @@ class PinholePose: public PinholeBaseK { } public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; // end of class PinholePose diff --git a/gtsam/geometry/PinholeSet.h b/gtsam/geometry/PinholeSet.h index a2896ca8d..7e3dae56f 100644 --- a/gtsam/geometry/PinholeSet.h +++ b/gtsam/geometry/PinholeSet.h @@ -45,7 +45,7 @@ class PinholeSet: public CameraSet { /// @{ /// print - virtual void print(const std::string& s = "") const { + void print(const std::string& s = "") const override { Base::print(s); } diff --git a/gtsam/geometry/Point2.cpp b/gtsam/geometry/Point2.cpp index 3d4bb753e..d8060cfcf 100644 --- a/gtsam/geometry/Point2.cpp +++ b/gtsam/geometry/Point2.cpp @@ -50,49 +50,6 @@ double distance2(const Point2& p, const Point2& q, OptionalJacobian<1, 2> H1, } } -#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS - -/* ************************************************************************* */ -void Point2::print(const string& s) const { - cout << s << *this << endl; -} - -/* ************************************************************************* */ -bool Point2::equals(const Point2& q, double tol) const { - return (std::abs(x() - q.x()) < tol && std::abs(y() - q.y()) < tol); -} - -/* ************************************************************************* */ -double Point2::norm(OptionalJacobian<1,2> H) const { - return gtsam::norm2(*this, H); -} - -/* ************************************************************************* */ -double Point2::distance(const Point2& point, OptionalJacobian<1,2> H1, - OptionalJacobian<1,2> H2) const { - return gtsam::distance2(*this, point, H1, H2); -} - -/* ************************************************************************* */ -ostream &operator<<(ostream &os, const Point2& p) { - os << '(' << p.x() << ", " << p.y() << ')'; - return os; -} - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -boost::optional CircleCircleIntersection(double R_d, double r_d, double tol) { - return circleCircleIntersection(R_d, r_d, tol); -} -std::list CircleCircleIntersection(Point2 c1, Point2 c2, boost::optional fh) { - return circleCircleIntersection(c1, c2, fh); -} -std::list CircleCircleIntersection(Point2 c1, double r1, Point2 c2, double r2, double tol) { - return circleCircleIntersection(c1, r1, c2, r2, tol); -} -#endif - -#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS - /* ************************************************************************* */ // Math inspired by http://paulbourke.net/geometry/circlesphere/ boost::optional circleCircleIntersection(double R_d, double r_d, diff --git a/gtsam/geometry/Point2.h b/gtsam/geometry/Point2.h index 718fb2992..e6574fe41 100644 --- a/gtsam/geometry/Point2.h +++ 
b/gtsam/geometry/Point2.h @@ -22,134 +22,9 @@ namespace gtsam { -#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS - - /// As of GTSAM 4, in order to make GTSAM more lean, - /// it is now possible to just typedef Point2 to Vector2 - typedef Vector2 Point2; - -#else - -/** - * A 2D point - * Complies with the Testable Concept - * Functional, so no set functions: once created, a point is constant. - * @addtogroup geometry - * \nosubgrouping - */ -class Point2 : public Vector2 { -private: - -public: - enum { dimension = 2 }; - /// @name Standard Constructors - /// @{ - - /// default constructor -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - // Deprecated default constructor initializes to zero, in contrast to new behavior below - Point2() { setZero(); } -#else - Point2() {} -#endif - - using Vector2::Vector2; - - /// @} - /// @name Advanced Constructors - /// @{ - - /// construct from 2D vector - explicit Point2(const Vector2& v):Vector2(v) {} - /// @} - /// @name Testable - /// @{ - - /// print with optional string - GTSAM_EXPORT void print(const std::string& s = "") const; - - /// equals with an tolerance, prints out message if unequal - GTSAM_EXPORT bool equals(const Point2& q, double tol = 1e-9) const; - - /// @} - /// @name Group - /// @{ - - /// identity - inline static Point2 identity() {return Point2(0,0);} - - /// @} - /// @name Vector Space - /// @{ - - /** creates a unit vector */ - Point2 unit() const { return *this/norm(); } - - /** norm of point, with derivative */ - GTSAM_EXPORT double norm(OptionalJacobian<1,2> H = boost::none) const; - - /** distance between two points */ - GTSAM_EXPORT double distance(const Point2& p2, OptionalJacobian<1,2> H1 = boost::none, - OptionalJacobian<1,2> H2 = boost::none) const; - - /// @} - /// @name Standard Interface - /// @{ - - /// equality - inline bool operator ==(const Point2& q) const {return x()==q.x() && y()==q.y();} - - /// get x - inline double x() const {return (*this)[0];} - - /// get y - inline double y() const {return (*this)[1];} - - /// return vectorized form (column-wise). 
- const Vector2& vector() const { return *this; } - - /// @} - - /// Streaming - GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Point2& p); - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - Point2 inverse() const { return -(*this); } - Point2 compose(const Point2& q) const { return (*this)+q;} - Point2 between(const Point2& q) const { return q-(*this);} - Vector2 localCoordinates(const Point2& q) const { return between(q);} - Point2 retract(const Vector2& v) const { return compose(Point2(v));} - static Vector2 Logmap(const Point2& p) { return p;} - static Point2 Expmap(const Vector2& v) { return Point2(v);} - inline double dist(const Point2& p2) const {return distance(p2);} - GTSAM_EXPORT static boost::optional CircleCircleIntersection(double R_d, double r_d, double tol = 1e-9); - GTSAM_EXPORT static std::list CircleCircleIntersection(Point2 c1, Point2 c2, boost::optional fh); - GTSAM_EXPORT static std::list CircleCircleIntersection(Point2 c1, double r1, Point2 c2, double r2, double tol = 1e-9); - /// @} -#endif - -private: - - /// @name Advanced Interface - /// @{ - - /** Serialization function */ - friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int /*version*/) - { - ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Vector2);} - - /// @} -}; - -template<> -struct traits : public internal::VectorSpace { -}; - -#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS +/// As of GTSAM 4, in order to make GTSAM more lean, +/// it is now possible to just typedef Point2 to Vector2 +typedef Vector2 Point2; /// Distance of the point from the origin, with Jacobian GTSAM_EXPORT double norm2(const Point2& p, OptionalJacobian<1, 2> H = boost::none); diff --git a/gtsam/geometry/Point3.cpp b/gtsam/geometry/Point3.cpp index 8aa339a89..a565ac140 100644 --- a/gtsam/geometry/Point3.cpp +++ b/gtsam/geometry/Point3.cpp @@ -22,64 +22,6 @@ using namespace std; namespace gtsam { -#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS -bool Point3::equals(const Point3 &q, double tol) const { - return (std::abs(x() - q.x()) < tol && std::abs(y() - q.y()) < tol && - std::abs(z() - q.z()) < tol); -} - -void Point3::print(const string& s) const { - cout << s << *this << endl; -} - -/* ************************************************************************* */ -double Point3::distance(const Point3 &q, OptionalJacobian<1, 3> H1, - OptionalJacobian<1, 3> H2) const { - return gtsam::distance3(*this,q,H1,H2); -} - -double Point3::norm(OptionalJacobian<1,3> H) const { - return gtsam::norm3(*this, H); -} - -Point3 Point3::normalized(OptionalJacobian<3,3> H) const { - return gtsam::normalize(*this, H); -} - -Point3 Point3::cross(const Point3 &q, OptionalJacobian<3, 3> H1, - OptionalJacobian<3, 3> H2) const { - return gtsam::cross(*this, q, H1, H2); -} - -double Point3::dot(const Point3 &q, OptionalJacobian<1, 3> H1, - OptionalJacobian<1, 3> H2) const { - return gtsam::dot(*this, q, H1, H2); -} - -/* ************************************************************************* */ -ostream &operator<<(ostream &os, const Point3& p) { - os << '[' << p.x() << ", " << p.y() << ", " << p.z() << "]'"; - return os; -} - -/* ************************************************************************* */ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -Point3 Point3::add(const Point3 &q, OptionalJacobian<3,3> H1, - OptionalJacobian<3,3> H2) const { - if (H1) *H1 = I_3x3; - if (H2) *H2 = I_3x3; - return *this + q; -} - -Point3 Point3::sub(const Point3 &q, OptionalJacobian<3,3> H1, - 
OptionalJacobian<3,3> H2) const { - if (H1) *H1 = I_3x3; - if (H2) *H2 = -I_3x3; - return *this - q; -} -#endif - -#endif /* ************************************************************************* */ double distance3(const Point3 &p1, const Point3 &q, OptionalJacobian<1, 3> H1, OptionalJacobian<1, 3> H2) { @@ -133,6 +75,18 @@ double dot(const Point3 &p, const Point3 &q, OptionalJacobian<1, 3> H1, return p.x() * q.x() + p.y() * q.y() + p.z() * q.z(); } +Point3Pair means(const std::vector &abPointPairs) { + const size_t n = abPointPairs.size(); + if (n == 0) throw std::invalid_argument("Point3::mean input Point3Pair vector is empty"); + Point3 aSum(0, 0, 0), bSum(0, 0, 0); + for (const Point3Pair &abPair : abPointPairs) { + aSum += abPair.first; + bSum += abPair.second; + } + const double f = 1.0 / n; + return {aSum * f, bSum * f}; +} + /* ************************************************************************* */ ostream &operator<<(ostream &os, const gtsam::Point3Pair &p) { os << p.first << " <-> " << p.second; diff --git a/gtsam/geometry/Point3.h b/gtsam/geometry/Point3.h index 3b2330403..57188fc5e 100644 --- a/gtsam/geometry/Point3.h +++ b/gtsam/geometry/Point3.h @@ -26,134 +26,13 @@ #include #include #include +#include namespace gtsam { -#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS - - /// As of GTSAM 4, in order to make GTSAM more lean, - /// it is now possible to just typedef Point3 to Vector3 - typedef Vector3 Point3; - -#else - -/** - * A 3D point is just a Vector3 with some additional methods - * @addtogroup geometry - * \nosubgrouping - */ -class Point3 : public Vector3 { - - public: - - enum { dimension = 3 }; - - /// @name Standard Constructors - /// @{ - - // Deprecated default constructor initializes to zero, in contrast to new behavior below -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - Point3() { setZero(); } -#endif - - using Vector3::Vector3; - - /// @} - /// @name Testable - /// @{ - - /** print with optional string */ - GTSAM_EXPORT void print(const std::string& s = "") const; - - /** equals with an tolerance */ - GTSAM_EXPORT bool equals(const Point3& p, double tol = 1e-9) const; - - /// @} - /// @name Group - /// @{ - - /// identity for group operation - inline static Point3 identity() { return Point3(0.0, 0.0, 0.0); } - - /// @} - /// @name Vector Space - /// @{ - - /** distance between two points */ - GTSAM_EXPORT double distance(const Point3& p2, OptionalJacobian<1, 3> H1 = boost::none, - OptionalJacobian<1, 3> H2 = boost::none) const; - - /** Distance of the point from the origin, with Jacobian */ - GTSAM_EXPORT double norm(OptionalJacobian<1,3> H = boost::none) const; - - /** normalize, with optional Jacobian */ - GTSAM_EXPORT Point3 normalized(OptionalJacobian<3, 3> H = boost::none) const; - - /** cross product @return this x q */ - GTSAM_EXPORT Point3 cross(const Point3 &q, OptionalJacobian<3, 3> H_p = boost::none, // - OptionalJacobian<3, 3> H_q = boost::none) const; - - /** dot product @return this * q*/ - GTSAM_EXPORT double dot(const Point3 &q, OptionalJacobian<1, 3> H_p = boost::none, // - OptionalJacobian<1, 3> H_q = boost::none) const; - - /// @} - /// @name Standard Interface - /// @{ - - /// return as Vector3 - const Vector3& vector() const { return *this; } - - /// get x - inline double x() const {return (*this)[0];} - - /// get y - inline double y() const {return (*this)[1];} - - /// get z - inline double z() const {return (*this)[2];} - - /// @} - - /// Output stream operator - GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Point3& p); 
- -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - Point3 inverse() const { return -(*this);} - Point3 compose(const Point3& q) const { return (*this)+q;} - Point3 between(const Point3& q) const { return q-(*this);} - Vector3 localCoordinates(const Point3& q) const { return between(q);} - Point3 retract(const Vector3& v) const { return compose(Point3(v));} - static Vector3 Logmap(const Point3& p) { return p;} - static Point3 Expmap(const Vector3& v) { return Point3(v);} - inline double dist(const Point3& q) const { return (q - *this).norm(); } - Point3 normalize(OptionalJacobian<3, 3> H = boost::none) const { return normalized(H);} - GTSAM_EXPORT Point3 add(const Point3& q, OptionalJacobian<3, 3> H1 = boost::none, - OptionalJacobian<3, 3> H2 = boost::none) const; - GTSAM_EXPORT Point3 sub(const Point3& q, OptionalJacobian<3, 3> H1 = boost::none, - OptionalJacobian<3, 3> H2 = boost::none) const; - /// @} -#endif - - private: - - /** Serialization function */ - friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int /*version*/) { - ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Vector3); - } - }; - -template<> -struct traits : public internal::VectorSpace {}; - -template<> -struct traits : public internal::VectorSpace {}; - -#endif // GTSAM_TYPEDEF_POINTS_TO_VECTORS +/// As of GTSAM 4, in order to make GTSAM more lean, +/// it is now possible to just typedef Point3 to Vector3 +typedef Vector3 Point3; // Convenience typedef typedef std::pair Point3Pair; @@ -180,6 +59,18 @@ GTSAM_EXPORT double dot(const Point3& p, const Point3& q, OptionalJacobian<1, 3> H_p = boost::none, OptionalJacobian<1, 3> H_q = boost::none); +/// mean +template +GTSAM_EXPORT Point3 mean(const CONTAINER& points) { + if (points.size() == 0) throw std::invalid_argument("Point3::mean input container is empty"); + Point3 sum(0, 0, 0); + sum = std::accumulate(points.begin(), points.end(), sum); + return sum / points.size(); +} + +/// Calculate the two means of a set of Point3 pairs +GTSAM_EXPORT Point3Pair means(const std::vector &abPointPairs); + template struct Range; diff --git a/gtsam/geometry/Pose2.cpp b/gtsam/geometry/Pose2.cpp index 9c41a76c8..71df0f753 100644 --- a/gtsam/geometry/Pose2.cpp +++ b/gtsam/geometry/Pose2.cpp @@ -322,10 +322,10 @@ boost::optional align(const vector& pairs) { // calculate cos and sin double c=0,s=0; for(const Point2Pair& pair: pairs) { - Point2 dq = pair.first - cp; - Point2 dp = pair.second - cq; - c += dp.x() * dq.x() + dp.y() * dq.y(); - s += dp.y() * dq.x() - dp.x() * dq.y(); // this works but is negative from formula above !! 
:-( + Point2 dp = pair.first - cp; + Point2 dq = pair.second - cq; + c += dp.x() * dq.x() + dp.y() * dq.y(); + s += -dp.y() * dq.x() + dp.x() * dq.y(); } // calculate angle and translation diff --git a/gtsam/geometry/Pose2.h b/gtsam/geometry/Pose2.h index 388318827..6372779c3 100644 --- a/gtsam/geometry/Pose2.h +++ b/gtsam/geometry/Pose2.h @@ -289,22 +289,6 @@ class Pose2: public LieGroup { /// @} -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - Point2 transform_from(const Point2& point, - OptionalJacobian<2, 3> Dpose = boost::none, - OptionalJacobian<2, 2> Dpoint = boost::none) const { - return transformFrom(point, Dpose, Dpoint); - } - Point2 transform_to(const Point2& point, - OptionalJacobian<2, 3> Dpose = boost::none, - OptionalJacobian<2, 2> Dpoint = boost::none) const { - return transformTo(point, Dpose, Dpoint); - } - /// @} -#endif - private: // Serialization function @@ -317,7 +301,7 @@ class Pose2: public LieGroup { public: // Align for Point2, which is either derived from, or is typedef, of Vector2 - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; // Pose2 /** specialization for pose2 wedge function (generic template in Lie.h) */ diff --git a/gtsam/geometry/Pose3.cpp b/gtsam/geometry/Pose3.cpp index 31033a027..c183e32ed 100644 --- a/gtsam/geometry/Pose3.cpp +++ b/gtsam/geometry/Pose3.cpp @@ -19,13 +19,16 @@ #include #include -#include #include - -using namespace std; +#include +#include +#include namespace gtsam { +using std::vector; +using Point3Pairs = vector; + /** instantiate concept checks */ GTSAM_CONCEPT_POSE_INST(Pose3); @@ -36,10 +39,10 @@ Pose3::Pose3(const Pose2& pose2) : } /* ************************************************************************* */ -Pose3 Pose3::Create(const Rot3& R, const Point3& t, OptionalJacobian<6, 3> H1, - OptionalJacobian<6, 3> H2) { - if (H1) *H1 << I_3x3, Z_3x3; - if (H2) *H2 << Z_3x3, R.transpose(); +Pose3 Pose3::Create(const Rot3& R, const Point3& t, OptionalJacobian<6, 3> HR, + OptionalJacobian<6, 3> Ht) { + if (HR) *HR << I_3x3, Z_3x3; + if (Ht) *Ht << Z_3x3, R.transpose(); return Pose3(R, t); } @@ -72,15 +75,15 @@ Matrix6 Pose3::adjointMap(const Vector6& xi) { /* ************************************************************************* */ Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y, - OptionalJacobian<6,6> H) { - if (H) { - H->setZero(); + OptionalJacobian<6, 6> Hxi) { + if (Hxi) { + Hxi->setZero(); for (int i = 0; i < 6; ++i) { Vector6 dxi; dxi.setZero(); dxi(i) = 1.0; Matrix6 Gi = adjointMap(dxi); - H->col(i) = Gi * y; + Hxi->col(i) = Gi * y; } } return adjointMap(xi) * y; @@ -88,25 +91,23 @@ Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y, /* ************************************************************************* */ Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y, - OptionalJacobian<6,6> H) { - if (H) { - H->setZero(); + OptionalJacobian<6, 6> Hxi) { + if (Hxi) { + Hxi->setZero(); for (int i = 0; i < 6; ++i) { Vector6 dxi; dxi.setZero(); dxi(i) = 1.0; Matrix6 GTi = adjointMap(dxi).transpose(); - H->col(i) = GTi * y; + Hxi->col(i) = GTi * y; } } return adjointMap(xi).transpose() * y; } /* ************************************************************************* */ -void Pose3::print(const string& s) const { - cout << s; - R_.print("R:\n"); - cout << t_ << ";" << endl; +void Pose3::print(const std::string& s) const { + std::cout << (s.empty() ? 
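The corrected accumulation in Pose2 align works on centroid-centered pairs: c sums their dot products and s their 2-D cross products, and the rotation angle between the two point sets follows from (c, s). A standalone sketch with synthetic data (three points rotated by 30 degrees) using the same accumulation:

    #include <cmath>
    #include <cstdio>

    int main() {
      const double theta = std::acos(-1.0) / 6.0;        // 30 degrees
      const double p[3][2] = {{1, 0}, {0, 2}, {-1, 1}};  // first points
      double q[3][2];                                    // second points = R(theta) * p
      double cpx = 0, cpy = 0, cqx = 0, cqy = 0;
      for (int i = 0; i < 3; ++i) {
        q[i][0] = std::cos(theta) * p[i][0] - std::sin(theta) * p[i][1];
        q[i][1] = std::sin(theta) * p[i][0] + std::cos(theta) * p[i][1];
        cpx += p[i][0] / 3.0; cpy += p[i][1] / 3.0;
        cqx += q[i][0] / 3.0; cqy += q[i][1] / 3.0;
      }
      double c = 0, s = 0;
      for (int i = 0; i < 3; ++i) {
        const double dpx = p[i][0] - cpx, dpy = p[i][1] - cpy;
        const double dqx = q[i][0] - cqx, dqy = q[i][1] - cqy;
        c += dpx * dqx + dpy * dqy;   // dot product
        s += -dpy * dqx + dpx * dqy;  // z component of the cross product
      }
      std::printf("recovered angle: %f rad\n", std::atan2(s, c));  // ~0.5236
      return 0;
    }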
s : s + " ") << *this << std::endl; } /* ************************************************************************* */ @@ -116,8 +117,8 @@ bool Pose3::equals(const Pose3& pose, double tol) const { /* ************************************************************************* */ /** Modified from Murray94book version (which assumes w and v normalized?) */ -Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> H) { - if (H) *H = ExpmapDerivative(xi); +Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> Hxi) { + if (Hxi) *Hxi = ExpmapDerivative(xi); // get angular velocity omega and translational velocity v from twist xi Vector3 omega(xi(0), xi(1), xi(2)), v(xi(3), xi(4), xi(5)); @@ -125,8 +126,8 @@ Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> H) { Rot3 R = Rot3::Expmap(omega); double theta2 = omega.dot(omega); if (theta2 > std::numeric_limits::epsilon()) { - Vector3 t_parallel = omega * omega.dot(v); // translation parallel to axis - Vector3 omega_cross_v = omega.cross(v); // points towards axis + Vector3 t_parallel = omega * omega.dot(v); // translation parallel to axis + Vector3 omega_cross_v = omega.cross(v); // points towards axis Vector3 t = (omega_cross_v - R * omega_cross_v + t_parallel) / theta2; return Pose3(R, t); } else { @@ -135,10 +136,10 @@ Pose3 Pose3::Expmap(const Vector6& xi, OptionalJacobian<6, 6> H) { } /* ************************************************************************* */ -Vector6 Pose3::Logmap(const Pose3& p, OptionalJacobian<6, 6> H) { - if (H) *H = LogmapDerivative(p); - const Vector3 w = Rot3::Logmap(p.rotation()); - const Vector3 T = p.translation(); +Vector6 Pose3::Logmap(const Pose3& pose, OptionalJacobian<6, 6> Hpose) { + if (Hpose) *Hpose = LogmapDerivative(pose); + const Vector3 w = Rot3::Logmap(pose.rotation()); + const Vector3 T = pose.translation(); const double t = w.norm(); if (t < 1e-10) { Vector6 log; @@ -158,47 +159,39 @@ Vector6 Pose3::Logmap(const Pose3& p, OptionalJacobian<6, 6> H) { } /* ************************************************************************* */ -Pose3 Pose3::ChartAtOrigin::Retract(const Vector6& xi, ChartJacobian H) { +Pose3 Pose3::ChartAtOrigin::Retract(const Vector6& xi, ChartJacobian Hxi) { #ifdef GTSAM_POSE3_EXPMAP - return Expmap(xi, H); + return Expmap(xi, Hxi); #else Matrix3 DR; - Rot3 R = Rot3::Retract(xi.head<3>(), H ? &DR : 0); - if (H) { - *H = I_6x6; - H->topLeftCorner<3,3>() = DR; + Rot3 R = Rot3::Retract(xi.head<3>(), Hxi ? &DR : 0); + if (Hxi) { + *Hxi = I_6x6; + Hxi->topLeftCorner<3, 3>() = DR; } return Pose3(R, Point3(xi.tail<3>())); #endif } /* ************************************************************************* */ -Vector6 Pose3::ChartAtOrigin::Local(const Pose3& T, ChartJacobian H) { +Vector6 Pose3::ChartAtOrigin::Local(const Pose3& pose, ChartJacobian Hpose) { #ifdef GTSAM_POSE3_EXPMAP - return Logmap(T, H); + return Logmap(pose, Hpose); #else Matrix3 DR; - Vector3 omega = Rot3::LocalCoordinates(T.rotation(), H ? &DR : 0); - if (H) { - *H = I_6x6; - H->topLeftCorner<3,3>() = DR; + Vector3 omega = Rot3::LocalCoordinates(pose.rotation(), Hpose ? &DR : 0); + if (Hpose) { + *Hpose = I_6x6; + Hpose->topLeftCorner<3, 3>() = DR; } Vector6 xi; - xi << omega, T.translation(); + xi << omega, pose.translation(); return xi; #endif } /* ************************************************************************* */ -/** - * Compute the 3x3 bottom-left block Q of the SE3 Expmap derivative matrix - * J(xi) = [J_(w) Z_3x3; - * Q J_(w)] - * where J_(w) is the SO3 Expmap derivative. 
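The bottom-left block Q described here is what distinguishes the full SE(3) Jacobian J = [Jw 0; Q Jw] from the SO(3) one. A minimal, hypothetical sanity check of ExpmapDerivative against gtsam's numericalDerivative11 helper (not part of this patch; assumes the default GTSAM_POSE3_EXPMAP chart):

    #include <gtsam/base/numericalDerivative.h>
    #include <gtsam/geometry/Pose3.h>
    #include <cassert>

    void checkExpmapDerivative() {
      using namespace gtsam;
      Vector6 xi;
      xi << 0.1, -0.2, 0.3, 1.0, 2.0, 3.0;
      // Analytical right Jacobian of Expmap at xi
      const Matrix6 actual = Pose3::ExpmapDerivative(xi);
      // Numerical Jacobian of the same map, in the same (local-coordinates) convention
      const Matrix6 expected = numericalDerivative11<Pose3, Vector6>(
          [](const Vector6& v) { return Pose3::Expmap(v); }, xi);
      assert(actual.isApprox(expected, 1e-5));
    }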
- * (see Chirikjian11book2, pg 44, eq 10.95. - * The closed-form formula is similar to formula 102 in Barfoot14tro) - */ -static Matrix3 computeQforExpmapDerivative(const Vector6& xi) { +Matrix3 Pose3::ComputeQforExpmapDerivative(const Vector6& xi, double nearZeroThreshold) { const auto w = xi.head<3>(); const auto v = xi.tail<3>(); const Matrix3 V = skewSymmetric(v); @@ -220,18 +213,20 @@ static Matrix3 computeQforExpmapDerivative(const Vector6& xi) { #else // The closed-form formula in Barfoot14tro eq. (102) double phi = w.norm(); - if (std::abs(phi)>1e-5) { - const double sinPhi = sin(phi), cosPhi = cos(phi); - const double phi2 = phi * phi, phi3 = phi2 * phi, phi4 = phi3 * phi, phi5 = phi4 * phi; + const Matrix3 WVW = W * V * W; + if (std::abs(phi) > nearZeroThreshold) { + const double s = sin(phi), c = cos(phi); + const double phi2 = phi * phi, phi3 = phi2 * phi, phi4 = phi3 * phi, + phi5 = phi4 * phi; // Invert the sign of odd-order terms to have the right Jacobian - Q = -0.5*V + (phi-sinPhi)/phi3*(W*V + V*W - W*V*W) - + (1-phi2/2-cosPhi)/phi4*(W*W*V + V*W*W - 3*W*V*W) - - 0.5*((1-phi2/2-cosPhi)/phi4 - 3*(phi-sinPhi-phi3/6.)/phi5)*(W*V*W*W + W*W*V*W); - } - else { - Q = -0.5*V + 1./6.*(W*V + V*W - W*V*W) - + 1./24.*(W*W*V + V*W*W - 3*W*V*W) - - 0.5*(1./24. + 3./120.)*(W*V*W*W + W*W*V*W); + Q = -0.5 * V + (phi - s) / phi3 * (W * V + V * W - WVW) + + (1 - phi2 / 2 - c) / phi4 * (W * W * V + V * W * W - 3 * WVW) - + 0.5 * ((1 - phi2 / 2 - c) / phi4 - 3 * (phi - s - phi3 / 6.) / phi5) * + (WVW * W + W * WVW); + } else { + Q = -0.5 * V + 1. / 6. * (W * V + V * W - WVW) - + 1. / 24. * (W * W * V + V * W * W - 3 * WVW) + + 1. / 120. * (WVW * W + W * WVW); } #endif @@ -242,7 +237,7 @@ static Matrix3 computeQforExpmapDerivative(const Vector6& xi) { Matrix6 Pose3::ExpmapDerivative(const Vector6& xi) { const Vector3 w = xi.head<3>(); const Matrix3 Jw = Rot3::ExpmapDerivative(w); - const Matrix3 Q = computeQforExpmapDerivative(xi); + const Matrix3 Q = ComputeQforExpmapDerivative(xi); Matrix6 J; J << Jw, Z_3x3, Q, Jw; return J; @@ -253,7 +248,7 @@ Matrix6 Pose3::LogmapDerivative(const Pose3& pose) { const Vector6 xi = Logmap(pose); const Vector3 w = xi.head<3>(); const Matrix3 Jw = Rot3::LogmapDerivative(w); - const Matrix3 Q = computeQforExpmapDerivative(xi); + const Matrix3 Q = ComputeQforExpmapDerivative(xi); const Matrix3 Q2 = -Jw*Q*Jw; Matrix6 J; J << Jw, Z_3x3, Q2, Jw; @@ -261,16 +256,16 @@ Matrix6 Pose3::LogmapDerivative(const Pose3& pose) { } /* ************************************************************************* */ -const Point3& Pose3::translation(OptionalJacobian<3, 6> H) const { - if (H) *H << Z_3x3, rotation().matrix(); +const Point3& Pose3::translation(OptionalJacobian<3, 6> Hself) const { + if (Hself) *Hself << Z_3x3, rotation().matrix(); return t_; } /* ************************************************************************* */ -const Rot3& Pose3::rotation(OptionalJacobian<3, 6> H) const { - if (H) { - *H << I_3x3, Z_3x3; +const Rot3& Pose3::rotation(OptionalJacobian<3, 6> Hself) const { + if (Hself) { + *Hself << I_3x3, Z_3x3; } return R_; } @@ -284,161 +279,147 @@ Matrix4 Pose3::matrix() const { } /* ************************************************************************* */ -Pose3 Pose3::transformPoseFrom(const Pose3& aTb) const { +Pose3 Pose3::transformPoseFrom(const Pose3& aTb, OptionalJacobian<6, 6> Hself, + OptionalJacobian<6, 6> HaTb) const { const Pose3& wTa = *this; - return wTa * aTb; + return wTa.compose(aTb, Hself, HaTb); } /* 
************************************************************************* */ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -Pose3 Pose3::transform_to(const Pose3& pose) const { - Rot3 cRv = R_ * Rot3(pose.R_.inverse()); - Point3 t = pose.transform_to(t_); - return Pose3(cRv, t); -} -#endif - -/* ************************************************************************* */ -Pose3 Pose3::transformPoseTo(const Pose3& wTb, OptionalJacobian<6, 6> H1, - OptionalJacobian<6, 6> H2) const { - if (H1) *H1 = -wTb.inverse().AdjointMap() * AdjointMap(); - if (H2) *H2 = I_6x6; +Pose3 Pose3::transformPoseTo(const Pose3& wTb, OptionalJacobian<6, 6> Hself, + OptionalJacobian<6, 6> HwTb) const { + if (Hself) *Hself = -wTb.inverse().AdjointMap() * AdjointMap(); + if (HwTb) *HwTb = I_6x6; const Pose3& wTa = *this; return wTa.inverse() * wTb; } /* ************************************************************************* */ -Point3 Pose3::transformFrom(const Point3& p, OptionalJacobian<3,6> Dpose, - OptionalJacobian<3,3> Dpoint) const { +Point3 Pose3::transformFrom(const Point3& point, OptionalJacobian<3, 6> Hself, + OptionalJacobian<3, 3> Hpoint) const { // Only get matrix once, to avoid multiple allocations, // as well as multiple conversions in the Quaternion case const Matrix3 R = R_.matrix(); - if (Dpose) { - Dpose->leftCols<3>() = R * skewSymmetric(-p.x(), -p.y(), -p.z()); - Dpose->rightCols<3>() = R; + if (Hself) { + Hself->leftCols<3>() = R * skewSymmetric(-point.x(), -point.y(), -point.z()); + Hself->rightCols<3>() = R; } - if (Dpoint) { - *Dpoint = R; + if (Hpoint) { + *Hpoint = R; } - return R_ * p + t_; + return R_ * point + t_; } /* ************************************************************************* */ -Point3 Pose3::transformTo(const Point3& p, OptionalJacobian<3,6> Dpose, - OptionalJacobian<3,3> Dpoint) const { +Point3 Pose3::transformTo(const Point3& point, OptionalJacobian<3, 6> Hself, + OptionalJacobian<3, 3> Hpoint) const { // Only get transpose once, to avoid multiple allocations, // as well as multiple conversions in the Quaternion case const Matrix3 Rt = R_.transpose(); - const Point3 q(Rt*(p - t_)); - if (Dpose) { + const Point3 q(Rt*(point - t_)); + if (Hself) { const double wx = q.x(), wy = q.y(), wz = q.z(); - (*Dpose) << + (*Hself) << 0.0, -wz, +wy,-1.0, 0.0, 0.0, +wz, 0.0, -wx, 0.0,-1.0, 0.0, -wy, +wx, 0.0, 0.0, 0.0,-1.0; } - if (Dpoint) { - *Dpoint = Rt; + if (Hpoint) { + *Hpoint = Rt; } return q; } /* ************************************************************************* */ -double Pose3::range(const Point3& point, OptionalJacobian<1, 6> H1, - OptionalJacobian<1, 3> H2) const { +double Pose3::range(const Point3& point, OptionalJacobian<1, 6> Hself, + OptionalJacobian<1, 3> Hpoint) const { Matrix36 D_local_pose; Matrix3 D_local_point; - Point3 local = transformTo(point, H1 ? &D_local_pose : 0, H2 ? &D_local_point : 0); - if (!H1 && !H2) { + Point3 local = transformTo(point, Hself ? &D_local_pose : 0, Hpoint ? 
&D_local_point : 0); + if (!Hself && !Hpoint) { return local.norm(); } else { Matrix13 D_r_local; const double r = norm3(local, D_r_local); - if (H1) *H1 = D_r_local * D_local_pose; - if (H2) *H2 = D_r_local * D_local_point; + if (Hself) *Hself = D_r_local * D_local_pose; + if (Hpoint) *Hpoint = D_r_local * D_local_point; return r; } } /* ************************************************************************* */ -double Pose3::range(const Pose3& pose, OptionalJacobian<1, 6> H1, - OptionalJacobian<1, 6> H2) const { +double Pose3::range(const Pose3& pose, OptionalJacobian<1, 6> Hself, + OptionalJacobian<1, 6> Hpose) const { Matrix13 D_local_point; - double r = range(pose.translation(), H1, H2 ? &D_local_point : 0); - if (H2) *H2 << Matrix13::Zero(), D_local_point * pose.rotation().matrix(); + double r = range(pose.translation(), Hself, Hpose ? &D_local_point : 0); + if (Hpose) *Hpose << Matrix13::Zero(), D_local_point * pose.rotation().matrix(); return r; } /* ************************************************************************* */ -Unit3 Pose3::bearing(const Point3& point, OptionalJacobian<2, 6> H1, - OptionalJacobian<2, 3> H2) const { +Unit3 Pose3::bearing(const Point3& point, OptionalJacobian<2, 6> Hself, + OptionalJacobian<2, 3> Hpoint) const { Matrix36 D_local_pose; Matrix3 D_local_point; - Point3 local = transformTo(point, H1 ? &D_local_pose : 0, H2 ? &D_local_point : 0); - if (!H1 && !H2) { + Point3 local = transformTo(point, Hself ? &D_local_pose : 0, Hpoint ? &D_local_point : 0); + if (!Hself && !Hpoint) { return Unit3(local); } else { Matrix23 D_b_local; Unit3 b = Unit3::FromPoint3(local, D_b_local); - if (H1) *H1 = D_b_local * D_local_pose; - if (H2) *H2 = D_b_local * D_local_point; + if (Hself) *Hself = D_b_local * D_local_pose; + if (Hpoint) *Hpoint = D_b_local * D_local_point; return b; } } /* ************************************************************************* */ -Unit3 Pose3::bearing(const Pose3& pose, OptionalJacobian<2, 6> H1, - OptionalJacobian<2, 6> H2) const { - if (H2) { - H2->setZero(); - return bearing(pose.translation(), H1, H2.cols<3>(3)); +Unit3 Pose3::bearing(const Pose3& pose, OptionalJacobian<2, 6> Hself, + OptionalJacobian<2, 6> Hpose) const { + if (Hpose) { + Hpose->setZero(); + return bearing(pose.translation(), Hself, Hpose.cols<3>(3)); } - return bearing(pose.translation(), H1, boost::none); + return bearing(pose.translation(), Hself, boost::none); } /* ************************************************************************* */ -boost::optional Pose3::Align(const std::vector& abPointPairs) { +boost::optional Pose3::Align(const Point3Pairs &abPointPairs) { const size_t n = abPointPairs.size(); - if (n < 3) - return boost::none; // we need at least three pairs + if (n < 3) { + return boost::none; // we need at least three pairs + } // calculate centroids - Point3 aCentroid(0,0,0), bCentroid(0,0,0); - for(const Point3Pair& abPair: abPointPairs) { - aCentroid += abPair.first; - bCentroid += abPair.second; - } - double f = 1.0 / n; - aCentroid *= f; - bCentroid *= f; + const auto centroids = means(abPointPairs); // Add to form H matrix Matrix3 H = Z_3x3; - for(const Point3Pair& abPair: abPointPairs) { - Point3 da = abPair.first - aCentroid; - Point3 db = abPair.second - bCentroid; + for (const Point3Pair &abPair : abPointPairs) { + const Point3 da = abPair.first - centroids.first; + const Point3 db = abPair.second - centroids.second; H += da * db.transpose(); - } + } // ClosestTo finds rotation matrix closest to H in Frobenius sense - Rot3 aRb = 
Rot3::ClosestTo(H); - Point3 aTb = Point3(aCentroid) - aRb * Point3(bCentroid); + const Rot3 aRb = Rot3::ClosestTo(H); + const Point3 aTb = centroids.first - aRb * centroids.second; return Pose3(aRb, aTb); } -boost::optional align(const vector& baPointPairs) { - vector abPointPairs; - for (const Point3Pair& baPair: baPointPairs) { - abPointPairs.push_back(make_pair(baPair.second, baPair.first)); +boost::optional align(const Point3Pairs &baPointPairs) { + Point3Pairs abPointPairs; + for (const Point3Pair &baPair : baPointPairs) { + abPointPairs.emplace_back(baPair.second, baPair.first); } return Pose3::Align(abPointPairs); } /* ************************************************************************* */ std::ostream &operator<<(std::ostream &os, const Pose3& pose) { - os << pose.rotation() << "\n"; - const Point3& t = pose.translation(); - os << '[' << t.x() << ", " << t.y() << ", " << t.z() << "]\';\n"; + // Both Rot3 and Point3 have ostream definitions so we use them. + os << "R: " << pose.rotation() << "\n"; + os << "t: " << pose.translation().transpose(); return os; } diff --git a/gtsam/geometry/Pose3.h b/gtsam/geometry/Pose3.h index fa55f98de..4c8973996 100644 --- a/gtsam/geometry/Pose3.h +++ b/gtsam/geometry/Pose3.h @@ -75,8 +75,8 @@ class GTSAM_EXPORT Pose3: public LieGroup { /// Named constructor with derivatives static Pose3 Create(const Rot3& R, const Point3& t, - OptionalJacobian<6, 3> H1 = boost::none, - OptionalJacobian<6, 3> H2 = boost::none); + OptionalJacobian<6, 3> HR = boost::none, + OptionalJacobian<6, 3> Ht = boost::none); /** * Create Pose3 by aligning two point pairs @@ -112,15 +112,34 @@ class GTSAM_EXPORT Pose3: public LieGroup { return Pose3(R_ * T.R_, t_ + R_ * T.t_); } + /** + * Interpolate between two poses via individual rotation and translation + * interpolation. + * + * The default "interpolate" method defined in Lie.h minimizes the geodesic + * distance on the manifold, leading to a screw motion interpolation in + * Cartesian space, which might not be what is expected. + * In contrast, this method executes a straight line interpolation for the + * translation, while still using interpolate (aka "slerp") for the rotational + * component. This might be more intuitive in many applications. + * + * @param T End point of interpolation. + * @param t A value in [0, 1]. 
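To make the distinction concrete, a short illustrative fragment (assumes using namespace gtsam; interpolate is the geodesic version from Lie.h, values are made up):

    Pose3 T1;  // identity
    Pose3 T2(Rot3::Rz(M_PI / 2), Point3(4, 0, 0));
    Pose3 a = T1.interpolateRt(T2, 0.5);  // rotation halfway, translation exactly (2, 0, 0)
    Pose3 b = interpolate(T1, T2, 0.5);   // geodesic: translation follows the screw arc instead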
+ */ + Pose3 interpolateRt(const Pose3& T, double t) const { + return Pose3(interpolate(R_, T.R_, t), + interpolate(t_, T.t_, t)); + } + /// @} /// @name Lie Group /// @{ /// Exponential map at identity - create a rotation from canonical coordinates \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$ - static Pose3 Expmap(const Vector6& xi, OptionalJacobian<6, 6> H = boost::none); + static Pose3 Expmap(const Vector6& xi, OptionalJacobian<6, 6> Hxi = boost::none); /// Log map at identity - return the canonical coordinates \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$ of this rotation - static Vector6 Logmap(const Pose3& p, OptionalJacobian<6, 6> H = boost::none); + static Vector6 Logmap(const Pose3& pose, OptionalJacobian<6, 6> Hpose = boost::none); /** * Calculate Adjoint map, transforming a twist in the this pose's (i.e, body) frame to the world spatial frame @@ -157,7 +176,7 @@ class GTSAM_EXPORT Pose3: public LieGroup { * Action of the adjointMap on a Lie-algebra vector y, with optional derivatives */ static Vector6 adjoint(const Vector6 &xi, const Vector6 &y, - OptionalJacobian<6, 6> = boost::none); + OptionalJacobian<6, 6> Hxi = boost::none); // temporary fix for wrappers until case issue is resolved static Matrix6 adjointMap_(const Vector6 &xi) { return adjointMap(xi);} @@ -167,7 +186,7 @@ class GTSAM_EXPORT Pose3: public LieGroup { * The dual version of adjoint action, acting on the dual space of the Lie-algebra vector space. */ static Vector6 adjointTranspose(const Vector6& xi, const Vector6& y, - OptionalJacobian<6, 6> H = boost::none); + OptionalJacobian<6, 6> Hxi = boost::none); /// Derivative of Expmap static Matrix6 ExpmapDerivative(const Vector6& xi); @@ -177,10 +196,22 @@ class GTSAM_EXPORT Pose3: public LieGroup { // Chart at origin, depends on compile-time flag GTSAM_POSE3_EXPMAP struct ChartAtOrigin { - static Pose3 Retract(const Vector6& v, ChartJacobian H = boost::none); - static Vector6 Local(const Pose3& r, ChartJacobian H = boost::none); + static Pose3 Retract(const Vector6& xi, ChartJacobian Hxi = boost::none); + static Vector6 Local(const Pose3& pose, ChartJacobian Hpose = boost::none); }; + /** + * Compute the 3x3 bottom-left block Q of SE3 Expmap right derivative matrix + * J_r(xi) = [J_(w) Z_3x3; + * Q_r J_(w)] + * where J_(w) is the SO3 Expmap right derivative. + * (see Chirikjian11book2, pg 44, eq 10.95. + * The closed-form formula is identical to formula 102 in Barfoot14tro where + * Q_l of the SE3 Expmap left derivative matrix is given. 
+ */ + static Matrix3 ComputeQforExpmapDerivative( + const Vector6& xi, double nearZeroThreshold = 1e-5); + using LieGroup::inverse; // version with derivative /** @@ -201,38 +232,38 @@ class GTSAM_EXPORT Pose3: public LieGroup { /** * @brief takes point in Pose coordinates and transforms it to world coordinates - * @param p point in Pose coordinates - * @param Dpose optional 3*6 Jacobian wrpt this pose - * @param Dpoint optional 3*3 Jacobian wrpt point + * @param point point in Pose coordinates + * @param Hself optional 3*6 Jacobian wrpt this pose + * @param Hpoint optional 3*3 Jacobian wrpt point * @return point in world coordinates */ - Point3 transformFrom(const Point3& p, OptionalJacobian<3, 6> Dpose = - boost::none, OptionalJacobian<3, 3> Dpoint = boost::none) const; + Point3 transformFrom(const Point3& point, OptionalJacobian<3, 6> Hself = + boost::none, OptionalJacobian<3, 3> Hpoint = boost::none) const; /** syntactic sugar for transformFrom */ - inline Point3 operator*(const Point3& p) const { - return transformFrom(p); + inline Point3 operator*(const Point3& point) const { + return transformFrom(point); } /** * @brief takes point in world coordinates and transforms it to Pose coordinates - * @param p point in world coordinates - * @param Dpose optional 3*6 Jacobian wrpt this pose - * @param Dpoint optional 3*3 Jacobian wrpt point + * @param point point in world coordinates + * @param Hself optional 3*6 Jacobian wrpt this pose + * @param Hpoint optional 3*3 Jacobian wrpt point * @return point in Pose coordinates */ - Point3 transformTo(const Point3& p, OptionalJacobian<3, 6> Dpose = - boost::none, OptionalJacobian<3, 3> Dpoint = boost::none) const; + Point3 transformTo(const Point3& point, OptionalJacobian<3, 6> Hself = + boost::none, OptionalJacobian<3, 3> Hpoint = boost::none) const; /// @} /// @name Standard Interface /// @{ /// get rotation - const Rot3& rotation(OptionalJacobian<3, 6> H = boost::none) const; + const Rot3& rotation(OptionalJacobian<3, 6> Hself = boost::none) const; /// get translation - const Point3& translation(OptionalJacobian<3, 6> H = boost::none) const; + const Point3& translation(OptionalJacobian<3, 6> Hself = boost::none) const; /// get x double x() const { @@ -252,36 +283,44 @@ class GTSAM_EXPORT Pose3: public LieGroup { /** convert to 4*4 matrix */ Matrix4 matrix() const; - /** receives a pose in local coordinates and transforms it to world coordinates */ - Pose3 transformPoseFrom(const Pose3& pose) const; + /** + * Assuming self == wTa, takes a pose aTb in local coordinates + * and transforms it to world coordinates wTb = wTa * aTb. + * This is identical to compose. 
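Because this is the same operation as compose, the two calls are interchangeable; an illustrative fragment (assumes using namespace gtsam):

    Pose3 wTa(Rot3::Ypr(0.1, -0.2, 0.3), Point3(1, 2, 3));
    Pose3 aTb(Rot3::Rz(0.5), Point3(-1, 0, 2));
    Matrix6 Hself, HaTb;
    Pose3 wTb = wTa.transformPoseFrom(aTb, Hself, HaTb);
    assert(wTb.equals(wTa.compose(aTb)));  // identical results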
+ */ + Pose3 transformPoseFrom(const Pose3& aTb, OptionalJacobian<6, 6> Hself = boost::none, + OptionalJacobian<6, 6> HaTb = boost::none) const; - /** receives a pose in world coordinates and transforms it to local coordinates */ - Pose3 transformPoseTo(const Pose3& pose, OptionalJacobian<6, 6> H1 = boost::none, - OptionalJacobian<6, 6> H2 = boost::none) const; + /** + * Assuming self == wTa, takes a pose wTb in world coordinates + * and transforms it to local coordinates aTb = inv(wTa) * wTb + */ + Pose3 transformPoseTo(const Pose3& wTb, OptionalJacobian<6, 6> Hself = boost::none, + OptionalJacobian<6, 6> HwTb = boost::none) const; /** * Calculate range to a landmark * @param point 3D location of landmark * @return range (double) */ - double range(const Point3& point, OptionalJacobian<1, 6> H1 = boost::none, - OptionalJacobian<1, 3> H2 = boost::none) const; + double range(const Point3& point, OptionalJacobian<1, 6> Hself = boost::none, + OptionalJacobian<1, 3> Hpoint = boost::none) const; /** * Calculate range to another pose * @param pose Other SO(3) pose * @return range (double) */ - double range(const Pose3& pose, OptionalJacobian<1, 6> H1 = boost::none, - OptionalJacobian<1, 6> H2 = boost::none) const; + double range(const Pose3& pose, OptionalJacobian<1, 6> Hself = boost::none, + OptionalJacobian<1, 6> Hpose = boost::none) const; /** * Calculate bearing to a landmark * @param point 3D location of landmark * @return bearing (Unit3) */ - Unit3 bearing(const Point3& point, OptionalJacobian<2, 6> H1 = boost::none, - OptionalJacobian<2, 3> H2 = boost::none) const; + Unit3 bearing(const Point3& point, OptionalJacobian<2, 6> Hself = boost::none, + OptionalJacobian<2, 3> Hpoint = boost::none) const; /** * Calculate bearing to another pose @@ -289,8 +328,8 @@ class GTSAM_EXPORT Pose3: public LieGroup { * information is ignored. * @return bearing (Unit3) */ - Unit3 bearing(const Pose3& pose, OptionalJacobian<2, 6> H1 = boost::none, - OptionalJacobian<2, 6> H2 = boost::none) const; + Unit3 bearing(const Pose3& pose, OptionalJacobian<2, 6> Hself = boost::none, + OptionalJacobian<2, 6> Hpose = boost::none) const; /// @} /// @name Advanced Interface @@ -318,30 +357,6 @@ class GTSAM_EXPORT Pose3: public LieGroup { GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const Pose3& p); -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - Point3 transform_from(const Point3& p, - OptionalJacobian<3, 6> Dpose = boost::none, - OptionalJacobian<3, 3> Dpoint = boost::none) const { - return transformFrom(p, Dpose, Dpoint); - } - Point3 transform_to(const Point3& p, - OptionalJacobian<3, 6> Dpose = boost::none, - OptionalJacobian<3, 3> Dpoint = boost::none) const { - return transformTo(p, Dpose, Dpoint); - } - Pose3 transform_pose_to(const Pose3& pose, - OptionalJacobian<6, 6> H1 = boost::none, - OptionalJacobian<6, 6> H2 = boost::none) const { - return transformPoseTo(pose, H1, H2); - } - /** - * @deprecated: this function is neither here not there. 
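The renamed Jacobian arguments are used exactly as the old H1/H2 were; e.g., range to a landmark (illustrative values, chosen so the expected distance is 5):

    Pose3 wTc(Rot3::Ypr(0.1, 0.0, 0.0), Point3(0, 0, 2));
    Point3 landmark(3, 4, 2);
    Eigen::Matrix<double, 1, 6> Hself;
    Eigen::Matrix<double, 1, 3> Hpoint;
    double r = wTc.range(landmark, Hself, Hpoint);  // r == 5, Jacobians filled in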
*/ - Pose3 transform_to(const Pose3& pose) const; - /// @} -#endif - private: /** Serialization function */ friend class boost::serialization::access; @@ -355,7 +370,7 @@ class GTSAM_EXPORT Pose3: public LieGroup { #ifdef GTSAM_USE_QUATERNIONS // Align if we are using Quaternions public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW #endif }; // Pose3 class @@ -372,10 +387,8 @@ inline Matrix wedge(const Vector& xi) { return Pose3::wedge(xi(0), xi(1), xi(2), xi(3), xi(4), xi(5)); } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -// deprecated: use Pose3::Align with point pairs ordered the opposite way -GTSAM_EXPORT boost::optional align(const std::vector& baPointPairs); -#endif +// Convenience typedef +using Pose3Pair = std::pair; // For MATLAB wrapper typedef std::vector Pose3Vector; diff --git a/gtsam/geometry/Rot2.cpp b/gtsam/geometry/Rot2.cpp index 04ed16774..7502c4ccf 100644 --- a/gtsam/geometry/Rot2.cpp +++ b/gtsam/geometry/Rot2.cpp @@ -39,6 +39,13 @@ Rot2 Rot2::atan2(double y, double x) { return R.normalize(); } +/* ************************************************************************* */ +Rot2 Rot2::Random(std::mt19937& rng) { + uniform_real_distribution randomAngle(-M_PI, M_PI); + double angle = randomAngle(rng); + return fromAngle(angle); +} + /* ************************************************************************* */ void Rot2::print(const string& s) const { cout << s << ": " << theta() << endl; diff --git a/gtsam/geometry/Rot2.h b/gtsam/geometry/Rot2.h index f46f12540..8a361f558 100644 --- a/gtsam/geometry/Rot2.h +++ b/gtsam/geometry/Rot2.h @@ -22,6 +22,8 @@ #include #include +#include + namespace gtsam { /** @@ -79,6 +81,14 @@ namespace gtsam { /** Named constructor that behaves as atan2, i.e., y,x order (!) and normalizes */ static Rot2 atan2(double y, double x); + /** + * Random, generates random angle \in [-p,pi] + * Example: + * std::mt19937 engine(42); + * Unit3 unit = Unit3::Random(engine); + */ + static Rot2 Random(std::mt19937 & rng); + /// @} /// @name Testable /// @{ diff --git a/gtsam/geometry/Rot3.cpp b/gtsam/geometry/Rot3.cpp index 01f62b8cb..8f8088a74 100644 --- a/gtsam/geometry/Rot3.cpp +++ b/gtsam/geometry/Rot3.cpp @@ -32,7 +32,8 @@ namespace gtsam { /* ************************************************************************* */ void Rot3::print(const std::string& s) const { - gtsam::print((Matrix)matrix(), s); + cout << (s.empty() ? 
"R: " : s + " "); + gtsam::print(static_cast(matrix())); } /* ************************************************************************* */ @@ -157,21 +158,73 @@ Point3 Rot3::column(int index) const{ } /* ************************************************************************* */ -Vector3 Rot3::xyz() const { +Vector3 Rot3::xyz(OptionalJacobian<3, 3> H) const { Matrix3 I;Vector3 q; - boost::tie(I,q)=RQ(matrix()); + if (H) { + Matrix93 mH; + const auto m = matrix(); +#ifdef GTSAM_USE_QUATERNIONS + SO3{m}.vec(mH); +#else + rot_.vec(mH); +#endif + + Matrix39 qHm; + boost::tie(I, q) = RQ(m, qHm); + + // TODO : Explore whether this expression can be optimized as both + // qHm and mH are super-sparse + *H = qHm * mH; + } else + boost::tie(I, q) = RQ(matrix()); return q; } /* ************************************************************************* */ -Vector3 Rot3::ypr() const { - Vector3 q = xyz(); +Vector3 Rot3::ypr(OptionalJacobian<3, 3> H) const { + Vector3 q = xyz(H); + if (H) H->row(0).swap(H->row(2)); + return Vector3(q(2),q(1),q(0)); } /* ************************************************************************* */ -Vector3 Rot3::rpy() const { - return xyz(); +Vector3 Rot3::rpy(OptionalJacobian<3, 3> H) const { return xyz(H); } + +/* ************************************************************************* */ +double Rot3::roll(OptionalJacobian<1, 3> H) const { + double r; + if (H) { + Matrix3 xyzH; + r = xyz(xyzH)(0); + *H = xyzH.row(0); + } else + r = xyz()(0); + return r; +} + +/* ************************************************************************* */ +double Rot3::pitch(OptionalJacobian<1, 3> H) const { + double p; + if (H) { + Matrix3 xyzH; + p = xyz(xyzH)(1); + *H = xyzH.row(1); + } else + p = xyz()(1); + return p; +} + +/* ************************************************************************* */ +double Rot3::yaw(OptionalJacobian<1, 3> H) const { + double y; + if (H) { + Matrix3 xyzH; + y = xyz(xyzH)(2); + *H = xyzH.row(2); + } else + y = xyz()(2); + return y; } /* ************************************************************************* */ @@ -202,30 +255,68 @@ Matrix3 Rot3::LogmapDerivative(const Vector3& x) { } /* ************************************************************************* */ -pair RQ(const Matrix3& A) { - - double x = -atan2(-A(2, 1), A(2, 2)); - Rot3 Qx = Rot3::Rx(-x); - Matrix3 B = A * Qx.matrix(); - - double y = -atan2(B(2, 0), B(2, 2)); - Rot3 Qy = Rot3::Ry(-y); - Matrix3 C = B * Qy.matrix(); - - double z = -atan2(-C(1, 0), C(1, 1)); - Rot3 Qz = Rot3::Rz(-z); - Matrix3 R = C * Qz.matrix(); +pair RQ(const Matrix3& A, OptionalJacobian<3, 9> H) { + const double x = -atan2(-A(2, 1), A(2, 2)); + const auto Qx = Rot3::Rx(-x).matrix(); + const Matrix3 B = A * Qx; + + const double y = -atan2(B(2, 0), B(2, 2)); + const auto Qy = Rot3::Ry(-y).matrix(); + const Matrix3 C = B * Qy; + + const double z = -atan2(-C(1, 0), C(1, 1)); + const auto Qz = Rot3::Rz(-z).matrix(); + const Matrix3 R = C * Qz; + + if (H) { + if (std::abs(y - M_PI / 2) < 1e-2) + throw std::runtime_error( + "Rot3::RQ : Derivative undefined at singularity (gimbal lock)"); + + auto atan_d1 = [](double y, double x) { return x / (x * x + y * y); }; + auto atan_d2 = [](double y, double x) { return -y / (x * x + y * y); }; + + const auto sx = -Qx(2, 1), cx = Qx(1, 1); + const auto sy = -Qy(0, 2), cy = Qy(0, 0); + + *H = Matrix39::Zero(); + // First, calculate the derivate of x + (*H)(0, 5) = atan_d1(A(2, 1), A(2, 2)); + (*H)(0, 8) = atan_d2(A(2, 1), A(2, 2)); + + // Next, calculate the derivate of y. 
We have + // b20 = a20 and b22 = a21 * sx + a22 * cx + (*H)(1, 2) = -atan_d1(B(2, 0), B(2, 2)); + const auto yHb22 = -atan_d2(B(2, 0), B(2, 2)); + (*H)(1, 5) = yHb22 * sx; + (*H)(1, 8) = yHb22 * cx; + + // Next, calculate the derivate of z. We have + // c20 = a10 * cy + a11 * sx * sy + a12 * cx * sy + // c22 = a11 * cx - a12 * sx + const auto c10Hx = (A(1, 1) * cx - A(1, 2) * sx) * sy; + const auto c10Hy = A(1, 2) * cx * cy + A(1, 1) * cy * sx - A(1, 0) * sy; + Vector9 c10HA = c10Hx * H->row(0) + c10Hy * H->row(1); + c10HA[1] = cy; + c10HA[4] = sx * sy; + c10HA[7] = cx * sy; + + const auto c11Hx = -A(1, 2) * cx - A(1, 1) * sx; + Vector9 c11HA = c11Hx * H->row(0); + c11HA[4] = cx; + c11HA[7] = -sx; + + H->block<1, 9>(2, 0) = + atan_d1(C(1, 0), C(1, 1)) * c10HA + atan_d2(C(1, 0), C(1, 1)) * c11HA; + } - Vector xyz = Vector3(x, y, z); + const auto xyz = Vector3(x, y, z); return make_pair(R, xyz); } /* ************************************************************************* */ ostream &operator<<(ostream &os, const Rot3& R) { - os << "\n"; - os << '|' << R.r1().x() << ", " << R.r2().x() << ", " << R.r3().x() << "|\n"; - os << '|' << R.r1().y() << ", " << R.r2().y() << ", " << R.r3().y() << "|\n"; - os << '|' << R.r1().z() << ", " << R.r2().z() << ", " << R.r3().z() << "|\n"; + os << R.matrix().format(matlabFormat()); return os; } diff --git a/gtsam/geometry/Rot3.h b/gtsam/geometry/Rot3.h index fc3a8b3f2..abd74e063 100644 --- a/gtsam/geometry/Rot3.h +++ b/gtsam/geometry/Rot3.h @@ -154,12 +154,23 @@ namespace gtsam { static Rot3 Rz(double t); /// Rotations around Z, Y, then X axes as in http://en.wikipedia.org/wiki/Rotation_matrix, counterclockwise when looking from unchanging axis. - static Rot3 RzRyRx(double x, double y, double z); + static Rot3 RzRyRx(double x, double y, double z, + OptionalJacobian<3, 1> Hx = boost::none, + OptionalJacobian<3, 1> Hy = boost::none, + OptionalJacobian<3, 1> Hz = boost::none); /// Rotations around Z, Y, then X axes as in http://en.wikipedia.org/wiki/Rotation_matrix, counterclockwise when looking from unchanging axis. - inline static Rot3 RzRyRx(const Vector& xyz) { + inline static Rot3 RzRyRx(const Vector& xyz, + OptionalJacobian<3, 3> H = boost::none) { assert(xyz.size() == 3); - return RzRyRx(xyz(0), xyz(1), xyz(2)); + Rot3 out; + if (H) { + Vector3 Hx, Hy, Hz; + out = RzRyRx(xyz(0), xyz(1), xyz(2), Hx, Hy, Hz); + (*H) << Hx, Hy, Hz; + } else + out = RzRyRx(xyz(0), xyz(1), xyz(2)); + return out; } /// Positive yaw is to right (as in aircraft heading). See ypr @@ -185,7 +196,12 @@ namespace gtsam { * Positive pitch is down (decreasing aircraft altitude). * Positive roll is to right (decreasing yaw in aircraft). */ - static Rot3 Ypr(double y, double p, double r) { return RzRyRx(r,p,y);} + static Rot3 Ypr(double y, double p, double r, + OptionalJacobian<3, 1> Hy = boost::none, + OptionalJacobian<3, 1> Hp = boost::none, + OptionalJacobian<3, 1> Hr = boost::none) { + return RzRyRx(r, p, y, Hr, Hp, Hy); + } /** Create from Quaternion coefficients */ static Rot3 Quaternion(double w, double x, double y, double z) { @@ -246,15 +262,35 @@ namespace gtsam { static Rot3 AlignTwoPairs(const Unit3& a_p, const Unit3& b_p, // const Unit3& a_q, const Unit3& b_q); - /// Static, named constructor that finds Rot3 element closest to M in Frobenius norm. + /** + * Static, named constructor that finds Rot3 element closest to M in Frobenius norm. + * + * Uses Full SVD to compute the orthogonal matrix, thus is highly accurate and robust. + * + * N. J. Higham. 
Matrix nearness problems and applications. + * In M. J. C. Gover and S. Barnett, editors, Applications of Matrix Theory, pages 1–27. + * Oxford University Press, 1989. + */ static Rot3 ClosestTo(const Matrix3& M) { return Rot3(SO3::ClosestTo(M)); } + /** + * Normalize rotation so that its determinant is 1. + * This means either re-orthogonalizing the Matrix representation or + * normalizing the quaternion representation. + * + * This method is akin to `ClosestTo` but uses a computationally cheaper + * algorithm. + * + * Ref: https://drive.google.com/file/d/0B9rLLz1XQKmaZTlQdV81QjNoZTA/view + */ + Rot3 normalized() const; + /// @} /// @name Testable /// @{ /** print */ - void print(const std::string& s="R") const; + void print(const std::string& s="") const; /** equals with an tolerance */ bool equals(const Rot3& p, double tol = 1e-9) const; @@ -425,19 +461,19 @@ namespace gtsam { * Use RQ to calculate xyz angle representation * @return a vector containing x,y,z s.t. R = Rot3::RzRyRx(x,y,z) */ - Vector3 xyz() const; + Vector3 xyz(OptionalJacobian<3, 3> H = boost::none) const; /** * Use RQ to calculate yaw-pitch-roll angle representation * @return a vector containing ypr s.t. R = Rot3::Ypr(y,p,r) */ - Vector3 ypr() const; + Vector3 ypr(OptionalJacobian<3, 3> H = boost::none) const; /** * Use RQ to calculate roll-pitch-yaw angle representation * @return a vector containing rpy s.t. R = Rot3::Ypr(y,p,r) */ - Vector3 rpy() const; + Vector3 rpy(OptionalJacobian<3, 3> H = boost::none) const; /** * Accessor to get to component of angle representations @@ -445,7 +481,7 @@ namespace gtsam { * you should instead use xyz() or ypr() * TODO: make this more efficient */ - inline double roll() const { return xyz()(0); } + double roll(OptionalJacobian<1, 3> H = boost::none) const; /** * Accessor to get to component of angle representations @@ -453,7 +489,7 @@ namespace gtsam { * you should instead use xyz() or ypr() * TODO: make this more efficient */ - inline double pitch() const { return xyz()(1); } + double pitch(OptionalJacobian<1, 3> H = boost::none) const; /** * Accessor to get to component of angle representations @@ -461,7 +497,7 @@ namespace gtsam { * you should instead use xyz() or ypr() * TODO: make this more efficient */ - inline double yaw() const { return xyz()(2); } + double yaw(OptionalJacobian<1, 3> H = boost::none) const; /// @} /// @name Advanced Interface @@ -490,7 +526,7 @@ namespace gtsam { /** * @brief Spherical Linear intERPolation between *this and other - * @param s a value between 0 and 1 + * @param t a value between 0 and 1 * @param other final point of iterpolation geodesic on manifold */ Rot3 slerp(double t, const Rot3& other) const; @@ -500,23 +536,6 @@ namespace gtsam { /// @} -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - static Rot3 rodriguez(const Point3& axis, double angle) { return AxisAngle(axis, angle); } - static Rot3 rodriguez(const Unit3& axis, double angle) { return AxisAngle(axis, angle); } - static Rot3 rodriguez(const Vector3& w) { return Rodrigues(w); } - static Rot3 rodriguez(double wx, double wy, double wz) { return Rodrigues(wx, wy, wz); } - static Rot3 yaw (double t) { return Yaw(t); } - static Rot3 pitch(double t) { return Pitch(t); } - static Rot3 roll (double t) { return Roll(t); } - static Rot3 ypr(double y, double p, double r) { return Ypr(r,p,y);} - static Rot3 quaternion(double w, double x, double y, double z) { - return Rot3::Quaternion(w, x, y, z); - } - /// @} -#endif - private: /** Serialization function */ friend 
class boost::serialization::access; @@ -544,7 +563,7 @@ namespace gtsam { #ifdef GTSAM_USE_QUATERNIONS // only align if quaternion, Matrix3 has no alignment requirements public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW #endif }; @@ -558,7 +577,8 @@ namespace gtsam { * @return an upper triangular matrix R * @return a vector [thetax, thetay, thetaz] in radians. */ - GTSAM_EXPORT std::pair RQ(const Matrix3& A); + GTSAM_EXPORT std::pair RQ( + const Matrix3& A, OptionalJacobian<3, 9> H = boost::none); template<> struct traits : public internal::LieGroup {}; diff --git a/gtsam/geometry/Rot3M.cpp b/gtsam/geometry/Rot3M.cpp index 46a07e50a..07cc7302a 100644 --- a/gtsam/geometry/Rot3M.cpp +++ b/gtsam/geometry/Rot3M.cpp @@ -82,7 +82,8 @@ Rot3 Rot3::Rz(double t) { /* ************************************************************************* */ // Considerably faster than composing matrices above ! -Rot3 Rot3::RzRyRx(double x, double y, double z) { +Rot3 Rot3::RzRyRx(double x, double y, double z, OptionalJacobian<3, 1> Hx, + OptionalJacobian<3, 1> Hy, OptionalJacobian<3, 1> Hz) { double cx=cos(x),sx=sin(x); double cy=cos(y),sy=sin(y); double cz=cos(z),sz=sin(z); @@ -97,6 +98,9 @@ Rot3 Rot3::RzRyRx(double x, double y, double z) { double s_c = sx * cz; double c_c = cx * cz; double ssc = ss_ * cz, csc = cs_ * cz, sss = ss_ * sz, css = cs_ * sz; + if (Hx) (*Hx) << 1, 0, 0; + if (Hy) (*Hy) << 0, cx, -sx; + if (Hz) (*Hz) << -sy, sc_, cc_; return Rot3( _cc,- c_s + ssc, s_s + csc, _cs, c_c + sss, -s_c + css, @@ -104,6 +108,33 @@ Rot3 Rot3::RzRyRx(double x, double y, double z) { ); } +/* ************************************************************************* */ +Rot3 Rot3::normalized() const { + /// Implementation from here: https://stackoverflow.com/a/23082112/1236990 + + /// Essentially, this computes the orthogonalization error, distributes the + /// error to the x and y rows, and then performs a Taylor expansion to + /// orthogonalize. + + Matrix3 rot = rot_.matrix(), rot_orth; + + // Check if determinant is already 1. + // If yes, then return the current Rot3. + if (std::fabs(rot.determinant()-1) < 1e-12) return Rot3(rot_); + + Vector3 x = rot.block<1, 3>(0, 0), y = rot.block<1, 3>(1, 0); + double error = x.dot(y); + + Vector3 x_ort = x - (error / 2) * y, y_ort = y - (error / 2) * x; + Vector3 z_ort = x_ort.cross(y_ort); + + rot_orth.block<1, 3>(0, 0) = 0.5 * (3 - x_ort.dot(x_ort)) * x_ort; + rot_orth.block<1, 3>(1, 0) = 0.5 * (3 - y_ort.dot(y_ort)) * y_ort; + rot_orth.block<1, 3>(2, 0) = 0.5 * (3 - z_ort.dot(z_ort)) * z_ort; + + return Rot3(rot_orth); +} + /* ************************************************************************* */ Rot3 Rot3::operator*(const Rot3& R2) const { return Rot3(rot_*R2.rot_); @@ -145,7 +176,17 @@ Vector3 Rot3::CayleyChart::Local(const Rot3& R, OptionalJacobian<3,3> H) { if (H) throw std::runtime_error("Rot3::CayleyChart::Local Derivative"); // Create a fixed-size matrix Matrix3 A = R.matrix(); - // Mathematica closed form optimization (procrastination?) gone wild: + + // Check if (A+I) is invertible. Same as checking for -1 eigenvalue. + if ((A + I_3x3).determinant() == 0.0) { + throw std::runtime_error("Rot3::CayleyChart::Local Invalid Rotation"); + } + + // Mathematica closed form optimization. + // The following are the essential computations for the following algorithm + // 1. Compute the inverse of P = (A+I), using a closed-form formula since P is 3x3 + // 2. Compute the Cayley transform C = 2 * P^{-1} * (A-I) + // 3. 
C is skew-symmetric, so we pick out the computations corresponding only to x, y, and z. const double a = A(0, 0), b = A(0, 1), c = A(0, 2); const double d = A(1, 0), e = A(1, 1), f = A(1, 2); const double g = A(2, 0), h = A(2, 1), i = A(2, 2); diff --git a/gtsam/geometry/Rot3Q.cpp b/gtsam/geometry/Rot3Q.cpp index d609c289c..523255d87 100644 --- a/gtsam/geometry/Rot3Q.cpp +++ b/gtsam/geometry/Rot3Q.cpp @@ -67,12 +67,29 @@ namespace gtsam { } /* ************************************************************************* */ - Rot3 Rot3::RzRyRx(double x, double y, double z) { return Rot3( - gtsam::Quaternion(Eigen::AngleAxisd(z, Eigen::Vector3d::UnitZ())) * - gtsam::Quaternion(Eigen::AngleAxisd(y, Eigen::Vector3d::UnitY())) * - gtsam::Quaternion(Eigen::AngleAxisd(x, Eigen::Vector3d::UnitX()))); + Rot3 Rot3::RzRyRx(double x, double y, double z, OptionalJacobian<3, 1> Hx, + OptionalJacobian<3, 1> Hy, OptionalJacobian<3, 1> Hz) { + if (Hx) (*Hx) << 1, 0, 0; + + if (Hy or Hz) { + const auto cx = cos(x), sx = sin(x); + if (Hy) (*Hy) << 0, cx, -sx; + if (Hz) { + const auto cy = cos(y), sy = sin(y); + (*Hz) << -sy, sx * cy, cx * cy; + } + } + + return Rot3( + gtsam::Quaternion(Eigen::AngleAxisd(z, Eigen::Vector3d::UnitZ())) * + gtsam::Quaternion(Eigen::AngleAxisd(y, Eigen::Vector3d::UnitY())) * + gtsam::Quaternion(Eigen::AngleAxisd(x, Eigen::Vector3d::UnitX()))); } + /* ************************************************************************* */ + Rot3 Rot3::normalized() const { + return Rot3(quaternion_.normalized()); + } /* ************************************************************************* */ Rot3 Rot3::operator*(const Rot3& R2) const { return Rot3(quaternion_ * R2.quaternion_); diff --git a/gtsam/geometry/SO4.cpp b/gtsam/geometry/SO4.cpp index 8a78bb83f..1c4920af8 100644 --- a/gtsam/geometry/SO4.cpp +++ b/gtsam/geometry/SO4.cpp @@ -35,7 +35,7 @@ namespace gtsam { // TODO(frank): is this better than SOn::Random? // /* ************************************************************************* -// */ static Vector3 randomOmega(boost::mt19937 &rng) { +// */ static Vector3 randomOmega(std::mt19937 &rng) { // static std::uniform_real_distribution randomAngle(-M_PI, M_PI); // return Unit3::Random(rng).unitVector() * randomAngle(rng); // } @@ -43,7 +43,7 @@ namespace gtsam { // /* ************************************************************************* // */ // // Create random SO(4) element using direct product of lie algebras. -// SO4 SO4::Random(boost::mt19937 &rng) { +// SO4 SO4::Random(std::mt19937 &rng) { // Vector6 delta; // delta << randomOmega(rng), randomOmega(rng); // return SO4::Expmap(delta); diff --git a/gtsam/geometry/SO4.h b/gtsam/geometry/SO4.h index 5014414c2..33bd8c161 100644 --- a/gtsam/geometry/SO4.h +++ b/gtsam/geometry/SO4.h @@ -34,7 +34,7 @@ namespace gtsam { using SO4 = SO<4>; // /// Random SO(4) element (no big claims about uniformity) -// static SO4 Random(boost::mt19937 &rng); +// static SO4 Random(std::mt19937 &rng); // Below are all declarations of SO<4> specializations. // They are *defined* in SO4.cpp. 
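A hypothetical use of the new normalized(), assuming the default rotation-matrix representation (GTSAM_USE_QUATERNIONS off): repair a rotation whose orthogonality has drifted, e.g. from accumulated round-off:

    Matrix3 M = Rot3::RzRyRx(0.1, 0.2, 0.3).matrix();
    M(0, 0) += 1e-5;            // simulate accumulated numerical drift
    Rot3 R(M);                  // slightly non-orthogonal rotation
    Rot3 Rn = R.normalized();   // determinant restored to 1 up to second-order error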
diff --git a/gtsam/geometry/SOn-inl.h b/gtsam/geometry/SOn-inl.h index 0d7f3e108..284ae76de 100644 --- a/gtsam/geometry/SOn-inl.h +++ b/gtsam/geometry/SOn-inl.h @@ -22,17 +22,15 @@ #include -using namespace std; - namespace gtsam { -// Implementation for N>5 just uses dynamic version +// Implementation for N>=5 just uses dynamic version template typename SO::MatrixNN SO::Hat(const TangentVector& xi) { return SOn::Hat(xi); } -// Implementation for N>5 just uses dynamic version +// Implementation for N>=5 just uses dynamic version template typename SO::TangentVector SO::Vee(const MatrixNN& X) { return SOn::Vee(X); @@ -60,8 +58,9 @@ typename SO::TangentVector SO::ChartAtOrigin::Local(const SO& R, template typename SO::MatrixDD SO::AdjointMap() const { + if (N==2) return I_1x1; // SO(2) case throw std::runtime_error( - "SO::AdjointMap only implemented for SO3 and SO4."); + "SO::AdjointMap only implemented for SO2, SO3 and SO4."); } template @@ -84,30 +83,22 @@ typename SO::MatrixDD SO::LogmapDerivative(const TangentVector& omega) { throw std::runtime_error("O::LogmapDerivative only implemented for SO3."); } +// Default fixed size version (but specialized elsewehere for N=2,3,4) template typename SO::VectorN2 SO::vec( OptionalJacobian H) const { - const size_t n = rows(); - const size_t n2 = n * n; - // Vectorize - VectorN2 X(n2); - X << Eigen::Map(matrix_.data(), n2, 1); + VectorN2 X = Eigen::Map(matrix_.data()); // If requested, calculate H as (I \oplus Q) * P, // where Q is the N*N rotation matrix, and P is calculated below. if (H) { // Calculate P matrix of vectorized generators // TODO(duy): Should we refactor this as the jacobian of Hat? - const size_t d = dim(); - Matrix P(n2, d); - for (size_t j = 0; j < d; j++) { - const auto X = Hat(Eigen::VectorXd::Unit(d, j)); - P.col(j) = Eigen::Map(X.data(), n2, 1); - } - H->resize(n2, d); - for (size_t i = 0; i < n; i++) { - H->block(i * n, 0, n, d) = matrix_ * P.block(i * n, 0, n, d); + Matrix P = SO::VectorizedGenerators(); + for (size_t i = 0; i < N; i++) { + H->block(i * N, 0, N, dimension) = + matrix_ * P.block(i * N, 0, N, dimension); } } return X; @@ -115,7 +106,7 @@ typename SO::VectorN2 SO::vec( template void SO::print(const std::string& s) const { - cout << s << matrix_ << endl; + std::cout << s << matrix_ << std::endl; } } // namespace gtsam diff --git a/gtsam/geometry/SOn.cpp b/gtsam/geometry/SOn.cpp index 37b6c1784..c6cff4214 100644 --- a/gtsam/geometry/SOn.cpp +++ b/gtsam/geometry/SOn.cpp @@ -22,21 +22,18 @@ namespace gtsam { template <> -GTSAM_EXPORT -Matrix SOn::Hat(const Vector& xi) { +GTSAM_EXPORT void SOn::Hat(const Vector &xi, Eigen::Ref X) { size_t n = AmbientDim(xi.size()); - if (n < 2) throw std::invalid_argument("SO::Hat: n<2 not supported"); - - Matrix X(n, n); // allocate space for n*n skew-symmetric matrix - X.setZero(); - if (n == 2) { + if (n < 2) + throw std::invalid_argument("SO::Hat: n<2 not supported"); + else if (n == 2) { // Handle SO(2) case as recursion bottom assert(xi.size() == 1); X << 0, -xi(0), xi(0), 0; } else { // Recursively call SO(n-1) call for top-left block const size_t dmin = (n - 1) * (n - 2) / 2; - X.topLeftCorner(n - 1, n - 1) = Hat(xi.tail(dmin)); + Hat(xi.tail(dmin), X.topLeftCorner(n - 1, n - 1)); // determine sign of last element (signs alternate) double sign = pow(-1.0, xi.size()); @@ -47,7 +44,14 @@ Matrix SOn::Hat(const Vector& xi) { X(j, n - 1) = -X(n - 1, j); sign = -sign; } + X(n - 1, n - 1) = 0; // bottom-right } +} + +template <> GTSAM_EXPORT Matrix SOn::Hat(const Vector &xi) { + 
size_t n = AmbientDim(xi.size()); + Matrix X(n, n); // allocate space for n*n skew-symmetric matrix + SOn::Hat(xi, X); return X; } @@ -99,4 +103,27 @@ SOn LieGroup::between(const SOn& g, DynamicJacobian H1, return result; } +// Dynamic version of vec +template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const { + const size_t n = rows(), n2 = n * n; + + // Vectorize + VectorN2 X(n2); + X << Eigen::Map(matrix_.data(), n2, 1); + + // If requested, calculate H as (I \oplus Q) * P, + // where Q is the N*N rotation matrix, and P is calculated below. + if (H) { + // Calculate P matrix of vectorized generators + // TODO(duy): Should we refactor this as the jacobian of Hat? + Matrix P = SOn::VectorizedGenerators(n); + const size_t d = dim(); + H->resize(n2, d); + for (size_t i = 0; i < n; i++) { + H->block(i * n, 0, n, d) = matrix_ * P.block(i * n, 0, n, d); + } + } + return X; +} + } // namespace gtsam diff --git a/gtsam/geometry/SOn.h b/gtsam/geometry/SOn.h index 5313d3018..86b6019e1 100644 --- a/gtsam/geometry/SOn.h +++ b/gtsam/geometry/SOn.h @@ -20,8 +20,8 @@ #include #include +#include #include - #include #include // TODO(frank): how to avoid? @@ -54,7 +54,7 @@ class SO : public LieGroup, internal::DimensionSO(N)> { using VectorN2 = Eigen::Matrix; using MatrixDD = Eigen::Matrix; - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(true) protected: MatrixNN matrix_; ///< Rotation matrix @@ -98,8 +98,8 @@ class SO : public LieGroup, internal::DimensionSO(N)> { template > static SO Lift(size_t n, const Eigen::MatrixBase &R) { Matrix Q = Matrix::Identity(n, n); - size_t p = R.rows(); - assert(p < n && R.cols() == p); + const int p = R.rows(); + assert(p >= 0 && p <= static_cast(n) && R.cols() == p); Q.topLeftCorner(p, p) = R; return SO(Q); } @@ -208,7 +208,7 @@ class SO : public LieGroup, internal::DimensionSO(N)> { // Calculate run-time dimensionality of manifold. // Available as dimension or Dim() for fixed N. - size_t dim() const { return Dimension(matrix_.rows()); } + size_t dim() const { return Dimension(static_cast(matrix_.rows())); } /** * Hat operator creates Lie algebra element corresponding to d-vector, where d @@ -227,9 +227,10 @@ class SO : public LieGroup, internal::DimensionSO(N)> { */ static MatrixNN Hat(const TangentVector& xi); - /** - * Inverse of Hat. See note about xi element order in Hat. - */ + /// In-place version of Hat (see details there), implements recursion. + static void Hat(const Vector &xi, Eigen::Ref X); + + /// Inverse of Hat. See note about xi element order in Hat. 
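Hat and its inverse Vee (declared just below) round-trip for the dynamic SOn case as well; a small fragment where xi has 6 entries, so n = 4 (assumes using namespace gtsam):

    Vector xi(6);
    xi << 1, 2, 3, 4, 5, 6;
    Matrix X = SOn::Hat(xi);       // 4x4 skew-symmetric matrix built recursively
    Vector xi_back = SOn::Vee(X);  // recovers xi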
static TangentVector Vee(const MatrixNN& X); // Chart at origin @@ -290,12 +291,45 @@ class SO : public LieGroup, internal::DimensionSO(N)> { * */ VectorN2 vec(OptionalJacobian H = boost::none) const; - /// @} + /// Calculate N^2 x dim matrix of vectorized Lie algebra generators for SO(N) + template > + static Matrix VectorizedGenerators() { + constexpr size_t N2 = static_cast(N * N); + Eigen::Matrix G; + for (size_t j = 0; j < dimension; j++) { + const auto X = Hat(Vector::Unit(dimension, j)); + G.col(j) = Eigen::Map(X.data()); + } + return G; + } + + /// Calculate n^2 x dim matrix of vectorized Lie algebra generators for SO(n) + template > + static Matrix VectorizedGenerators(size_t n = 0) { + const size_t n2 = n * n, dim = Dimension(n); + Matrix G(n2, dim); + for (size_t j = 0; j < dim; j++) { + const auto X = Hat(Vector::Unit(dim, j)); + G.col(j) = Eigen::Map(X.data(), n2, 1); + } + return G; + } + + /// @{ + /// @name Serialization + /// @{ + + template + friend void save(Archive&, SO&, const unsigned int); + template + friend void load(Archive&, SO&, const unsigned int); template friend void serialize(Archive&, SO&, const unsigned int); friend class boost::serialization::access; friend class Rot3; // for serialize + + /// @} }; using SOn = SO; @@ -329,6 +363,21 @@ template <> SOn LieGroup::between(const SOn& g, DynamicJacobian H1, DynamicJacobian H2) const; +/* + * Specialize dynamic vec. + */ +template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const; + +/** Serialization function */ +template +void serialize( + Archive& ar, SOn& Q, + const unsigned int file_version +) { + Matrix& M = Q.matrix_; + ar& BOOST_SERIALIZATION_NVP(M); +} + /* * Define the traits. internal::LieGroup provides both Lie group and Testable */ diff --git a/gtsam/geometry/SimpleCamera.cpp b/gtsam/geometry/SimpleCamera.cpp index 6134ae3d4..d1a5ed330 100644 --- a/gtsam/geometry/SimpleCamera.cpp +++ b/gtsam/geometry/SimpleCamera.cpp @@ -21,6 +21,7 @@ namespace gtsam { +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 SimpleCamera simpleCamera(const Matrix34& P) { // P = [A|a] = s K cRw [I|-T], with s the unknown scale @@ -45,5 +46,6 @@ namespace gtsam { return SimpleCamera(Pose3(wRc, T), Cal3_S2(K(0, 0), K(1, 1), K(0, 1), K(0, 2), K(1, 2))); } +#endif } diff --git a/gtsam/geometry/SimpleCamera.h b/gtsam/geometry/SimpleCamera.h index 82f26aee2..aa00222c7 100644 --- a/gtsam/geometry/SimpleCamera.h +++ b/gtsam/geometry/SimpleCamera.h @@ -19,14 +19,22 @@ #pragma once #include -#include +#include +#include +#include #include +#include namespace gtsam { - /// A simple camera class with a Cal3_S2 calibration -typedef gtsam::PinholeCamera PinholeCameraCal3_S2; + /// Convenient aliases for Pinhole camera classes with different calibrations. + /// Also needed as forward declarations in the wrapper. 
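For example, the Cal3_S2 alias declared just below is the drop-in replacement for the deprecated SimpleCamera in user code (illustrative calibration values):

    PinholeCameraCal3_S2 camera(Pose3(), Cal3_S2(500.0, 500.0, 0.0, 320.0, 240.0));
    Point2 uv = camera.project(Point3(0.1, -0.2, 5.0));  // pixel coordinates of a point 5m ahead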
+ using PinholeCameraCal3_S2 = gtsam::PinholeCamera; + using PinholeCameraCal3Bundler = gtsam::PinholeCamera; + using PinholeCameraCal3DS2 = gtsam::PinholeCamera; + using PinholeCameraCal3Unified = gtsam::PinholeCamera; +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 /** * @deprecated: SimpleCamera for backwards compatability with GTSAM 3.x * Use PinholeCameraCal3_S2 instead @@ -140,4 +148,6 @@ struct traits : public internal::Manifold {}; template struct Range : HasRange {}; +#endif + } // namespace gtsam diff --git a/gtsam/geometry/Unit3.h b/gtsam/geometry/Unit3.h index f1a9c1a69..27d41a014 100644 --- a/gtsam/geometry/Unit3.h +++ b/gtsam/geometry/Unit3.h @@ -90,6 +90,8 @@ class Unit3 { /// Copy assignment Unit3& operator=(const Unit3 & u) { p_ = u.p_; + B_ = u.B_; + H_B_ = u.H_B_; return *this; } @@ -214,7 +216,7 @@ class Unit3 { /// @} public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; // Define GTSAM traits diff --git a/gtsam/geometry/tests/testCal3Bundler.cpp b/gtsam/geometry/tests/testCal3Bundler.cpp index 8de049fa4..b821d295b 100644 --- a/gtsam/geometry/tests/testCal3Bundler.cpp +++ b/gtsam/geometry/tests/testCal3Bundler.cpp @@ -11,11 +11,12 @@ /** * @file testCal3Bundler.cpp - * @brief Unit tests for transform derivatives + * @brief Unit tests for Bundler calibration model. */ #include #include +#include #include #include @@ -25,74 +26,98 @@ GTSAM_CONCEPT_TESTABLE_INST(Cal3Bundler) GTSAM_CONCEPT_MANIFOLD_INST(Cal3Bundler) static Cal3Bundler K(500, 1e-3, 1e-3, 1000, 2000); -static Point2 p(2,3); +static Point2 p(2, 3); /* ************************************************************************* */ -TEST( Cal3Bundler, vector) -{ +TEST(Cal3Bundler, vector) { Cal3Bundler K; Vector expected(3); expected << 1, 0, 0; - CHECK(assert_equal(expected,K.vector())); + CHECK(assert_equal(expected, K.vector())); } /* ************************************************************************* */ -TEST( Cal3Bundler, uncalibrate) -{ - Vector v = K.vector() ; - double r = p.x()*p.x() + p.y()*p.y() ; - double g = v[0]*(1+v[1]*r+v[2]*r*r) ; - Point2 expected (1000+g*p.x(), 2000+g*p.y()) ; +TEST(Cal3Bundler, uncalibrate) { + Vector v = K.vector(); + double r = p.x() * p.x() + p.y() * p.y(); + double g = v[0] * (1 + v[1] * r + v[2] * r * r); + Point2 expected(1000 + g * p.x(), 2000 + g * p.y()); Point2 actual = K.uncalibrate(p); - CHECK(assert_equal(expected,actual)); + CHECK(assert_equal(expected, actual)); } -TEST( Cal3Bundler, calibrate ) -{ +TEST(Cal3Bundler, calibrate) { Point2 pn(0.5, 0.5); Point2 pi = K.uncalibrate(pn); Point2 pn_hat = K.calibrate(pi); - CHECK( traits::Equals(pn, pn_hat, 1e-5)); + CHECK(traits::Equals(pn, pn_hat, 1e-5)); } /* ************************************************************************* */ -Point2 uncalibrate_(const Cal3Bundler& k, const Point2& pt) { return k.uncalibrate(pt); } +Point2 uncalibrate_(const Cal3Bundler& k, const Point2& pt) { + return k.uncalibrate(pt); +} + +Point2 calibrate_(const Cal3Bundler& k, const Point2& pt) { + return k.calibrate(pt); +} /* ************************************************************************* */ -TEST( Cal3Bundler, Duncalibrate) -{ +TEST(Cal3Bundler, Duncalibrate) { Matrix Dcal, Dp; Point2 actual = K.uncalibrate(p, Dcal, Dp); Point2 expected(2182, 3773); - CHECK(assert_equal(expected,actual,1e-7)); + CHECK(assert_equal(expected, actual, 1e-7)); Matrix numerical1 = numericalDerivative21(uncalibrate_, K, p); Matrix numerical2 = numericalDerivative22(uncalibrate_, K, p); - 
CHECK(assert_equal(numerical1,Dcal,1e-7)); - CHECK(assert_equal(numerical2,Dp,1e-7)); - CHECK(assert_equal(numerical1,K.D2d_calibration(p),1e-7)); - CHECK(assert_equal(numerical2,K.D2d_intrinsic(p),1e-7)); - Matrix Dcombined(2,5); - Dcombined << Dp, Dcal; - CHECK(assert_equal(Dcombined,K.D2d_intrinsic_calibration(p),1e-7)); + CHECK(assert_equal(numerical1, Dcal, 1e-7)); + CHECK(assert_equal(numerical2, Dp, 1e-7)); } /* ************************************************************************* */ -TEST( Cal3Bundler, assert_equal) -{ - CHECK(assert_equal(K,K,1e-7)); +TEST(Cal3Bundler, Dcalibrate) { + Matrix Dcal, Dp; + Point2 pn(0.5, 0.5); + Point2 pi = K.uncalibrate(pn); + Point2 actual = K.calibrate(pi, Dcal, Dp); + CHECK(assert_equal(pn, actual, 1e-7)); + Matrix numerical1 = numericalDerivative21(calibrate_, K, pi); + Matrix numerical2 = numericalDerivative22(calibrate_, K, pi); + CHECK(assert_equal(numerical1, Dcal, 1e-5)); + CHECK(assert_equal(numerical2, Dp, 1e-5)); } /* ************************************************************************* */ -TEST( Cal3Bundler, retract) -{ +TEST(Cal3Bundler, assert_equal) { CHECK(assert_equal(K, K, 1e-7)); } + +/* ************************************************************************* */ +TEST(Cal3Bundler, retract) { Cal3Bundler expected(510, 2e-3, 2e-3, 1000, 2000); - Vector d(3); + EXPECT_LONGS_EQUAL(3, expected.dim()); + + EXPECT_LONGS_EQUAL(Cal3Bundler::Dim(), 3); + EXPECT_LONGS_EQUAL(expected.dim(), 3); + + Vector3 d; d << 10, 1e-3, 1e-3; Cal3Bundler actual = K.retract(d); - CHECK(assert_equal(expected,actual,1e-7)); - CHECK(assert_equal(d,K.localCoordinates(actual),1e-7)); + CHECK(assert_equal(expected, actual, 1e-7)); + CHECK(assert_equal(d, K.localCoordinates(actual), 1e-7)); } /* ************************************************************************* */ -int main() { TestResult tr; return TestRegistry::runAllTests(tr); } +TEST(Cal3_S2, Print) { + Cal3Bundler cal(1, 2, 3, 4, 5); + std::stringstream os; + os << "f: " << cal.fx() << ", k1: " << cal.k1() << ", k2: " << cal.k2() + << ", px: " << cal.px() << ", py: " << cal.py(); + + EXPECT(assert_stdout_equal(os.str(), cal)); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testCal3DFisheye.cpp b/gtsam/geometry/tests/testCal3DFisheye.cpp index 9203b5438..28064a92c 100644 --- a/gtsam/geometry/tests/testCal3DFisheye.cpp +++ b/gtsam/geometry/tests/testCal3DFisheye.cpp @@ -15,12 +15,14 @@ * @author ghaggin */ -#include #include +#include #include #include #include +#include + using namespace gtsam; GTSAM_CONCEPT_TESTABLE_INST(Cal3Fisheye) @@ -30,12 +32,31 @@ static const double fx = 250, fy = 260, s = 0.1, u0 = 320, v0 = 240; static Cal3Fisheye K(fx, fy, s, u0, v0, -0.013721808247486035, 0.020727425669427896, -0.012786476702685545, 0.0025242267320687625); -static Point2 p(2, 3); +static Point2 kTestPoint2(2, 3); + +/* ************************************************************************* */ +TEST(Cal3Fisheye, assert_equal) { CHECK(assert_equal(K, K, 1e-5)); } + +/* ************************************************************************* */ +TEST(Cal3Fisheye, retract) { + Cal3Fisheye expected(K.fx() + 1, K.fy() + 2, K.skew() + 3, K.px() + 4, + K.py() + 5, K.k1() + 6, K.k2() + 7, K.k3() + 8, + K.k4() + 9); + + EXPECT_LONGS_EQUAL(Cal3Fisheye::Dim(), 9); + 
EXPECT_LONGS_EQUAL(expected.dim(), 9); + + Vector9 d; + d << 1, 2, 3, 4, 5, 6, 7, 8, 9; + Cal3Fisheye actual = K.retract(d); + CHECK(assert_equal(expected, actual, 1e-7)); + CHECK(assert_equal(d, K.localCoordinates(actual), 1e-7)); +} /* ************************************************************************* */ TEST(Cal3Fisheye, uncalibrate1) { // Calculate the solution - const double xi = p.x(), yi = p.y(); + const double xi = kTestPoint2.x(), yi = kTestPoint2.y(); const double r = sqrt(xi * xi + yi * yi); const double t = atan(r); const double tt = t * t, t4 = tt * tt, t6 = tt * t4, t8 = t4 * t4; @@ -46,32 +67,42 @@ TEST(Cal3Fisheye, uncalibrate1) { Point2 uv_sol(v[0] / v[2], v[1] / v[2]); - Point2 uv = K.uncalibrate(p); + Point2 uv = K.uncalibrate(kTestPoint2); CHECK(assert_equal(uv, uv_sol)); } /* ************************************************************************* */ -/** - * Check that a point at (0,0) projects to the - * image center. - */ +// For numerical derivatives +Point2 f(const Cal3Fisheye& k, const Point2& pt) { return k.uncalibrate(pt); } + +/* ************************************************************************* */ +TEST(Cal3Fisheye, Derivatives) { + Matrix H1, H2; + K.uncalibrate(kTestPoint2, H1, H2); + CHECK(assert_equal(numericalDerivative21(f, K, kTestPoint2, 1e-7), H1, 1e-5)); + CHECK(assert_equal(numericalDerivative22(f, K, kTestPoint2, 1e-7), H2, 1e-5)); +} + +/* ************************************************************************* */ +// Check that a point at (0,0) projects to the image center. TEST(Cal3Fisheye, uncalibrate2) { Point2 pz(0, 0); - auto uv = K.uncalibrate(pz); + Matrix H1, H2; + auto uv = K.uncalibrate(pz, H1, H2); CHECK(assert_equal(uv, Point2(u0, v0))); + CHECK(assert_equal(numericalDerivative21(f, K, pz, 1e-7), H1, 1e-5)); + // TODO(frank): the second jacobian is all NaN for the image center! + // CHECK(assert_equal(numericalDerivative22(f, K, pz, 1e-7), H2, 1e-5)); } /* ************************************************************************* */ -/** - * This test uses cv2::fisheye::projectPoints to test that uncalibrate - * properly projects a point into the image plane. One notable difference - * between opencv and the Cal3Fisheye::uncalibrate function is the skew - * parameter. The equivalence is alpha = s/fx. - * - * - * Python script to project points with fisheye model in OpenCv - * (script run with OpenCv version 4.2.0 and Numpy version 1.18.2) - */ +// This test uses cv2::fisheye::projectPoints to test that uncalibrate +// properly projects a point into the image plane. One notable difference +// between opencv and the Cal3Fisheye::uncalibrate function is the skew +// parameter. The equivalence is alpha = s/fx. 
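// A standalone numeric sketch of the equidistant fisheye model assumed here
// (the same model cv2.fisheye uses): theta_d = theta * (1 + k1*theta^2 +
// k2*theta^4 + k3*theta^6 + k4*theta^8), followed by the pinhole K with skew s
// (OpenCV's alpha = s/fx). With the calibration constants from the static K in
// this file it reproduces the OpenCV script output quoted below for the 3D
// point (23, 27, 31).
#include <cmath>
#include <cstdio>

int main() {
  const double fx = 250, fy = 260, s = 0.1, u0 = 320, v0 = 240;
  const double k1 = -0.013721808247486035, k2 = 0.020727425669427896;
  const double k3 = -0.012786476702685545, k4 = 0.0025242267320687625;
  const double x = 23.0 / 31, y = 27.0 / 31;  // normalized image point
  const double r = std::sqrt(x * x + y * y);
  const double t = std::atan(r);              // angle from the optical axis
  const double t2 = t * t, t4 = t2 * t2, t6 = t2 * t4, t8 = t4 * t4;
  const double td = t * (1 + k1 * t2 + k2 * t4 + k3 * t6 + k4 * t8);
  const double xd = (td / r) * x, yd = (td / r) * y;
  std::printf("(%.6f, %.6f)\n", fx * xd + s * yd + u0, fy * yd + v0);
  // prints approximately (457.826381, 408.189058)
  return 0;
}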
+// +// Python script to project points with fisheye model in OpenCv +// (script run with OpenCv version 4.2.0 and Numpy version 1.18.2) // clang-format off /* =========================================================== @@ -94,6 +125,7 @@ tvec = np.float64([[0.,0.,0.]]); imagePoints, jacobian = cv2.fisheye.projectPoints(objpts, rvec, tvec, cameraMatrix, distCoeffs, alpha=alpha) np.set_printoptions(precision=14) print(imagePoints) + =========================================================== * Script output: [[[457.82638130304935 408.18905848512986]]] */ @@ -134,21 +166,18 @@ TEST(Cal3Fisheye, calibrate1) { } /* ************************************************************************* */ -/** - * Check that calibrate returns (0,0) for the image center - */ +// Check that calibrate returns (0,0) for the image center TEST(Cal3Fisheye, calibrate2) { Point2 uv(u0, v0); auto xi_hat = K.calibrate(uv); CHECK(assert_equal(xi_hat, Point2(0, 0))) } -/** - * Run calibrate on OpenCv test from uncalibrate3 - * (script shown above) - * 3d point: (23, 27, 31) - * 2d point in image plane: (457.82638130304935, 408.18905848512986) - */ +/* ************************************************************************* */ +// Run calibrate on OpenCv test from uncalibrate3 +// (script shown above) +// 3d point: (23, 27, 31) +// 2d point in image plane: (457.82638130304935, 408.18905848512986) TEST(Cal3Fisheye, calibrate3) { Point3 p3(23, 27, 31); Point2 xi(p3.x() / p3.z(), p3.y() / p3.z()); @@ -157,45 +186,31 @@ TEST(Cal3Fisheye, calibrate3) { CHECK(assert_equal(xi_hat, xi)); } -/* ************************************************************************* */ -// For numerical derivatives -Point2 uncalibrate_(const Cal3Fisheye& k, const Point2& pt) { - return k.uncalibrate(pt); +Point2 calibrate_(const Cal3Fisheye& k, const Point2& pt) { + return k.calibrate(pt); } /* ************************************************************************* */ -TEST(Cal3Fisheye, Duncalibrate1) { - Matrix computed; - K.uncalibrate(p, computed, boost::none); - Matrix numerical = numericalDerivative21(uncalibrate_, K, p, 1e-7); - CHECK(assert_equal(numerical, computed, 1e-5)); - Matrix separate = K.D2d_calibration(p); - CHECK(assert_equal(numerical, separate, 1e-5)); +TEST(Cal3Fisheye, Dcalibrate) { + Point2 p(0.5, 0.5); + Point2 pi = K.uncalibrate(p); + Matrix Dcal, Dp; + K.calibrate(pi, Dcal, Dp); + Matrix numerical1 = numericalDerivative21(calibrate_, K, pi); + CHECK(assert_equal(numerical1, Dcal, 1e-5)); + Matrix numerical2 = numericalDerivative22(calibrate_, K, pi); + CHECK(assert_equal(numerical2, Dp, 1e-5)); } /* ************************************************************************* */ -TEST(Cal3Fisheye, Duncalibrate2) { - Matrix computed; - K.uncalibrate(p, boost::none, computed); - Matrix numerical = numericalDerivative22(uncalibrate_, K, p, 1e-7); - CHECK(assert_equal(numerical, computed, 1e-5)); - Matrix separate = K.D2d_intrinsic(p); - CHECK(assert_equal(numerical, separate, 1e-5)); -} - -/* ************************************************************************* */ -TEST(Cal3Fisheye, assert_equal) { CHECK(assert_equal(K, K, 1e-5)); } - -/* ************************************************************************* */ -TEST(Cal3Fisheye, retract) { - Cal3Fisheye expected(K.fx() + 1, K.fy() + 2, K.skew() + 3, K.px() + 4, - K.py() + 5, K.k1() + 6, K.k2() + 7, K.k3() + 8, - K.k4() + 9); - Vector d(9); - d << 1, 2, 3, 4, 5, 6, 7, 8, 9; - Cal3Fisheye actual = K.retract(d); - CHECK(assert_equal(expected, actual, 1e-7)); - 
CHECK(assert_equal(d, K.localCoordinates(actual), 1e-7)); +TEST(Cal3Fisheye, Print) { + Cal3Fisheye cal(1, 2, 3, 4, 5, 6, 7, 8, 9); + std::stringstream os; + os << "fx: " << cal.fx() << ", fy: " << cal.fy() << ", s: " << cal.skew() + << ", px: " << cal.px() << ", py: " << cal.py() << ", k1: " << cal.k1() + << ", k2: " << cal.k2() << ", k3: " << cal.k3() << ", k4: " << cal.k4(); + + EXPECT(assert_stdout_equal(os.str(), cal)); } /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testCal3DS2.cpp b/gtsam/geometry/tests/testCal3DS2.cpp index 416665d46..7ef6e5001 100644 --- a/gtsam/geometry/tests/testCal3DS2.cpp +++ b/gtsam/geometry/tests/testCal3DS2.cpp @@ -11,12 +11,12 @@ /** * @file testCal3DS2.cpp - * @brief Unit tests for transform derivatives + * @brief Unit tests for Cal3DS2 calibration model. */ - #include #include +#include #include #include @@ -25,73 +25,103 @@ using namespace gtsam; GTSAM_CONCEPT_TESTABLE_INST(Cal3DS2) GTSAM_CONCEPT_MANIFOLD_INST(Cal3DS2) -static Cal3DS2 K(500, 100, 0.1, 320, 240, 1e-3, 2.0*1e-3, 3.0*1e-3, 4.0*1e-3); -static Point2 p(2,3); +static Cal3DS2 K(500, 100, 0.1, 320, 240, 1e-3, 2.0 * 1e-3, 3.0 * 1e-3, + 4.0 * 1e-3); +static Point2 p(2, 3); /* ************************************************************************* */ -TEST( Cal3DS2, uncalibrate) -{ - Vector k = K.k() ; - double r = p.x()*p.x() + p.y()*p.y() ; - double g = 1+k[0]*r+k[1]*r*r ; - double tx = 2*k[2]*p.x()*p.y() + k[3]*(r+2*p.x()*p.x()) ; - double ty = k[2]*(r+2*p.y()*p.y()) + 2*k[3]*p.x()*p.y() ; - Vector v_hat = (Vector(3) << g*p.x() + tx, g*p.y() + ty, 1.0).finished(); - Vector v_i = K.K() * v_hat ; - Point2 p_i(v_i(0)/v_i(2), v_i(1)/v_i(2)) ; +TEST(Cal3DS2, Uncalibrate) { + Vector k = K.k(); + double r = p.x() * p.x() + p.y() * p.y(); + double g = 1 + k[0] * r + k[1] * r * r; + double tx = 2 * k[2] * p.x() * p.y() + k[3] * (r + 2 * p.x() * p.x()); + double ty = k[2] * (r + 2 * p.y() * p.y()) + 2 * k[3] * p.x() * p.y(); + Vector v_hat = (Vector(3) << g * p.x() + tx, g * p.y() + ty, 1.0).finished(); + Vector v_i = K.K() * v_hat; + Point2 p_i(v_i(0) / v_i(2), v_i(1) / v_i(2)); Point2 q = K.uncalibrate(p); - CHECK(assert_equal(q,p_i)); + CHECK(assert_equal(q, p_i)); } -TEST( Cal3DS2, calibrate ) -{ +TEST(Cal3DS2, Calibrate) { Point2 pn(0.5, 0.5); Point2 pi = K.uncalibrate(pn); Point2 pn_hat = K.calibrate(pi); - CHECK( traits::Equals(pn, pn_hat, 1e-5)); + CHECK(traits::Equals(pn, pn_hat, 1e-5)); } -Point2 uncalibrate_(const Cal3DS2& k, const Point2& pt) { return k.uncalibrate(pt); } +Point2 uncalibrate_(const Cal3DS2& k, const Point2& pt) { + return k.uncalibrate(pt); +} /* ************************************************************************* */ -TEST( Cal3DS2, Duncalibrate1) -{ +TEST(Cal3DS2, Duncalibrate1) { Matrix computed; K.uncalibrate(p, computed, boost::none); Matrix numerical = numericalDerivative21(uncalibrate_, K, p, 1e-7); - CHECK(assert_equal(numerical,computed,1e-5)); + CHECK(assert_equal(numerical, computed, 1e-5)); Matrix separate = K.D2d_calibration(p); - CHECK(assert_equal(numerical,separate,1e-5)); + CHECK(assert_equal(numerical, separate, 1e-5)); } /* ************************************************************************* */ -TEST( Cal3DS2, Duncalibrate2) -{ - Matrix computed; K.uncalibrate(p, boost::none, computed); +TEST(Cal3DS2, Duncalibrate2) { + Matrix computed; + K.uncalibrate(p, boost::none, computed); Matrix numerical = numericalDerivative22(uncalibrate_, K, p, 1e-7); - 
CHECK(assert_equal(numerical,computed,1e-5)); + CHECK(assert_equal(numerical, computed, 1e-5)); Matrix separate = K.D2d_intrinsic(p); - CHECK(assert_equal(numerical,separate,1e-5)); + CHECK(assert_equal(numerical, separate, 1e-5)); +} + +Point2 calibrate_(const Cal3DS2& k, const Point2& pt) { + return k.calibrate(pt); } /* ************************************************************************* */ -TEST( Cal3DS2, assert_equal) -{ - CHECK(assert_equal(K,K,1e-5)); +TEST(Cal3DS2, Dcalibrate) { + Point2 pn(0.5, 0.5); + Point2 pi = K.uncalibrate(pn); + Matrix Dcal, Dp; + K.calibrate(pi, Dcal, Dp); + Matrix numerical1 = numericalDerivative21(calibrate_, K, pi, 1e-7); + CHECK(assert_equal(numerical1, Dcal, 1e-5)); + Matrix numerical2 = numericalDerivative22(calibrate_, K, pi, 1e-7); + CHECK(assert_equal(numerical2, Dp, 1e-5)); } /* ************************************************************************* */ -TEST( Cal3DS2, retract) -{ +TEST(Cal3DS2, Equal) { CHECK(assert_equal(K, K, 1e-5)); } + +/* ************************************************************************* */ +TEST(Cal3DS2, Retract) { Cal3DS2 expected(500 + 1, 100 + 2, 0.1 + 3, 320 + 4, 240 + 5, 1e-3 + 6, - 2.0 * 1e-3 + 7, 3.0 * 1e-3 + 8, 4.0 * 1e-3 + 9); - Vector d(9); - d << 1,2,3,4,5,6,7,8,9; + 2.0 * 1e-3 + 7, 3.0 * 1e-3 + 8, 4.0 * 1e-3 + 9); + + EXPECT_LONGS_EQUAL(Cal3DS2::Dim(), 9); + EXPECT_LONGS_EQUAL(expected.dim(), 9); + + Vector9 d; + d << 1, 2, 3, 4, 5, 6, 7, 8, 9; Cal3DS2 actual = K.retract(d); - CHECK(assert_equal(expected,actual,1e-7)); - CHECK(assert_equal(d,K.localCoordinates(actual),1e-7)); + CHECK(assert_equal(expected, actual, 1e-7)); + CHECK(assert_equal(d, K.localCoordinates(actual), 1e-7)); } /* ************************************************************************* */ -int main() { TestResult tr; return TestRegistry::runAllTests(tr); } +TEST(Cal3DS2, Print) { + Cal3DS2 cal(1, 2, 3, 4, 5, 6, 7, 8, 9); + std::stringstream os; + os << "fx: " << cal.fx() << ", fy: " << cal.fy() << ", s: " << cal.skew() + << ", px: " << cal.px() << ", py: " << cal.py() << ", k1: " << cal.k1() + << ", k2: " << cal.k2() << ", p1: " << cal.p1() << ", p2: " << cal.p2(); + + EXPECT(assert_stdout_equal(os.str(), cal)); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testCal3Unified.cpp b/gtsam/geometry/tests/testCal3Unified.cpp index 2c5ffd7fb..648bb358c 100644 --- a/gtsam/geometry/tests/testCal3Unified.cpp +++ b/gtsam/geometry/tests/testCal3Unified.cpp @@ -10,17 +10,18 @@ * -------------------------------------------------------------------------- */ /** - * @file testCal3Unify.cpp - * @brief Unit tests for transform derivatives + * @file testCal3Unified.cpp + * @brief Unit tests for Cal3Unified calibration model. 
*/ #include #include +#include #include #include -#include #include +#include using namespace gtsam; @@ -35,74 +36,87 @@ V = [0.1, 1e-3, 2.0*1e-3, 3.0*1e-3, 4.0*1e-3, 0, 0, 100, 105, 320, 240]; matlab toolbox available at http://homepages.laas.fr/~cmei/index.php/Toolbox */ -static Cal3Unified K(100, 105, 0.0, 320, 240, 1e-3, 2.0*1e-3, 3.0*1e-3, 4.0*1e-3, 0.1); +static Cal3Unified K(100, 105, 0.0, 320, 240, 1e-3, 2.0 * 1e-3, 3.0 * 1e-3, + 4.0 * 1e-3, 0.1); static Point2 p(0.5, 0.7); /* ************************************************************************* */ -TEST( Cal3Unified, uncalibrate) -{ - Point2 p_i(364.7791831734982, 305.6677211952602) ; +TEST(Cal3Unified, Uncalibrate) { + Point2 p_i(364.7791831734982, 305.6677211952602); Point2 q = K.uncalibrate(p); - CHECK(assert_equal(q,p_i)); + CHECK(assert_equal(q, p_i)); } /* ************************************************************************* */ -TEST( Cal3Unified, spaceNplane) -{ +TEST(Cal3Unified, SpaceNplane) { Point2 q = K.spaceToNPlane(p); CHECK(assert_equal(Point2(0.441731600049497, 0.618424240069295), q)); CHECK(assert_equal(p, K.nPlaneToSpace(q))); } /* ************************************************************************* */ -TEST( Cal3Unified, calibrate) -{ +TEST(Cal3Unified, Calibrate) { Point2 pi = K.uncalibrate(p); Point2 pn_hat = K.calibrate(pi); - CHECK( traits::Equals(p, pn_hat, 1e-8)); + CHECK(traits::Equals(p, pn_hat, 1e-8)); } -Point2 uncalibrate_(const Cal3Unified& k, const Point2& pt) { return k.uncalibrate(pt); } +Point2 uncalibrate_(const Cal3Unified& k, const Point2& pt) { + return k.uncalibrate(pt); +} /* ************************************************************************* */ -TEST( Cal3Unified, Duncalibrate1) -{ +TEST(Cal3Unified, Duncalibrate1) { Matrix computed; K.uncalibrate(p, computed, boost::none); Matrix numerical = numericalDerivative21(uncalibrate_, K, p, 1e-7); - CHECK(assert_equal(numerical,computed,1e-6)); + CHECK(assert_equal(numerical, computed, 1e-6)); } /* ************************************************************************* */ -TEST( Cal3Unified, Duncalibrate2) -{ +TEST(Cal3Unified, Duncalibrate2) { Matrix computed; K.uncalibrate(p, boost::none, computed); Matrix numerical = numericalDerivative22(uncalibrate_, K, p, 1e-7); - CHECK(assert_equal(numerical,computed,1e-6)); + CHECK(assert_equal(numerical, computed, 1e-6)); +} + +Point2 calibrate_(const Cal3Unified& k, const Point2& pt) { + return k.calibrate(pt); } /* ************************************************************************* */ -TEST( Cal3Unified, assert_equal) -{ - CHECK(assert_equal(K,K,1e-9)); +TEST(Cal3Unified, Dcalibrate) { + Point2 pi = K.uncalibrate(p); + Matrix Dcal, Dp; + K.calibrate(pi, Dcal, Dp); + Matrix numerical1 = numericalDerivative21(calibrate_, K, pi); + CHECK(assert_equal(numerical1, Dcal, 1e-5)); + Matrix numerical2 = numericalDerivative22(calibrate_, K, pi); + CHECK(assert_equal(numerical2, Dp, 1e-5)); } /* ************************************************************************* */ -TEST( Cal3Unified, retract) -{ - Cal3Unified expected(100 + 2, 105 + 3, 0.0 + 4, 320 + 5, 240 + 6, - 1e-3 + 7, 2.0*1e-3 + 8, 3.0*1e-3 + 9, 4.0*1e-3 + 10, 0.1 + 1); - Vector d(10); +TEST(Cal3Unified, Equal) { CHECK(assert_equal(K, K, 1e-9)); } + +/* ************************************************************************* */ +TEST(Cal3Unified, Retract) { + Cal3Unified expected(100 + 2, 105 + 3, 0.0 + 4, 320 + 5, 240 + 6, 1e-3 + 7, + 2.0 * 1e-3 + 8, 3.0 * 1e-3 + 9, 4.0 * 1e-3 + 10, + 0.1 + 1); + + 
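// A standalone check of the SpaceNplane expectation above, assuming the usual
// unified (Mei) model convention: lift the perspective point (x, y, 1) onto the
// unit sphere, then divide by (z + xi) with mirror parameter xi = 0.1. The
// printed numbers match the test's expected n-plane point.
#include <cmath>
#include <cstdio>

int main() {
  const double x = 0.5, y = 0.7, xi = 0.1;
  const double n = std::sqrt(x * x + y * y + 1.0);    // norm of (x, y, 1)
  const double xs = x / n, ys = y / n, zs = 1.0 / n;  // point on the unit sphere
  std::printf("(%.15f, %.15f)\n", xs / (zs + xi), ys / (zs + xi));
  // prints approximately (0.441731600..., 0.618424240...)
  return 0;
}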
EXPECT_LONGS_EQUAL(Cal3Unified::Dim(), 10); + EXPECT_LONGS_EQUAL(expected.dim(), 10); + + Vector10 d; d << 2, 3, 4, 5, 6, 7, 8, 9, 10, 1; Cal3Unified actual = K.retract(d); - CHECK(assert_equal(expected,actual,1e-9)); - CHECK(assert_equal(d,K.localCoordinates(actual),1e-9)); + CHECK(assert_equal(expected, actual, 1e-9)); + CHECK(assert_equal(d, K.localCoordinates(actual), 1e-9)); } /* ************************************************************************* */ -TEST( Cal3Unified, DerivedValue) -{ +TEST(Cal3Unified, DerivedValue) { Values values; Cal3Unified cal(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); Key key = 1; @@ -110,9 +124,24 @@ TEST( Cal3Unified, DerivedValue) Cal3Unified calafter = values.at(key); - CHECK(assert_equal(cal,calafter,1e-9)); + CHECK(assert_equal(cal, calafter, 1e-9)); } /* ************************************************************************* */ -int main() { TestResult tr; return TestRegistry::runAllTests(tr); } +TEST(Cal3Unified, Print) { + Cal3Unified cal(0, 1, 2, 3, 4, 5, 6, 7, 8, 9); + std::stringstream os; + os << "fx: " << cal.fx() << ", fy: " << cal.fy() << ", s: " << cal.skew() + << ", px: " << cal.px() << ", py: " << cal.py() << ", k1: " << cal.k1() + << ", k2: " << cal.k2() << ", p1: " << cal.p1() << ", p2: " << cal.p2() + << ", xi: " << cal.xi(); + + EXPECT(assert_stdout_equal(os.str(), cal)); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testCal3_S2.cpp b/gtsam/geometry/tests/testCal3_S2.cpp index ccecb09c3..41be5ea8e 100644 --- a/gtsam/geometry/tests/testCal3_S2.cpp +++ b/gtsam/geometry/tests/testCal3_S2.cpp @@ -11,11 +11,12 @@ /** * @file testCal3_S2.cpp - * @brief Unit tests for transform derivatives + * @brief Unit tests for basic Cal3_S2 calibration model. 
*/ #include #include +#include #include #include @@ -30,90 +31,94 @@ static Point2 p_uv(1320.3, 1740); static Point2 p_xy(2, 3); /* ************************************************************************* */ -TEST( Cal3_S2, easy_constructor) -{ +TEST(Cal3_S2, Constructor) { Cal3_S2 expected(554.256, 554.256, 0, 640 / 2, 480 / 2); - double fov = 60; // degrees - size_t w=640,h=480; - Cal3_S2 actual(fov,w,h); + double fov = 60; // degrees + size_t w = 640, h = 480; + Cal3_S2 actual(fov, w, h); - CHECK(assert_equal(expected,actual,1e-3)); + CHECK(assert_equal(expected, actual, 1e-3)); } /* ************************************************************************* */ -TEST( Cal3_S2, calibrate) -{ - Point2 intrinsic(2,3); +TEST(Cal3_S2, Calibrate) { + Point2 intrinsic(2, 3); Point2 expectedimage(1320.3, 1740); Point2 imagecoordinates = K.uncalibrate(intrinsic); - CHECK(assert_equal(expectedimage,imagecoordinates)); - CHECK(assert_equal(intrinsic,K.calibrate(imagecoordinates))); + CHECK(assert_equal(expectedimage, imagecoordinates)); + CHECK(assert_equal(intrinsic, K.calibrate(imagecoordinates))); } /* ************************************************************************* */ -TEST( Cal3_S2, calibrate_homogeneous) { +TEST(Cal3_S2, CalibrateHomogeneous) { Vector3 intrinsic(2, 3, 1); Vector3 image(1320.3, 1740, 1); - CHECK(assert_equal((Vector)intrinsic,(Vector)K.calibrate(image))); + CHECK(assert_equal((Vector)intrinsic, (Vector)K.calibrate(image))); } /* ************************************************************************* */ -Point2 uncalibrate_(const Cal3_S2& k, const Point2& pt) { return k.uncalibrate(pt); } -TEST( Cal3_S2, Duncalibrate1) -{ - Matrix25 computed; K.uncalibrate(p, computed, boost::none); +Point2 uncalibrate_(const Cal3_S2& k, const Point2& pt) { + return k.uncalibrate(pt); +} + +TEST(Cal3_S2, Duncalibrate1) { + Matrix25 computed; + K.uncalibrate(p, computed, boost::none); Matrix numerical = numericalDerivative21(uncalibrate_, K, p); - CHECK(assert_equal(numerical,computed,1e-8)); + CHECK(assert_equal(numerical, computed, 1e-8)); } /* ************************************************************************* */ -TEST( Cal3_S2, Duncalibrate2) -{ - Matrix computed; K.uncalibrate(p, boost::none, computed); +TEST(Cal3_S2, Duncalibrate2) { + Matrix computed; + K.uncalibrate(p, boost::none, computed); Matrix numerical = numericalDerivative22(uncalibrate_, K, p); - CHECK(assert_equal(numerical,computed,1e-9)); + CHECK(assert_equal(numerical, computed, 1e-9)); +} + +Point2 calibrate_(const Cal3_S2& k, const Point2& pt) { + return k.calibrate(pt); } -Point2 calibrate_(const Cal3_S2& k, const Point2& pt) {return k.calibrate(pt); } /* ************************************************************************* */ -TEST(Cal3_S2, Dcalibrate1) -{ - Matrix computed; - Point2 expected = K.calibrate(p_uv, computed, boost::none); - Matrix numerical = numericalDerivative21(calibrate_, K, p_uv); - CHECK(assert_equal(expected, p_xy, 1e-8)); - CHECK(assert_equal(numerical, computed, 1e-8)); +TEST(Cal3_S2, Dcalibrate1) { + Matrix computed; + Point2 expected = K.calibrate(p_uv, computed, boost::none); + Matrix numerical = numericalDerivative21(calibrate_, K, p_uv); + CHECK(assert_equal(expected, p_xy, 1e-8)); + CHECK(assert_equal(numerical, computed, 1e-8)); } /* ************************************************************************* */ -TEST(Cal3_S2, Dcalibrate2) -{ - Matrix computed; - Point2 expected = K.calibrate(p_uv, boost::none, computed); - Matrix numerical = 
numericalDerivative22(calibrate_, K, p_uv); - CHECK(assert_equal(expected, p_xy, 1e-8)); - CHECK(assert_equal(numerical, computed, 1e-8)); +TEST(Cal3_S2, Dcalibrate2) { + Matrix computed; + Point2 expected = K.calibrate(p_uv, boost::none, computed); + Matrix numerical = numericalDerivative22(calibrate_, K, p_uv); + CHECK(assert_equal(expected, p_xy, 1e-8)); + CHECK(assert_equal(numerical, computed, 1e-8)); } /* ************************************************************************* */ -TEST( Cal3_S2, assert_equal) -{ - CHECK(assert_equal(K,K,1e-9)); +TEST(Cal3_S2, Equal) { + CHECK(assert_equal(K, K, 1e-9)); Cal3_S2 K1(500, 500, 0.1, 640 / 2, 480 / 2); - CHECK(assert_equal(K,K1,1e-9)); + CHECK(assert_equal(K, K1, 1e-9)); } /* ************************************************************************* */ -TEST( Cal3_S2, retract) -{ - Cal3_S2 expected(500+1, 500+2, 0.1+3, 640 / 2+4, 480 / 2+5); - Vector d(5); - d << 1,2,3,4,5; +TEST(Cal3_S2, Retract) { + Cal3_S2 expected(500 + 1, 500 + 2, 0.1 + 3, 640 / 2 + 4, 480 / 2 + 5); + + EXPECT_LONGS_EQUAL(Cal3_S2::Dim(), 5); + EXPECT_LONGS_EQUAL(expected.dim(), 5); + + Vector5 d; + d << 1, 2, 3, 4, 5; Cal3_S2 actual = K.retract(d); - CHECK(assert_equal(expected,actual,1e-7)); - CHECK(assert_equal(d,K.localCoordinates(actual),1e-7)); + CHECK(assert_equal(expected, actual, 1e-7)); + CHECK(assert_equal(d, K.localCoordinates(actual), 1e-7)); } /* ************************************************************************* */ @@ -121,10 +126,19 @@ TEST(Cal3_S2, between) { Cal3_S2 k1(5, 5, 5, 5, 5), k2(5, 6, 7, 8, 9); Matrix H1, H2; - EXPECT(assert_equal(Cal3_S2(0,1,2,3,4), k1.between(k2, H1, H2))); + EXPECT(assert_equal(Cal3_S2(0, 1, 2, 3, 4), k1.between(k2, H1, H2))); EXPECT(assert_equal(-I_5x5, H1)); EXPECT(assert_equal(I_5x5, H2)); +} +/* ************************************************************************* */ +TEST(Cal3_S2, Print) { + Cal3_S2 cal(5, 5, 5, 5, 5); + std::stringstream os; + os << "fx: " << cal.fx() << ", fy: " << cal.fy() << ", s: " << cal.skew() + << ", px: " << cal.px() << ", py: " << cal.py(); + + EXPECT(assert_stdout_equal(os.str(), cal)); } /* ************************************************************************* */ @@ -133,4 +147,3 @@ int main() { return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ - diff --git a/gtsam/geometry/tests/testCal3_S2Stereo.cpp b/gtsam/geometry/tests/testCal3_S2Stereo.cpp new file mode 100644 index 000000000..070eee8fe --- /dev/null +++ b/gtsam/geometry/tests/testCal3_S2Stereo.cpp @@ -0,0 +1,129 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testCal3_S2Stereo.cpp + * @brief Unit tests for stereo-rig calibration model. 
+ */ + +#include +#include +#include +#include +#include + +using namespace gtsam; + +GTSAM_CONCEPT_TESTABLE_INST(Cal3_S2Stereo) +GTSAM_CONCEPT_MANIFOLD_INST(Cal3_S2Stereo) + +static Cal3_S2Stereo K(500, 500, 0.1, 640 / 2, 480 / 2, 1); +static Point2 p(1, -2); +static Point2 p_uv(1320.3, 1740); +static Point2 p_xy(2, 3); + +/* ************************************************************************* */ +TEST(Cal3_S2Stereo, Constructor) { + Cal3_S2Stereo expected(554.256, 554.256, 0, 640 / 2, 480 / 2, 3); + + double fov = 60; // degrees + size_t w = 640, h = 480; + Cal3_S2Stereo actual(fov, w, h, 3); + + CHECK(assert_equal(expected, actual, 1e-3)); +} + +/* ************************************************************************* */ +TEST(Cal3_S2Stereo, Calibrate) { + Point2 intrinsic(2, 3); + Point2 expectedimage(1320.3, 1740); + Point2 imagecoordinates = K.uncalibrate(intrinsic); + CHECK(assert_equal(expectedimage, imagecoordinates)); + CHECK(assert_equal(intrinsic, K.calibrate(imagecoordinates))); +} + +/* ************************************************************************* */ +TEST(Cal3_S2Stereo, CalibrateHomogeneous) { + Vector3 intrinsic(2, 3, 1); + Vector3 image(1320.3, 1740, 1); + CHECK(assert_equal(intrinsic, K.calibrate(image))); +} + +/* ************************************************************************* */ +Point2 uncalibrate_(const Cal3_S2Stereo& k, const Point2& pt) { + return k.uncalibrate(pt); +} + +TEST(Cal3_S2Stereo, Duncalibrate) { + Matrix26 Dcal; + Matrix22 Dp; + K.uncalibrate(p, Dcal, Dp); + + Matrix numerical1 = numericalDerivative21(uncalibrate_, K, p); + CHECK(assert_equal(numerical1, Dcal, 1e-8)); + Matrix numerical2 = numericalDerivative22(uncalibrate_, K, p); + CHECK(assert_equal(numerical2, Dp, 1e-9)); +} + +Point2 calibrate_(const Cal3_S2Stereo& K, const Point2& pt) { + return K.calibrate(pt); +} +/* ************************************************************************* */ +TEST(Cal3_S2Stereo, Dcalibrate) { + Matrix26 Dcal; + Matrix22 Dp; + Point2 expected = K.calibrate(p_uv, Dcal, Dp); + CHECK(assert_equal(expected, p_xy, 1e-8)); + + Matrix numerical1 = numericalDerivative21(calibrate_, K, p_uv); + CHECK(assert_equal(numerical1, Dcal, 1e-8)); + Matrix numerical2 = numericalDerivative22(calibrate_, K, p_uv); + CHECK(assert_equal(numerical2, Dp, 1e-8)); +} + +/* ************************************************************************* */ +TEST(Cal3_S2Stereo, Equal) { + CHECK(assert_equal(K, K, 1e-9)); + + Cal3_S2Stereo K1(500, 500, 0.1, 640 / 2, 480 / 2, 1); + CHECK(assert_equal(K, K1, 1e-9)); +} + +/* ************************************************************************* */ +TEST(Cal3_S2Stereo, Retract) { + Cal3_S2Stereo expected(500 + 1, 500 + 2, 0.1 + 3, 640 / 2 + 4, 480 / 2 + 5, + 7); + EXPECT_LONGS_EQUAL(Cal3_S2Stereo::Dim(), 6); + EXPECT_LONGS_EQUAL(expected.dim(), 6); + + Vector6 d; + d << 1, 2, 3, 4, 5, 6; + Cal3_S2Stereo actual = K.retract(d); + CHECK(assert_equal(expected, actual, 1e-7)); + CHECK(assert_equal(d, K.localCoordinates(actual), 1e-7)); +} + +/* ************************************************************************* */ +TEST(Cal3_S2Stereo, Print) { + Cal3_S2Stereo cal(5, 5, 5, 5, 5, 2); + std::stringstream os; + os << "fx: " << cal.fx() << ", fy: " << cal.fy() << ", s: " << cal.skew() + << ", px: " << cal.px() << ", py: " << cal.py() + << ", b: " << cal.baseline(); + EXPECT(assert_stdout_equal(os.str(), cal)); +} + +/* ************************************************************************* */ +int main() { + TestResult 
tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testPinholeCamera.cpp b/gtsam/geometry/tests/testPinholeCamera.cpp index a9b68bdec..ad6f4e921 100644 --- a/gtsam/geometry/tests/testPinholeCamera.cpp +++ b/gtsam/geometry/tests/testPinholeCamera.cpp @@ -336,6 +336,15 @@ TEST( PinholeCamera, range3) { EXPECT(assert_equal(Hexpected2, D2, 1e-7)); } +/* ************************************************************************* */ +TEST( PinholeCamera, Cal3Bundler) { + Cal3Bundler calibration; + Pose3 wTc; + PinholeCamera camera(wTc, calibration); + Point2 p(50, 100); + camera.backproject(p, 10); +} + /* ************************************************************************* */ int main() { TestResult tr; return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testPoint2.cpp b/gtsam/geometry/tests/testPoint2.cpp index 8b9e8a7e6..6e4d408c7 100644 --- a/gtsam/geometry/tests/testPoint2.cpp +++ b/gtsam/geometry/tests/testPoint2.cpp @@ -237,16 +237,6 @@ TEST( Point2, circleCircleIntersection) { } -/* ************************************************************************* */ -#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS -TEST( Point2, stream) { - Point2 p(1, 2); - std::ostringstream os; - os << p; - EXPECT(os.str() == "(1, 2)"); -} -#endif - /* ************************************************************************* */ int main () { TestResult tr; diff --git a/gtsam/geometry/tests/testPoint3.cpp b/gtsam/geometry/tests/testPoint3.cpp index e2396f7e9..a655011a0 100644 --- a/gtsam/geometry/tests/testPoint3.cpp +++ b/gtsam/geometry/tests/testPoint3.cpp @@ -153,16 +153,6 @@ TEST( Point3, cross2) { } } -/* ************************************************************************* */ -#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS -TEST( Point3, stream) { - Point3 p(1, 2, -3); - std::ostringstream os; - os << p; - EXPECT(os.str() == "[1, 2, -3]'"); -} -#endif - //************************************************************************* TEST (Point3, normalize) { Matrix actualH; @@ -174,6 +164,26 @@ TEST (Point3, normalize) { EXPECT(assert_equal(expectedH, actualH, 1e-8)); } +//************************************************************************* +TEST(Point3, mean) { + Point3 expected(2, 2, 2); + Point3 a1(0, 0, 0), a2(1, 2, 3), a3(5, 4, 3); + std::vector a_points{a1, a2, a3}; + Point3 actual = mean(a_points); + EXPECT(assert_equal(expected, actual)); +} + +TEST(Point3, mean_pair) { + Point3 a_mean(2, 2, 2), b_mean(-1, 1, 0); + Point3Pair expected = std::make_pair(a_mean, b_mean); + Point3 a1(0, 0, 0), a2(1, 2, 3), a3(5, 4, 3); + Point3 b1(-1, 0, 0), b2(-2, 4, 0), b3(0, -1, 0); + std::vector point_pairs{{a1, b1}, {a2, b2}, {a3, b3}}; + Point3Pair actual = means(point_pairs); + EXPECT(assert_equal(expected.first, actual.first)); + EXPECT(assert_equal(expected.second, actual.second)); +} + //************************************************************************* double norm_proxy(const Point3& point) { return double(point.norm()); diff --git a/gtsam/geometry/tests/testPose3.cpp b/gtsam/geometry/tests/testPose3.cpp index 5808f36f8..594d15c91 100644 --- a/gtsam/geometry/tests/testPose3.cpp +++ b/gtsam/geometry/tests/testPose3.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include // for operator += using namespace boost::assign; @@ -418,6 +419,29 @@ TEST(Pose3, transform_to_rotate) { 
EXPECT(assert_equal(expected, actual, 0.001)); } +/* ************************************************************************* */ +// Check transformPoseFrom and its pushforward +Pose3 transformPoseFrom_(const Pose3& wTa, const Pose3& aTb) { + return wTa.transformPoseFrom(aTb); +} + +TEST(Pose3, transformPoseFrom) +{ + Matrix actual = (T2*T2).matrix(); + Matrix expected = T2.matrix()*T2.matrix(); + EXPECT(assert_equal(actual, expected, 1e-8)); + + Matrix H1, H2; + T2.transformPoseFrom(T2, H1, H2); + + Matrix numericalH1 = numericalDerivative21(transformPoseFrom_, T2, T2); + EXPECT(assert_equal(numericalH1, H1, 5e-3)); + EXPECT(assert_equal(T2.inverse().AdjointMap(), H1, 5e-3)); + + Matrix numericalH2 = numericalDerivative22(transformPoseFrom_, T2, T2); + EXPECT(assert_equal(numericalH2, H2, 1e-4)); +} + /* ************************************************************************* */ TEST(Pose3, transformTo) { Pose3 transform(Rot3::Rodrigues(0, 0, -1.570796), Point3(2, 4, 0)); @@ -784,6 +808,17 @@ TEST(Pose3, ExpmapDerivative2) { } } +TEST( Pose3, ExpmapDerivativeQr) { + Vector6 w = Vector6::Random(); + w.head<3>().normalize(); + w.head<3>() = w.head<3>() * 0.9e-2; + Matrix3 actualQr = Pose3::ComputeQforExpmapDerivative(w, 0.01); + Matrix expectedH = numericalDerivative21 >(&Pose3::Expmap, w, boost::none); + Matrix3 expectedQr = expectedH.bottomLeftCorner<3, 3>(); + EXPECT(assert_equal(expectedQr, actualQr, 1e-6)); +} + /* ************************************************************************* */ TEST( Pose3, LogmapDerivative) { Matrix6 actualH; @@ -834,12 +869,12 @@ TEST( Pose3, adjointTranspose) { } /* ************************************************************************* */ -TEST( Pose3, stream) -{ - Pose3 T; +TEST( Pose3, stream) { std::ostringstream os; - os << T; - EXPECT(os.str() == "\n|1, 0, 0|\n|0, 1, 0|\n|0, 0, 1|\n\n[0, 0, 0]';\n"); + os << Pose3(); + + string expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: 0 0 0"; + EXPECT(os.str() == expected); } //****************************************************************************** @@ -872,9 +907,9 @@ TEST(Pose3 , ChartDerivatives) { Pose3 id; if (ROT3_DEFAULT_COORDINATES_MODE == Rot3::EXPMAP) { CHECK_CHART_DERIVATIVES(id,id); -// CHECK_CHART_DERIVATIVES(id,T2); -// CHECK_CHART_DERIVATIVES(T2,id); -// CHECK_CHART_DERIVATIVES(T2,T3); + CHECK_CHART_DERIVATIVES(id,T2); + CHECK_CHART_DERIVATIVES(T2,id); + CHECK_CHART_DERIVATIVES(T2,T3); } } @@ -981,6 +1016,33 @@ TEST(Pose3, TransformCovariance6) { TEST(Pose3, interpolate) { EXPECT(assert_equal(T2, interpolate(T2,T3, 0.0))); EXPECT(assert_equal(T3, interpolate(T2,T3, 1.0))); + + // Trivial example: start at origin and move to (1, 0, 0) while rotating pi/2 + // about z-axis. + Pose3 start; + Pose3 end(Rot3::Rz(M_PI_2), Point3(1, 0, 0)); + // This interpolation is easy to calculate by hand. + double t = 0.5; + Pose3 expected0(Rot3::Rz(M_PI_4), Point3(0.5, 0, 0)); + EXPECT(assert_equal(expected0, start.interpolateRt(end, t))); + + // Example from Peter Corke + // https://robotacademy.net.au/lesson/interpolating-pose-in-3d/ + t = 0.0759; // corresponds to the 10th element when calling `ctraj` in + // the video + Pose3 O; + Pose3 F(Rot3::Roll(0.6).compose(Rot3::Pitch(0.8)).compose(Rot3::Yaw(1.4)), + Point3(1, 2, 3)); + + // The expected answer matches the result presented in the video. 
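// Side note (a standalone sketch, using the interpolateRt method introduced in
// this hunk): interpolateRt blends rotation and translation independently,
// which is exactly what the expected values in this test encode. The full
// SE(3) geodesic, Expmap(t * Logmap(between)), follows a screw motion instead,
// so the two generally differ in translation when the rotation change is large.
#include <gtsam/geometry/Pose3.h>
#include <cmath>
#include <iostream>

int main() {
  using namespace gtsam;
  Pose3 start;  // identity
  Pose3 end(Rot3::Rz(M_PI_2), Point3(1, 0, 0));
  const double t = 0.5;

  // Component-wise: rotation half-way (Rz(pi/4)), translation half-way (0.5, 0, 0).
  Pose3 componentwise = start.interpolateRt(end, t);

  // SE(3) geodesic: midpoint of the screw motion; its translation is not (0.5, 0, 0).
  Pose3 geodesic = start * Pose3::Expmap(t * Pose3::Logmap(start.between(end)));

  std::cout << componentwise << std::endl << geodesic << std::endl;
  return 0;
}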
+ Pose3 expected1(interpolate(O.rotation(), F.rotation(), t), + interpolate(O.translation(), F.translation(), t)); + EXPECT(assert_equal(expected1, O.interpolateRt(F, t))); + + // Non-trivial interpolation, translation value taken from output. + Pose3 expected2(interpolate(T2.rotation(), T3.rotation(), t), + interpolate(T2.translation(), T3.translation(), t)); + EXPECT(assert_equal(expected2, T2.interpolateRt(T3, t))); } /* ************************************************************************* */ @@ -994,36 +1056,13 @@ TEST(Pose3, Create) { } /* ************************************************************************* */ -TEST(Pose3, print) { - std::stringstream redirectStream; - std::streambuf* ssbuf = redirectStream.rdbuf(); - std::streambuf* oldbuf = std::cout.rdbuf(); - // redirect cout to redirectStream - std::cout.rdbuf(ssbuf); - +TEST(Pose3, Print) { Pose3 pose(Rot3::identity(), Point3(1, 2, 3)); - // output is captured to redirectStream - pose.print(); // Generate the expected output - std::stringstream expected; - Point3 translation(1, 2, 3); - -#ifdef GTSAM_TYPEDEF_POINTS_TO_VECTORS - expected << "1\n" - "2\n" - "3;\n"; -#else - expected << '[' << translation.x() << ", " << translation.y() << ", " << translation.z() << "]\';"; -#endif - - // reset cout to the original stream - std::cout.rdbuf(oldbuf); - - // Get substring corresponding to translation part - std::string actual = redirectStream.str().substr(38, 11); + std::string expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\nt: 1 2 3\n"; - CHECK_EQUAL(expected.str(), actual); + EXPECT(assert_print_equal(expected, pose)); } /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testRot3.cpp b/gtsam/geometry/tests/testRot3.cpp index 598c57b24..290945837 100644 --- a/gtsam/geometry/tests/testRot3.cpp +++ b/gtsam/geometry/tests/testRot3.cpp @@ -608,7 +608,8 @@ TEST( Rot3, stream) Rot3 R; std::ostringstream os; os << R; - EXPECT(os.str() == "\n|1, 0, 0|\n|0, 1, 0|\n|0, 0, 1|\n"); + string expected = "[\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]"; + EXPECT(os.str() == expected); } /* ************************************************************************* */ @@ -742,6 +743,193 @@ TEST(Rot3, axisAngle) { CHECK_AXIS_ANGLE(_axis, theta165, Rot3::AxisAngle(axis, theta195)) } +/* ************************************************************************* */ +Rot3 RzRyRx_proxy(double const& a, double const& b, double const& c) { + return Rot3::RzRyRx(a, b, c); +} + +TEST(Rot3, RzRyRx_scalars_derivative) { + const auto x = 0.1, y = 0.4, z = 0.2; + const auto num_x = numericalDerivative31(RzRyRx_proxy, x, y, z); + const auto num_y = numericalDerivative32(RzRyRx_proxy, x, y, z); + const auto num_z = numericalDerivative33(RzRyRx_proxy, x, y, z); + + Vector3 act_x, act_y, act_z; + Rot3::RzRyRx(x, y, z, act_x, act_y, act_z); + + CHECK(assert_equal(num_x, act_x)); + CHECK(assert_equal(num_y, act_y)); + CHECK(assert_equal(num_z, act_z)); +} + +/* ************************************************************************* */ +Rot3 RzRyRx_proxy(Vector3 const& xyz) { return Rot3::RzRyRx(xyz); } + +TEST(Rot3, RzRyRx_vector_derivative) { + const auto xyz = Vector3{-0.3, 0.1, 0.7}; + const auto num = numericalDerivative11(RzRyRx_proxy, xyz); + + Matrix3 act; + Rot3::RzRyRx(xyz, act); + + CHECK(assert_equal(num, act)); +} + +/* ************************************************************************* */ +Rot3 Ypr_proxy(double const& y, double const& p, double const& r) { + return Rot3::Ypr(y, p, 
r); +} + +TEST(Rot3, Ypr_derivative) { + const auto y = 0.7, p = -0.3, r = 0.1; + const auto num_y = numericalDerivative31(Ypr_proxy, y, p, r); + const auto num_p = numericalDerivative32(Ypr_proxy, y, p, r); + const auto num_r = numericalDerivative33(Ypr_proxy, y, p, r); + + Vector3 act_y, act_p, act_r; + Rot3::Ypr(y, p, r, act_y, act_p, act_r); + + CHECK(assert_equal(num_y, act_y)); + CHECK(assert_equal(num_p, act_p)); + CHECK(assert_equal(num_r, act_r)); +} + +/* ************************************************************************* */ +Vector3 RQ_proxy(Matrix3 const& R) { + const auto RQ_ypr = RQ(R); + return RQ_ypr.second; +} + +TEST(Rot3, RQ_derivative) { + using VecAndErr = std::pair; + std::vector test_xyz; + // Test zeros and a couple of random values + test_xyz.push_back(VecAndErr{{0, 0, 0}, error}); + test_xyz.push_back(VecAndErr{{0, 0.5, -0.5}, error}); + test_xyz.push_back(VecAndErr{{0.3, 0, 0.2}, error}); + test_xyz.push_back(VecAndErr{{-0.6, 1.3, 0}, 1e-8}); + test_xyz.push_back(VecAndErr{{1.0, 0.7, 0.8}, error}); + test_xyz.push_back(VecAndErr{{3.0, 0.7, -0.6}, error}); + test_xyz.push_back(VecAndErr{{M_PI / 2, 0, 0}, error}); + test_xyz.push_back(VecAndErr{{0, 0, M_PI / 2}, error}); + + // Test close to singularity + test_xyz.push_back(VecAndErr{{0, M_PI / 2 - 1e-1, 0}, 1e-7}); + test_xyz.push_back(VecAndErr{{0, 3 * M_PI / 2 + 1e-1, 0}, 1e-7}); + test_xyz.push_back(VecAndErr{{0, M_PI / 2 - 1.1e-2, 0}, 1e-4}); + test_xyz.push_back(VecAndErr{{0, 3 * M_PI / 2 + 1.1e-2, 0}, 1e-4}); + + for (auto const& vec_err : test_xyz) { + auto const& xyz = vec_err.first; + + const auto R = Rot3::RzRyRx(xyz).matrix(); + const auto num = numericalDerivative11(RQ_proxy, R); + Matrix39 calc; + auto dummy = RQ(R, calc).second; + + const auto err = vec_err.second; + CHECK(assert_equal(num, calc, err)); + } +} + +/* ************************************************************************* */ +Vector3 xyz_proxy(Rot3 const& R) { return R.xyz(); } + +TEST(Rot3, xyz_derivative) { + const auto aa = Vector3{-0.6, 0.3, 0.2}; + const auto R = Rot3::Expmap(aa); + const auto num = numericalDerivative11(xyz_proxy, R); + Matrix3 calc; + R.xyz(calc); + + CHECK(assert_equal(num, calc)); +} + +/* ************************************************************************* */ +Vector3 ypr_proxy(Rot3 const& R) { return R.ypr(); } + +TEST(Rot3, ypr_derivative) { + const auto aa = Vector3{0.1, -0.3, -0.2}; + const auto R = Rot3::Expmap(aa); + const auto num = numericalDerivative11(ypr_proxy, R); + Matrix3 calc; + R.ypr(calc); + + CHECK(assert_equal(num, calc)); +} + +/* ************************************************************************* */ +Vector3 rpy_proxy(Rot3 const& R) { return R.rpy(); } + +TEST(Rot3, rpy_derivative) { + const auto aa = Vector3{1.2, 0.3, -0.9}; + const auto R = Rot3::Expmap(aa); + const auto num = numericalDerivative11(rpy_proxy, R); + Matrix3 calc; + R.rpy(calc); + + CHECK(assert_equal(num, calc)); +} + +/* ************************************************************************* */ +double roll_proxy(Rot3 const& R) { return R.roll(); } + +TEST(Rot3, roll_derivative) { + const auto aa = Vector3{0.8, -0.8, 0.8}; + const auto R = Rot3::Expmap(aa); + const auto num = numericalDerivative11(roll_proxy, R); + Matrix13 calc; + R.roll(calc); + + CHECK(assert_equal(num, calc)); +} + +/* ************************************************************************* */ +double pitch_proxy(Rot3 const& R) { return R.pitch(); } + +TEST(Rot3, pitch_derivative) { + const auto aa = Vector3{0.01, 0.1, 0.0}; + 
const auto R = Rot3::Expmap(aa); + const auto num = numericalDerivative11(pitch_proxy, R); + Matrix13 calc; + R.pitch(calc); + + CHECK(assert_equal(num, calc)); +} + +/* ************************************************************************* */ +double yaw_proxy(Rot3 const& R) { return R.yaw(); } + +TEST(Rot3, yaw_derivative) { + const auto aa = Vector3{0.0, 0.1, 0.6}; + const auto R = Rot3::Expmap(aa); + const auto num = numericalDerivative11(yaw_proxy, R); + Matrix13 calc; + R.yaw(calc); + + CHECK(assert_equal(num, calc)); +} + +/* ************************************************************************* */ +TEST(Rot3, determinant) { + size_t degree = 1; + Rot3 R_w0; // Zero rotation + Rot3 R_w1 = Rot3::Ry(degree * M_PI / 180); + + Rot3 R_01, R_w2; + double actual, expected = 1.0; + + for (size_t i = 2; i < 360; ++i) { + R_01 = R_w0.between(R_w1); + R_w2 = R_w1 * R_01; + R_w0 = R_w1; + R_w1 = R_w2.normalized(); + actual = R_w2.matrix().determinant(); + + EXPECT_DOUBLES_EQUAL(expected, actual, 1e-7); + } +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/geometry/tests/testSOn.cpp b/gtsam/geometry/tests/testSOn.cpp index 1cf8caed2..4d0ed98b3 100644 --- a/gtsam/geometry/tests/testSOn.cpp +++ b/gtsam/geometry/tests/testSOn.cpp @@ -39,8 +39,8 @@ using namespace std; using namespace gtsam; //****************************************************************************** -// Test dhynamic with n=0 -TEST(SOn, SO0) { +// Test dynamic with n=0 +TEST(SOn, SOn0) { const auto R = SOn(0); EXPECT_LONGS_EQUAL(0, R.rows()); EXPECT_LONGS_EQUAL(Eigen::Dynamic, SOn::dimension); @@ -50,7 +50,8 @@ TEST(SOn, SO0) { } //****************************************************************************** -TEST(SOn, SO5) { +// Test dynamic with n=5 +TEST(SOn, SOn5) { const auto R = SOn(5); EXPECT_LONGS_EQUAL(5, R.rows()); EXPECT_LONGS_EQUAL(Eigen::Dynamic, SOn::dimension); @@ -59,6 +60,28 @@ TEST(SOn, SO5) { EXPECT_LONGS_EQUAL(10, traits::GetDimension(R)); } +//****************************************************************************** +// Test fixed with n=2 +TEST(SOn, SO0) { + const auto R = SO<2>(); + EXPECT_LONGS_EQUAL(2, R.rows()); + EXPECT_LONGS_EQUAL(1, SO<2>::dimension); + EXPECT_LONGS_EQUAL(1, SO<2>::Dim()); + EXPECT_LONGS_EQUAL(1, R.dim()); + EXPECT_LONGS_EQUAL(1, traits>::GetDimension(R)); +} + +//****************************************************************************** +// Test fixed with n=5 +TEST(SOn, SO5) { + const auto R = SO<5>(); + EXPECT_LONGS_EQUAL(5, R.rows()); + EXPECT_LONGS_EQUAL(10, SO<5>::dimension); + EXPECT_LONGS_EQUAL(10, SO<5>::Dim()); + EXPECT_LONGS_EQUAL(10, R.dim()); + EXPECT_LONGS_EQUAL(10, traits>::GetDimension(R)); +} + //****************************************************************************** TEST(SOn, Concept) { BOOST_CONCEPT_ASSERT((IsGroup)); @@ -105,29 +128,29 @@ TEST(SOn, HatVee) { EXPECT(assert_equal((Vector)v.head<1>(), SOn::Vee(actual2))); Matrix expected3(3, 3); - expected3 << 0, -3, 2, // - 3, 0, -1, // - -2, 1, 0; + expected3 << 0, -3, 2, // + 3, 0, -1, // + -2, 1, 0; const auto actual3 = SOn::Hat(v.head<3>()); EXPECT(assert_equal(expected3, actual3)); EXPECT(assert_equal(skewSymmetric(1, 2, 3), actual3)); EXPECT(assert_equal((Vector)v.head<3>(), SOn::Vee(actual3))); Matrix expected4(4, 4); - expected4 << 0, -6, 5, 3, // - 6, 0, -4, -2, // - -5, 4, 0, 1, // - -3, 2, -1, 0; + expected4 << 0, -6, 5, 3, // + 6, 0, -4, -2, // + -5, 4, 0, 1, // + -3, 2, -1, 0; const auto actual4 = 
SOn::Hat(v.head<6>()); EXPECT(assert_equal(expected4, actual4)); EXPECT(assert_equal((Vector)v.head<6>(), SOn::Vee(actual4))); Matrix expected5(5, 5); - expected5 << 0,-10, 9, 7, -4, // - 10, 0, -8, -6, 3, // - -9, 8, 0, 5, -2, // - -7, 6, -5, 0, 1, // - 4, -3, 2, -1, 0; + expected5 << 0, -10, 9, 7, -4, // + 10, 0, -8, -6, 3, // + -9, 8, 0, 5, -2, // + -7, 6, -5, 0, 1, // + 4, -3, 2, -1, 0; const auto actual5 = SOn::Hat(v); EXPECT(assert_equal(expected5, actual5)); EXPECT(assert_equal((Vector)v, SOn::Vee(actual5))); @@ -159,6 +182,22 @@ TEST(SOn, RetractLocal) { CHECK(assert_equal(v1, SOn::ChartAtOrigin::Local(Q1), 1e-7)); } +//****************************************************************************** + +Matrix RetractJacobian(size_t n) { return SOn::VectorizedGenerators(n); } + +/// Test Jacobian of Retract at origin +TEST(SOn, RetractJacobian) { + Matrix actualH = RetractJacobian(3); + boost::function h = [](const Vector &v) { + return SOn::ChartAtOrigin::Retract(v).matrix(); + }; + Vector3 v; + v.setZero(); + const Matrix expectedH = numericalDerivative11(h, v, 1e-5); + CHECK(assert_equal(expectedH, actualH)); +} + //****************************************************************************** TEST(SOn, vec) { Vector10 v; @@ -166,11 +205,28 @@ TEST(SOn, vec) { SOn Q = SOn::ChartAtOrigin::Retract(v); Matrix actualH; const Vector actual = Q.vec(actualH); - boost::function h = [](const SOn& Q) { return Q.vec(); }; + boost::function h = [](const SOn &Q) { return Q.vec(); }; const Matrix H = numericalDerivative11(h, Q, 1e-5); CHECK(assert_equal(H, actualH)); } +//****************************************************************************** +TEST(SOn, VectorizedGenerators) { + // Default fixed + auto actual2 = SO<2>::VectorizedGenerators(); + CHECK(actual2.rows()==4 && actual2.cols()==1) + + // Specialized + auto actual3 = SO<3>::VectorizedGenerators(); + CHECK(actual3.rows()==9 && actual3.cols()==3) + auto actual4 = SO<4>::VectorizedGenerators(); + CHECK(actual4.rows()==16 && actual4.cols()==6) + + // Dynamic + auto actual5 = SOn::VectorizedGenerators(5); + CHECK(actual5.rows()==25 && actual5.cols()==10) +} + //****************************************************************************** int main() { TestResult tr; diff --git a/gtsam/geometry/tests/testSerializationGeometry.cpp b/gtsam/geometry/tests/testSerializationGeometry.cpp index f7ff881eb..aa111c3ae 100644 --- a/gtsam/geometry/tests/testSerializationGeometry.cpp +++ b/gtsam/geometry/tests/testSerializationGeometry.cpp @@ -57,7 +57,7 @@ static StereoCamera cam2(pose3, cal4ptr); static StereoPoint2 spt(1.0, 2.0, 3.0); /* ************************************************************************* */ -TEST_DISABLED (Serialization, text_geometry) { +TEST (Serialization, text_geometry) { EXPECT(equalsObj(Point2(1.0, 2.0))); EXPECT(equalsObj(Pose2(1.0, 2.0, 0.3))); EXPECT(equalsObj(Rot2::fromDegrees(30.0))); @@ -82,7 +82,7 @@ TEST_DISABLED (Serialization, text_geometry) { } /* ************************************************************************* */ -TEST_DISABLED (Serialization, xml_geometry) { +TEST (Serialization, xml_geometry) { EXPECT(equalsXML(Point2(1.0, 2.0))); EXPECT(equalsXML(Pose2(1.0, 2.0, 0.3))); EXPECT(equalsXML(Rot2::fromDegrees(30.0))); @@ -106,7 +106,7 @@ TEST_DISABLED (Serialization, xml_geometry) { } /* ************************************************************************* */ -TEST_DISABLED (Serialization, binary_geometry) { +TEST (Serialization, binary_geometry) { EXPECT(equalsBinary(Point2(1.0, 2.0))); 
EXPECT(equalsBinary(Pose2(1.0, 2.0, 0.3))); EXPECT(equalsBinary(Rot2::fromDegrees(30.0))); diff --git a/gtsam/geometry/tests/testSimpleCamera.cpp b/gtsam/geometry/tests/testSimpleCamera.cpp index edf122d3c..18a25c553 100644 --- a/gtsam/geometry/tests/testSimpleCamera.cpp +++ b/gtsam/geometry/tests/testSimpleCamera.cpp @@ -26,6 +26,8 @@ using namespace std; using namespace gtsam; +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 + static const Cal3_S2 K(625, 625, 0, 0, 0); static const Pose3 pose1(Rot3(Vector3(1, -1, -1).asDiagonal()), @@ -149,6 +151,8 @@ TEST( SimpleCamera, simpleCamera) CHECK(assert_equal(expected, actual,1e-1)); } +#endif + /* ************************************************************************* */ int main() { TestResult tr; return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ diff --git a/gtsam/geometry/tests/testUnit3.cpp b/gtsam/geometry/tests/testUnit3.cpp index a25ab25c7..ddf60a256 100644 --- a/gtsam/geometry/tests/testUnit3.cpp +++ b/gtsam/geometry/tests/testUnit3.cpp @@ -484,6 +484,15 @@ TEST(Unit3, ErrorBetweenFactor) { } } +TEST(Unit3, CopyAssign) { + Unit3 p{1, 0.2, 0.3}; + + EXPECT(p.error(p).isZero()); + + p = Unit3{-1, 2, 8}; + EXPECT(p.error(p).isZero()); +} + /* ************************************************************************* */ TEST(actualH, Serialization) { Unit3 p(0, 1, 0); diff --git a/gtsam/geometry/triangulation.h b/gtsam/geometry/triangulation.h index 586b7b165..1df9efd22 100644 --- a/gtsam/geometry/triangulation.h +++ b/gtsam/geometry/triangulation.h @@ -18,8 +18,10 @@ #pragma once -#include +#include +#include #include +#include #include #include #include @@ -123,27 +125,6 @@ std::pair triangulationGraph( return std::make_pair(graph, values); } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -/// DEPRECATED: PinholeCamera specific version -template -Point3 triangulateNonlinear( - const CameraSet >& cameras, - const Point2Vector& measurements, const Point3& initialEstimate) { - return triangulateNonlinear > // - (cameras, measurements, initialEstimate); -} - -/// DEPRECATED: PinholeCamera specific version -template -std::pair triangulationGraph( - const CameraSet >& cameras, - const Point2Vector& measurements, Key landmarkKey, - const Point3& initialEstimate) { - return triangulationGraph > // - (cameras, measurements, landmarkKey, initialEstimate); -} -#endif - /** * Optimize for triangulation * @param graph nonlinear factors for projection @@ -215,7 +196,7 @@ struct CameraProjectionMatrix { private: const Matrix3 K_; public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; /** @@ -515,5 +496,9 @@ TriangulationResult triangulateSafe(const CameraSet& cameras, } } +// Vector of Cameras - used by the Python/MATLAB wrapper +using CameraSetCal3Bundler = CameraSet>; +using CameraSetCal3_S2 = CameraSet>; + } // \namespace gtsam diff --git a/gtsam.h b/gtsam/gtsam.i similarity index 85% rename from gtsam.h rename to gtsam/gtsam.i index 614db91c7..e1e11964f 100644 --- a/gtsam.h +++ b/gtsam/gtsam.i @@ -93,9 +93,9 @@ * - Add "void serializable()" to a class if you only want the class to be serialized as a * part of a container (such as noisemodel). This version does not require a publicly * accessible default constructor. - * Forward declarations and class definitions for Cython: - * - Need to specify the base class (both this forward class and base class are declared in an external cython header) - * This is so Cython can generate proper inheritance. 
+ * Forward declarations and class definitions for Pybind: + * - Need to specify the base class (both this forward class and base class are declared in an external Pybind header) + * This is so Pybind can generate proper inheritance. * Example when wrapping a gtsam-based project: * // forward declarations * virtual class gtsam::NonlinearFactor @@ -104,7 +104,7 @@ * #include * virtual class MyFactor : gtsam::NoiseModelFactor {...}; * - *DO NOT* re-define overriden function already declared in the external (forward-declared) base class - * - This will cause an ambiguity problem in Cython pxd header file + * - This will cause an ambiguity problem in Pybind header file */ /** @@ -259,124 +259,79 @@ class IndexPair { size_t j() const; }; -template -class DSFMap { - DSFMap(); - KEY find(const KEY& key) const; - void merge(const KEY& x, const KEY& y); -}; - -#include -bool linear_independent(Matrix A, Matrix B, double tol); - -#include -virtual class Value { - // No constructors because this is an abstract class - - // Testable - void print(string s) const; +// template +// class DSFMap { +// DSFMap(); +// KEY find(const KEY& key) const; +// void merge(const KEY& x, const KEY& y); +// std::map sets(); +// }; - // Manifold - size_t dim() const; -}; +class IndexPairSet { + IndexPairSet(); + // common STL methods + size_t size() const; + bool empty() const; + void clear(); -#include -template -virtual class GenericValue : gtsam::Value { - void serializable() const; + // structure specific methods + void insert(gtsam::IndexPair key); + bool erase(gtsam::IndexPair key); // returns true if value was removed + bool count(gtsam::IndexPair key) const; // returns true if value exists }; -#include -class LieScalar { - // Standard constructors - LieScalar(); - LieScalar(double d); - - // Standard interface - double value() const; - - // Testable - void print(string s) const; - bool equals(const gtsam::LieScalar& expected, double tol) const; +class IndexPairVector { + IndexPairVector(); + IndexPairVector(const gtsam::IndexPairVector& other); - // Group - static gtsam::LieScalar identity(); - gtsam::LieScalar inverse() const; - gtsam::LieScalar compose(const gtsam::LieScalar& p) const; - gtsam::LieScalar between(const gtsam::LieScalar& l2) const; - - // Manifold - size_t dim() const; - gtsam::LieScalar retract(Vector v) const; - Vector localCoordinates(const gtsam::LieScalar& t2) const; + // common STL methods + size_t size() const; + bool empty() const; + void clear(); - // Lie group - static gtsam::LieScalar Expmap(Vector v); - static Vector Logmap(const gtsam::LieScalar& p); + // structure specific methods + gtsam::IndexPair at(size_t i) const; + void push_back(gtsam::IndexPair key) const; }; -#include -class LieVector { - // Standard constructors - LieVector(); - LieVector(Vector v); +gtsam::IndexPairVector IndexPairSetAsArray(gtsam::IndexPairSet& set); - // Standard interface - Vector vector() const; - - // Testable - void print(string s) const; - bool equals(const gtsam::LieVector& expected, double tol) const; - - // Group - static gtsam::LieVector identity(); - gtsam::LieVector inverse() const; - gtsam::LieVector compose(const gtsam::LieVector& p) const; - gtsam::LieVector between(const gtsam::LieVector& l2) const; - - // Manifold - size_t dim() const; - gtsam::LieVector retract(Vector v) const; - Vector localCoordinates(const gtsam::LieVector& t2) const; +class IndexPairSetMap { + IndexPairSetMap(); + // common STL methods + size_t size() const; + bool empty() const; + void clear(); - // Lie group - 
static gtsam::LieVector Expmap(Vector v); - static Vector Logmap(const gtsam::LieVector& p); + // structure specific methods + gtsam::IndexPairSet at(gtsam::IndexPair& key); +}; - // enabling serialization functionality - void serialize() const; +class DSFMapIndexPair { + DSFMapIndexPair(); + gtsam::IndexPair find(const gtsam::IndexPair& key) const; + void merge(const gtsam::IndexPair& x, const gtsam::IndexPair& y); + gtsam::IndexPairSetMap sets(); }; -#include -class LieMatrix { - // Standard constructors - LieMatrix(); - LieMatrix(Matrix v); +#include +bool linear_independent(Matrix A, Matrix B, double tol); - // Standard interface - Matrix matrix() const; +#include +virtual class Value { + // No constructors because this is an abstract class // Testable void print(string s) const; - bool equals(const gtsam::LieMatrix& expected, double tol) const; - - // Group - static gtsam::LieMatrix identity(); - gtsam::LieMatrix inverse() const; - gtsam::LieMatrix compose(const gtsam::LieMatrix& p) const; - gtsam::LieMatrix between(const gtsam::LieMatrix& l2) const; // Manifold size_t dim() const; - gtsam::LieMatrix retract(Vector v) const; - Vector localCoordinates(const gtsam::LieMatrix & t2) const; - - // Lie group - static gtsam::LieMatrix Expmap(Vector v); - static Vector Logmap(const gtsam::LieMatrix& p); +}; - // enabling serialization functionality - void serialize() const; +#include +template +virtual class GenericValue : gtsam::Value { + void serializable() const; }; //************************************************************************* @@ -519,6 +474,7 @@ class Rot2 { // Lie Group static gtsam::Rot2 Expmap(Vector v); static Vector Logmap(const gtsam::Rot2& p); + Vector logmap(const gtsam::Rot2& p); // Group Action on Point2 gtsam::Point2 rotate(const gtsam::Point2& point) const; @@ -598,6 +554,7 @@ class SOn { // Standard Constructors SOn(size_t n); static gtsam::SOn FromMatrix(Matrix R); + static gtsam::SOn Lift(size_t n, Matrix R); // Testable void print(string s) const; @@ -617,6 +574,18 @@ class SOn { // Other methods Vector vec() const; Matrix matrix() const; + + // enabling serialization functionality + void serialize() const; +}; + +#include +class Quaternion { + double w() const; + double x() const; + double y() const; + double z() const; + Vector coeffs() const; }; #include @@ -628,6 +597,7 @@ class Rot3 { Rot3(double R11, double R12, double R13, double R21, double R22, double R23, double R31, double R32, double R33); + Rot3(double w, double x, double y, double z); static gtsam::Rot3 Rx(double t); static gtsam::Rot3 Ry(double t); @@ -655,7 +625,7 @@ class Rot3 { gtsam::Rot3 between(const gtsam::Rot3& p2) const; // Manifold - //gtsam::Rot3 retractCayley(Vector v) const; // FIXME, does not exist in both Matrix and Quaternion options + //gtsam::Rot3 retractCayley(Vector v) const; // TODO, does not exist in both Matrix and Quaternion options gtsam::Rot3 retract(Vector v) const; Vector localCoordinates(const gtsam::Rot3& p) const; @@ -666,6 +636,7 @@ class Rot3 { // Standard Interface static gtsam::Rot3 Expmap(Vector v); static Vector Logmap(const gtsam::Rot3& p); + Vector logmap(const gtsam::Rot3& p); Matrix matrix() const; Matrix transpose() const; gtsam::Point3 column(size_t index) const; @@ -676,7 +647,7 @@ class Rot3 { double pitch() const; double yaw() const; pair axisAngle() const; -// Vector toQuaternion() const; // FIXME: Can't cast to Vector properly + gtsam::Quaternion toQuaternion() const; Vector quaternion() const; gtsam::Rot3 slerp(double t, const gtsam::Rot3& other) 
const; @@ -711,6 +682,7 @@ class Pose2 { // Lie Group static gtsam::Pose2 Expmap(Vector v); static Vector Logmap(const gtsam::Pose2& p); + Vector logmap(const gtsam::Pose2& p); static Matrix ExpmapDerivative(Vector v); static Matrix LogmapDerivative(const gtsam::Pose2& v); Matrix AdjointMap() const; @@ -744,7 +716,7 @@ class Pose3 { Pose3(); Pose3(const gtsam::Pose3& other); Pose3(const gtsam::Rot3& r, const gtsam::Point3& t); - Pose3(const gtsam::Pose2& pose2); // FIXME: shadows Pose3(Pose3 pose) + Pose3(const gtsam::Pose2& pose2); Pose3(Matrix mat); // Testable @@ -764,6 +736,7 @@ class Pose3 { // Lie Group static gtsam::Pose3 Expmap(Vector v); static Vector Logmap(const gtsam::Pose3& pose); + Vector logmap(const gtsam::Pose3& pose); Matrix AdjointMap() const; Vector Adjoint(Vector xi) const; static Matrix adjointMap_(Vector xi); @@ -879,8 +852,7 @@ class Cal3_S2 { gtsam::Point2 principalPoint() const; Vector vector() const; Matrix K() const; - Matrix matrix() const; - Matrix matrix_inverse() const; + Matrix inverse() const; // enabling serialization functionality void serialize() const; @@ -908,7 +880,7 @@ virtual class Cal3DS2_Base { // Action on Point2 gtsam::Point2 uncalibrate(const gtsam::Point2& p) const; - gtsam::Point2 calibrate(const gtsam::Point2& p, double tol) const; + gtsam::Point2 calibrate(const gtsam::Point2& p) const; // enabling serialization functionality void serialize() const; @@ -987,6 +959,7 @@ class Cal3Bundler { // Standard Constructors Cal3Bundler(); Cal3Bundler(double fx, double k1, double k2, double u0, double v0); + Cal3Bundler(double fx, double k1, double k2, double u0, double v0, double tol); // Testable void print(string s) const; @@ -999,7 +972,7 @@ class Cal3Bundler { Vector localCoordinates(const gtsam::Cal3Bundler& c) const; // Action on Point2 - gtsam::Point2 calibrate(const gtsam::Point2& p, double tol) const; + gtsam::Point2 calibrate(const gtsam::Point2& p) const; gtsam::Point2 uncalibrate(const gtsam::Point2& p) const; // Standard Interface @@ -1007,11 +980,11 @@ class Cal3Bundler { double fy() const; double k1() const; double k2() const; - double u0() const; - double v0() const; + double px() const; + double py() const; Vector vector() const; Vector k() const; - //Matrix K() const; //FIXME: Uppercase + Matrix K() const; // enabling serialization functionality void serialize() const; @@ -1085,55 +1058,23 @@ class PinholeCamera { void serialize() const; }; +// Forward declaration of PinholeCameraCalX is defined here. 
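[Editorial note] The hunks above add an instance-level logmap alongside the static Logmap on Rot2, Rot3, Pose2 and Pose3, and wrap Rot3::toQuaternion() together with a minimal Quaternion class. A short C++ sketch of how the two log maps relate, assuming only standard GTSAM headers (illustrative, not part of the patch):

    #include <gtsam/geometry/Rot3.h>

    void logmapSketch() {
      const gtsam::Rot3 R1 = gtsam::Rot3::Yaw(0.1);
      const gtsam::Rot3 R2 = gtsam::Rot3::Yaw(0.3);
      // Static version: tangent-space coordinates of R2 at the identity.
      const gtsam::Vector3 w0 = gtsam::Rot3::Logmap(R2);
      // Newly wrapped instance version: coordinates of R2 in the tangent space at R1,
      // i.e. Logmap(R1.between(R2)).
      const gtsam::Vector3 w12 = R1.logmap(R2);
      // Rot3::toQuaternion() is now wrapped; w/x/y/z are exposed on the Quaternion class.
      const gtsam::Quaternion q = R2.toQuaternion();
      (void)w0; (void)w12; (void)q;
    }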
#include -virtual class SimpleCamera { - // Standard Constructors and Named Constructors - SimpleCamera(); - SimpleCamera(const gtsam::Pose3& pose); - SimpleCamera(const gtsam::Pose3& pose, const gtsam::Cal3_S2& K); - static gtsam::SimpleCamera Level(const gtsam::Cal3_S2& K, const gtsam::Pose2& pose, double height); - static gtsam::SimpleCamera Level(const gtsam::Pose2& pose, double height); - static gtsam::SimpleCamera Lookat(const gtsam::Point3& eye, const gtsam::Point3& target, - const gtsam::Point3& upVector, const gtsam::Cal3_S2& K); - static gtsam::SimpleCamera Lookat(const gtsam::Point3& eye, const gtsam::Point3& target, - const gtsam::Point3& upVector); - - // Testable - void print(string s) const; - bool equals(const gtsam::SimpleCamera& camera, double tol) const; - - // Standard Interface - gtsam::Pose3 pose() const; - gtsam::Cal3_S2 calibration() const; - - // Manifold - gtsam::SimpleCamera retract(Vector d) const; - Vector localCoordinates(const gtsam::SimpleCamera& T2) const; - size_t dim() const; - static size_t Dim(); - - // Transformations and measurement functions - static gtsam::Point2 Project(const gtsam::Point3& cameraPoint); - pair projectSafe(const gtsam::Point3& pw) const; - gtsam::Point2 project(const gtsam::Point3& point); - gtsam::Point3 backproject(const gtsam::Point2& p, double depth) const; - double range(const gtsam::Point3& point); - double range(const gtsam::Pose3& pose); - - // enabling serialization functionality - void serialize() const; - -}; - -gtsam::SimpleCamera simpleCamera(const Matrix& P); - // Some typedefs for common camera types // PinholeCameraCal3_S2 is the same as SimpleCamera above typedef gtsam::PinholeCamera PinholeCameraCal3_S2; -// due to lack of jacobians of Cal3DS2_Base::calibrate, PinholeCamera does not apply to Cal3DS2/Unified -//typedef gtsam::PinholeCamera PinholeCameraCal3DS2; -//typedef gtsam::PinholeCamera PinholeCameraCal3Unified; -//typedef gtsam::PinholeCamera PinholeCameraCal3Bundler; +typedef gtsam::PinholeCamera PinholeCameraCal3DS2; +typedef gtsam::PinholeCamera PinholeCameraCal3Unified; +typedef gtsam::PinholeCamera PinholeCameraCal3Bundler; + +template +class CameraSet { + CameraSet(); + + // structure specific methods + T at(size_t i) const; + void push_back(const T& cam); +}; #include class StereoCamera { @@ -1166,7 +1107,7 @@ class StereoCamera { #include -// Templates appear not yet supported for free functions +// Templates appear not yet supported for free functions - issue raised at borglab/wrap#14 to add support gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses, gtsam::Cal3_S2* sharedCal, const gtsam::Point2Vector& measurements, double rank_tol, bool optimize); @@ -1176,7 +1117,13 @@ gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses, gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses, gtsam::Cal3Bundler* sharedCal, const gtsam::Point2Vector& measurements, double rank_tol, bool optimize); - +gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3_S2& cameras, + const gtsam::Point2Vector& measurements, double rank_tol, + bool optimize); +gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Bundler& cameras, + const gtsam::Point2Vector& measurements, double rank_tol, + bool optimize); + //************************************************************************* // Symbolic //************************************************************************* @@ -1326,7 +1273,7 @@ class SymbolicBayesTree { // // const std::list& children() const { return children_; } // // 
derived_ptr parent() const { return parent_.lock(); } // -// // FIXME: need wrapped versions graphs, BayesNet +// // TODO: need wrapped versions graphs, BayesNet // // BayesNet shortcut(derived_ptr root, Eliminate function) const; // // FactorGraph marginal(derived_ptr root, Eliminate function) const; // // FactorGraph joint(derived_ptr C2, derived_ptr root, Eliminate function) const; @@ -1458,7 +1405,7 @@ virtual class Null: gtsam::noiseModel::mEstimator::Base { void serializable() const; double weight(double error) const; - double residual(double error) const; + double loss(double error) const; }; virtual class Fair: gtsam::noiseModel::mEstimator::Base { @@ -1469,7 +1416,7 @@ virtual class Fair: gtsam::noiseModel::mEstimator::Base { void serializable() const; double weight(double error) const; - double residual(double error) const; + double loss(double error) const; }; virtual class Huber: gtsam::noiseModel::mEstimator::Base { @@ -1480,7 +1427,7 @@ virtual class Huber: gtsam::noiseModel::mEstimator::Base { void serializable() const; double weight(double error) const; - double residual(double error) const; + double loss(double error) const; }; virtual class Cauchy: gtsam::noiseModel::mEstimator::Base { @@ -1491,7 +1438,7 @@ virtual class Cauchy: gtsam::noiseModel::mEstimator::Base { void serializable() const; double weight(double error) const; - double residual(double error) const; + double loss(double error) const; }; virtual class Tukey: gtsam::noiseModel::mEstimator::Base { @@ -1502,7 +1449,7 @@ virtual class Tukey: gtsam::noiseModel::mEstimator::Base { void serializable() const; double weight(double error) const; - double residual(double error) const; + double loss(double error) const; }; virtual class Welsch: gtsam::noiseModel::mEstimator::Base { @@ -1513,7 +1460,7 @@ virtual class Welsch: gtsam::noiseModel::mEstimator::Base { void serializable() const; double weight(double error) const; - double residual(double error) const; + double loss(double error) const; }; virtual class GemanMcClure: gtsam::noiseModel::mEstimator::Base { @@ -1524,7 +1471,7 @@ virtual class GemanMcClure: gtsam::noiseModel::mEstimator::Base { void serializable() const; double weight(double error) const; - double residual(double error) const; + double loss(double error) const; }; virtual class DCS: gtsam::noiseModel::mEstimator::Base { @@ -1535,7 +1482,7 @@ virtual class DCS: gtsam::noiseModel::mEstimator::Base { void serializable() const; double weight(double error) const; - double residual(double error) const; + double loss(double error) const; }; virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base { @@ -1546,7 +1493,7 @@ virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base { void serializable() const; double weight(double error) const; - double residual(double error) const; + double loss(double error) const; }; }///\namespace mEstimator @@ -1937,6 +1884,22 @@ virtual class ConjugateGradientParameters : gtsam::IterativeOptimizationParamete void print() const; }; +#include +virtual class PreconditionerParameters { + PreconditionerParameters(); +}; + +virtual class DummyPreconditionerParameters : gtsam::PreconditionerParameters { + DummyPreconditionerParameters(); +}; + +#include +virtual class PCGSolverParameters : gtsam::ConjugateGradientParameters { + PCGSolverParameters(); + void print(string s); + void setPreconditionerParams(gtsam::PreconditionerParameters* preconditioner); +}; + #include virtual class SubgraphSolverParameters : gtsam::ConjugateGradientParameters { 
SubgraphSolverParameters(); @@ -1973,6 +1936,21 @@ class KalmanFilter { //************************************************************************* #include + +class Symbol { + Symbol(); + Symbol(char c, uint64_t j); + Symbol(size_t key); + + size_t key() const; + void print(const string& s) const; + bool equals(const gtsam::Symbol& expected, double tol) const; + + char chr() const; + uint64_t index() const; + string string() const; +}; + size_t symbol(char chr, size_t index); char symbolChr(size_t key); size_t symbolIndex(size_t key); @@ -2079,7 +2057,7 @@ class NonlinearFactorGraph { gtsam::KeySet keys() const; gtsam::KeyVector keyVector() const; - template + template, gtsam::imuBias::ConstantBias}> void addPrior(size_t key, const T& prior, const gtsam::noiseModel::Base* noiseModel); // NonlinearFactorGraph @@ -2093,6 +2071,7 @@ class NonlinearFactorGraph { // enabling serialization functionality void serialize() const; + void saveGraph(const string& s) const; }; #include @@ -2109,7 +2088,7 @@ virtual class NonlinearFactor { bool active(const gtsam::Values& c) const; gtsam::GaussianFactor* linearize(const gtsam::Values& c) const; gtsam::NonlinearFactor* clone() const; - // gtsam::NonlinearFactor* rekey(const gtsam::KeyVector& newKeys) const; //FIXME: Conversion from KeyVector to std::vector does not happen + // gtsam::NonlinearFactor* rekey(const gtsam::KeyVector& newKeys) const; //TODO: Conversion from KeyVector to std::vector does not happen }; #include @@ -2154,7 +2133,11 @@ class Values { // void insert(size_t j, const gtsam::Value& value); // void update(size_t j, const gtsam::Value& val); // gtsam::Value at(size_t j) const; - + + // The order is important: Vector has to precede Point2/Point3 so `atVector` + // can work for those fixed-size vectors. 
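[Editorial note] The hunks above wrap the Symbol key helper and NonlinearFactorGraph::addPrior, and reorder the Values::insert overloads so that Vector precedes the fixed-size point types. A short C++ sketch of the wrapped API, assuming standard GTSAM headers (illustrative, not part of the patch):

    #include <gtsam/geometry/Pose2.h>
    #include <gtsam/inference/Symbol.h>
    #include <gtsam/linear/NoiseModel.h>
    #include <gtsam/nonlinear/NonlinearFactorGraph.h>
    #include <gtsam/nonlinear/Values.h>

    void symbolAndPriorSketch() {
      const gtsam::Symbol x0('x', 0);  // chr() == 'x', index() == 0
      gtsam::NonlinearFactorGraph graph;
      const auto noise = gtsam::noiseModel::Diagonal::Sigmas(gtsam::Vector3(0.1, 0.1, 0.05));
      graph.addPrior(x0, gtsam::Pose2(0.0, 0.0, 0.0), noise);  // PriorFactor<Pose2> under the hood

      gtsam::Values values;
      values.insert(x0, gtsam::Pose2(0.0, 0.0, 0.0));
      const auto pose = values.at<gtsam::Pose2>(x0);
      (void)pose;
    }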
+ void insert(size_t j, Vector vector); + void insert(size_t j, Matrix matrix); void insert(size_t j, const gtsam::Point2& point2); void insert(size_t j, const gtsam::Point3& point3); void insert(size_t j, const gtsam::Rot2& rot2); @@ -2164,15 +2147,15 @@ class Values { void insert(size_t j, const gtsam::SOn& P); void insert(size_t j, const gtsam::Rot3& rot3); void insert(size_t j, const gtsam::Pose3& pose3); + void insert(size_t j, const gtsam::Unit3& unit3); void insert(size_t j, const gtsam::Cal3_S2& cal3_s2); void insert(size_t j, const gtsam::Cal3DS2& cal3ds2); void insert(size_t j, const gtsam::Cal3Bundler& cal3bundler); void insert(size_t j, const gtsam::EssentialMatrix& essential_matrix); void insert(size_t j, const gtsam::PinholeCameraCal3_S2& simple_camera); + void insert(size_t j, const gtsam::PinholeCamera& camera); void insert(size_t j, const gtsam::imuBias::ConstantBias& constant_bias); void insert(size_t j, const gtsam::NavState& nav_state); - void insert(size_t j, Vector vector); - void insert(size_t j, Matrix matrix); void update(size_t j, const gtsam::Point2& point2); void update(size_t j, const gtsam::Point3& point3); @@ -2183,16 +2166,19 @@ class Values { void update(size_t j, const gtsam::SOn& P); void update(size_t j, const gtsam::Rot3& rot3); void update(size_t j, const gtsam::Pose3& pose3); + void update(size_t j, const gtsam::Unit3& unit3); void update(size_t j, const gtsam::Cal3_S2& cal3_s2); void update(size_t j, const gtsam::Cal3DS2& cal3ds2); void update(size_t j, const gtsam::Cal3Bundler& cal3bundler); void update(size_t j, const gtsam::EssentialMatrix& essential_matrix); + void update(size_t j, const gtsam::PinholeCameraCal3_S2& simple_camera); + void update(size_t j, const gtsam::PinholeCamera& camera); void update(size_t j, const gtsam::imuBias::ConstantBias& constant_bias); void update(size_t j, const gtsam::NavState& nav_state); void update(size_t j, Vector vector); void update(size_t j, Matrix matrix); - template + template, gtsam::imuBias::ConstantBias, gtsam::NavState, Vector, Matrix}> T at(size_t j); /// version for double @@ -2352,6 +2338,7 @@ virtual class NonlinearOptimizer { double error() const; int iterations() const; gtsam::Values values() const; + gtsam::NonlinearFactorGraph graph() const; gtsam::GaussianFactorGraph* iterate() const; }; @@ -2468,6 +2455,8 @@ class ISAM2Result { size_t getVariablesRelinearized() const; size_t getVariablesReeliminated() const; size_t getCliques() const; + double getErrorBefore() const; + double getErrorAfter() const; }; class ISAM2 { @@ -2484,16 +2473,17 @@ class ISAM2 { gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors, const gtsam::Values& newTheta); gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors, const gtsam::Values& newTheta, const gtsam::FactorIndices& removeFactorIndices); gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors, const gtsam::Values& newTheta, const gtsam::FactorIndices& removeFactorIndices, const gtsam::KeyGroupMap& constrainedKeys); - // TODO: wrap the full version of update - //void update(const gtsam::NonlinearFactorGraph& newFactors, const gtsam::Values& newTheta, const gtsam::KeyVector& removeFactorIndices, FastMap& constrainedKeys); - //void update(const gtsam::NonlinearFactorGraph& newFactors, const gtsam::Values& newTheta, const gtsam::KeyVector& removeFactorIndices, FastMap& constrainedKeys, bool force_relinearize); + gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors, const gtsam::Values& 
newTheta, const gtsam::FactorIndices& removeFactorIndices, gtsam::KeyGroupMap& constrainedKeys, const gtsam::KeyList& noRelinKeys); + gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors, const gtsam::Values& newTheta, const gtsam::FactorIndices& removeFactorIndices, gtsam::KeyGroupMap& constrainedKeys, const gtsam::KeyList& noRelinKeys, const gtsam::KeyList& extraReelimKeys); + gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors, const gtsam::Values& newTheta, const gtsam::FactorIndices& removeFactorIndices, gtsam::KeyGroupMap& constrainedKeys, const gtsam::KeyList& noRelinKeys, const gtsam::KeyList& extraReelimKeys, bool force_relinearize); gtsam::Values getLinearizationPoint() const; gtsam::Values calculateEstimate() const; template + gtsam::PinholeCameraCal3_S2, gtsam::PinholeCamera, + Vector, Matrix}> VALUE calculateEstimate(size_t key) const; gtsam::Values calculateBestEstimate() const; Matrix marginalCovariance(size_t key) const; @@ -2526,12 +2516,11 @@ class NonlinearISAM { //************************************************************************* // Nonlinear factor types //************************************************************************* -#include #include #include #include -template +template}> virtual class PriorFactor : gtsam::NoiseModelFactor { PriorFactor(size_t key, const T& prior, const gtsam::noiseModel::Base* noiseModel); T prior() const; @@ -2551,10 +2540,12 @@ virtual class BetweenFactor : gtsam::NoiseModelFactor { void serialize() const; }; - - #include -template +template virtual class NonlinearEquality : gtsam::NoiseModelFactor { // Constructor - forces exact evaluation NonlinearEquality(size_t j, const T& feasible); @@ -2565,7 +2556,6 @@ virtual class NonlinearEquality : gtsam::NoiseModelFactor { void serialize() const; }; - #include template virtual class RangeFactor : gtsam::NoiseModelFactor { @@ -2609,6 +2599,7 @@ virtual class BearingFactor : gtsam::NoiseModelFactor { }; typedef gtsam::BearingFactor BearingFactor2D; +typedef gtsam::BearingFactor BearingFactor3D; typedef gtsam::BearingFactor BearingFactorPose2; #include @@ -2617,8 +2608,7 @@ class BearingRange { BearingRange(const BEARING& b, const RANGE& r); BEARING bearing() const; RANGE range() const; - // TODO(frank): can't class instance itself? 
- // static gtsam::BearingRange Measure(const POSE& pose, const POINT& point); + static This Measure(const POSE& pose, const POINT& point); static BEARING MeasureBearing(const POSE& pose, const POINT& point); static RANGE MeasureRange(const POSE& pose, const POINT& point); void print(string s) const; @@ -2674,10 +2664,10 @@ virtual class GeneralSFMFactor : gtsam::NoiseModelFactor { gtsam::Point2 measured() const; }; typedef gtsam::GeneralSFMFactor GeneralSFMFactorCal3_S2; -// due to lack of jacobians of Cal3DS2_Base::calibrate, GeneralSFMFactor does not apply to Cal3DS2 -//typedef gtsam::GeneralSFMFactor GeneralSFMFactorCal3DS2; +typedef gtsam::GeneralSFMFactor GeneralSFMFactorCal3DS2; +typedef gtsam::GeneralSFMFactor, gtsam::Point3> GeneralSFMFactorCal3Bundler; -template +template virtual class GeneralSFMFactor2 : gtsam::NoiseModelFactor { GeneralSFMFactor2(const gtsam::Point2& measured, const gtsam::noiseModel::Base* model, size_t poseKey, size_t landmarkKey, size_t calibKey); gtsam::Point2 measured() const; @@ -2762,44 +2752,94 @@ virtual class EssentialMatrixFactor : gtsam::NoiseModelFactor { }; #include + class SfmTrack { + SfmTrack(); + SfmTrack(const gtsam::Point3& pt); + const Point3& point3() const; + + double r; + double g; + double b; + // TODO Need to close wrap#10 to allow this to work. + // std::vector> measurements; + size_t number_measurements() const; pair measurement(size_t idx) const; pair siftIndex(size_t idx) const; + void add_measurement(size_t idx, const gtsam::Point2& m); + + // enabling serialization functionality + void serialize() const; + + // enabling function to compare objects + bool equals(const gtsam::SfmTrack& expected, double tol) const; }; class SfmData { + SfmData(); size_t number_cameras() const; size_t number_tracks() const; - //TODO(Varun) Need to fix issue #237 first before this can work - // gtsam::PinholeCamera camera(size_t idx) const; + gtsam::PinholeCamera camera(size_t idx) const; gtsam::SfmTrack track(size_t idx) const; + void add_track(const gtsam::SfmTrack& t) ; + void add_camera(const gtsam::SfmCamera& cam); + + // enabling serialization functionality + void serialize() const; + + // enabling function to compare objects + bool equals(const gtsam::SfmData& expected, double tol) const; }; -string findExampleDataFile(string name); +gtsam::SfmData readBal(string filename); +bool writeBAL(string filename, gtsam::SfmData& data); +gtsam::Values initialCamerasEstimate(const gtsam::SfmData& db); +gtsam::Values initialCamerasAndPointsEstimate(const gtsam::SfmData& db); + pair load2D(string filename, - gtsam::noiseModel::Diagonal* model, int maxID, bool addNoise, bool smart); + gtsam::noiseModel::Diagonal* model, int maxIndex, bool addNoise, bool smart); pair load2D(string filename, - gtsam::noiseModel::Diagonal* model, int maxID, bool addNoise); + gtsam::noiseModel::Diagonal* model, int maxIndex, bool addNoise); pair load2D(string filename, - gtsam::noiseModel::Diagonal* model, int maxID); + gtsam::noiseModel::Diagonal* model, int maxIndex); pair load2D(string filename, gtsam::noiseModel::Diagonal* model); pair load2D(string filename); pair load2D_robust(string filename, - gtsam::noiseModel::Base* model); + gtsam::noiseModel::Base* model, int maxIndex); void save2D(const gtsam::NonlinearFactorGraph& graph, const gtsam::Values& config, gtsam::noiseModel::Diagonal* model, string filename); +// std::vector::shared_ptr> +// Ignored by pybind -> will be List[BetweenFactorPose2] +class BetweenFactorPose2s +{ + BetweenFactorPose2s(); + size_t size() const; 
+ gtsam::BetweenFactor* at(size_t i) const; + void push_back(const gtsam::BetweenFactor* factor); +}; +gtsam::BetweenFactorPose2s parse2DFactors(string filename); + // std::vector::shared_ptr> +// Ignored by pybind -> will be List[BetweenFactorPose3] class BetweenFactorPose3s { BetweenFactorPose3s(); size_t size() const; - gtsam::BetweenFactorPose3* at(size_t i) const; - void push_back(const gtsam::BetweenFactorPose3* factor); + gtsam::BetweenFactor* at(size_t i) const; + void push_back(const gtsam::BetweenFactor* factor); }; +gtsam::BetweenFactorPose3s parse3DFactors(string filename); + +pair load3D(string filename); + +pair readG2o(string filename); +pair readG2o(string filename, bool is3D); +void writeG2o(const gtsam::NonlinearFactorGraph& graph, + const gtsam::Values& estimate, string filename); #include class InitializePose3 { @@ -2821,20 +2861,214 @@ class InitializePose3 { static gtsam::Values initialize(const gtsam::NonlinearFactorGraph& graph); }; -gtsam::BetweenFactorPose3s parse3DFactors(string filename); -pair load3D(string filename); - -pair readG2o(string filename); -pair readG2o(string filename, bool is3D); -void writeG2o(const gtsam::NonlinearFactorGraph& graph, - const gtsam::Values& estimate, string filename); - #include template virtual class KarcherMeanFactor : gtsam::NonlinearFactor { KarcherMeanFactor(const gtsam::KeyVector& keys); }; +#include +gtsam::noiseModel::Isotropic* ConvertNoiseModel( + gtsam::noiseModel::Base* model, size_t d); + +template +virtual class FrobeniusFactor : gtsam::NoiseModelFactor { + FrobeniusFactor(size_t key1, size_t key2); + FrobeniusFactor(size_t key1, size_t key2, gtsam::noiseModel::Base* model); + + Vector evaluateError(const T& R1, const T& R2); +}; + +template +virtual class FrobeniusBetweenFactor : gtsam::NoiseModelFactor { + FrobeniusBetweenFactor(size_t key1, size_t key2, const T& R12); + FrobeniusBetweenFactor(size_t key1, size_t key2, const T& R12, gtsam::noiseModel::Base* model); + + Vector evaluateError(const T& R1, const T& R2); +}; + +#include + +virtual class ShonanFactor3 : gtsam::NoiseModelFactor { + ShonanFactor3(size_t key1, size_t key2, const gtsam::Rot3 &R12, + size_t p); + ShonanFactor3(size_t key1, size_t key2, const gtsam::Rot3 &R12, + size_t p, gtsam::noiseModel::Base *model); + Vector evaluateError(const gtsam::SOn &Q1, const gtsam::SOn &Q2); +}; + +#include +template +class BinaryMeasurement { + BinaryMeasurement(size_t key1, size_t key2, const T& measured, + const gtsam::noiseModel::Base* model); + size_t key1() const; + size_t key2() const; + T measured() const; + gtsam::noiseModel::Base* noiseModel() const; +}; + +typedef gtsam::BinaryMeasurement BinaryMeasurementUnit3; +typedef gtsam::BinaryMeasurement BinaryMeasurementRot3; + +class BinaryMeasurementsUnit3 { + BinaryMeasurementsUnit3(); + size_t size() const; + gtsam::BinaryMeasurement at(size_t idx) const; + void push_back(const gtsam::BinaryMeasurement& measurement); +}; + +#include + +// TODO(frank): copy/pasta below until we have integer template paremeters in wrap! 
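[Editorial note] The hunks above expose the dataset helpers (parse2DFactors/parse3DFactors, load3D, readG2o/writeG2o, readBal) to the wrapper. A small C++ sketch of a readG2o/writeG2o round trip, assuming <gtsam/slam/dataset.h> and that readG2o returns a pair of shared pointers (illustrative, not part of the patch):

    #include <string>
    #include <gtsam/slam/dataset.h>

    void g2oRoundTripSketch(const std::string& inFile, const std::string& outFile) {
      // readG2o returns {NonlinearFactorGraph::shared_ptr, Values::shared_ptr}.
      const auto graphAndValues = gtsam::readG2o(inFile, true /* is3D */);
      // ... optimize here if desired ...
      gtsam::writeG2o(*graphAndValues.first, *graphAndValues.second, outFile);
    }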
+ +class ShonanAveragingParameters2 { + ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm); + ShonanAveragingParameters2(const gtsam::LevenbergMarquardtParams& lm, string method); + gtsam::LevenbergMarquardtParams getLMParams() const; + void setOptimalityThreshold(double value); + double getOptimalityThreshold() const; + void setAnchor(size_t index, const gtsam::Rot2& value); + pair getAnchor(); + void setAnchorWeight(double value); + double getAnchorWeight() const; + void setKarcherWeight(double value); + double getKarcherWeight(); + void setGaugesWeight(double value); + double getGaugesWeight(); +}; + +class ShonanAveragingParameters3 { + ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm); + ShonanAveragingParameters3(const gtsam::LevenbergMarquardtParams& lm, string method); + gtsam::LevenbergMarquardtParams getLMParams() const; + void setOptimalityThreshold(double value); + double getOptimalityThreshold() const; + void setAnchor(size_t index, const gtsam::Rot3& value); + pair getAnchor(); + void setAnchorWeight(double value); + double getAnchorWeight() const; + void setKarcherWeight(double value); + double getKarcherWeight(); + void setGaugesWeight(double value); + double getGaugesWeight(); +}; + +class ShonanAveraging2 { + ShonanAveraging2(string g2oFile); + ShonanAveraging2(string g2oFile, + const gtsam::ShonanAveragingParameters2 ¶meters); + + // Query properties + size_t nrUnknowns() const; + size_t nrMeasurements() const; + gtsam::Rot2 measured(size_t i); + gtsam::KeyVector keys(size_t i); + + // Matrix API (advanced use, debugging) + Matrix denseD() const; + Matrix denseQ() const; + Matrix denseL() const; + // Matrix computeLambda_(Matrix S) const; + Matrix computeLambda_(const gtsam::Values& values) const; + Matrix computeA_(const gtsam::Values& values) const; + double computeMinEigenValue(const gtsam::Values& values) const; + gtsam::Values initializeWithDescent(size_t p, const gtsam::Values& values, + const Vector& minEigenVector, double minEigenValue) const; + + // Advanced API + gtsam::NonlinearFactorGraph buildGraphAt(size_t p) const; + gtsam::Values initializeRandomlyAt(size_t p) const; + double costAt(size_t p, const gtsam::Values& values) const; + pair computeMinEigenVector(const gtsam::Values& values) const; + bool checkOptimality(const gtsam::Values& values) const; + gtsam::LevenbergMarquardtOptimizer* createOptimizerAt(size_t p, const gtsam::Values& initial); + // gtsam::Values tryOptimizingAt(size_t p) const; + gtsam::Values tryOptimizingAt(size_t p, const gtsam::Values& initial) const; + gtsam::Values projectFrom(size_t p, const gtsam::Values& values) const; + gtsam::Values roundSolution(const gtsam::Values& values) const; + + // Basic API + double cost(const gtsam::Values& values) const; + gtsam::Values initializeRandomly() const; + pair run(const gtsam::Values& initial, size_t min_p, size_t max_p) const; +}; + +class ShonanAveraging3 { + ShonanAveraging3(string g2oFile); + ShonanAveraging3(string g2oFile, + const gtsam::ShonanAveragingParameters3 ¶meters); + + // TODO(frank): deprecate once we land pybind wrapper + ShonanAveraging3(const gtsam::BetweenFactorPose3s &factors); + ShonanAveraging3(const gtsam::BetweenFactorPose3s &factors, + const gtsam::ShonanAveragingParameters3 ¶meters); + + // Query properties + size_t nrUnknowns() const; + size_t nrMeasurements() const; + gtsam::Rot3 measured(size_t i); + gtsam::KeyVector keys(size_t i); + + // Matrix API (advanced use, debugging) + Matrix denseD() const; + Matrix denseQ() 
const; + Matrix denseL() const; + // Matrix computeLambda_(Matrix S) const; + Matrix computeLambda_(const gtsam::Values& values) const; + Matrix computeA_(const gtsam::Values& values) const; + double computeMinEigenValue(const gtsam::Values& values) const; + gtsam::Values initializeWithDescent(size_t p, const gtsam::Values& values, + const Vector& minEigenVector, double minEigenValue) const; + + // Advanced API + gtsam::NonlinearFactorGraph buildGraphAt(size_t p) const; + gtsam::Values initializeRandomlyAt(size_t p) const; + double costAt(size_t p, const gtsam::Values& values) const; + pair computeMinEigenVector(const gtsam::Values& values) const; + bool checkOptimality(const gtsam::Values& values) const; + gtsam::LevenbergMarquardtOptimizer* createOptimizerAt(size_t p, const gtsam::Values& initial); + // gtsam::Values tryOptimizingAt(size_t p) const; + gtsam::Values tryOptimizingAt(size_t p, const gtsam::Values& initial) const; + gtsam::Values projectFrom(size_t p, const gtsam::Values& values) const; + gtsam::Values roundSolution(const gtsam::Values& values) const; + + // Basic API + double cost(const gtsam::Values& values) const; + gtsam::Values initializeRandomly() const; + pair run(const gtsam::Values& initial, size_t min_p, size_t max_p) const; +}; + +#include + +class KeyPairDoubleMap { + KeyPairDoubleMap(); + KeyPairDoubleMap(const gtsam::KeyPairDoubleMap& other); + + size_t size() const; + bool empty() const; + void clear(); + size_t at(const pair& keypair) const; +}; + +class MFAS { + MFAS(const gtsam::BinaryMeasurementsUnit3& relativeTranslations, + const gtsam::Unit3& projectionDirection); + + gtsam::KeyPairDoubleMap computeOutlierWeights() const; + gtsam::KeyVector computeOrdering() const; +}; + +#include +class TranslationRecovery { + TranslationRecovery(const gtsam::BinaryMeasurementsUnit3 &relativeTranslations, + const gtsam::LevenbergMarquardtParams &lmParams); + TranslationRecovery( + const gtsam::BinaryMeasurementsUnit3 & relativeTranslations); // default LevenbergMarquardtParams + gtsam::Values run(const double scale) const; + gtsam::Values run() const; // default scale = 1.0 +}; + //************************************************************************* // Navigation //************************************************************************* @@ -2951,10 +3185,12 @@ class PreintegratedImuMeasurements { void resetIntegrationAndSetBias(const gtsam::imuBias::ConstantBias& biasHat); Matrix preintMeasCov() const; + Vector preintegrated() const; double deltaTij() const; gtsam::Rot3 deltaRij() const; Vector deltaPij() const; Vector deltaVij() const; + gtsam::imuBias::ConstantBias biasHat() const; Vector biasHatVector() const; gtsam::NavState predict(const gtsam::NavState& state_i, const gtsam::imuBias::ConstantBias& bias) const; @@ -3016,6 +3252,7 @@ class PreintegratedCombinedMeasurements { gtsam::Rot3 deltaRij() const; Vector deltaPij() const; Vector deltaVij() const; + gtsam::imuBias::ConstantBias biasHat() const; Vector biasHatVector() const; gtsam::NavState predict(const gtsam::NavState& state_i, const gtsam::imuBias::ConstantBias& bias) const; @@ -3187,6 +3424,7 @@ namespace utilities { gtsam::KeySet createKeySet(string s, Vector I); Matrix extractPoint2(const gtsam::Values& values); Matrix extractPoint3(const gtsam::Values& values); + gtsam::Values allPose2s(gtsam::Values& values); Matrix extractPose2(const gtsam::Values& values); gtsam::Values allPose3s(gtsam::Values& values); Matrix extractPose3(const gtsam::Values& values); diff --git a/gtsam/inference/BayesNet.h 
b/gtsam/inference/BayesNet.h index a69fb9b8c..0597ece98 100644 --- a/gtsam/inference/BayesNet.h +++ b/gtsam/inference/BayesNet.h @@ -69,4 +69,6 @@ namespace gtsam { void saveGraph(const std::string &s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; }; -} \ No newline at end of file +} + +#include diff --git a/gtsam/inference/BayesTree.h b/gtsam/inference/BayesTree.h index 9d632ff06..7199da0ad 100644 --- a/gtsam/inference/BayesTree.h +++ b/gtsam/inference/BayesTree.h @@ -249,25 +249,6 @@ namespace gtsam { // Friend JunctionTree because it directly fills roots and nodes index. template friend class EliminatableClusterTree; -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - public: - /// @name Deprecated - /// @{ - void removePath(sharedClique clique, BayesNetType& bn, Cliques& orphans) { - removePath(clique, &bn, &orphans); - } - void removeTop(const KeyVector& keys, BayesNetType& bn, Cliques& orphans) { - removeTop(keys, &bn, &orphans); - } - void getCliqueData(BayesTreeCliqueData& stats, sharedClique clique) const { - getCliqueData(clique, &stats); - } - void addFactorsToGraph(FactorGraph& graph) const{ - addFactorsToGraph(& graph); - } - /// @} -#endif - private: /** Serialization function */ friend class boost::serialization::access; @@ -299,7 +280,7 @@ namespace gtsam { this->keys_.assign(clique->conditional()->beginParents(), clique->conditional()->endParents()); } - void print(const std::string& s="", const KeyFormatter& formatter = DefaultKeyFormatter) const { + void print(const std::string& s="", const KeyFormatter& formatter = DefaultKeyFormatter) const override { clique->print(s + "stored clique", formatter); } }; diff --git a/gtsam/inference/BayesTreeCliqueBase-inst.h b/gtsam/inference/BayesTreeCliqueBase-inst.h index e762786f5..dfcc7c318 100644 --- a/gtsam/inference/BayesTreeCliqueBase-inst.h +++ b/gtsam/inference/BayesTreeCliqueBase-inst.h @@ -91,6 +91,7 @@ namespace gtsam { template size_t BayesTreeCliqueBase::numCachedSeparatorMarginals() const { + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); if (!cachedSeparatorMarginal_) return 0; @@ -136,57 +137,62 @@ namespace gtsam { } } - /* ************************************************************************* */ + /* *********************************************************************** */ // separator marginal, uses separator marginal of parent recursively // P(C) = P(F|S) P(S) - /* ************************************************************************* */ - template + /* *********************************************************************** */ + template typename BayesTreeCliqueBase::FactorGraphType - BayesTreeCliqueBase::separatorMarginal(Eliminate function) const - { + BayesTreeCliqueBase::separatorMarginal( + Eliminate function) const { + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); gttic(BayesTreeCliqueBase_separatorMarginal); // Check if the Separator marginal was already calculated - if (!cachedSeparatorMarginal_) - { + if (!cachedSeparatorMarginal_) { gttic(BayesTreeCliqueBase_separatorMarginal_cachemiss); + // If this is the root, there is no separator - if (parent_.expired() /*(if we're the root)*/) - { + if (parent_.expired() /*(if we're the root)*/) { // we are root, return empty FactorGraphType empty; cachedSeparatorMarginal_ = empty; - } - else - { + } else { + // Flatten recursion in timing outline + gttoc(BayesTreeCliqueBase_separatorMarginal_cachemiss); + gttoc(BayesTreeCliqueBase_separatorMarginal); + // Obtain P(S) = \int P(Cp) = \int P(Fp|Sp) P(Sp) // initialize P(Cp) 
with the parent separator marginal derived_ptr parent(parent_.lock()); - gttoc(BayesTreeCliqueBase_separatorMarginal_cachemiss); // Flatten recursion in timing outline - gttoc(BayesTreeCliqueBase_separatorMarginal); - FactorGraphType p_Cp(parent->separatorMarginal(function)); // P(Sp) + FactorGraphType p_Cp(parent->separatorMarginal(function)); // P(Sp) + gttic(BayesTreeCliqueBase_separatorMarginal); gttic(BayesTreeCliqueBase_separatorMarginal_cachemiss); + // now add the parent conditional - p_Cp += parent->conditional_; // P(Fp|Sp) + p_Cp += parent->conditional_; // P(Fp|Sp) // The variables we want to keepSet are exactly the ones in S - KeyVector indicesS(this->conditional()->beginParents(), this->conditional()->endParents()); - cachedSeparatorMarginal_ = *p_Cp.marginalMultifrontalBayesNet(Ordering(indicesS), function); + KeyVector indicesS(this->conditional()->beginParents(), + this->conditional()->endParents()); + auto separatorMarginal = + p_Cp.marginalMultifrontalBayesNet(Ordering(indicesS), function); + cachedSeparatorMarginal_.reset(*separatorMarginal); } } // return the shortcut P(S||B) - return *cachedSeparatorMarginal_; // return the cached version + return *cachedSeparatorMarginal_; // return the cached version } - /* ************************************************************************* */ - // marginal2, uses separator marginal of parent recursively + /* *********************************************************************** */ + // marginal2, uses separator marginal of parent // P(C) = P(F|S) P(S) - /* ************************************************************************* */ - template + /* *********************************************************************** */ + template typename BayesTreeCliqueBase::FactorGraphType - BayesTreeCliqueBase::marginal2(Eliminate function) const - { + BayesTreeCliqueBase::marginal2( + Eliminate function) const { gttic(BayesTreeCliqueBase_marginal2); // initialize with separator marginal P(S) FactorGraphType p_C = this->separatorMarginal(function); @@ -202,6 +208,8 @@ namespace gtsam { // When a shortcut is requested, all of the shortcuts between it and the // root are also generated. So, if this clique's cached shortcut is set, // recursively call over all child cliques. Otherwise, it is unnecessary. 
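[Editorial note] The BayesTreeCliqueBase hunks guard the mutable cachedSeparatorMarginal_ with a mutex so the const accessors remain safe under concurrent reads, as the new header comment explains. A generic C++ sketch of that caching pattern (illustrative only, not GTSAM code):

    #include <mutex>
    #include <boost/optional.hpp>

    class CachedExample {
     public:
      int valueOrCompute() const {
        // Lock a mutable mutex from a const method, mirroring cachedSeparatorMarginalMutex_.
        std::lock_guard<std::mutex> lock(cacheMutex_);
        if (!cache_) cache_ = ExpensiveCompute();  // fill the cache exactly once
        return *cache_;
      }

     private:
      static int ExpensiveCompute() { return 42; }
      mutable boost::optional<int> cache_;  // mutable: written from const methods
      mutable std::mutex cacheMutex_;       // mutable: locked from const methods
    };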
+ + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); if (cachedSeparatorMarginal_) { for(derived_ptr& child: children) { child->deleteCachedShortcuts(); diff --git a/gtsam/inference/BayesTreeCliqueBase.h b/gtsam/inference/BayesTreeCliqueBase.h index 6266e961c..7b79ccf68 100644 --- a/gtsam/inference/BayesTreeCliqueBase.h +++ b/gtsam/inference/BayesTreeCliqueBase.h @@ -24,6 +24,7 @@ #include #include +#include namespace gtsam { @@ -75,10 +76,28 @@ namespace gtsam { /** Construct from a conditional, leaving parent and child pointers uninitialized */ BayesTreeCliqueBase(const sharedConditional& conditional) : conditional_(conditional), problemSize_(1) {} + /** Shallow copy constructor */ + BayesTreeCliqueBase(const BayesTreeCliqueBase& c) : conditional_(c.conditional_), parent_(c.parent_), children(c.children), problemSize_(c.problemSize_), is_root(c.is_root) {} + + /** Shallow copy assignment constructor */ + BayesTreeCliqueBase& operator=(const BayesTreeCliqueBase& c) { + conditional_ = c.conditional_; + parent_ = c.parent_; + children = c.children; + problemSize_ = c.problemSize_; + is_root = c.is_root; + return *this; + } + /// @} - /// This stores the Cached separator margnal P(S) + /// This stores the Cached separator marginal P(S) mutable boost::optional cachedSeparatorMarginal_; + /// This protects Cached seperator marginal P(S) from concurrent read/writes + /// as many the functions which access it are const (hence the mutable) + /// leading to the false impression that these const functions are thread-safe + /// which is not true due to these mutable values. This is fixed by applying this mutex. + mutable std::mutex cachedSeparatorMarginalMutex_; public: sharedConditional conditional_; @@ -100,7 +119,7 @@ namespace gtsam { bool equals(const DERIVED& other, double tol = 1e-9) const; /** print this node */ - void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; + virtual void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; /// @} /// @name Standard Interface @@ -144,7 +163,9 @@ namespace gtsam { void deleteCachedShortcuts(); const boost::optional& cachedSeparatorMarginal() const { - return cachedSeparatorMarginal_; } + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); + return cachedSeparatorMarginal_; + } friend class BayesTree; @@ -159,7 +180,10 @@ namespace gtsam { KeyVector shortcut_indices(const derived_ptr& B, const FactorGraphType& p_Cp_B) const; /** Non-recursive delete cached shortcuts and marginals - internal only. 
*/ - void deleteCachedShortcutsNonRecursive() { cachedSeparatorMarginal_ = boost::none; } + void deleteCachedShortcutsNonRecursive() { + std::lock_guard marginalLock(cachedSeparatorMarginalMutex_); + cachedSeparatorMarginal_ = boost::none; + } private: diff --git a/gtsam/inference/Conditional.h b/gtsam/inference/Conditional.h index 1d486030c..295122879 100644 --- a/gtsam/inference/Conditional.h +++ b/gtsam/inference/Conditional.h @@ -65,6 +65,8 @@ namespace gtsam { Conditional(size_t nrFrontals) : nrFrontals_(nrFrontals) {} /// @} + + public: /// @name Testable /// @{ @@ -76,7 +78,6 @@ namespace gtsam { /// @} - public: /// @name Standard Interface /// @{ diff --git a/gtsam/inference/ISAM.h b/gtsam/inference/ISAM.h index fe6763a13..b4a5db740 100644 --- a/gtsam/inference/ISAM.h +++ b/gtsam/inference/ISAM.h @@ -72,17 +72,6 @@ class ISAM : public BAYESTREE { const Eliminate& function = EliminationTraitsType::DefaultEliminate); /// @} - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - void update_internal( - const FactorGraphType& newFactors, Cliques& orphans, - const Eliminate& function = EliminationTraitsType::DefaultEliminate) { - updateInternal(newFactors, &orphans, function); - } - /// @} -#endif }; } // namespace gtsam diff --git a/gtsam/inference/Symbol.h b/gtsam/inference/Symbol.h index 5be195db3..d5699e7fe 100644 --- a/gtsam/inference/Symbol.h +++ b/gtsam/inference/Symbol.h @@ -80,6 +80,9 @@ class GTSAM_EXPORT Symbol { /** Create a string from the key */ operator std::string() const; + /// Return string representation of the key + std::string string() const { return std::string(*this); }; + /** Comparison for use in maps */ bool operator<(const Symbol& comp) const { return c_ < comp.c_ || (comp.c_ == c_ && j_ < comp.j_); @@ -164,8 +167,17 @@ inline Key Y(std::uint64_t j) { return Symbol('y', j); } inline Key Z(std::uint64_t j) { return Symbol('z', j); } } +/** Generates symbol shorthands with alternative names different than the + * one-letter predefined ones. */ +class SymbolGenerator { + const unsigned char c_; +public: + constexpr SymbolGenerator(const unsigned char c) : c_(c) {} + Symbol operator()(const std::uint64_t j) const { return Symbol(c_, j); } + constexpr unsigned char chr() const { return c_; } +}; + /// traits template<> struct traits : public Testable {}; } // \ namespace gtsam - diff --git a/gtsam/inference/inferenceExceptions.h b/gtsam/inference/inferenceExceptions.h index edd0e0aa5..4409b16c7 100644 --- a/gtsam/inference/inferenceExceptions.h +++ b/gtsam/inference/inferenceExceptions.h @@ -28,9 +28,9 @@ namespace gtsam { * with an ordering that does not include all of the variables. */ class InconsistentEliminationRequested : public std::exception { public: - InconsistentEliminationRequested() throw() {} - virtual ~InconsistentEliminationRequested() throw() {} - virtual const char* what() const throw() { + InconsistentEliminationRequested() noexcept {} + virtual ~InconsistentEliminationRequested() noexcept {} + const char* what() const noexcept override { return "An inference algorithm was called with inconsistent arguments. 
The\n" "factor graph, ordering, or variable index were inconsistent with each\n" diff --git a/gtsam/inference/tests/testKey.cpp b/gtsam/inference/tests/testKey.cpp index a60258581..98c5d36bf 100644 --- a/gtsam/inference/tests/testKey.cpp +++ b/gtsam/inference/tests/testKey.cpp @@ -40,6 +40,31 @@ TEST(Key, KeySymbolConversion) { EXPECT(assert_equal(original, actual)) } +/* ************************************************************************* */ +TEST(Key, SymbolGenerator) { + const auto x1 = gtsam::symbol_shorthand::X(1); + const auto v1 = gtsam::symbol_shorthand::V(1); + const auto a1 = gtsam::symbol_shorthand::A(1); + + const auto Z = gtsam::SymbolGenerator('x'); + const auto DZ = gtsam::SymbolGenerator('v'); + const auto DDZ = gtsam::SymbolGenerator('a'); + + const auto z1 = Z(1); + const auto dz1 = DZ(1); + const auto ddz1 = DDZ(1); + + EXPECT(assert_equal(x1, z1)); + EXPECT(assert_equal(v1, dz1)); + EXPECT(assert_equal(a1, ddz1)); +} + +/* ************************************************************************* */ +TEST(Key, SymbolGeneratorConstexpr) { + constexpr auto Z = gtsam::SymbolGenerator('x'); + EXPECT(assert_equal(Z.chr(), 'x')); +} + /* ************************************************************************* */ template Key KeyTestValue(); @@ -106,4 +131,3 @@ int main() { return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ - diff --git a/gtsam/linear/BinaryJacobianFactor.h b/gtsam/linear/BinaryJacobianFactor.h index 343548613..8b53c34dd 100644 --- a/gtsam/linear/BinaryJacobianFactor.h +++ b/gtsam/linear/BinaryJacobianFactor.h @@ -49,7 +49,7 @@ struct BinaryJacobianFactor: JacobianFactor { // Fixed-size matrix update void updateHessian(const KeyVector& infoKeys, - SymmetricBlockMatrix* info) const { + SymmetricBlockMatrix* info) const override { gttic(updateHessian_BinaryJacobianFactor); // Whiten the factor if it has a noise model const SharedDiagonal& model = get_model(); diff --git a/gtsam/linear/ConjugateGradientSolver.h b/gtsam/linear/ConjugateGradientSolver.h index 2596a7403..c92390491 100644 --- a/gtsam/linear/ConjugateGradientSolver.h +++ b/gtsam/linear/ConjugateGradientSolver.h @@ -80,7 +80,7 @@ class GTSAM_EXPORT ConjugateGradientParameters : public IterativeOptimizationPar void print() const { Base::print(); } - virtual void print(std::ostream &os) const; + void print(std::ostream &os) const override; static std::string blasTranslator(const BLASKernel k) ; static BLASKernel blasTranslator(const std::string &s) ; diff --git a/gtsam/linear/GaussianBayesTree.h b/gtsam/linear/GaussianBayesTree.h index b6f5acd52..e2d8ae87a 100644 --- a/gtsam/linear/GaussianBayesTree.h +++ b/gtsam/linear/GaussianBayesTree.h @@ -41,6 +41,7 @@ namespace gtsam { typedef boost::shared_ptr shared_ptr; typedef boost::weak_ptr weak_ptr; GaussianBayesTreeClique() {} + virtual ~GaussianBayesTreeClique() {} GaussianBayesTreeClique(const boost::shared_ptr& conditional) : Base(conditional) {} }; diff --git a/gtsam/linear/GaussianConditional.h b/gtsam/linear/GaussianConditional.h index 8b41a4def..0ea597f99 100644 --- a/gtsam/linear/GaussianConditional.h +++ b/gtsam/linear/GaussianConditional.h @@ -88,10 +88,10 @@ namespace gtsam { /** print */ void print(const std::string& = "GaussianConditional", - const KeyFormatter& formatter = DefaultKeyFormatter) const; + const KeyFormatter& formatter = DefaultKeyFormatter) const override; /** equals function */ - bool equals(const GaussianFactor&cg, double tol = 1e-9) const; + bool 
equals(const GaussianFactor&cg, double tol = 1e-9) const override; /** Return a view of the upper-triangular R block of the conditional */ constABlock R() const { return Ab_.range(0, nrFrontals()); } @@ -128,17 +128,9 @@ namespace gtsam { /** Scale the values in \c gy according to the sigmas for the frontal variables in this * conditional. */ void scaleFrontalsBySigma(VectorValues& gy) const; -// __declspec(deprecated) void scaleFrontalsBySigma(VectorValues& gy) const; // FIXME: depreciated flag doesn't appear to exist? - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - constABlock get_R() const { return R(); } - constABlock get_S() const { return S(); } - constABlock get_S(const_iterator it) const { return S(it); } - const constBVector get_d() const { return d(); } - /// @} -#endif + + // FIXME: deprecated flag doesn't appear to exist? + // __declspec(deprecated) void scaleFrontalsBySigma(VectorValues& gy) const; private: /** Serialization function */ diff --git a/gtsam/linear/GaussianDensity.h b/gtsam/linear/GaussianDensity.h index 03ffe9326..71af704ab 100644 --- a/gtsam/linear/GaussianDensity.h +++ b/gtsam/linear/GaussianDensity.h @@ -57,7 +57,7 @@ namespace gtsam { /// print void print(const std::string& = "GaussianDensity", - const KeyFormatter& formatter = DefaultKeyFormatter) const; + const KeyFormatter& formatter = DefaultKeyFormatter) const override; /// Mean \f$ \mu = R^{-1} d \f$ Vector mean() const; diff --git a/gtsam/linear/GaussianISAM.h b/gtsam/linear/GaussianISAM.h index 55208d4d1..b77b26240 100644 --- a/gtsam/linear/GaussianISAM.h +++ b/gtsam/linear/GaussianISAM.h @@ -20,6 +20,7 @@ #include #include +#include namespace gtsam { @@ -43,4 +44,8 @@ namespace gtsam { }; + /// traits + template <> + struct traits : public Testable {}; + } diff --git a/gtsam/linear/HessianFactor.h b/gtsam/linear/HessianFactor.h index 64b764087..a4de46104 100644 --- a/gtsam/linear/HessianFactor.h +++ b/gtsam/linear/HessianFactor.h @@ -354,16 +354,7 @@ namespace gtsam { /// Solve the system A'*A delta = A'*b in-place, return delta as VectorValues VectorValues solve(); - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - const SymmetricBlockMatrix& matrixObject() const { return info_; } - /// @} -#endif - - private: - + private: /// Allocate for given scatter pattern void Allocate(const Scatter& scatter); diff --git a/gtsam/linear/JacobianFactor.cpp b/gtsam/linear/JacobianFactor.cpp index 09a9a6103..2e0ffb034 100644 --- a/gtsam/linear/JacobianFactor.cpp +++ b/gtsam/linear/JacobianFactor.cpp @@ -421,21 +421,11 @@ JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph, /* ************************************************************************* */ void JacobianFactor::print(const string& s, const KeyFormatter& formatter) const { - static const Eigen::IOFormat matlab( - Eigen::StreamPrecision, // precision - 0, // flags - " ", // coeffSeparator - ";\n", // rowSeparator - "\t", // rowPrefix - "", // rowSuffix - "[\n", // matPrefix - "\n ]" // matSuffix - ); if (!s.empty()) cout << s << "\n"; for (const_iterator key = begin(); key != end(); ++key) { cout << boost::format(" A[%1%] = ") % formatter(*key); - cout << getA(key).format(matlab) << endl; + cout << getA(key).format(matlabFormat()) << endl; } cout << formatMatrixIndented(" b = ", getb(), true) << "\n"; if (model_) @@ -515,7 +505,7 @@ Vector JacobianFactor::error_vector(const VectorValues& c) const { double JacobianFactor::error(const VectorValues& c) const { Vector e = unweighted_error(c); 
// Use the noise model distance function to get the correct error if available. - if (model_) return 0.5 * model_->distance(e); + if (model_) return 0.5 * model_->squaredMahalanobisDistance(e); return 0.5 * e.dot(e); } diff --git a/gtsam/linear/LossFunctions.cpp b/gtsam/linear/LossFunctions.cpp index de2a5142d..bf799a2ba 100644 --- a/gtsam/linear/LossFunctions.cpp +++ b/gtsam/linear/LossFunctions.cpp @@ -137,12 +137,12 @@ Fair::Fair(double c, const ReweightScheme reweight) : Base(reweight), c_(c) { } } -double Fair::weight(double error) const { - return 1.0 / (1.0 + std::abs(error) / c_); +double Fair::weight(double distance) const { + return 1.0 / (1.0 + std::abs(distance) / c_); } -double Fair::residual(double error) const { - const double absError = std::abs(error); +double Fair::loss(double distance) const { + const double absError = std::abs(distance); const double normalizedError = absError / c_; const double c_2 = c_ * c_; return c_2 * (normalizedError - std::log1p(normalizedError)); @@ -170,15 +170,15 @@ Huber::Huber(double k, const ReweightScheme reweight) : Base(reweight), k_(k) { } } -double Huber::weight(double error) const { - const double absError = std::abs(error); +double Huber::weight(double distance) const { + const double absError = std::abs(distance); return (absError <= k_) ? (1.0) : (k_ / absError); } -double Huber::residual(double error) const { - const double absError = std::abs(error); +double Huber::loss(double distance) const { + const double absError = std::abs(distance); if (absError <= k_) { // |x| <= k - return error*error / 2; + return distance*distance / 2; } else { // |x| > k return k_ * (absError - (k_/2)); } @@ -208,12 +208,12 @@ Cauchy::Cauchy(double k, const ReweightScheme reweight) : Base(reweight), k_(k), } } -double Cauchy::weight(double error) const { - return ksquared_ / (ksquared_ + error*error); +double Cauchy::weight(double distance) const { + return ksquared_ / (ksquared_ + distance*distance); } -double Cauchy::residual(double error) const { - const double val = std::log1p(error * error / ksquared_); +double Cauchy::loss(double distance) const { + const double val = std::log1p(distance * distance / ksquared_); return ksquared_ * val * 0.5; } @@ -241,18 +241,18 @@ Tukey::Tukey(double c, const ReweightScheme reweight) : Base(reweight), c_(c), c } } -double Tukey::weight(double error) const { - if (std::abs(error) <= c_) { - const double one_minus_xc2 = 1.0 - error*error/csquared_; +double Tukey::weight(double distance) const { + if (std::abs(distance) <= c_) { + const double one_minus_xc2 = 1.0 - distance*distance/csquared_; return one_minus_xc2 * one_minus_xc2; } return 0.0; } -double Tukey::residual(double error) const { - double absError = std::abs(error); +double Tukey::loss(double distance) const { + double absError = std::abs(distance); if (absError <= c_) { - const double one_minus_xc2 = 1.0 - error*error/csquared_; + const double one_minus_xc2 = 1.0 - distance*distance/csquared_; const double t = one_minus_xc2*one_minus_xc2*one_minus_xc2; return csquared_ * (1 - t) / 6.0; } else { @@ -280,13 +280,13 @@ Tukey::shared_ptr Tukey::Create(double c, const ReweightScheme reweight) { Welsch::Welsch(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {} -double Welsch::weight(double error) const { - const double xc2 = (error*error)/csquared_; +double Welsch::weight(double distance) const { + const double xc2 = (distance*distance)/csquared_; return std::exp(-xc2); } -double Welsch::residual(double error) const { - 
const double xc2 = (error*error)/csquared_; +double Welsch::loss(double distance) const { + const double xc2 = (distance*distance)/csquared_; return csquared_ * 0.5 * -std::expm1(-xc2); } @@ -311,16 +311,16 @@ GemanMcClure::GemanMcClure(double c, const ReweightScheme reweight) : Base(reweight), c_(c) { } -double GemanMcClure::weight(double error) const { +double GemanMcClure::weight(double distance) const { const double c2 = c_*c_; const double c4 = c2*c2; - const double c2error = c2 + error*error; + const double c2error = c2 + distance*distance; return c4/(c2error*c2error); } -double GemanMcClure::residual(double error) const { +double GemanMcClure::loss(double distance) const { const double c2 = c_*c_; - const double error2 = error*error; + const double error2 = distance*distance; return 0.5 * (c2 * error2) / (c2 + error2); } @@ -345,8 +345,8 @@ DCS::DCS(double c, const ReweightScheme reweight) : Base(reweight), c_(c) { } -double DCS::weight(double error) const { - const double e2 = error*error; +double DCS::weight(double distance) const { + const double e2 = distance*distance; if (e2 > c_) { const double w = 2.0*c_/(c_ + e2); @@ -356,10 +356,10 @@ double DCS::weight(double error) const { return 1.0; } -double DCS::residual(double error) const { +double DCS::loss(double distance) const { // This is the simplified version of Eq 9 from (Agarwal13icra) // after you simplify and cancel terms. - const double e2 = error*error; + const double e2 = distance*distance; const double e4 = e2*e2; const double c2 = c_*c_; @@ -391,17 +391,17 @@ L2WithDeadZone::L2WithDeadZone(double k, const ReweightScheme reweight) } } -double L2WithDeadZone::weight(double error) const { +double L2WithDeadZone::weight(double distance) const { // note that this code is slightly uglier than residual, because there are three distinct // cases to handle (left of deadzone, deadzone, right of deadzone) instead of the two // cases (deadzone, non-deadzone) in residual. - if (std::abs(error) <= k_) return 0.0; - else if (error > k_) return (-k_+error)/error; - else return (k_+error)/error; + if (std::abs(distance) <= k_) return 0.0; + else if (distance > k_) return (-k_+distance)/distance; + else return (k_+distance)/distance; } -double L2WithDeadZone::residual(double error) const { - const double abs_error = std::abs(error); +double L2WithDeadZone::loss(double distance) const { + const double abs_error = std::abs(distance); return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error); } diff --git a/gtsam/linear/LossFunctions.h b/gtsam/linear/LossFunctions.h index 1f7cc1377..771992e80 100644 --- a/gtsam/linear/LossFunctions.h +++ b/gtsam/linear/LossFunctions.h @@ -36,12 +36,12 @@ namespace noiseModel { * The mEstimator name space contains all robust error functions. * It mirrors the exposition at * https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf - * which talks about minimizing \sum \rho(r_i), where \rho is a residual function of choice. + * which talks about minimizing \sum \rho(r_i), where \rho is a loss function of choice. 
* * To illustrate, let's consider the least-squares (L2), L1, and Huber estimators as examples: * * Name Symbol Least-Squares L1-norm Huber - * Residual \rho(x) 0.5*x^2 |x| 0.5*x^2 if |x| shared_ptr; Null(const ReweightScheme reweight = Block) : Base(reweight) {} ~Null() {} - double weight(double /*error*/) const { return 1.0; } - double residual(double error) const { return error; } - void print(const std::string &s) const; - bool equals(const Base & /*expected*/, double /*tol*/) const { return true; } + double weight(double /*error*/) const override { return 1.0; } + double loss(double distance) const override { return 0.5 * distance * distance; } + void print(const std::string &s) const override; + bool equals(const Base & /*expected*/, double /*tol*/) const override { return true; } static shared_ptr Create(); private: @@ -154,8 +154,8 @@ class GTSAM_EXPORT Fair : public Base { typedef boost::shared_ptr shared_ptr; Fair(double c = 1.3998, const ReweightScheme reweight = Block); - double weight(double error) const override; - double residual(double error) const override; + double weight(double distance) const override; + double loss(double distance) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double c, const ReweightScheme reweight = Block); @@ -179,8 +179,8 @@ class GTSAM_EXPORT Huber : public Base { typedef boost::shared_ptr shared_ptr; Huber(double k = 1.345, const ReweightScheme reweight = Block); - double weight(double error) const override; - double residual(double error) const override; + double weight(double distance) const override; + double loss(double distance) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double k, const ReweightScheme reweight = Block); @@ -209,8 +209,8 @@ class GTSAM_EXPORT Cauchy : public Base { typedef boost::shared_ptr shared_ptr; Cauchy(double k = 0.1, const ReweightScheme reweight = Block); - double weight(double error) const override; - double residual(double error) const override; + double weight(double distance) const override; + double loss(double distance) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double k, const ReweightScheme reweight = Block); @@ -234,8 +234,8 @@ class GTSAM_EXPORT Tukey : public Base { typedef boost::shared_ptr shared_ptr; Tukey(double c = 4.6851, const ReweightScheme reweight = Block); - double weight(double error) const override; - double residual(double error) const override; + double weight(double distance) const override; + double loss(double distance) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double k, const ReweightScheme reweight = Block); @@ -259,8 +259,8 @@ class GTSAM_EXPORT Welsch : public Base { typedef boost::shared_ptr shared_ptr; Welsch(double c = 2.9846, const ReweightScheme reweight = Block); - double weight(double error) const override; - double residual(double error) const override; + double weight(double distance) const override; + double loss(double distance) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double k, const ReweightScheme 
reweight = Block); @@ -274,14 +274,6 @@ class GTSAM_EXPORT Welsch : public Base { ar &BOOST_SERIALIZATION_NVP(c_); } }; -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -/// @name Deprecated -/// @{ -// Welsh implements the "Welsch" robust error model (Zhang97ivc) -// This was misspelled in previous versions of gtsam and should be -// removed in the future. -using Welsh = Welsch; -#endif /// GemanMcClure implements the "Geman-McClure" robust error model /// (Zhang97ivc). @@ -295,8 +287,8 @@ class GTSAM_EXPORT GemanMcClure : public Base { GemanMcClure(double c = 1.0, const ReweightScheme reweight = Block); ~GemanMcClure() {} - double weight(double error) const override; - double residual(double error) const override; + double weight(double distance) const override; + double loss(double distance) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double k, const ReweightScheme reweight = Block); @@ -325,8 +317,8 @@ class GTSAM_EXPORT DCS : public Base { DCS(double c = 1.0, const ReweightScheme reweight = Block); ~DCS() {} - double weight(double error) const override; - double residual(double error) const override; + double weight(double distance) const override; + double loss(double distance) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double k, const ReweightScheme reweight = Block); @@ -358,8 +350,8 @@ class GTSAM_EXPORT L2WithDeadZone : public Base { typedef boost::shared_ptr shared_ptr; L2WithDeadZone(double k = 1.0, const ReweightScheme reweight = Block); - double weight(double error) const override; - double residual(double error) const override; + double weight(double distance) const override; + double loss(double distance) const override; void print(const std::string &s) const override; bool equals(const Base &expected, double tol = 1e-8) const override; static shared_ptr Create(double k, const ReweightScheme reweight = Block); diff --git a/gtsam/linear/NoiseModel.cpp b/gtsam/linear/NoiseModel.cpp index d7fd2d1ea..cf10cf115 100644 --- a/gtsam/linear/NoiseModel.cpp +++ b/gtsam/linear/NoiseModel.cpp @@ -74,6 +74,13 @@ Vector Base::sigmas() const { throw("Base::sigmas: sigmas() not implemented for this noise model"); } +/* ************************************************************************* */ +double Base::squaredMahalanobisDistance(const Vector& v) const { + // Note: for Diagonal, which does ediv_, will be correct for constraints + Vector w = whiten(v); + return w.dot(w); +} + /* ************************************************************************* */ Gaussian::shared_ptr Gaussian::SqrtInformation(const Matrix& R, bool smart) { size_t m = R.rows(), n = R.cols(); @@ -164,13 +171,6 @@ Vector Gaussian::unwhiten(const Vector& v) const { return backSubstituteUpper(thisR(), v); } -/* ************************************************************************* */ -double Gaussian::squaredMahalanobisDistance(const Vector& v) const { - // Note: for Diagonal, which does ediv_, will be correct for constraints - Vector w = whiten(v); - return w.dot(w); -} - /* ************************************************************************* */ Matrix Gaussian::Whiten(const Matrix& H) const { return thisR() * H; @@ -377,7 +377,7 @@ Vector Constrained::whiten(const Vector& v) const { } /* ************************************************************************* */ -double 
Constrained::distance(const Vector& v) const { +double Constrained::squaredMahalanobisDistance(const Vector& v) const { Vector w = Diagonal::whiten(v); // get noisemodel for constrained elements for (size_t i=0; i + virtual double squaredMahalanobisDistance(const Vector& v) const; + + /// Mahalanobis distance + virtual double mahalanobisDistance(const Vector& v) const { + return std::sqrt(squaredMahalanobisDistance(v)); + } + + /// loss function, input is Mahalanobis distance + virtual double loss(const double squared_distance) const { + return 0.5 * squared_distance; + } virtual void WhitenSystem(std::vector& A, Vector& b) const = 0; virtual void WhitenSystem(Matrix& A, Vector& b) const = 0; @@ -200,39 +211,17 @@ namespace gtsam { */ static shared_ptr Covariance(const Matrix& covariance, bool smart = true); - virtual void print(const std::string& name) const; - virtual bool equals(const Base& expected, double tol=1e-9) const; - virtual Vector sigmas() const; - virtual Vector whiten(const Vector& v) const; - virtual Vector unwhiten(const Vector& v) const; - - /** - * Squared Mahalanobis distance v'*R'*R*v = - */ - virtual double squaredMahalanobisDistance(const Vector& v) const; - - /** - * Mahalanobis distance - */ - virtual double mahalanobisDistance(const Vector& v) const { - return std::sqrt(squaredMahalanobisDistance(v)); - } - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - virtual double Mahalanobis(const Vector& v) const { - return squaredMahalanobisDistance(v); - } -#endif - - inline virtual double distance(const Vector& v) const { - return squaredMahalanobisDistance(v); - } + void print(const std::string& name) const override; + bool equals(const Base& expected, double tol=1e-9) const override; + Vector sigmas() const override; + Vector whiten(const Vector& v) const override; + Vector unwhiten(const Vector& v) const override; /** * Multiply a derivative with R (derivative of whiten) * Equivalent to whitening each column of the input matrix. 
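For illustration (not part of the patch): with squaredMahalanobisDistance and loss now declared on noiseModel::Base, a plain Gaussian model reports d^2 = ||whiten(v)||^2 and the default loss 0.5*d^2. A small sketch assuming an isotropic model with sigma = 2:

    #include <gtsam/linear/NoiseModel.h>
    #include <iostream>

    int main() {
      using namespace gtsam;
      auto model = noiseModel::Isotropic::Sigma(3, 2.0);   // sigma = 2 on all three dimensions
      const Vector3 v(2.0, 4.0, 0.0);                      // unwhitened error
      // whiten(v) = v / sigma = (1, 2, 0), so d^2 = 1 + 4 + 0 = 5
      const double d2 = model->squaredMahalanobisDistance(v);
      std::cout << d2 << " " << model->loss(d2) << "\n";   // prints 5 and 0.5 * 5 = 2.5
      return 0;
    }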
*/ - virtual Matrix Whiten(const Matrix& H) const; + Matrix Whiten(const Matrix& H) const override; /** * In-place version @@ -247,10 +236,10 @@ namespace gtsam { /** * Whiten a system, in place as well */ - virtual void WhitenSystem(std::vector& A, Vector& b) const; - virtual void WhitenSystem(Matrix& A, Vector& b) const; - virtual void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const; - virtual void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const; + void WhitenSystem(std::vector& A, Vector& b) const override; + void WhitenSystem(Matrix& A, Vector& b) const override; + void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const override; + void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const override; /** * Apply appropriately weighted QR factorization to the system [A b] @@ -335,13 +324,13 @@ namespace gtsam { return Variances(precisions.array().inverse(), smart); } - virtual void print(const std::string& name) const; - virtual Vector sigmas() const { return sigmas_; } - virtual Vector whiten(const Vector& v) const; - virtual Vector unwhiten(const Vector& v) const; - virtual Matrix Whiten(const Matrix& H) const; - virtual void WhitenInPlace(Matrix& H) const; - virtual void WhitenInPlace(Eigen::Block H) const; + void print(const std::string& name) const override; + Vector sigmas() const override { return sigmas_; } + Vector whiten(const Vector& v) const override; + Vector unwhiten(const Vector& v) const override; + Matrix Whiten(const Matrix& H) const override; + void WhitenInPlace(Matrix& H) const override; + void WhitenInPlace(Eigen::Block H) const override; /** * Return standard deviations (sqrt of diagonal) @@ -363,7 +352,7 @@ namespace gtsam { /** * Return R itself, but note that Whiten(H) is cheaper than R*H */ - virtual Matrix R() const { + Matrix R() const override { return invsigmas().asDiagonal(); } @@ -417,10 +406,10 @@ namespace gtsam { typedef boost::shared_ptr shared_ptr; - virtual ~Constrained() {} + ~Constrained() {} /// true if a constrained noise mode, saves slow/clumsy dynamic casting - virtual bool isConstrained() const { return true; } + bool isConstrained() const override { return true; } /// Return true if a particular dimension is free or constrained bool constrained(size_t i) const; @@ -472,12 +461,7 @@ namespace gtsam { return MixedVariances(precisions.array().inverse()); } - /** - * The distance function for a constrained noisemodel, - * for non-constrained versions, uses sigmas, otherwise - * uses the penalty function with mu - */ - virtual double distance(const Vector& v) const; + double squaredMahalanobisDistance(const Vector& v) const override; /** Fully constrained variations */ static shared_ptr All(size_t dim) { @@ -494,16 +478,16 @@ namespace gtsam { return shared_ptr(new Constrained(Vector::Constant(dim, mu), Vector::Constant(dim,0))); } - virtual void print(const std::string& name) const; + void print(const std::string& name) const override; /// Calculates error vector with weights applied - virtual Vector whiten(const Vector& v) const; + Vector whiten(const Vector& v) const override; /// Whitening functions will perform partial whitening on rows /// with a non-zero sigma. Other rows remain untouched. 
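For illustration (not part of the patch): the Constrained override above applies the penalty mu to the constrained (sigma = 0) rows only, which is what the ConstrainedMixed test later in this patch exercises. A hedged sketch assuming the MixedSigmas factory and the default mu = 1000:

    #include <gtsam/linear/NoiseModel.h>
    #include <iostream>

    int main() {
      using namespace gtsam;
      // sigma = 2 on rows 0 and 2, hard constraint (sigma = 0) on row 1
      auto mixed = noiseModel::Constrained::MixedSigmas(Vector3(2.0, 0.0, 2.0));
      const Vector3 infeasible(1.0, 1.0, 1.0), feasible(1.0, 0.0, 1.0);
      // constrained row contributes mu * v^2 = 1000, each free row contributes (1/2)^2
      std::cout << mixed->squaredMahalanobisDistance(infeasible) << "\n";  // 1000 + 0.25 + 0.25
      std::cout << mixed->squaredMahalanobisDistance(feasible)   << "\n";  // 0.25 + 0.25 = 0.5
      return 0;
    }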
- virtual Matrix Whiten(const Matrix& H) const; - virtual void WhitenInPlace(Matrix& H) const; - virtual void WhitenInPlace(Eigen::Block H) const; + Matrix Whiten(const Matrix& H) const override; + void WhitenInPlace(Matrix& H) const override; + void WhitenInPlace(Eigen::Block H) const override; /** * Apply QR factorization to the system [A b], taking into account constraints @@ -514,7 +498,7 @@ namespace gtsam { * @param Ab is the m*(n+1) augmented system matrix [A b] * @return diagonal noise model can be all zeros, mixed, or not-constrained */ - virtual Diagonal::shared_ptr QR(Matrix& Ab) const; + Diagonal::shared_ptr QR(Matrix& Ab) const override; /** * Returns a Unit version of a constrained noisemodel in which @@ -576,14 +560,14 @@ namespace gtsam { return Variance(dim, 1.0/precision, smart); } - virtual void print(const std::string& name) const; - virtual double squaredMahalanobisDistance(const Vector& v) const; - virtual Vector whiten(const Vector& v) const; - virtual Vector unwhiten(const Vector& v) const; - virtual Matrix Whiten(const Matrix& H) const; - virtual void WhitenInPlace(Matrix& H) const; - virtual void whitenInPlace(Vector& v) const; - virtual void WhitenInPlace(Eigen::Block H) const; + void print(const std::string& name) const override; + double squaredMahalanobisDistance(const Vector& v) const override; + Vector whiten(const Vector& v) const override; + Vector unwhiten(const Vector& v) const override; + Matrix Whiten(const Matrix& H) const override; + void WhitenInPlace(Matrix& H) const override; + void whitenInPlace(Vector& v) const override; + void WhitenInPlace(Eigen::Block H) const override; /** * Return standard deviation @@ -616,7 +600,7 @@ namespace gtsam { typedef boost::shared_ptr shared_ptr; - virtual ~Unit() {} + ~Unit() {} /** * Create a unit covariance noise model @@ -626,19 +610,19 @@ namespace gtsam { } /// true if a unit noise model, saves slow/clumsy dynamic casting - virtual bool isUnit() const { return true; } - - virtual void print(const std::string& name) const; - virtual double squaredMahalanobisDistance(const Vector& v) const {return v.dot(v); } - virtual Vector whiten(const Vector& v) const { return v; } - virtual Vector unwhiten(const Vector& v) const { return v; } - virtual Matrix Whiten(const Matrix& H) const { return H; } - virtual void WhitenInPlace(Matrix& /*H*/) const {} - virtual void WhitenInPlace(Eigen::Block /*H*/) const {} - virtual void whitenInPlace(Vector& /*v*/) const {} - virtual void unwhitenInPlace(Vector& /*v*/) const {} - virtual void whitenInPlace(Eigen::Block& /*v*/) const {} - virtual void unwhitenInPlace(Eigen::Block& /*v*/) const {} + bool isUnit() const override { return true; } + + void print(const std::string& name) const override; + double squaredMahalanobisDistance(const Vector& v) const override {return v.dot(v); } + Vector whiten(const Vector& v) const override { return v; } + Vector unwhiten(const Vector& v) const override { return v; } + Matrix Whiten(const Matrix& H) const override { return H; } + void WhitenInPlace(Matrix& /*H*/) const override {} + void WhitenInPlace(Eigen::Block /*H*/) const override {} + void whitenInPlace(Vector& /*v*/) const override {} + void unwhitenInPlace(Vector& /*v*/) const override {} + void whitenInPlace(Eigen::Block& /*v*/) const override {} + void unwhitenInPlace(Eigen::Block& /*v*/) const override {} private: /** Serialization function */ @@ -687,10 +671,10 @@ namespace gtsam { : Base(noise->dim()), robust_(robust), noise_(noise) {} /// Destructor - virtual ~Robust() {} + 
~Robust() {} - virtual void print(const std::string& name) const; - virtual bool equals(const Base& expected, double tol=1e-9) const; + void print(const std::string& name) const override; + bool equals(const Base& expected, double tol=1e-9) const override; /// Return the contained robust error function const RobustModel::shared_ptr& robust() const { return robust_; } @@ -699,28 +683,28 @@ namespace gtsam { const NoiseModel::shared_ptr& noise() const { return noise_; } // TODO: functions below are dummy but necessary for the noiseModel::Base - inline virtual Vector whiten(const Vector& v) const + inline Vector whiten(const Vector& v) const override { Vector r = v; this->WhitenSystem(r); return r; } - inline virtual Matrix Whiten(const Matrix& A) const + inline Matrix Whiten(const Matrix& A) const override { Vector b; Matrix B=A; this->WhitenSystem(B,b); return B; } - inline virtual Vector unwhiten(const Vector& /*v*/) const + inline Vector unwhiten(const Vector& /*v*/) const override { throw std::invalid_argument("unwhiten is not currently supported for robust noise models."); } - // Fold the use of the m-estimator residual(...) function into distance(...) - inline virtual double distance(const Vector& v) const - { return robust_->residual(this->unweightedWhiten(v).norm()); } - inline virtual double distance_non_whitened(const Vector& v) const - { return robust_->residual(v.norm()); } + + double loss(const double squared_distance) const override { + return robust_->loss(std::sqrt(squared_distance)); + } + // TODO: these are really robust iterated re-weighting support functions virtual void WhitenSystem(Vector& b) const; - virtual void WhitenSystem(std::vector& A, Vector& b) const; - virtual void WhitenSystem(Matrix& A, Vector& b) const; - virtual void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const; - virtual void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const; + void WhitenSystem(std::vector& A, Vector& b) const override; + void WhitenSystem(Matrix& A, Vector& b) const override; + void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const override; + void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const override; - virtual Vector unweightedWhiten(const Vector& v) const { + Vector unweightedWhiten(const Vector& v) const override { return noise_->unweightedWhiten(v); } - virtual double weight(const Vector& v) const { + double weight(const Vector& v) const override { // Todo(mikebosse): make the robust weight function input a vector. 
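For illustration (not part of the patch): the Robust wrapper now exposes its m-estimator through loss(d^2) = rho(sqrt(d^2)) and weight(v) = w(||v||). A hedged sketch composing a Huber estimator with an isotropic base model (sigma = 0.5 and k = 1.345 are assumed values):

    #include <gtsam/linear/NoiseModel.h>
    #include <iostream>

    int main() {
      using namespace gtsam;
      auto gaussian = noiseModel::Isotropic::Sigma(2, 0.5);
      auto huber    = noiseModel::mEstimator::Huber::Create(1.345);
      auto robust   = noiseModel::Robust::Create(huber, gaussian);

      const Vector2 e(3.0, 4.0);                                   // unwhitened error, norm 5
      const double d2 = gaussian->squaredMahalanobisDistance(e);   // (6^2 + 8^2) = 100
      std::cout << robust->loss(d2) << "\n";                       // Huber loss at distance 10: ~12.55
      std::cout << robust->weight(gaussian->whiten(e)) << "\n";    // k / 10 ~= 0.134
      return 0;
    }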
return robust_->weight(v.norm()); } diff --git a/gtsam/linear/PCGSolver.cpp b/gtsam/linear/PCGSolver.cpp index 08307c5ab..a7af7d8d8 100644 --- a/gtsam/linear/PCGSolver.cpp +++ b/gtsam/linear/PCGSolver.cpp @@ -45,6 +45,17 @@ PCGSolver::PCGSolver(const PCGSolverParameters &p) { preconditioner_ = createPreconditioner(p.preconditioner_); } +void PCGSolverParameters::setPreconditionerParams(const boost::shared_ptr preconditioner) { + preconditioner_ = preconditioner; +} + +void PCGSolverParameters::print(const std::string &s) const { + std::cout << s << std::endl;; + std::ostringstream os; + print(os); + std::cout << os.str() << std::endl; +} + /*****************************************************************************/ VectorValues PCGSolver::optimize(const GaussianFactorGraph &gfg, const KeyInfo &keyInfo, const std::map &lambda, diff --git a/gtsam/linear/PCGSolver.h b/gtsam/linear/PCGSolver.h index f5b278ae5..515f2eed2 100644 --- a/gtsam/linear/PCGSolver.h +++ b/gtsam/linear/PCGSolver.h @@ -41,14 +41,19 @@ struct GTSAM_EXPORT PCGSolverParameters: public ConjugateGradientParameters { PCGSolverParameters() { } - virtual void print(std::ostream &os) const; + void print(std::ostream &os) const override; /* interface to preconditioner parameters */ inline const PreconditionerParameters& preconditioner() const { return *preconditioner_; } + // needed for python wrapper + void print(const std::string &s) const; + boost::shared_ptr preconditioner_; + + void setPreconditionerParams(const boost::shared_ptr preconditioner); }; /** @@ -72,9 +77,9 @@ class GTSAM_EXPORT PCGSolver: public IterativeSolver { using IterativeSolver::optimize; - virtual VectorValues optimize(const GaussianFactorGraph &gfg, + VectorValues optimize(const GaussianFactorGraph &gfg, const KeyInfo &keyInfo, const std::map &lambda, - const VectorValues &initial); + const VectorValues &initial) override; }; diff --git a/gtsam/linear/Preconditioner.cpp b/gtsam/linear/Preconditioner.cpp index 1c602a963..24a42afb8 100644 --- a/gtsam/linear/Preconditioner.cpp +++ b/gtsam/linear/Preconditioner.cpp @@ -145,7 +145,7 @@ void BlockJacobiPreconditioner::build( /* getting the block diagonals over the factors */ std::map hessianMap =gfg.hessianBlockDiagonal(); - for ( const Matrix hessian: hessianMap | boost::adaptors::map_values) + for (const Matrix& hessian: hessianMap | boost::adaptors::map_values) blocks.push_back(hessian); /* if necessary, allocating the memory for cacheing the factorization results */ diff --git a/gtsam/linear/Preconditioner.h b/gtsam/linear/Preconditioner.h index 31901db3f..eceb19982 100644 --- a/gtsam/linear/Preconditioner.h +++ b/gtsam/linear/Preconditioner.h @@ -111,13 +111,13 @@ class GTSAM_EXPORT DummyPreconditioner : public Preconditioner { virtual ~DummyPreconditioner() {} /* Computation Interfaces for raw vector */ - virtual void solve(const Vector& y, Vector &x) const { x = y; } - virtual void transposeSolve(const Vector& y, Vector& x) const { x = y; } - virtual void build( + void solve(const Vector& y, Vector &x) const override { x = y; } + void transposeSolve(const Vector& y, Vector& x) const override { x = y; } + void build( const GaussianFactorGraph &gfg, const KeyInfo &info, const std::map &lambda - ) {} + ) override {} }; /*******************************************************************************************/ @@ -135,13 +135,13 @@ class GTSAM_EXPORT BlockJacobiPreconditioner : public Preconditioner { virtual ~BlockJacobiPreconditioner() ; /* Computation Interfaces for raw vector */ - virtual void 
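For illustration (not part of the patch): the setter and string print overload added to PCGSolverParameters above target the wrappers; from C++ the parameters would be wired up roughly as below, assuming the existing BlockJacobiPreconditionerParameters type from gtsam/linear/Preconditioner.h:

    #include <gtsam/linear/PCGSolver.h>
    #include <gtsam/linear/Preconditioner.h>
    #include <boost/make_shared.hpp>

    int main() {
      auto params = boost::make_shared<gtsam::PCGSolverParameters>();
      // Select a block-Jacobi preconditioner via the new setter
      params->setPreconditionerParams(
          boost::make_shared<gtsam::BlockJacobiPreconditionerParameters>());
      params->print("PCG solver parameters");   // string overload added for the wrapper
      return 0;
    }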
solve(const Vector& y, Vector &x) const; - virtual void transposeSolve(const Vector& y, Vector& x) const ; - virtual void build( + void solve(const Vector& y, Vector &x) const override; + void transposeSolve(const Vector& y, Vector& x) const override; + void build( const GaussianFactorGraph &gfg, const KeyInfo &info, const std::map &lambda - ) ; + ) override; protected: diff --git a/gtsam/linear/RegularHessianFactor.h b/gtsam/linear/RegularHessianFactor.h index a24acfacd..707f519ca 100644 --- a/gtsam/linear/RegularHessianFactor.h +++ b/gtsam/linear/RegularHessianFactor.h @@ -109,8 +109,8 @@ class RegularHessianFactor: public HessianFactor { public: /** y += alpha * A'*A*x */ - virtual void multiplyHessianAdd(double alpha, const VectorValues& x, - VectorValues& y) const { + void multiplyHessianAdd(double alpha, const VectorValues& x, + VectorValues& y) const override { HessianFactor::multiplyHessianAdd(alpha, x, y); } @@ -182,7 +182,7 @@ class RegularHessianFactor: public HessianFactor { } /** Return the diagonal of the Hessian for this factor (raw memory version) */ - virtual void hessianDiagonal(double* d) const { + void hessianDiagonal(double* d) const override { // Loop over all variables in the factor for (DenseIndex pos = 0; pos < (DenseIndex) size(); ++pos) { @@ -193,7 +193,7 @@ class RegularHessianFactor: public HessianFactor { } /// Add gradient at zero to d TODO: is it really the goal to add ?? - virtual void gradientAtZero(double* d) const { + void gradientAtZero(double* d) const override { // Loop over all variables in the factor for (DenseIndex pos = 0; pos < (DenseIndex) size(); ++pos) { diff --git a/gtsam/linear/RegularJacobianFactor.h b/gtsam/linear/RegularJacobianFactor.h index 0312efe78..1c465da03 100644 --- a/gtsam/linear/RegularJacobianFactor.h +++ b/gtsam/linear/RegularJacobianFactor.h @@ -70,8 +70,8 @@ class RegularJacobianFactor: public JacobianFactor { using JacobianFactor::multiplyHessianAdd; /** y += alpha * A'*A*x */ - virtual void multiplyHessianAdd(double alpha, const VectorValues& x, - VectorValues& y) const { + void multiplyHessianAdd(double alpha, const VectorValues& x, + VectorValues& y) const override { JacobianFactor::multiplyHessianAdd(alpha, x, y); } @@ -106,7 +106,7 @@ class RegularJacobianFactor: public JacobianFactor { using GaussianFactor::hessianDiagonal; /// Raw memory access version of hessianDiagonal - void hessianDiagonal(double* d) const { + void hessianDiagonal(double* d) const override { // Loop over all variables in the factor for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) { // Get the diagonal block, and insert its diagonal @@ -125,12 +125,12 @@ class RegularJacobianFactor: public JacobianFactor { } /// Expose base class gradientAtZero - virtual VectorValues gradientAtZero() const { + VectorValues gradientAtZero() const override { return JacobianFactor::gradientAtZero(); } /// Raw memory access version of gradientAtZero - void gradientAtZero(double* d) const { + void gradientAtZero(double* d) const override { // Get vector b not weighted Vector b = getb(); diff --git a/gtsam/linear/Sampler.cpp b/gtsam/linear/Sampler.cpp index 16c7e73e0..4957dfa14 100644 --- a/gtsam/linear/Sampler.cpp +++ b/gtsam/linear/Sampler.cpp @@ -54,17 +54,6 @@ Vector Sampler::sample() const { return sampleDiagonal(sigmas); } -/* ************************************************************************* */ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -Sampler::Sampler(uint_fast64_t seed) : generator_(seed) {} - -Vector Sampler::sampleNewModel( - const 
noiseModel::Diagonal::shared_ptr& model) const { - assert(model.get()); - const Vector& sigmas = model->sigmas(); - return sampleDiagonal(sigmas); -} -#endif /* ************************************************************************* */ } // namespace gtsam diff --git a/gtsam/linear/Sampler.h b/gtsam/linear/Sampler.h index 54c240a2b..bb5098f34 100644 --- a/gtsam/linear/Sampler.h +++ b/gtsam/linear/Sampler.h @@ -84,14 +84,6 @@ class GTSAM_EXPORT Sampler { /// @} -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - explicit Sampler(uint_fast64_t seed = 42u); - Vector sampleNewModel(const noiseModel::Diagonal::shared_ptr& model) const; - /// @} -#endif - protected: /** given sigmas for a diagonal model, returns a sample */ Vector sampleDiagonal(const Vector& sigmas) const; diff --git a/gtsam/linear/SubgraphBuilder.cpp b/gtsam/linear/SubgraphBuilder.cpp index c6b3ca15f..1919d38be 100644 --- a/gtsam/linear/SubgraphBuilder.cpp +++ b/gtsam/linear/SubgraphBuilder.cpp @@ -383,7 +383,7 @@ Subgraph SubgraphBuilder::operator()(const GaussianFactorGraph &gfg) const { const vector tree = buildTree(gfg, forward_ordering, weights); if (tree.size() != n - 1) { throw std::runtime_error( - "SubgraphBuilder::operator() failure: tree.size() != n-1"); + "SubgraphBuilder::operator() failure: tree.size() != n-1, might be caused by disconnected graph"); } // Downweight the tree edges to zero. diff --git a/gtsam/linear/SubgraphSolver.cpp b/gtsam/linear/SubgraphSolver.cpp index 56b843e8d..f49f9a135 100644 --- a/gtsam/linear/SubgraphSolver.cpp +++ b/gtsam/linear/SubgraphSolver.cpp @@ -65,23 +65,6 @@ SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &Ab1, : SubgraphSolver(Ab1.eliminateSequential(ordering, EliminateQR), Ab2, parameters) {} -/**************************************************************************************************/ -// deprecated variants -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -SubgraphSolver::SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1, - const GaussianFactorGraph &Ab2, - const Parameters ¶meters) - : SubgraphSolver(Rc1, boost::make_shared(Ab2), - parameters) {} - -SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &Ab1, - const GaussianFactorGraph &Ab2, - const Parameters ¶meters, - const Ordering &ordering) - : SubgraphSolver(Ab1, boost::make_shared(Ab2), - parameters, ordering) {} -#endif - /**************************************************************************************************/ VectorValues SubgraphSolver::optimize() const { VectorValues ybar = conjugateGradients &A, - const Parameters ¶meters, const Ordering &ordering) - : SubgraphSolver(*A, parameters, ordering) {} - SubgraphSolver(const GaussianFactorGraph &, const GaussianFactorGraph &, - const Parameters &, const Ordering &); - SubgraphSolver(const boost::shared_ptr &Ab1, - const boost::shared_ptr &Ab2, - const Parameters ¶meters, const Ordering &ordering) - : SubgraphSolver(*Ab1, Ab2, parameters, ordering) {} - SubgraphSolver(const boost::shared_ptr &, - const GaussianFactorGraph &, const Parameters &); - /// @} -#endif }; } // namespace gtsam diff --git a/gtsam/linear/VectorValues.cpp b/gtsam/linear/VectorValues.cpp index 0a25617e4..746275847 100644 --- a/gtsam/linear/VectorValues.cpp +++ b/gtsam/linear/VectorValues.cpp @@ -45,7 +45,7 @@ namespace gtsam { /* ************************************************************************* */ VectorValues::VectorValues(const Vector& x, const Dims& dims) { - typedef pair Pair; + using Pair = pair; size_t j = 0; for (const Pair& v : 
dims) { Key key; @@ -161,7 +161,7 @@ namespace gtsam { bool VectorValues::equals(const VectorValues& x, double tol) const { if(this->size() != x.size()) return false; - for(const auto& values: boost::combine(*this, x)) { + for(const auto values: boost::combine(*this, x)) { if(values.get<0>().first != values.get<1>().first || !equal_with_abs_tol(values.get<0>().second, values.get<1>().second, tol)) return false; @@ -233,7 +233,7 @@ namespace gtsam { double result = 0.0; typedef boost::tuple ValuePair; using boost::adaptors::map_values; - for(const ValuePair& values: boost::combine(*this, v)) { + for(const ValuePair values: boost::combine(*this, v)) { assert_throw(values.get<0>().first == values.get<1>().first, invalid_argument("VectorValues::dot called with a VectorValues of different structure")); assert_throw(values.get<0>().second.size() == values.get<1>().second.size(), diff --git a/gtsam/linear/linearExceptions.cpp b/gtsam/linear/linearExceptions.cpp index a4168af2d..ada3a298c 100644 --- a/gtsam/linear/linearExceptions.cpp +++ b/gtsam/linear/linearExceptions.cpp @@ -24,7 +24,7 @@ namespace gtsam { /* ************************************************************************* */ - const char* IndeterminantLinearSystemException::what() const throw() + const char* IndeterminantLinearSystemException::what() const noexcept { if(!description_) { description_ = String( @@ -43,7 +43,7 @@ more information."); } /* ************************************************************************* */ - const char* InvalidNoiseModel::what() const throw() { + const char* InvalidNoiseModel::what() const noexcept { if(description_.empty()) description_ = (boost::format( "A JacobianFactor was attempted to be constructed or modified to use a\n" @@ -54,7 +54,7 @@ more information."); } /* ************************************************************************* */ - const char* InvalidMatrixBlock::what() const throw() { + const char* InvalidMatrixBlock::what() const noexcept { if(description_.empty()) description_ = (boost::format( "A JacobianFactor was attempted to be constructed with a matrix block of\n" diff --git a/gtsam/linear/linearExceptions.h b/gtsam/linear/linearExceptions.h index 32db27fc9..f0ad0be39 100644 --- a/gtsam/linear/linearExceptions.h +++ b/gtsam/linear/linearExceptions.h @@ -94,10 +94,10 @@ namespace gtsam { class GTSAM_EXPORT IndeterminantLinearSystemException : public ThreadsafeException { Key j_; public: - IndeterminantLinearSystemException(Key j) throw() : j_(j) {} - virtual ~IndeterminantLinearSystemException() throw() {} + IndeterminantLinearSystemException(Key j) noexcept : j_(j) {} + virtual ~IndeterminantLinearSystemException() noexcept {} Key nearbyVariable() const { return j_; } - virtual const char* what() const throw(); + const char* what() const noexcept override; }; /* ************************************************************************* */ @@ -110,9 +110,9 @@ namespace gtsam { InvalidNoiseModel(DenseIndex factorDims, DenseIndex noiseModelDims) : factorDims(factorDims), noiseModelDims(noiseModelDims) {} - virtual ~InvalidNoiseModel() throw() {} + virtual ~InvalidNoiseModel() noexcept {} - virtual const char* what() const throw(); + const char* what() const noexcept override; private: mutable std::string description_; @@ -128,9 +128,9 @@ namespace gtsam { InvalidMatrixBlock(DenseIndex factorRows, DenseIndex blockRows) : factorRows(factorRows), blockRows(blockRows) {} - virtual ~InvalidMatrixBlock() throw() {} + virtual ~InvalidMatrixBlock() noexcept {} - virtual const char* 
what() const throw(); + const char* what() const noexcept override; private: mutable std::string description_; diff --git a/gtsam/linear/tests/testGaussianBayesNet.cpp b/gtsam/linear/tests/testGaussianBayesNet.cpp index 488368c72..eafefb3cb 100644 --- a/gtsam/linear/tests/testGaussianBayesNet.cpp +++ b/gtsam/linear/tests/testGaussianBayesNet.cpp @@ -137,7 +137,7 @@ TEST( GaussianBayesNet, optimize3 ) } /* ************************************************************************* */ -TEST(GaussianBayesNet, ordering) +TEST(GaussianBayesNet, ordering) { Ordering expected; expected += _x_, _y_; @@ -155,7 +155,7 @@ TEST( GaussianBayesNet, MatrixStress ) bn.emplace_shared(_z_, Vector2(5, 6), 6 * I_2x2); const VectorValues expected = bn.optimize(); - for (const auto keys : + for (const auto& keys : {KeyVector({_x_, _y_, _z_}), KeyVector({_x_, _z_, _y_}), KeyVector({_y_, _x_, _z_}), KeyVector({_y_, _z_, _x_}), KeyVector({_z_, _x_, _y_}), KeyVector({_z_, _y_, _x_})}) { @@ -183,7 +183,7 @@ TEST( GaussianBayesNet, backSubstituteTranspose ) VectorValues actual = smallBayesNet.backSubstituteTranspose(x); EXPECT(assert_equal(expected, actual)); - + const auto ordering = noisyBayesNet.ordering(); const Matrix R = smallBayesNet.matrix(ordering).first; const Vector expected_vector = R.transpose().inverse() * x.vector(ordering); @@ -206,7 +206,7 @@ TEST( GaussianBayesNet, backSubstituteTransposeNoisy ) VectorValues actual = noisyBayesNet.backSubstituteTranspose(x); EXPECT(assert_equal(expected, actual)); - + const auto ordering = noisyBayesNet.ordering(); const Matrix R = noisyBayesNet.matrix(ordering).first; const Vector expected_vector = R.transpose().inverse() * x.vector(ordering); diff --git a/gtsam/linear/tests/testNoiseModel.cpp b/gtsam/linear/tests/testNoiseModel.cpp index 3f6550b9f..42d68a603 100644 --- a/gtsam/linear/tests/testNoiseModel.cpp +++ b/gtsam/linear/tests/testNoiseModel.cpp @@ -182,8 +182,9 @@ TEST(NoiseModel, ConstrainedMixed ) EXPECT(assert_equal(Vector3(0.5, 1.0, 0.5),d->whiten(infeasible))); EXPECT(assert_equal(Vector3(0.5, 0.0, 0.5),d->whiten(feasible))); - DOUBLES_EQUAL(1000.0 + 0.25 + 0.25,d->distance(infeasible),1e-9); - DOUBLES_EQUAL(0.5,d->distance(feasible),1e-9); + DOUBLES_EQUAL(0.5 * (1000.0 + 0.25 + 0.25),d->loss(d->squaredMahalanobisDistance(infeasible)),1e-9); + DOUBLES_EQUAL(0.5, d->squaredMahalanobisDistance(feasible),1e-9); + DOUBLES_EQUAL(0.5 * 0.5, d->loss(0.5),1e-9); } /* ************************************************************************* */ @@ -197,8 +198,9 @@ TEST(NoiseModel, ConstrainedAll ) EXPECT(assert_equal(Vector3(1.0, 1.0, 1.0),i->whiten(infeasible))); EXPECT(assert_equal(Vector3(0.0, 0.0, 0.0),i->whiten(feasible))); - DOUBLES_EQUAL(1000.0 * 3.0,i->distance(infeasible),1e-9); - DOUBLES_EQUAL(0.0,i->distance(feasible),1e-9); + DOUBLES_EQUAL(0.5 * 1000.0 * 3.0,i->loss(i->squaredMahalanobisDistance(infeasible)),1e-9); + DOUBLES_EQUAL(0.0, i->squaredMahalanobisDistance(feasible), 1e-9); + DOUBLES_EQUAL(0.0, i->loss(0.0),1e-9); } /* ************************************************************************* */ @@ -451,7 +453,7 @@ TEST(NoiseModel, WhitenInPlace) /* * These tests are responsible for testing the weight functions for the m-estimators in GTSAM. - * The weight function is related to the analytic derivative of the residual function. See + * The weight function is related to the analytic derivative of the loss function. See * https://members.loria.fr/MOBerger/Enseignement/Master2/Documents/ZhangIVC-97-01.pdf * for details. 
This weight function is required when optimizing cost functions with robust * penalties using iteratively re-weighted least squares. @@ -467,10 +469,10 @@ TEST(NoiseModel, robustFunctionFair) DOUBLES_EQUAL(0.3333333333333333, fair->weight(error3), 1e-8); DOUBLES_EQUAL(0.8333333333333333, fair->weight(error4), 1e-8); - DOUBLES_EQUAL(0.441961080151135, fair->residual(error1), 1e-8); - DOUBLES_EQUAL(22.534692783297260, fair->residual(error2), 1e-8); - DOUBLES_EQUAL(22.534692783297260, fair->residual(error3), 1e-8); - DOUBLES_EQUAL(0.441961080151135, fair->residual(error4), 1e-8); + DOUBLES_EQUAL(0.441961080151135, fair->loss(error1), 1e-8); + DOUBLES_EQUAL(22.534692783297260, fair->loss(error2), 1e-8); + DOUBLES_EQUAL(22.534692783297260, fair->loss(error3), 1e-8); + DOUBLES_EQUAL(0.441961080151135, fair->loss(error4), 1e-8); } TEST(NoiseModel, robustFunctionHuber) @@ -483,10 +485,10 @@ TEST(NoiseModel, robustFunctionHuber) DOUBLES_EQUAL(0.5, huber->weight(error3), 1e-8); DOUBLES_EQUAL(1.0, huber->weight(error4), 1e-8); - DOUBLES_EQUAL(0.5000, huber->residual(error1), 1e-8); - DOUBLES_EQUAL(37.5000, huber->residual(error2), 1e-8); - DOUBLES_EQUAL(37.5000, huber->residual(error3), 1e-8); - DOUBLES_EQUAL(0.5000, huber->residual(error4), 1e-8); + DOUBLES_EQUAL(0.5000, huber->loss(error1), 1e-8); + DOUBLES_EQUAL(37.5000, huber->loss(error2), 1e-8); + DOUBLES_EQUAL(37.5000, huber->loss(error3), 1e-8); + DOUBLES_EQUAL(0.5000, huber->loss(error4), 1e-8); } TEST(NoiseModel, robustFunctionCauchy) @@ -499,10 +501,10 @@ TEST(NoiseModel, robustFunctionCauchy) DOUBLES_EQUAL(0.2000, cauchy->weight(error3), 1e-8); DOUBLES_EQUAL(0.961538461538461, cauchy->weight(error4), 1e-8); - DOUBLES_EQUAL(0.490258914416017, cauchy->residual(error1), 1e-8); - DOUBLES_EQUAL(20.117973905426254, cauchy->residual(error2), 1e-8); - DOUBLES_EQUAL(20.117973905426254, cauchy->residual(error3), 1e-8); - DOUBLES_EQUAL(0.490258914416017, cauchy->residual(error4), 1e-8); + DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error1), 1e-8); + DOUBLES_EQUAL(20.117973905426254, cauchy->loss(error2), 1e-8); + DOUBLES_EQUAL(20.117973905426254, cauchy->loss(error3), 1e-8); + DOUBLES_EQUAL(0.490258914416017, cauchy->loss(error4), 1e-8); } TEST(NoiseModel, robustFunctionGemanMcClure) @@ -514,10 +516,10 @@ TEST(NoiseModel, robustFunctionGemanMcClure) DOUBLES_EQUAL(9.80296e-5, gmc->weight(error3), 1e-8); DOUBLES_EQUAL(0.25 , gmc->weight(error4), 1e-8); - DOUBLES_EQUAL(0.2500, gmc->residual(error1), 1e-8); - DOUBLES_EQUAL(0.495049504950495, gmc->residual(error2), 1e-8); - DOUBLES_EQUAL(0.495049504950495, gmc->residual(error3), 1e-8); - DOUBLES_EQUAL(0.2500, gmc->residual(error4), 1e-8); + DOUBLES_EQUAL(0.2500, gmc->loss(error1), 1e-8); + DOUBLES_EQUAL(0.495049504950495, gmc->loss(error2), 1e-8); + DOUBLES_EQUAL(0.495049504950495, gmc->loss(error3), 1e-8); + DOUBLES_EQUAL(0.2500, gmc->loss(error4), 1e-8); } TEST(NoiseModel, robustFunctionWelsch) @@ -530,10 +532,10 @@ TEST(NoiseModel, robustFunctionWelsch) DOUBLES_EQUAL(0.018315638888734, welsch->weight(error3), 1e-8); DOUBLES_EQUAL(0.960789439152323, welsch->weight(error4), 1e-8); - DOUBLES_EQUAL(0.490132010595960, welsch->residual(error1), 1e-8); - DOUBLES_EQUAL(12.271054513890823, welsch->residual(error2), 1e-8); - DOUBLES_EQUAL(12.271054513890823, welsch->residual(error3), 1e-8); - DOUBLES_EQUAL(0.490132010595960, welsch->residual(error4), 1e-8); + DOUBLES_EQUAL(0.490132010595960, welsch->loss(error1), 1e-8); + DOUBLES_EQUAL(12.271054513890823, welsch->loss(error2), 1e-8); + 
DOUBLES_EQUAL(12.271054513890823, welsch->loss(error3), 1e-8); + DOUBLES_EQUAL(0.490132010595960, welsch->loss(error4), 1e-8); } TEST(NoiseModel, robustFunctionTukey) @@ -546,10 +548,10 @@ TEST(NoiseModel, robustFunctionTukey) DOUBLES_EQUAL(0.0, tukey->weight(error3), 1e-8); DOUBLES_EQUAL(0.9216, tukey->weight(error4), 1e-8); - DOUBLES_EQUAL(0.480266666666667, tukey->residual(error1), 1e-8); - DOUBLES_EQUAL(4.166666666666667, tukey->residual(error2), 1e-8); - DOUBLES_EQUAL(4.166666666666667, tukey->residual(error3), 1e-8); - DOUBLES_EQUAL(0.480266666666667, tukey->residual(error4), 1e-8); + DOUBLES_EQUAL(0.480266666666667, tukey->loss(error1), 1e-8); + DOUBLES_EQUAL(4.166666666666667, tukey->loss(error2), 1e-8); + DOUBLES_EQUAL(4.166666666666667, tukey->loss(error3), 1e-8); + DOUBLES_EQUAL(0.480266666666667, tukey->loss(error4), 1e-8); } TEST(NoiseModel, robustFunctionDCS) @@ -560,8 +562,8 @@ TEST(NoiseModel, robustFunctionDCS) DOUBLES_EQUAL(1.0 , dcs->weight(error1), 1e-8); DOUBLES_EQUAL(0.00039211, dcs->weight(error2), 1e-8); - DOUBLES_EQUAL(0.5 , dcs->residual(error1), 1e-8); - DOUBLES_EQUAL(0.9900990099, dcs->residual(error2), 1e-8); + DOUBLES_EQUAL(0.5 , dcs->loss(error1), 1e-8); + DOUBLES_EQUAL(0.9900990099, dcs->loss(error2), 1e-8); } TEST(NoiseModel, robustFunctionL2WithDeadZone) @@ -576,12 +578,12 @@ TEST(NoiseModel, robustFunctionL2WithDeadZone) DOUBLES_EQUAL(0.00990099009, lsdz->weight(e4), 1e-8); DOUBLES_EQUAL(0.9, lsdz->weight(e5), 1e-8); - DOUBLES_EQUAL(40.5, lsdz->residual(e0), 1e-8); - DOUBLES_EQUAL(0.00005, lsdz->residual(e1), 1e-8); - DOUBLES_EQUAL(0.0, lsdz->residual(e2), 1e-8); - DOUBLES_EQUAL(0.0, lsdz->residual(e3), 1e-8); - DOUBLES_EQUAL(0.00005, lsdz->residual(e4), 1e-8); - DOUBLES_EQUAL(40.5, lsdz->residual(e5), 1e-8); + DOUBLES_EQUAL(40.5, lsdz->loss(e0), 1e-8); + DOUBLES_EQUAL(0.00005, lsdz->loss(e1), 1e-8); + DOUBLES_EQUAL(0.0, lsdz->loss(e2), 1e-8); + DOUBLES_EQUAL(0.0, lsdz->loss(e3), 1e-8); + DOUBLES_EQUAL(0.00005, lsdz->loss(e4), 1e-8); + DOUBLES_EQUAL(40.5, lsdz->loss(e5), 1e-8); } /* ************************************************************************* */ @@ -665,11 +667,11 @@ TEST(NoiseModel, robustNoiseL2WithDeadZone) /* * TODO(mike): There is currently a bug in GTSAM, where none of the mEstimator classes - * implement a residual function, and GTSAM calls the weight function to evaluate the - * total penalty, rather than calling the residual function. The weight function should be + * implement a loss function, and GTSAM calls the weight function to evaluate the + * total penalty, rather than calling the loss function. The weight function should be * used during iteratively reweighted least squares optimization, but should not be used to * evaluate the total penalty. The long-term solution is for all mEstimators to implement - * both a weight and a residual function, and for GTSAM to call the residual function when + * both a weight and a loss function, and for GTSAM to call the loss function when * evaluating the total penalty. This bug causes the test below to fail, so I'm leaving it * commented out until the underlying bug in GTSAM is fixed. 
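For illustration (not part of the patch): the weight/derivative relationship mentioned in the comment above, w(x) = rho'(x)/x, can be spot-checked numerically against the renamed loss() method, e.g. with the Cauchy estimator and a central finite difference:

    #include <gtsam/linear/LossFunctions.h>
    #include <iostream>

    int main() {
      using gtsam::noiseModel::mEstimator::Cauchy;
      auto cauchy = Cauchy::Create(0.1);
      const double x = 0.05, h = 1e-6;
      // Central difference of the loss gives rho'(x); dividing by x should match weight(x)
      const double dRho = (cauchy->loss(x + h) - cauchy->loss(x - h)) / (2 * h);
      std::cout << "rho'(x)/x = " << dRho / x << "\n";
      std::cout << "weight(x) = " << cauchy->weight(x) << "\n";  // both ~0.8 for k = 0.1
      return 0;
    }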
* @@ -681,13 +683,44 @@ TEST(NoiseModel, robustNoiseL2WithDeadZone) } +TEST(NoiseModel, lossFunctionAtZero) +{ + const double k = 5.0; + auto fair = mEstimator::Fair::Create(k); + DOUBLES_EQUAL(fair->loss(0), 0, 1e-8); + DOUBLES_EQUAL(fair->weight(0), 1, 1e-8); + auto huber = mEstimator::Huber::Create(k); + DOUBLES_EQUAL(huber->loss(0), 0, 1e-8); + DOUBLES_EQUAL(huber->weight(0), 1, 1e-8); + auto cauchy = mEstimator::Cauchy::Create(k); + DOUBLES_EQUAL(cauchy->loss(0), 0, 1e-8); + DOUBLES_EQUAL(cauchy->weight(0), 1, 1e-8); + auto gmc = mEstimator::GemanMcClure::Create(k); + DOUBLES_EQUAL(gmc->loss(0), 0, 1e-8); + DOUBLES_EQUAL(gmc->weight(0), 1, 1e-8); + auto welsch = mEstimator::Welsch::Create(k); + DOUBLES_EQUAL(welsch->loss(0), 0, 1e-8); + DOUBLES_EQUAL(welsch->weight(0), 1, 1e-8); + auto tukey = mEstimator::Tukey::Create(k); + DOUBLES_EQUAL(tukey->loss(0), 0, 1e-8); + DOUBLES_EQUAL(tukey->weight(0), 1, 1e-8); + auto dcs = mEstimator::DCS::Create(k); + DOUBLES_EQUAL(dcs->loss(0), 0, 1e-8); + DOUBLES_EQUAL(dcs->weight(0), 1, 1e-8); + // auto lsdz = mEstimator::L2WithDeadZone::Create(k); + // DOUBLES_EQUAL(lsdz->loss(0), 0, 1e-8); + // DOUBLES_EQUAL(lsdz->weight(0), 1, 1e-8); +} + + /* ************************************************************************* */ #define TEST_GAUSSIAN(gaussian)\ EQUALITY(info, gaussian->information());\ EQUALITY(cov, gaussian->covariance());\ EXPECT(assert_equal(white, gaussian->whiten(e)));\ EXPECT(assert_equal(e, gaussian->unwhiten(white)));\ - EXPECT_DOUBLES_EQUAL(251, gaussian->distance(e), 1e-9);\ + EXPECT_DOUBLES_EQUAL(251.0, gaussian->squaredMahalanobisDistance(e), 1e-9);\ + EXPECT_DOUBLES_EQUAL(0.5 * 251.0, gaussian->loss(251.0), 1e-9);\ Matrix A = R.inverse(); Vector b = e;\ gaussian->WhitenSystem(A, b);\ EXPECT(assert_equal(I, A));\ diff --git a/gtsam/navigation/AHRSFactor.h b/gtsam/navigation/AHRSFactor.h index 1418ab687..34626fcf6 100644 --- a/gtsam/navigation/AHRSFactor.h +++ b/gtsam/navigation/AHRSFactor.h @@ -42,7 +42,7 @@ class GTSAM_EXPORT PreintegratedAhrsMeasurements : public PreintegratedRotation public: - /// Default constructor, only for serialization and Cython wrapper + /// Default constructor, only for serialization and wrappers PreintegratedAhrsMeasurements() {} /** @@ -75,7 +75,7 @@ class GTSAM_EXPORT PreintegratedAhrsMeasurements : public PreintegratedRotation biasHat_(bias_hat), preintMeasCov_(preint_meas_cov) {} - const Params& p() const { return *boost::static_pointer_cast(p_);} + Params& p() const { return *boost::static_pointer_cast(p_);} const Vector3& biasHat() const { return biasHat_; } const Matrix3& preintMeasCov() const { return preintMeasCov_; } @@ -158,14 +158,14 @@ class GTSAM_EXPORT AHRSFactor: public NoiseModelFactor3 { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const; + gtsam::NonlinearFactor::shared_ptr clone() const override; /// print - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const; + void print(const std::string& s, const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override; /// equals - virtual bool equals(const NonlinearFactor&, double tol = 1e-9) const; + bool equals(const NonlinearFactor&, double tol = 1e-9) const override; /// Access the preintegrated measurements. 
const PreintegratedAhrsMeasurements& preintegratedMeasurements() const { @@ -178,7 +178,7 @@ class GTSAM_EXPORT AHRSFactor: public NoiseModelFactor3 { Vector evaluateError(const Rot3& rot_i, const Rot3& rot_j, const Vector3& bias, boost::optional H1 = boost::none, boost::optional H2 = boost::none, boost::optional H3 = - boost::none) const; + boost::none) const override; /// predicted states from IMU /// TODO(frank): relationship with PIM predict ?? diff --git a/gtsam/navigation/AttitudeFactor.h b/gtsam/navigation/AttitudeFactor.h index db588008e..9a0a11cfb 100644 --- a/gtsam/navigation/AttitudeFactor.h +++ b/gtsam/navigation/AttitudeFactor.h @@ -108,21 +108,21 @@ class GTSAM_EXPORT Rot3AttitudeFactor: public NoiseModelFactor1, public At } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /** print */ - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const; + void print(const std::string& s, const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override; /** equals */ - virtual bool equals(const NonlinearFactor& expected, double tol = 1e-9) const; + bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; /** vector of errors */ - virtual Vector evaluateError(const Rot3& nRb, // - boost::optional H = boost::none) const { + Vector evaluateError(const Rot3& nRb, // + boost::optional H = boost::none) const override { return attitudeError(nRb, H); } @@ -139,7 +139,7 @@ class GTSAM_EXPORT Rot3AttitudeFactor: public NoiseModelFactor1, public At } public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; /// traits @@ -182,21 +182,21 @@ class GTSAM_EXPORT Pose3AttitudeFactor: public NoiseModelFactor1, } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /** print */ - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const; + void print(const std::string& s, const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override; /** equals */ - virtual bool equals(const NonlinearFactor& expected, double tol = 1e-9) const; + bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; /** vector of errors */ - virtual Vector evaluateError(const Pose3& nTb, // - boost::optional H = boost::none) const { + Vector evaluateError(const Pose3& nTb, // + boost::optional H = boost::none) const override { Vector e = attitudeError(nTb.rotation(), H); if (H) { Matrix H23 = *H; @@ -219,7 +219,7 @@ class GTSAM_EXPORT Pose3AttitudeFactor: public NoiseModelFactor1, } public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; /// traits diff --git a/gtsam/navigation/CombinedImuFactor.cpp b/gtsam/navigation/CombinedImuFactor.cpp index 149067269..e41a8de44 100644 --- a/gtsam/navigation/CombinedImuFactor.cpp +++ b/gtsam/navigation/CombinedImuFactor.cpp @@ -17,9 +17,11 @@ * @author Vadim Indelman * @author David Jensen * @author Frank Dellaert + * @author Varun Agrawal **/ #include +#include /* External or standard includes */ #include @@ -28,6 +30,31 @@ namespace gtsam { using namespace std; 
+//------------------------------------------------------------------------------ +// Inner class PreintegrationCombinedParams +//------------------------------------------------------------------------------ +void PreintegrationCombinedParams::print(const string& s) const { + PreintegrationParams::print(s); + cout << "biasAccCovariance:\n[\n" << biasAccCovariance << "\n]" + << endl; + cout << "biasOmegaCovariance:\n[\n" << biasOmegaCovariance << "\n]" + << endl; + cout << "biasAccOmegaInt:\n[\n" << biasAccOmegaInt << "\n]" + << endl; +} + +//------------------------------------------------------------------------------ +bool PreintegrationCombinedParams::equals(const PreintegratedRotationParams& other, + double tol) const { + auto e = dynamic_cast(&other); + return e != nullptr && PreintegrationParams::equals(other, tol) && + equal_with_abs_tol(biasAccCovariance, e->biasAccCovariance, + tol) && + equal_with_abs_tol(biasOmegaCovariance, e->biasOmegaCovariance, + tol) && + equal_with_abs_tol(biasAccOmegaInt, e->biasAccOmegaInt, tol); +} + //------------------------------------------------------------------------------ // Inner class PreintegratedCombinedMeasurements //------------------------------------------------------------------------------ @@ -121,29 +148,6 @@ void PreintegratedCombinedMeasurements::integrateMeasurement( preintMeasCov_ = F * preintMeasCov_ * F.transpose() + G_measCov_Gt; } -//------------------------------------------------------------------------------ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -PreintegratedCombinedMeasurements::PreintegratedCombinedMeasurements( - const imuBias::ConstantBias& biasHat, const Matrix3& measuredAccCovariance, - const Matrix3& measuredOmegaCovariance, - const Matrix3& integrationErrorCovariance, const Matrix3& biasAccCovariance, - const Matrix3& biasOmegaCovariance, const Matrix6& biasAccOmegaInt, - const bool use2ndOrderIntegration) { - if (!use2ndOrderIntegration) - throw("PreintegratedImuMeasurements no longer supports first-order integration: it incorrectly compensated for gravity"); - biasHat_ = biasHat; - boost::shared_ptr p = Params::MakeSharedD(); - p->gyroscopeCovariance = measuredOmegaCovariance; - p->accelerometerCovariance = measuredAccCovariance; - p->integrationCovariance = integrationErrorCovariance; - p->biasAccCovariance = biasAccCovariance; - p->biasOmegaCovariance = biasOmegaCovariance; - p->biasAccOmegaInt = biasAccOmegaInt; - p_ = p; - resetIntegration(); - preintMeasCov_.setZero(); -} -#endif //------------------------------------------------------------------------------ // CombinedImuFactor methods //------------------------------------------------------------------------------ @@ -163,10 +167,11 @@ gtsam::NonlinearFactor::shared_ptr CombinedImuFactor::clone() const { //------------------------------------------------------------------------------ void CombinedImuFactor::print(const string& s, const KeyFormatter& keyFormatter) const { - cout << s << "CombinedImuFactor(" << keyFormatter(this->key1()) << "," - << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) << "," - << keyFormatter(this->key4()) << "," << keyFormatter(this->key5()) << "," - << keyFormatter(this->key6()) << ")\n"; + cout << (s == "" ? 
s : s + "\n") << "CombinedImuFactor(" + << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << "," + << keyFormatter(this->key3()) << "," << keyFormatter(this->key4()) << "," + << keyFormatter(this->key5()) << "," << keyFormatter(this->key6()) + << ")\n"; _PIM_.print(" preintegrated measurements:"); this->noiseModel_->print(" noise model: "); } @@ -243,39 +248,14 @@ Vector CombinedImuFactor::evaluateError(const Pose3& pose_i, } //------------------------------------------------------------------------------ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -CombinedImuFactor::CombinedImuFactor( - Key pose_i, Key vel_i, Key pose_j, Key vel_j, Key bias_i, Key bias_j, - const CombinedPreintegratedMeasurements& pim, const Vector3& n_gravity, - const Vector3& omegaCoriolis, const boost::optional& body_P_sensor, - const bool use2ndOrderCoriolis) -: Base(noiseModel::Gaussian::Covariance(pim.preintMeasCov_), pose_i, vel_i, - pose_j, vel_j, bias_i, bias_j), -_PIM_(pim) { - using P = CombinedPreintegratedMeasurements::Params; - auto p = boost::allocate_shared

<P>(Eigen::aligned_allocator<P>

(), pim.p()); - p->n_gravity = n_gravity; - p->omegaCoriolis = omegaCoriolis; - p->body_P_sensor = body_P_sensor; - p->use2ndOrderCoriolis = use2ndOrderCoriolis; - _PIM_.p_ = p; +std::ostream& operator<<(std::ostream& os, const CombinedImuFactor& f) { + f._PIM_.print("combined preintegrated measurements:\n"); + os << " noise model sigmas: " << f.noiseModel_->sigmas().transpose(); + return os; } - -void CombinedImuFactor::Predict(const Pose3& pose_i, const Vector3& vel_i, - Pose3& pose_j, Vector3& vel_j, - const imuBias::ConstantBias& bias_i, - CombinedPreintegratedMeasurements& pim, - const Vector3& n_gravity, - const Vector3& omegaCoriolis, - const bool use2ndOrderCoriolis) { - // use deprecated predict - PoseVelocityBias pvb = pim.predict(pose_i, vel_i, bias_i, n_gravity, - omegaCoriolis, use2ndOrderCoriolis); - pose_j = pvb.pose; - vel_j = pvb.velocity; -} -#endif - } /// namespace gtsam +/// Boost serialization export definition for derived class +BOOST_CLASS_EXPORT_IMPLEMENT(gtsam::PreintegrationCombinedParams); + diff --git a/gtsam/navigation/CombinedImuFactor.h b/gtsam/navigation/CombinedImuFactor.h index ca9b2ca1a..efca25bff 100644 --- a/gtsam/navigation/CombinedImuFactor.h +++ b/gtsam/navigation/CombinedImuFactor.h @@ -17,6 +17,7 @@ * @author Vadim Indelman * @author David Jensen * @author Frank Dellaert + * @author Varun Agrawal **/ #pragma once @@ -26,6 +27,7 @@ #include #include #include +#include namespace gtsam { @@ -61,10 +63,18 @@ struct GTSAM_EXPORT PreintegrationCombinedParams : PreintegrationParams { Matrix3 biasOmegaCovariance; ///< continuous-time "Covariance" describing gyroscope bias random walk Matrix6 biasAccOmegaInt; ///< covariance of bias used for pre-integration + /// Default constructor makes uninitialized params struct. + /// Used for serialization. 
+ PreintegrationCombinedParams() + : biasAccCovariance(I_3x3), + biasOmegaCovariance(I_3x3), + biasAccOmegaInt(I_6x6) {} + /// See two named constructors below for good values of n_gravity in body frame -PreintegrationCombinedParams(const Vector3& n_gravity) : - PreintegrationParams(n_gravity), biasAccCovariance(I_3x3), biasOmegaCovariance( - I_3x3), biasAccOmegaInt(I_6x6) { + PreintegrationCombinedParams(const Vector3& n_gravity) : + PreintegrationParams(n_gravity), biasAccCovariance(I_3x3), + biasOmegaCovariance(I_3x3), biasAccOmegaInt(I_6x6) { + } // Default Params for a Z-down navigation frame, such as NED: gravity points along positive Z-axis @@ -77,6 +87,9 @@ PreintegrationCombinedParams(const Vector3& n_gravity) : return boost::shared_ptr(new PreintegrationCombinedParams(Vector3(0, 0, -g))); } + void print(const std::string& s="") const override; + bool equals(const PreintegratedRotationParams& other, double tol) const override; + void setBiasAccCovariance(const Matrix3& cov) { biasAccCovariance=cov; } void setBiasOmegaCovariance(const Matrix3& cov) { biasOmegaCovariance=cov; } void setBiasAccOmegaInt(const Matrix6& cov) { biasAccOmegaInt=cov; } @@ -86,24 +99,22 @@ PreintegrationCombinedParams(const Vector3& n_gravity) : const Matrix6& getBiasAccOmegaInt() const { return biasAccOmegaInt; } private: - /// Default constructor makes unitialized params struct - PreintegrationCombinedParams() {} /** Serialization function */ friend class boost::serialization::access; template - void serialize(ARCHIVE& ar, const unsigned int /*version*/) { - ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegratedRotation::Params); - ar& BOOST_SERIALIZATION_NVP(biasAccCovariance); - ar& BOOST_SERIALIZATION_NVP(biasOmegaCovariance); - ar& BOOST_SERIALIZATION_NVP(biasAccOmegaInt); + void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + namespace bs = ::boost::serialization; + ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegrationParams); + ar & BOOST_SERIALIZATION_NVP(biasAccCovariance); + ar & BOOST_SERIALIZATION_NVP(biasOmegaCovariance); + ar & BOOST_SERIALIZATION_NVP(biasAccOmegaInt); } public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; - /** * PreintegratedCombinedMeasurements integrates the IMU measurements * (rotation rates and accelerations) and the corresponding covariance matrix. @@ -128,19 +139,21 @@ class GTSAM_EXPORT PreintegratedCombinedMeasurements : public PreintegrationType */ Eigen::Matrix preintMeasCov_; - friend class CombinedImuFactor; public: /// @name Constructors /// @{ - /// Default constructor only for serialization and Cython wrapper - PreintegratedCombinedMeasurements() {} + /// Default constructor only for serialization and wrappers + PreintegratedCombinedMeasurements() { + preintMeasCov_.setZero(); + } /** * Default constructor, initializes the class with no measurements - * @param bias Current estimate of acceleration and rotation rate biases + * @param p Parameters, typically fixed in a single application + * @param biasHat Current estimate of acceleration and rotation rate biases */ PreintegratedCombinedMeasurements( const boost::shared_ptr& p, @@ -149,6 +162,19 @@ class GTSAM_EXPORT PreintegratedCombinedMeasurements : public PreintegrationType preintMeasCov_.setZero(); } + /** + * Construct preintegrated directly from members: base class and preintMeasCov + * @param base PreintegrationType instance + * @param preintMeasCov Covariance matrix used in noise model. 
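For illustration (not part of the patch): a hedged usage sketch of the combined preintegration types touched here; the parameter values are arbitrary, and MakeSharedU, the covariance members, and integrateMeasurement are assumed to be the existing GTSAM entry points:

    #include <gtsam/navigation/CombinedImuFactor.h>

    int main() {
      using namespace gtsam;
      // Z-up navigation frame (e.g. ENU), standard gravity
      auto p = PreintegrationCombinedParams::MakeSharedU(9.81);
      p->accelerometerCovariance = I_3x3 * 1e-3;
      p->gyroscopeCovariance     = I_3x3 * 1e-5;
      p->integrationCovariance   = I_3x3 * 1e-8;

      PreintegratedCombinedMeasurements pim(p, imuBias::ConstantBias());
      // Integrate one IMU sample: specific force, angular velocity, dt
      pim.integrateMeasurement(Vector3(0.1, 0.0, 9.81), Vector3(0.0, 0.0, 0.05), 0.01);
      pim.print("combined PIM: ");
      return 0;
    }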
+ */ + PreintegratedCombinedMeasurements(const PreintegrationType& base, const Eigen::Matrix& preintMeasCov) + : PreintegrationType(base), + preintMeasCov_(preintMeasCov) { + } + + /// Virtual destructor + virtual ~PreintegratedCombinedMeasurements() {} + /// @} /// @name Basic utilities @@ -158,20 +184,25 @@ class GTSAM_EXPORT PreintegratedCombinedMeasurements : public PreintegrationType void resetIntegration() override; /// const reference to params, shadows definition in base class - Params& p() const { return *boost::static_pointer_cast(this->p_);} + Params& p() const { return *boost::static_pointer_cast(this->p_); } /// @} /// @name Access instance variables /// @{ + /// Return pre-integrated measurement covariance Matrix preintMeasCov() const { return preintMeasCov_; } /// @} /// @name Testable /// @{ + /// print void print(const std::string& s = "Preintegrated Measurements:") const override; - bool equals(const PreintegratedCombinedMeasurements& expected, double tol = 1e-9) const; + /// equals + bool equals(const PreintegratedCombinedMeasurements& expected, + double tol = 1e-9) const; /// @} + /// @name Main functionality /// @{ @@ -189,28 +220,18 @@ class GTSAM_EXPORT PreintegratedCombinedMeasurements : public PreintegrationType /// @} -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// deprecated constructor - /// NOTE(frank): assumes Z-Down convention, only second order integration supported - PreintegratedCombinedMeasurements(const imuBias::ConstantBias& biasHat, - const Matrix3& measuredAccCovariance, - const Matrix3& measuredOmegaCovariance, - const Matrix3& integrationErrorCovariance, - const Matrix3& biasAccCovariance, const Matrix3& biasOmegaCovariance, - const Matrix6& biasAccOmegaInt, const bool use2ndOrderIntegration = true); -#endif - private: /// Serialization function friend class boost::serialization::access; template void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + namespace bs = ::boost::serialization; ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegrationType); ar& BOOST_SERIALIZATION_NVP(preintMeasCov_); } public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; /** @@ -244,9 +265,6 @@ class GTSAM_EXPORT CombinedImuFactor: public NoiseModelFactor6 shared_ptr; #endif + /** Default constructor - only use for serialization */ + CombinedImuFactor() {} + /** * Constructor * @param pose_i Previous pose key @@ -273,16 +294,21 @@ class GTSAM_EXPORT CombinedImuFactor: public NoiseModelFactor6 H1 = boost::none, boost::optional H2 = boost::none, boost::optional H3 = boost::none, boost::optional H4 = boost::none, boost::optional H5 = - boost::none, boost::optional H6 = boost::none) const; - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @deprecated typename - typedef gtsam::PreintegratedCombinedMeasurements CombinedPreintegratedMeasurements; - - /// @deprecated constructor - CombinedImuFactor(Key pose_i, Key vel_i, Key pose_j, Key vel_j, Key bias_i, - Key bias_j, const CombinedPreintegratedMeasurements& pim, - const Vector3& n_gravity, const Vector3& omegaCoriolis, - const boost::optional& body_P_sensor = boost::none, - const bool use2ndOrderCoriolis = false); - - // @deprecated use PreintegrationBase::predict - static void Predict(const Pose3& pose_i, const Vector3& vel_i, Pose3& pose_j, - Vector3& vel_j, const imuBias::ConstantBias& bias_i, - CombinedPreintegratedMeasurements& pim, - const Vector3& n_gravity, const Vector3& omegaCoriolis, - const bool use2ndOrderCoriolis = false); -#endif - -private: + boost::none, boost::optional H6 = 
boost::none) const override; + private: /** Serialization function */ friend class boost::serialization::access; - template - void serialize(ARCHIVE & ar, const unsigned int /*version*/) { - ar & boost::serialization::make_nvp("NoiseModelFactor6", - boost::serialization::base_object(*this)); - ar & BOOST_SERIALIZATION_NVP(_PIM_); + template + void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(NoiseModelFactor6); + ar& BOOST_SERIALIZATION_NVP(_PIM_); } public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; // class CombinedImuFactor -} /// namespace gtsam +template <> +struct traits + : public Testable {}; + +template <> +struct traits + : public Testable {}; + +template <> +struct traits : public Testable {}; + +} // namespace gtsam + +/// Add Boost serialization export key (declaration) for derived class +BOOST_CLASS_EXPORT_KEY(gtsam::PreintegrationCombinedParams); diff --git a/gtsam/navigation/GPSFactor.h b/gtsam/navigation/GPSFactor.h index 2b5ea1f2f..e6d5b88a9 100644 --- a/gtsam/navigation/GPSFactor.h +++ b/gtsam/navigation/GPSFactor.h @@ -65,21 +65,21 @@ class GTSAM_EXPORT GPSFactor: public NoiseModelFactor1 { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /// print - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const; + void print(const std::string& s, const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override; /// equals - virtual bool equals(const NonlinearFactor& expected, double tol = 1e-9) const; + bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; /// vector of errors Vector evaluateError(const Pose3& p, - boost::optional H = boost::none) const; + boost::optional H = boost::none) const override; inline const Point3 & measurementIn() const { return nT_; @@ -137,21 +137,21 @@ class GTSAM_EXPORT GPSFactor2: public NoiseModelFactor1 { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /// print - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const; + void print(const std::string& s, const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override; /// equals - virtual bool equals(const NonlinearFactor& expected, double tol = 1e-9) const; + bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; /// vector of errors Vector evaluateError(const NavState& p, - boost::optional H = boost::none) const; + boost::optional H = boost::none) const override; inline const Point3 & measurementIn() const { return nT_; diff --git a/gtsam/navigation/ImuBias.h b/gtsam/navigation/ImuBias.h index ff1a53025..fad952232 100644 --- a/gtsam/navigation/ImuBias.h +++ b/gtsam/navigation/ImuBias.h @@ -29,8 +29,8 @@ namespace imuBias { class GTSAM_EXPORT ConstantBias { private: - Vector3 biasAcc_; - Vector3 biasGyro_; + Vector3 biasAcc_; ///< The units for stddev are σ = m/s² or m √Hz/s² + Vector3 biasGyro_; ///< The units for stddev are σ = rad/s or rad √Hz/s public: /// dimension of the variable - used to autodetect sizes @@ -171,7 +171,7 @@ class 
GTSAM_EXPORT ConstantBias { public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW /// @} }; // ConstantBias class diff --git a/gtsam/navigation/ImuFactor.cpp b/gtsam/navigation/ImuFactor.cpp index 582d96acb..cebddf05d 100644 --- a/gtsam/navigation/ImuFactor.cpp +++ b/gtsam/navigation/ImuFactor.cpp @@ -106,32 +106,6 @@ void PreintegratedImuMeasurements::mergeWith(const PreintegratedImuMeasurements& preintMeasCov_ = P + *H2 * pim12.preintMeasCov_ * H2->transpose(); } #endif -//------------------------------------------------------------------------------ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -PreintegratedImuMeasurements::PreintegratedImuMeasurements( - const imuBias::ConstantBias& biasHat, const Matrix3& measuredAccCovariance, - const Matrix3& measuredOmegaCovariance, - const Matrix3& integrationErrorCovariance, bool use2ndOrderIntegration) { - if (!use2ndOrderIntegration) - throw("PreintegratedImuMeasurements no longer supports first-order integration: it incorrectly compensated for gravity"); - biasHat_ = biasHat; - boost::shared_ptr p = Params::MakeSharedD(); - p->gyroscopeCovariance = measuredOmegaCovariance; - p->accelerometerCovariance = measuredAccCovariance; - p->integrationCovariance = integrationErrorCovariance; - p_ = p; - resetIntegration(); -} - -void PreintegratedImuMeasurements::integrateMeasurement( - const Vector3& measuredAcc, const Vector3& measuredOmega, double deltaT, - boost::optional body_P_sensor) { - // modify parameters to accommodate deprecated method:-( - p_->body_P_sensor = body_P_sensor; - integrateMeasurement(measuredAcc, measuredOmega, deltaT); -} -#endif - //------------------------------------------------------------------------------ // ImuFactor methods //------------------------------------------------------------------------------ @@ -156,10 +130,10 @@ std::ostream& operator<<(std::ostream& os, const ImuFactor& f) { //------------------------------------------------------------------------------ void ImuFactor::print(const string& s, const KeyFormatter& keyFormatter) const { - cout << s << "ImuFactor(" << keyFormatter(this->key1()) << "," - << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) << "," - << keyFormatter(this->key4()) << "," << keyFormatter(this->key5()) - << ")\n"; + cout << (s == "" ? 
s : s + "\n") << "ImuFactor(" << keyFormatter(this->key1()) + << "," << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) + << "," << keyFormatter(this->key4()) << "," << keyFormatter(this->key5()) + << ")\n"; cout << *this << endl; } @@ -218,43 +192,15 @@ ImuFactor::shared_ptr ImuFactor::Merge(const shared_ptr& f01, // return new factor auto pim02 = Merge(f01->preintegratedMeasurements(), f12->preintegratedMeasurements()); - return boost::make_shared(f01->key1(),// P0 - f01->key2(),// V0 - f12->key3(),// P2 - f12->key4(),// V2 - f01->key5(),// B + return boost::make_shared(f01->key1(), // P0 + f01->key2(), // V0 + f12->key3(), // P2 + f12->key4(), // V2 + f01->key5(), // B pim02); } #endif -//------------------------------------------------------------------------------ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -ImuFactor::ImuFactor(Key pose_i, Key vel_i, Key pose_j, Key vel_j, Key bias, - const PreintegratedImuMeasurements& pim, const Vector3& n_gravity, - const Vector3& omegaCoriolis, const boost::optional& body_P_sensor, - const bool use2ndOrderCoriolis) : -Base(noiseModel::Gaussian::Covariance(pim.preintMeasCov_), pose_i, vel_i, - pose_j, vel_j, bias), _PIM_(pim) { - boost::shared_ptr p = boost::make_shared< - PreintegrationParams>(pim.p()); - p->n_gravity = n_gravity; - p->omegaCoriolis = omegaCoriolis; - p->body_P_sensor = body_P_sensor; - p->use2ndOrderCoriolis = use2ndOrderCoriolis; - _PIM_.p_ = p; -} - -void ImuFactor::Predict(const Pose3& pose_i, const Vector3& vel_i, - Pose3& pose_j, Vector3& vel_j, const imuBias::ConstantBias& bias_i, - PreintegratedImuMeasurements& pim, const Vector3& n_gravity, - const Vector3& omegaCoriolis, const bool use2ndOrderCoriolis) { - // use deprecated predict - PoseVelocityBias pvb = pim.predict(pose_i, vel_i, bias_i, n_gravity, - omegaCoriolis, use2ndOrderCoriolis); - pose_j = pvb.pose; - vel_j = pvb.velocity; -} -#endif //------------------------------------------------------------------------------ // ImuFactor2 methods //------------------------------------------------------------------------------ @@ -280,9 +226,9 @@ std::ostream& operator<<(std::ostream& os, const ImuFactor2& f) { //------------------------------------------------------------------------------ void ImuFactor2::print(const string& s, const KeyFormatter& keyFormatter) const { - cout << s << "ImuFactor2(" << keyFormatter(this->key1()) << "," - << keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) - << ")\n"; + cout << (s == "" ? s : s + "\n") << "ImuFactor2(" + << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << "," + << keyFormatter(this->key3()) << ")\n"; cout << *this << endl; } diff --git a/gtsam/navigation/ImuFactor.h b/gtsam/navigation/ImuFactor.h index 8e3f8f0a4..cd9c591f1 100644 --- a/gtsam/navigation/ImuFactor.h +++ b/gtsam/navigation/ImuFactor.h @@ -59,7 +59,7 @@ typedef ManifoldPreintegration PreintegrationType; */ /** - * PreintegratedIMUMeasurements accumulates (integrates) the IMU measurements + * PreintegratedImuMeasurements accumulates (integrates) the IMU measurements * (rotation rates and accelerations) and the corresponding covariance matrix. * The measurements are then used to build the Preintegrated IMU factor. 
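 *
+ * A typical usage sketch (keys, bias estimate, and noise parameters are
+ * illustrative; see testImuFactorSerialization.cpp in this change for a
+ * concrete setup):
+ *   auto p = PreintegrationParams::MakeSharedD(9.81);  // Z-down convention
+ *   PreintegratedImuMeasurements pim(p, priorBias);
+ *   for (...)  // at IMU rate
+ *     pim.integrateMeasurement(measuredAcc, measuredOmega, dt);
+ *   ImuFactor factor(x1, v1, x2, v2, b, pim);
+ *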
* Integration is done incrementally (ideally, one integrates the measurement @@ -80,15 +80,15 @@ class GTSAM_EXPORT PreintegratedImuMeasurements: public PreintegrationType { public: - /// Default constructor for serialization and Cython wrapper + /// Default constructor for serialization and wrappers PreintegratedImuMeasurements() { preintMeasCov_.setZero(); } /** * Constructor, initializes the class with no measurements - * @param bias Current estimate of acceleration and rotation rate biases - * @param p Parameters, typically fixed in a single application + * @param p Parameters, typically fixed in a single application + * @param biasHat Current estimate of acceleration and rotation rate biases */ PreintegratedImuMeasurements(const boost::shared_ptr& p, const imuBias::ConstantBias& biasHat = imuBias::ConstantBias()) : @@ -140,31 +140,14 @@ class GTSAM_EXPORT PreintegratedImuMeasurements: public PreintegrationType { void mergeWith(const PreintegratedImuMeasurements& pim, Matrix9* H1, Matrix9* H2); #endif -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @deprecated constructor - /// NOTE(frank): assumes Z-Down convention, only second order integration supported - PreintegratedImuMeasurements(const imuBias::ConstantBias& biasHat, - const Matrix3& measuredAccCovariance, - const Matrix3& measuredOmegaCovariance, - const Matrix3& integrationErrorCovariance, - bool use2ndOrderIntegration = true); - - /// @deprecated version of integrateMeasurement with body_P_sensor - /// Use parameters instead - void integrateMeasurement(const Vector3& measuredAcc, - const Vector3& measuredOmega, double dt, - boost::optional body_P_sensor); -#endif - -private: - + private: /// Serialization function friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { namespace bs = ::boost::serialization; ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegrationType); - ar & bs::make_nvp("preintMeasCov_", bs::make_array(preintMeasCov_.data(), preintMeasCov_.size())); + ar & BOOST_SERIALIZATION_NVP(preintMeasCov_); } }; @@ -217,14 +200,14 @@ class GTSAM_EXPORT ImuFactor: public NoiseModelFactor5 H1 = boost::none, boost::optional H2 = boost::none, boost::optional H3 = boost::none, boost::optional H4 = - boost::none, boost::optional H5 = boost::none) const; + boost::none, boost::optional H5 = boost::none) const override; #ifdef GTSAM_TANGENT_PREINTEGRATION /// Merge two pre-integrated measurement classes @@ -253,27 +236,7 @@ class GTSAM_EXPORT ImuFactor: public NoiseModelFactor5& body_P_sensor = boost::none, - const bool use2ndOrderCoriolis = false); - - /// @deprecated use PreintegrationBase::predict, - /// in the new one gravity, coriolis settings are in PreintegrationParams - static void Predict(const Pose3& pose_i, const Vector3& vel_i, Pose3& pose_j, - Vector3& vel_j, const imuBias::ConstantBias& bias_i, - PreintegratedMeasurements& pim, const Vector3& n_gravity, - const Vector3& omegaCoriolis, const bool use2ndOrderCoriolis = false); -#endif - -private: - + private: /** Serialization function */ friend class boost::serialization::access; template @@ -315,14 +278,14 @@ class GTSAM_EXPORT ImuFactor2 : public NoiseModelFactor3 H1 = boost::none, boost::optional H2 = boost::none, - boost::optional H3 = boost::none) const; + boost::optional H3 = boost::none) const override; private: diff --git a/gtsam/navigation/MagFactor.h b/gtsam/navigation/MagFactor.h index 3875391d0..74e9177d5 100644 --- a/gtsam/navigation/MagFactor.h +++ b/gtsam/navigation/MagFactor.h @@ -53,7 +53,7 
@@ class MagFactor: public NoiseModelFactor1 { } /// @return a deep copy of this factor - virtual NonlinearFactor::shared_ptr clone() const { + NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( NonlinearFactor::shared_ptr(new MagFactor(*this))); } @@ -73,7 +73,7 @@ class MagFactor: public NoiseModelFactor1 { * @brief vector of errors */ Vector evaluateError(const Rot2& nRb, - boost::optional H = boost::none) const { + boost::optional H = boost::none) const override { // measured bM = nRb� * nM + b Point3 hx = unrotate(nRb, nM_, H) + bias_; return (hx - measured_); @@ -102,7 +102,7 @@ class MagFactor1: public NoiseModelFactor1 { } /// @return a deep copy of this factor - virtual NonlinearFactor::shared_ptr clone() const { + NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( NonlinearFactor::shared_ptr(new MagFactor1(*this))); } @@ -111,7 +111,7 @@ class MagFactor1: public NoiseModelFactor1 { * @brief vector of errors */ Vector evaluateError(const Rot3& nRb, - boost::optional H = boost::none) const { + boost::optional H = boost::none) const override { // measured bM = nRb� * nM + b Point3 hx = nRb.unrotate(nM_, H, boost::none) + bias_; return (hx - measured_); @@ -138,7 +138,7 @@ class MagFactor2: public NoiseModelFactor2 { } /// @return a deep copy of this factor - virtual NonlinearFactor::shared_ptr clone() const { + NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( NonlinearFactor::shared_ptr(new MagFactor2(*this))); } @@ -150,7 +150,7 @@ class MagFactor2: public NoiseModelFactor2 { */ Vector evaluateError(const Point3& nM, const Point3& bias, boost::optional H1 = boost::none, boost::optional H2 = - boost::none) const { + boost::none) const override { // measured bM = nRb� * nM + b, where b is unknown bias Point3 hx = bRn_.rotate(nM, boost::none, H1) + bias; if (H2) @@ -179,7 +179,7 @@ class MagFactor3: public NoiseModelFactor3 { } /// @return a deep copy of this factor - virtual NonlinearFactor::shared_ptr clone() const { + NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( NonlinearFactor::shared_ptr(new MagFactor3(*this))); } @@ -192,7 +192,7 @@ class MagFactor3: public NoiseModelFactor3 { Vector evaluateError(const double& scale, const Unit3& direction, const Point3& bias, boost::optional H1 = boost::none, boost::optional H2 = boost::none, boost::optional H3 = - boost::none) const { + boost::none) const override { // measured bM = nRb� * nM + b, where b is unknown bias Unit3 rotated = bRn_.rotate(direction, boost::none, H2); Point3 hx = scale * rotated.point3() + bias; diff --git a/gtsam/navigation/ManifoldPreintegration.h b/gtsam/navigation/ManifoldPreintegration.h index 22897b9d4..a290972e4 100644 --- a/gtsam/navigation/ManifoldPreintegration.h +++ b/gtsam/navigation/ManifoldPreintegration.h @@ -118,15 +118,13 @@ class GTSAM_EXPORT ManifoldPreintegration : public PreintegrationBase { template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { namespace bs = ::boost::serialization; - ar & BOOST_SERIALIZATION_NVP(p_); - ar & BOOST_SERIALIZATION_NVP(deltaTij_); + ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegrationBase); ar & BOOST_SERIALIZATION_NVP(deltaXij_); - ar & BOOST_SERIALIZATION_NVP(biasHat_); - ar & bs::make_nvp("delRdelBiasOmega_", bs::make_array(delRdelBiasOmega_.data(), delRdelBiasOmega_.size())); - ar & bs::make_nvp("delPdelBiasAcc_", bs::make_array(delPdelBiasAcc_.data(), delPdelBiasAcc_.size())); - ar & 
bs::make_nvp("delPdelBiasOmega_", bs::make_array(delPdelBiasOmega_.data(), delPdelBiasOmega_.size())); - ar & bs::make_nvp("delVdelBiasAcc_", bs::make_array(delVdelBiasAcc_.data(), delVdelBiasAcc_.size())); - ar & bs::make_nvp("delVdelBiasOmega_", bs::make_array(delVdelBiasOmega_.data(), delVdelBiasOmega_.size())); + ar & BOOST_SERIALIZATION_NVP(delRdelBiasOmega_); + ar & BOOST_SERIALIZATION_NVP(delPdelBiasAcc_); + ar & BOOST_SERIALIZATION_NVP(delPdelBiasOmega_); + ar & BOOST_SERIALIZATION_NVP(delVdelBiasAcc_); + ar & BOOST_SERIALIZATION_NVP(delVdelBiasOmega_); } }; diff --git a/gtsam/navigation/NavState.cpp b/gtsam/navigation/NavState.cpp index 50949c761..0181ea45f 100644 --- a/gtsam/navigation/NavState.cpp +++ b/gtsam/navigation/NavState.cpp @@ -88,15 +88,15 @@ Matrix7 NavState::matrix() const { //------------------------------------------------------------------------------ ostream& operator<<(ostream& os, const NavState& state) { - os << "R:" << state.attitude(); - os << "p:" << state.position() << endl; - os << "v:" << Point3(state.velocity()) << endl; + os << "R: " << state.attitude() << "\n"; + os << "p: " << state.position().transpose() << "\n"; + os << "v: " << state.velocity().transpose(); return os; } //------------------------------------------------------------------------------ void NavState::print(const string& s) const { - cout << s << *this << endl; + cout << (s.empty() ? s : s + " ") << *this << endl; } //------------------------------------------------------------------------------ @@ -218,28 +218,37 @@ Vector9 NavState::coriolis(double dt, const Vector3& omega, bool secondOrder, const double dt2 = dt * dt; const Vector3 omega_cross_vel = omega.cross(n_v); - Vector9 xi; - Matrix3 D_dP_R; - dR(xi) << nRb.unrotate((-dt) * omega, H ? &D_dP_R : 0); - dP(xi) << ((-dt2) * omega_cross_vel); // NOTE(luca): we got rid of the 2 wrt INS paper - dV(xi) << ((-2.0 * dt) * omega_cross_vel); + // Get perturbations in nav frame + Vector9 n_xi, xi; + Matrix3 D_dR_R, D_dP_R, D_dV_R, D_body_nav; + dR(n_xi) << ((-dt) * omega); + dP(n_xi) << ((-dt2) * omega_cross_vel); // NOTE(luca): we got rid of the 2 wrt INS paper + dV(n_xi) << ((-2.0 * dt) * omega_cross_vel); if (secondOrder) { const Vector3 omega_cross2_t = omega.cross(omega.cross(n_t)); - dP(xi) -= (0.5 * dt2) * omega_cross2_t; - dV(xi) -= dt * omega_cross2_t; + dP(n_xi) -= (0.5 * dt2) * omega_cross2_t; + dV(n_xi) -= dt * omega_cross2_t; } + + // Transform n_xi into the body frame + xi << nRb.unrotate(dR(n_xi), H ? &D_dR_R : 0, H ? &D_body_nav : 0), + nRb.unrotate(dP(n_xi), H ? &D_dP_R : 0), + nRb.unrotate(dV(n_xi), H ? 
&D_dV_R : 0); + if (H) { H->setZero(); const Matrix3 Omega = skewSymmetric(omega); const Matrix3 D_cross_state = Omega * R(); H->setZero(); - D_R_R(H) << D_dP_R; - D_t_v(H) << (-dt2) * D_cross_state; - D_v_v(H) << (-2.0 * dt) * D_cross_state; + D_R_R(H) << D_dR_R; + D_t_v(H) << D_body_nav * (-dt2) * D_cross_state; + D_t_R(H) << D_dP_R; + D_v_v(H) << D_body_nav * (-2.0 * dt) * D_cross_state; + D_v_R(H) << D_dV_R; if (secondOrder) { const Matrix3 D_cross2_state = Omega * D_cross_state; - D_t_t(H) -= (0.5 * dt2) * D_cross2_state; - D_v_t(H) -= dt * D_cross2_state; + D_t_t(H) -= D_body_nav * (0.5 * dt2) * D_cross2_state; + D_v_t(H) -= D_body_nav * dt * D_cross2_state; } } return xi; diff --git a/gtsam/navigation/PreintegratedRotation.cpp b/gtsam/navigation/PreintegratedRotation.cpp index 8c29d85dd..f827c7c59 100644 --- a/gtsam/navigation/PreintegratedRotation.cpp +++ b/gtsam/navigation/PreintegratedRotation.cpp @@ -25,17 +25,16 @@ using namespace std; namespace gtsam { -void PreintegratedRotation::Params::print(const string& s) const { - cout << s << endl; +void PreintegratedRotationParams::print(const string& s) const { + cout << (s == "" ? s : s + "\n") << endl; cout << "gyroscopeCovariance:\n[\n" << gyroscopeCovariance << "\n]" << endl; if (omegaCoriolis) cout << "omegaCoriolis = (" << omegaCoriolis->transpose() << ")" << endl; - if (body_P_sensor) - body_P_sensor->print("body_P_sensor"); + if (body_P_sensor) body_P_sensor->print("body_P_sensor"); } -bool PreintegratedRotation::Params::equals( - const PreintegratedRotation::Params& other, double tol) const { +bool PreintegratedRotationParams::equals( + const PreintegratedRotationParams& other, double tol) const { if (body_P_sensor) { if (!other.body_P_sensor || !assert_equal(*body_P_sensor, *other.body_P_sensor, tol)) diff --git a/gtsam/navigation/PreintegratedRotation.h b/gtsam/navigation/PreintegratedRotation.h index 71ef5d08f..e52d28e1e 100644 --- a/gtsam/navigation/PreintegratedRotation.h +++ b/gtsam/navigation/PreintegratedRotation.h @@ -29,7 +29,9 @@ namespace gtsam { /// Parameters for pre-integration: /// Usage: Create just a single Params and pass a shared pointer to the constructor struct GTSAM_EXPORT PreintegratedRotationParams { - Matrix3 gyroscopeCovariance; ///< continuous-time "Covariance" of gyroscope measurements + /// Continuous-time "Covariance" of gyroscope measurements + /// The units for stddev are σ = rad/s/√Hz + Matrix3 gyroscopeCovariance; boost::optional omegaCoriolis; ///< Coriolis constant boost::optional body_P_sensor; ///< The pose of the sensor in the body frame @@ -61,15 +63,21 @@ struct GTSAM_EXPORT PreintegratedRotationParams { template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { namespace bs = ::boost::serialization; - ar & bs::make_nvp("gyroscopeCovariance", bs::make_array(gyroscopeCovariance.data(), gyroscopeCovariance.size())); - ar & BOOST_SERIALIZATION_NVP(omegaCoriolis); + ar & BOOST_SERIALIZATION_NVP(gyroscopeCovariance); ar & BOOST_SERIALIZATION_NVP(body_P_sensor); + + // Provide support for Eigen::Matrix in boost::optional + bool omegaCoriolisFlag = omegaCoriolis.is_initialized(); + ar & boost::serialization::make_nvp("omegaCoriolisFlag", omegaCoriolisFlag); + if (omegaCoriolisFlag) { + ar & BOOST_SERIALIZATION_NVP(*omegaCoriolis); + } } #ifdef GTSAM_USE_QUATERNIONS // Align if we are using Quaternions public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW #endif }; @@ -182,7 +190,7 @@ class GTSAM_EXPORT PreintegratedRotation { #ifdef GTSAM_USE_QUATERNIONS 
// Align if we are using Quaternions public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW #endif }; diff --git a/gtsam/navigation/PreintegrationBase.cpp b/gtsam/navigation/PreintegrationBase.cpp index 4af752ac0..111594663 100644 --- a/gtsam/navigation/PreintegrationBase.cpp +++ b/gtsam/navigation/PreintegrationBase.cpp @@ -46,7 +46,7 @@ ostream& operator<<(ostream& os, const PreintegrationBase& pim) { //------------------------------------------------------------------------------ void PreintegrationBase::print(const string& s) const { - cout << s << *this << endl; + cout << (s == "" ? s : s + "\n") << *this << endl; } //------------------------------------------------------------------------------ @@ -193,22 +193,6 @@ Vector9 PreintegrationBase::computeErrorAndJacobians(const Pose3& pose_i, return error; } -//------------------------------------------------------------------------------ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -PoseVelocityBias PreintegrationBase::predict(const Pose3& pose_i, - const Vector3& vel_i, const imuBias::ConstantBias& bias_i, - const Vector3& n_gravity, const Vector3& omegaCoriolis, - const bool use2ndOrderCoriolis) const { -// NOTE(frank): parameters are supposed to be constant, below is only provided for compatibility - boost::shared_ptr q = boost::make_shared(p()); - q->n_gravity = n_gravity; - q->omegaCoriolis = omegaCoriolis; - q->use2ndOrderCoriolis = use2ndOrderCoriolis; - p_ = q; - return PoseVelocityBias(predict(NavState(pose_i, vel_i), bias_i), bias_i); -} - -#endif //------------------------------------------------------------------------------ } // namespace gtsam diff --git a/gtsam/navigation/PreintegrationBase.h b/gtsam/navigation/PreintegrationBase.h index 9926d207a..e118a6232 100644 --- a/gtsam/navigation/PreintegrationBase.h +++ b/gtsam/navigation/PreintegrationBase.h @@ -32,25 +32,6 @@ namespace gtsam { -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -/// @deprecated -struct PoseVelocityBias { - Pose3 pose; - Vector3 velocity; - imuBias::ConstantBias bias; - PoseVelocityBias(const Pose3& _pose, const Vector3& _velocity, - const imuBias::ConstantBias _bias) : - pose(_pose), velocity(_velocity), bias(_bias) { - } - PoseVelocityBias(const NavState& navState, const imuBias::ConstantBias _bias) : - pose(navState.pose()), velocity(navState.velocity()), bias(_bias) { - } - NavState navState() const { - return NavState(pose, velocity); - } -}; -#endif - /** * PreintegrationBase is the base class for PreintegratedMeasurements * (in ImuFactor) and CombinedPreintegratedMeasurements (in CombinedImuFactor). @@ -63,11 +44,6 @@ class GTSAM_EXPORT PreintegrationBase { typedef PreintegrationParams Params; protected: - /// Parameters. Declared mutable only for deprecated predict method. 
- /// TODO(frank): make const once deprecated method is removed -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - mutable -#endif boost::shared_ptr p_; /// Acceleration and gyro bias used for preintegration @@ -117,16 +93,11 @@ class GTSAM_EXPORT PreintegrationBase { } /// const reference to params - const Params& p() const { - return *boost::static_pointer_cast(p_); + Params& p() const { + return *p_; } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - void set_body_P_sensor(const Pose3& body_P_sensor) { - p_->body_P_sensor = body_P_sensor; - } -#endif -/// @} + /// @} /// @name Instance variables access /// @{ @@ -145,7 +116,7 @@ class GTSAM_EXPORT PreintegrationBase { /// @name Testable /// @{ GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, const PreintegrationBase& pim); - virtual void print(const std::string& s) const; + virtual void print(const std::string& s="") const; /// @} /// @name Main functionality @@ -201,20 +172,18 @@ class GTSAM_EXPORT PreintegrationBase { OptionalJacobian<9, 6> H3 = boost::none, OptionalJacobian<9, 3> H4 = boost::none, OptionalJacobian<9, 6> H5 = boost::none) const; -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - - /// @deprecated predict - PoseVelocityBias predict(const Pose3& pose_i, const Vector3& vel_i, - const imuBias::ConstantBias& bias_i, const Vector3& n_gravity, - const Vector3& omegaCoriolis, const bool use2ndOrderCoriolis = false) const; - - /// @} -#endif + private: + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + ar & BOOST_SERIALIZATION_NVP(p_); + ar & BOOST_SERIALIZATION_NVP(biasHat_); + ar & BOOST_SERIALIZATION_NVP(deltaTij_); + } public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; } /// namespace gtsam diff --git a/gtsam/navigation/PreintegrationParams.cpp b/gtsam/navigation/PreintegrationParams.cpp index 61cd1617c..2298bb696 100644 --- a/gtsam/navigation/PreintegrationParams.cpp +++ b/gtsam/navigation/PreintegrationParams.cpp @@ -27,7 +27,7 @@ namespace gtsam { //------------------------------------------------------------------------------ void PreintegrationParams::print(const string& s) const { - PreintegratedRotation::Params::print(s); + PreintegratedRotationParams::print(s); cout << "accelerometerCovariance:\n[\n" << accelerometerCovariance << "\n]" << endl; cout << "integrationCovariance:\n[\n" << integrationCovariance << "\n]" @@ -39,10 +39,10 @@ void PreintegrationParams::print(const string& s) const { } //------------------------------------------------------------------------------ -bool PreintegrationParams::equals(const PreintegratedRotation::Params& other, +bool PreintegrationParams::equals(const PreintegratedRotationParams& other, double tol) const { auto e = dynamic_cast(&other); - return e != nullptr && PreintegratedRotation::Params::equals(other, tol) && + return e != nullptr && PreintegratedRotationParams::equals(other, tol) && use2ndOrderCoriolis == e->use2ndOrderCoriolis && equal_with_abs_tol(accelerometerCovariance, e->accelerometerCovariance, tol) && diff --git a/gtsam/navigation/PreintegrationParams.h b/gtsam/navigation/PreintegrationParams.h index 962fef277..960f67e24 100644 --- a/gtsam/navigation/PreintegrationParams.h +++ b/gtsam/navigation/PreintegrationParams.h @@ -24,14 +24,17 @@ namespace gtsam { /// Parameters for pre-integration: /// Usage: Create just a single Params and pass a shared pointer to the constructor struct GTSAM_EXPORT 
PreintegrationParams: PreintegratedRotationParams { - Matrix3 accelerometerCovariance; ///< continuous-time "Covariance" of accelerometer + /// Continuous-time "Covariance" of accelerometer + /// The units for stddev are σ = m/s²/√Hz + Matrix3 accelerometerCovariance; Matrix3 integrationCovariance; ///< continuous-time "Covariance" describing integration uncertainty bool use2ndOrderCoriolis; ///< Whether to use second order Coriolis integration Vector3 n_gravity; ///< Gravity vector in nav frame /// Default constructor for serialization only PreintegrationParams() - : accelerometerCovariance(I_3x3), + : PreintegratedRotationParams(), + accelerometerCovariance(I_3x3), integrationCovariance(I_3x3), use2ndOrderCoriolis(false), n_gravity(0, 0, -1) {} @@ -39,7 +42,8 @@ struct GTSAM_EXPORT PreintegrationParams: PreintegratedRotationParams { /// The Params constructor insists on getting the navigation frame gravity vector /// For convenience, two commonly used conventions are provided by named constructors below PreintegrationParams(const Vector3& n_gravity) - : accelerometerCovariance(I_3x3), + : PreintegratedRotationParams(), + accelerometerCovariance(I_3x3), integrationCovariance(I_3x3), use2ndOrderCoriolis(false), n_gravity(n_gravity) {} @@ -54,8 +58,8 @@ struct GTSAM_EXPORT PreintegrationParams: PreintegratedRotationParams { return boost::shared_ptr(new PreintegrationParams(Vector3(0, 0, -g))); } - void print(const std::string& s) const; - bool equals(const PreintegratedRotation::Params& other, double tol) const; + void print(const std::string& s="") const override; + bool equals(const PreintegratedRotationParams& other, double tol) const override; void setAccelerometerCovariance(const Matrix3& cov) { accelerometerCovariance = cov; } void setIntegrationCovariance(const Matrix3& cov) { integrationCovariance = cov; } @@ -73,10 +77,9 @@ struct GTSAM_EXPORT PreintegrationParams: PreintegratedRotationParams { template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { namespace bs = ::boost::serialization; - ar & boost::serialization::make_nvp("PreintegratedRotation_Params", - boost::serialization::base_object(*this)); - ar & bs::make_nvp("accelerometerCovariance", bs::make_array(accelerometerCovariance.data(), accelerometerCovariance.size())); - ar & bs::make_nvp("integrationCovariance", bs::make_array(integrationCovariance.data(), integrationCovariance.size())); + ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegratedRotationParams); + ar & BOOST_SERIALIZATION_NVP(accelerometerCovariance); + ar & BOOST_SERIALIZATION_NVP(integrationCovariance); ar & BOOST_SERIALIZATION_NVP(use2ndOrderCoriolis); ar & BOOST_SERIALIZATION_NVP(n_gravity); } @@ -84,7 +87,7 @@ struct GTSAM_EXPORT PreintegrationParams: PreintegratedRotationParams { #ifdef GTSAM_USE_QUATERNIONS // Align if we are using Quaternions public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW #endif }; diff --git a/gtsam/navigation/ScenarioRunner.h b/gtsam/navigation/ScenarioRunner.h index 649d0fe12..1577e36fe 100644 --- a/gtsam/navigation/ScenarioRunner.h +++ b/gtsam/navigation/ScenarioRunner.h @@ -104,15 +104,6 @@ class GTSAM_EXPORT ScenarioRunner { /// Estimate covariance of sampled noise for sanity-check Matrix6 estimateNoiseCovariance(size_t N = 1000) const; - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - ScenarioRunner(const Scenario* scenario, const SharedParams& p, - double imuSampleTime = 1.0 / 100.0, const Bias& bias = Bias()) - : ScenarioRunner(*scenario, p, imuSampleTime, 
bias) {} - /// @} -#endif }; } // namespace gtsam diff --git a/gtsam/navigation/TangentPreintegration.cpp b/gtsam/navigation/TangentPreintegration.cpp index 55efdb151..56d7aa6d3 100644 --- a/gtsam/navigation/TangentPreintegration.cpp +++ b/gtsam/navigation/TangentPreintegration.cpp @@ -68,7 +68,7 @@ Vector9 TangentPreintegration::UpdatePreintegrated(const Vector3& a_body, Matrix3 w_tangent_H_theta, invH; const Vector3 w_tangent = // angular velocity mapped back to tangent space local.applyInvDexp(w_body, A ? &w_tangent_H_theta : 0, C ? &invH : 0); - const Rot3 R(local.expmap()); + const Rot3 R(local.expmap()); // nRb: rotation of body in nav frame const Vector3 a_nav = R * a_body; const double dt22 = 0.5 * dt * dt; @@ -110,7 +110,7 @@ void TangentPreintegration::update(const Vector3& measuredAcc, Vector3 acc = biasHat_.correctAccelerometer(measuredAcc); Vector3 omega = biasHat_.correctGyroscope(measuredOmega); - // Possibly correct for sensor pose + // Possibly correct for sensor pose by converting to body frame Matrix3 D_correctedAcc_acc, D_correctedAcc_omega, D_correctedOmega_omega; if (p().body_P_sensor) boost::tie(acc, omega) = correctMeasurementsBySensorPose(acc, omega, diff --git a/gtsam/navigation/TangentPreintegration.h b/gtsam/navigation/TangentPreintegration.h index 11945e53a..1b51b4e1e 100644 --- a/gtsam/navigation/TangentPreintegration.h +++ b/gtsam/navigation/TangentPreintegration.h @@ -132,16 +132,14 @@ class GTSAM_EXPORT TangentPreintegration : public PreintegrationBase { template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { namespace bs = ::boost::serialization; - ar & BOOST_SERIALIZATION_NVP(p_); - ar & BOOST_SERIALIZATION_NVP(biasHat_); - ar & BOOST_SERIALIZATION_NVP(deltaTij_); - ar & bs::make_nvp("preintegrated_", bs::make_array(preintegrated_.data(), preintegrated_.size())); - ar & bs::make_nvp("preintegrated_H_biasAcc_", bs::make_array(preintegrated_H_biasAcc_.data(), preintegrated_H_biasAcc_.size())); - ar & bs::make_nvp("preintegrated_H_biasOmega_", bs::make_array(preintegrated_H_biasOmega_.data(), preintegrated_H_biasOmega_.size())); + ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegrationBase); + ar & BOOST_SERIALIZATION_NVP(preintegrated_); + ar & BOOST_SERIALIZATION_NVP(preintegrated_H_biasAcc_); + ar & BOOST_SERIALIZATION_NVP(preintegrated_H_biasOmega_); } public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; } /// namespace gtsam diff --git a/gtsam/navigation/tests/testImuFactorSerialization.cpp b/gtsam/navigation/tests/testImuFactorSerialization.cpp index 59d0ac199..ed72e18e9 100644 --- a/gtsam/navigation/tests/testImuFactorSerialization.cpp +++ b/gtsam/navigation/tests/testImuFactorSerialization.cpp @@ -16,15 +16,19 @@ * @author Frank Dellaert * @author Richard Roberts * @author Stephen Williams + * @author Varun Agrawal */ -#include -#include #include +#include +#include +#include + #include using namespace std; using namespace gtsam; +using namespace gtsam::serializationTestHelpers; BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained"); @@ -38,23 +42,23 @@ BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel"); BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal"); -TEST(ImuFactor, serialization) { - using namespace gtsam::serializationTestHelpers; - +template +P getPreintegratedMeasurements() { // Create default parameters with Z-down and above noise paramaters - auto p = 
PreintegrationParams::MakeSharedD(9.81); - p->body_P_sensor = Pose3(Rot3::Ypr(0, 0, M_PI), Point3(0,0,0)); + auto p = P::Params::MakeSharedD(9.81); + p->body_P_sensor = Pose3(Rot3::Ypr(0, 0, M_PI), Point3(0, 0, 0)); p->accelerometerCovariance = 1e-7 * I_3x3; p->gyroscopeCovariance = 1e-8 * I_3x3; p->integrationCovariance = 1e-9 * I_3x3; const double deltaT = 0.005; - const imuBias::ConstantBias priorBias( - Vector3(0, 0, 0), Vector3(0, 0.01, 0)); // Biases (acc, rot) - PreintegratedImuMeasurements pim(p, priorBias); + // Biases (acc, rot) + const imuBias::ConstantBias priorBias(Vector3(0, 0, 0), Vector3(0, 0.01, 0)); + + P pim(p, priorBias); - // measurements are needed for non-inf noise model, otherwise will throw err + // measurements are needed for non-inf noise model, otherwise will throw error // when deserialize const Vector3 measuredOmega(0, 0.01, 0); const Vector3 measuredAcc(0, 0, -9.81); @@ -62,6 +66,16 @@ TEST(ImuFactor, serialization) { for (int j = 0; j < 200; ++j) pim.integrateMeasurement(measuredAcc, measuredOmega, deltaT); + return pim; +} + +TEST(ImuFactor, serialization) { + auto pim = getPreintegratedMeasurements(); + + EXPECT(equalsObj(pim)); + EXPECT(equalsXML(pim)); + EXPECT(equalsBinary(pim)); + ImuFactor factor(1, 2, 3, 4, 5, pim); EXPECT(equalsObj(factor)); @@ -69,6 +83,30 @@ TEST(ImuFactor, serialization) { EXPECT(equalsBinary(factor)); } +TEST(ImuFactor2, serialization) { + auto pim = getPreintegratedMeasurements(); + + ImuFactor2 factor(1, 2, 3, pim); + + EXPECT(equalsObj(factor)); + EXPECT(equalsXML(factor)); + EXPECT(equalsBinary(factor)); +} + +TEST(CombinedImuFactor, Serialization) { + auto pim = getPreintegratedMeasurements(); + + EXPECT(equalsObj(pim)); + EXPECT(equalsXML(pim)); + EXPECT(equalsBinary(pim)); + + const CombinedImuFactor factor(1, 2, 3, 4, 5, 6, pim); + + EXPECT(equalsObj(factor)); + EXPECT(equalsXML(factor)); + EXPECT(equalsBinary(factor)); +} + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/navigation/tests/testNavState.cpp b/gtsam/navigation/tests/testNavState.cpp index 8ea8ce9b5..5bafe4392 100644 --- a/gtsam/navigation/tests/testNavState.cpp +++ b/gtsam/navigation/tests/testNavState.cpp @@ -163,37 +163,6 @@ TEST( NavState, Manifold ) { EXPECT(assert_equal(numericalDerivative22(local, state2, kIdentity), aH2)); } -/* ************************************************************************* */ -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -TEST(NavState, Update) { - Vector3 omega(M_PI / 100.0, 0.0, 0.0); - Vector3 acc(0.1, 0.0, 0.0); - double dt = 10; - Matrix9 aF; - Matrix93 aG1, aG2; - boost::function update = - boost::bind(&NavState::update, _1, _2, _3, dt, boost::none, - boost::none, boost::none); - Vector3 b_acc = kAttitude * acc; - NavState expected(kAttitude.expmap(dt * omega), - kPosition + Point3((kVelocity + b_acc * dt / 2) * dt), - kVelocity + b_acc * dt); - NavState actual = kState1.update(acc, omega, dt, aF, aG1, aG2); - EXPECT(assert_equal(expected, actual)); - EXPECT(assert_equal(numericalDerivative31(update, kState1, acc, omega, 1e-7), aF, 1e-7)); - EXPECT(assert_equal(numericalDerivative32(update, kState1, acc, omega, 1e-7), aG1, 1e-7)); - EXPECT(assert_equal(numericalDerivative33(update, kState1, acc, omega, 1e-7), aG2, 1e-7)); - - // Try different values - omega = Vector3(0.1, 0.2, 0.3); - acc = Vector3(0.4, 0.5, 0.6); - kState1.update(acc, omega, dt, aF, aG1, aG2); - EXPECT(assert_equal(numericalDerivative31(update, kState1, acc, omega, 1e-7), aF, 
1e-7)); - EXPECT(assert_equal(numericalDerivative32(update, kState1, acc, omega, 1e-7), aG1, 1e-7)); - EXPECT(assert_equal(numericalDerivative33(update, kState1, acc, omega, 1e-7), aG2, 1e-7)); -} -#endif - /* ************************************************************************* */ static const double dt = 2.0; boost::function coriolis = boost::bind( @@ -223,6 +192,49 @@ TEST(NavState, Coriolis2) { EXPECT(assert_equal(numericalDerivative21(coriolis, state2, true), aH)); } +TEST(NavState, Coriolis3) { + /** Consider a massless planet with an attached nav frame at + * n_omega = [0 0 1]', and a body at position n_t = [1 0 0]', travelling with + * velocity n_v = [0 1 0]'. Orient the body so that it is not instantaneously + * aligned with the nav frame (i.e., nRb != I_3x3). Test that first and + * second order Coriolis corrections are as expected. + */ + + // Get true first and second order coriolis accelerations + double dt = 2.0, dt2 = dt * dt; + Vector3 n_omega(0.0, 0.0, 1.0), n_t(1.0, 0.0, 0.0), n_v(0.0, 1.0, 0.0); + Vector3 n_aCorr1 = -2.0 * n_omega.cross(n_v), + n_aCorr2 = -n_omega.cross(n_omega.cross(n_t)); + Rot3 nRb = Rot3(-1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0), + bRn = nRb.inverse(); + + // Get expected first and second order corrections in the nav frame + Vector3 n_dP1e = 0.5 * dt2 * n_aCorr1, + n_dP2e = 0.5 * dt2 * (n_aCorr1 + n_aCorr2), + n_dV1e = dt * n_aCorr1, + n_dV2e = dt * (n_aCorr1 + n_aCorr2); + + // Get expected first and second order corrections in the body frame + Vector3 dRe = -dt * (bRn * n_omega), + b_dP1e = bRn * n_dP1e, b_dP2e = bRn * n_dP2e, + b_dV1e = bRn * n_dV1e, b_dV2e = bRn * n_dV2e; + + // Get actual first and scond order corrections in body frame + NavState kState2(nRb, n_t, n_v); + Vector9 dXi1a = kState2.coriolis(dt, n_omega, false), + dXi2a = kState2.coriolis(dt, n_omega, true); + Vector3 dRa = NavState::dR(dXi1a), + b_dP1a = NavState::dP(dXi1a), b_dV1a = NavState::dV(dXi1a), + b_dP2a = NavState::dP(dXi2a), b_dV2a = NavState::dV(dXi2a); + + EXPECT(assert_equal(dRe, dRa)); + EXPECT(assert_equal(b_dP1e, b_dP1a)); + EXPECT(assert_equal(b_dV1e, b_dV1a)); + EXPECT(assert_equal(b_dP2e, b_dP2a)); + EXPECT(assert_equal(b_dV2e, b_dV2a)); + +} + /* ************************************************************************* */ TEST(NavState, CorrectPIM) { Vector9 xi; @@ -237,6 +249,21 @@ TEST(NavState, CorrectPIM) { EXPECT(assert_equal(numericalDerivative22(correctPIM, kState1, xi), aH2)); } +/* ************************************************************************* */ +TEST(NavState, Stream) +{ + NavState state; + + std::ostringstream os; + os << state; + + string expected; + expected = "R: [\n\t1, 0, 0;\n\t0, 1, 0;\n\t0, 0, 1\n]\np: 0 0 0\nv: 0 0 0"; + + EXPECT(os.str() == expected); +} + + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/navigation/tests/testPoseVelocityBias.cpp b/gtsam/navigation/tests/testPoseVelocityBias.cpp index 0b897bc6e..f3c36335b 100644 --- a/gtsam/navigation/tests/testPoseVelocityBias.cpp +++ b/gtsam/navigation/tests/testPoseVelocityBias.cpp @@ -25,40 +25,6 @@ using namespace std; using namespace gtsam; -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - -// Should be seen as between(pvb1,pvb2), i.e., written as pvb2 \omin pvb1 -Vector9 error(const PoseVelocityBias& pvb1, const PoseVelocityBias& pvb2) { - Matrix3 R1 = pvb1.pose.rotation().matrix(); - // Ri.transpose() translate the error from the global frame into pose1's frame - const Vector3 fp = 
R1.transpose() * (pvb2.pose.translation() - pvb1.pose.translation()); - const Vector3 fv = R1.transpose() * (pvb2.velocity - pvb1.velocity); - const Rot3 R1BetweenR2 = pvb1.pose.rotation().between(pvb2.pose.rotation()); - const Vector3 fR = Rot3::Logmap(R1BetweenR2); - Vector9 r; - r << fp, fv, fR; - return r; -} - -/* ************************************************************************************************/ -TEST(PoseVelocityBias, error) { - Point3 i1(0, 1, 0), j1(-1, 0, 0), k1(0, 0, 1); - Pose3 x1(Rot3(i1, j1, k1), Point3(5.0, 1.0, 0.0)); - Vector3 v1(Vector3(0.5, 0.0, 0.0)); - imuBias::ConstantBias bias1(Vector3(0.2, 0, 0), Vector3(0.1, 0, 0.3)); - - Pose3 x2(Rot3(i1, j1, k1).expmap(Vector3(0.1, 0.2, 0.3)), Point3(5.5, 1.0, 6.0)); - Vector3 v2(Vector3(0.5, 4.0, 3.0)); - imuBias::ConstantBias bias2(Vector3(0.1, 0.2, -0.3), Vector3(0.2, 0.3, 0.1)); - - PoseVelocityBias pvb1(x1, v1, bias1), pvb2(x2, v2, bias2); - - Vector9 expected, actual = error(pvb1, pvb2); - expected << 0.0, -0.5, 6.0, 4.0, 0.0, 3.0, 0.1, 0.2, 0.3; - EXPECT(assert_equal(expected, actual, 1e-9)); -} -#endif - /* ************************************************************************************************/ int main() { TestResult tr; diff --git a/gtsam/navigation/tests/testScenarios.cpp b/gtsam/navigation/tests/testScenarioRunner.cpp similarity index 100% rename from gtsam/navigation/tests/testScenarios.cpp rename to gtsam/navigation/tests/testScenarioRunner.cpp diff --git a/gtsam/nonlinear/AdaptAutoDiff.h b/gtsam/nonlinear/AdaptAutoDiff.h index ff059ef78..682cca29a 100644 --- a/gtsam/nonlinear/AdaptAutoDiff.h +++ b/gtsam/nonlinear/AdaptAutoDiff.h @@ -57,7 +57,7 @@ class AdaptAutoDiff { if (H1 || H2) { // Get derivatives with AutoDiff const double* parameters[] = {v1.data(), v2.data()}; - double rowMajor1[M * N1], rowMajor2[M * N2]; // on the stack + double rowMajor1[M * N1] = {}, rowMajor2[M * N2] = {}; // on the stack double* jacobians[] = {rowMajor1, rowMajor2}; success = AutoDiff::Differentiate( f, parameters, M, result.data(), jacobians); diff --git a/gtsam/nonlinear/ExpressionFactor.h b/gtsam/nonlinear/ExpressionFactor.h index 04d82fe9a..6d6162825 100644 --- a/gtsam/nonlinear/ExpressionFactor.h +++ b/gtsam/nonlinear/ExpressionFactor.h @@ -19,16 +19,26 @@ #pragma once +#include +#include +#include #include #include -#include #include namespace gtsam { /** - - * Factor that supports arbitrary expressions via AD + * Factor that supports arbitrary expressions via AD. + * + * Arbitrary instances of this template can be directly inserted into a factor + * graph for optimization. However, to enable the correct (de)serialization of + * such instances, the user should declare derived classes from this template, + * implementing expresion(), serialize(), clone(), print(), and defining the + * corresponding `struct traits : public Testable {}`. + * + * \tparam T Type for measurements. 
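+ *
+ * A minimal sketch of such a derived class (class and variable names are
+ * illustrative only; expression(), serialize(), clone(), and print() overrides
+ * are omitted for brevity):
+ *   class MyPoint3Factor : public ExpressionFactor<Point3> {
+ *    public:
+ *     MyPoint3Factor(Key key, const Point3& measured, const SharedNoiseModel& model)
+ *         : ExpressionFactor<Point3>(model, measured, Expression<Point3>(key)) {}
+ *   };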
+ * */ template class ExpressionFactor: public NoiseModelFactor { @@ -68,13 +78,13 @@ class ExpressionFactor: public NoiseModelFactor { /// print relies on Testable traits being defined for T void print(const std::string& s = "", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { NoiseModelFactor::print(s, keyFormatter); traits::Print(measured_, "ExpressionFactor with measurement: "); } /// equals relies on Testable traits being defined for T - bool equals(const NonlinearFactor& f, double tol) const { + bool equals(const NonlinearFactor& f, double tol) const override { const ExpressionFactor* p = dynamic_cast(&f); return p && NoiseModelFactor::equals(f, tol) && traits::Equals(measured_, p->measured_, tol) && @@ -86,8 +96,8 @@ class ExpressionFactor: public NoiseModelFactor { * We override this method to provide * both the function evaluation and its derivative(s) in H. */ - virtual Vector unwhitenedError(const Values& x, - boost::optional&> H = boost::none) const { + Vector unwhitenedError(const Values& x, + boost::optional&> H = boost::none) const override { if (H) { const T value = expression_.valueAndDerivatives(x, keys_, dims_, *H); // NOTE(hayk): Doing the reverse, AKA Local(measured_, value) is not correct here @@ -99,7 +109,7 @@ class ExpressionFactor: public NoiseModelFactor { } } - virtual boost::shared_ptr linearize(const Values& x) const { + boost::shared_ptr linearize(const Values& x) const override { // Only linearize if the factor is active if (!active(x)) return boost::shared_ptr(); @@ -138,7 +148,7 @@ class ExpressionFactor: public NoiseModelFactor { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } @@ -209,7 +219,7 @@ class ExpressionFactor: public NoiseModelFactor { // Alignment, see https://eigen.tuxfamily.org/dox/group__TopicStructHavingEigenMembers.html enum { NeedsToAlign = (sizeof(T) % 16) == 0 }; public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) + GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) }; // ExpressionFactor @@ -217,26 +227,98 @@ class ExpressionFactor: public NoiseModelFactor { template struct traits > : public Testable > {}; +/** + * N-ary variadic template for ExpressionFactor meant as a base class for N-ary + * factors. Enforces an 'expression' method with N keys. + * Derived class (an N-factor!) needs to call 'initialize'. + * + * Does not provide backward compatible 'evaluateError'. + * + * \tparam T Type for measurements. The rest of template arguments are types + * for the N key-indexed Values. + * + */ +template +class ExpressionFactorN : public ExpressionFactor { +public: + static const std::size_t NARY_EXPRESSION_SIZE = sizeof...(Args); + using ArrayNKeys = std::array; + + /// Destructor + virtual ~ExpressionFactorN() = default; + + // Don't provide backward compatible evaluateVector(), due to its problematic + // variable length of optional Jacobian arguments. Vector evaluateError(const + // Args... 
args,...); + + /// Recreate expression from given keys_ and measured_, used in load + /// Needed to deserialize a derived factor + virtual Expression expression(const ArrayNKeys &keys) const { + throw std::runtime_error( + "ExpressionFactorN::expression not provided: cannot deserialize."); + } + +protected: + /// Default constructor, for serialization + ExpressionFactorN() = default; + + /// Constructor takes care of keys, but still need to call initialize + ExpressionFactorN(const ArrayNKeys &keys, const SharedNoiseModel &noiseModel, + const T &measurement) + : ExpressionFactor(noiseModel, measurement) { + for (const auto &key : keys) + Factor::keys_.push_back(key); + } + +private: + /// Return an expression that predicts the measurement given Values + Expression expression() const override { + ArrayNKeys keys; + int idx = 0; + for (const auto &key : Factor::keys_) + keys[idx++] = key; + return expression(keys); + } + + friend class boost::serialization::access; + template + void serialize(ARCHIVE &ar, const unsigned int /*version*/) { + ar &boost::serialization::make_nvp( + "ExpressionFactorN", + boost::serialization::base_object>(*this)); + } +}; +/// traits +template +struct traits> + : public Testable> {}; +// ExpressionFactorN + + +#if defined(GTSAM_ALLOW_DEPRECATED_SINCE_V41) /** * Binary specialization of ExpressionFactor meant as a base class for binary - * factors. Enforces an 'expression' method with two keys, and provides 'evaluateError'. - * Derived class (a binary factor!) needs to call 'initialize'. + * factors. Enforces an 'expression' method with two keys, and provides + * 'evaluateError'. Derived class (a binary factor!) needs to call 'initialize'. + * + * \sa ExpressionFactorN + * \deprecated Prefer the more general ExpressionFactorN<>. 
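+ *
+ * Migration sketch (illustrative): a factor that previously derived from
+ * ExpressionFactor2<T, A1, A2> can derive from ExpressionFactorN<T, A1, A2>
+ * instead and override
+ *   Expression<T> expression(const ArrayNKeys& keys) const override;
+ * using keys[0] and keys[1] where key1 and key2 were used before.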
*/ template -class ExpressionFactor2 : public ExpressionFactor { - public: +class ExpressionFactor2 : public ExpressionFactorN { +public: /// Destructor - virtual ~ExpressionFactor2() {} + ~ExpressionFactor2() override {} /// Backwards compatible evaluateError, to make existing tests compile - Vector evaluateError(const A1& a1, const A2& a2, - boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const { + Vector evaluateError(const A1 &a1, const A2 &a2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const { Values values; values.insert(this->keys_[0], a1); values.insert(this->keys_[1], a2); std::vector H(2); - Vector error = this->unwhitenedError(values, H); + Vector error = ExpressionFactor::unwhitenedError(values, H); if (H1) (*H1) = H[0]; if (H2) (*H2) = H[1]; return error; @@ -245,35 +327,25 @@ class ExpressionFactor2 : public ExpressionFactor { /// Recreate expression from given keys_ and measured_, used in load /// Needed to deserialize a derived factor virtual Expression expression(Key key1, Key key2) const { - throw std::runtime_error("ExpressionFactor2::expression not provided: cannot deserialize."); + throw std::runtime_error( + "ExpressionFactor2::expression not provided: cannot deserialize."); + } + virtual Expression + expression(const typename ExpressionFactorN::ArrayNKeys &keys) + const override { + return expression(keys[0], keys[1]); } - protected: +protected: /// Default constructor, for serialization ExpressionFactor2() {} /// Constructor takes care of keys, but still need to call initialize - ExpressionFactor2(Key key1, Key key2, - const SharedNoiseModel& noiseModel, - const T& measurement) - : ExpressionFactor(noiseModel, measurement) { - this->keys_.push_back(key1); - this->keys_.push_back(key2); - } - - private: - /// Return an expression that predicts the measurement given Values - virtual Expression expression() const { - return expression(this->keys_[0], this->keys_[1]); - } - - friend class boost::serialization::access; - template - void serialize(ARCHIVE& ar, const unsigned int /*version*/) { - ar& boost::serialization::make_nvp( - "ExpressionFactor", boost::serialization::base_object >(*this)); - } + ExpressionFactor2(Key key1, Key key2, const SharedNoiseModel &noiseModel, + const T &measurement) + : ExpressionFactorN({key1, key2}, noiseModel, measurement) {} }; // ExpressionFactor2 +#endif -}// \ namespace gtsam +} // namespace gtsam diff --git a/gtsam/nonlinear/FunctorizedFactor.h b/gtsam/nonlinear/FunctorizedFactor.h new file mode 100644 index 000000000..688b3aaa2 --- /dev/null +++ b/gtsam/nonlinear/FunctorizedFactor.h @@ -0,0 +1,147 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file FunctorizedFactor.h + * @date May 31, 2020 + * @author Varun Agrawal + **/ + +#pragma once + +#include +#include + +#include + +namespace gtsam { + +/** + * Factor which evaluates provided unary functor and uses the result to compute + * error with respect to the provided measurement. + * + * Template parameters are + * @param R: The return type of the functor after evaluation. + * @param T: The argument type for the functor. 
+ * + * Example: + * Key key = Symbol('X', 0); + * auto model = noiseModel::Isotropic::Sigma(9, 1); + * + * /// Functor that takes a matrix and multiplies every element by m + * class MultiplyFunctor { + * double m_; ///< simple multiplier + * public: + * MultiplyFunctor(double m) : m_(m) {} + * Matrix operator()(const Matrix &X, + * OptionalJacobian<-1, -1> H = boost::none) const { + * if (H) + * *H = m_ * Matrix::Identity(X.rows()*X.cols(), X.rows()*X.cols()); + * return m_ * X; + * } + * }; + * + * Matrix measurement = Matrix::Identity(3, 3); + * double multiplier = 2.0; + * + * FunctorizedFactor factor(keyX, measurement, model, + * MultiplyFunctor(multiplier)); + */ +template +class GTSAM_EXPORT FunctorizedFactor : public NoiseModelFactor1 { + private: + using Base = NoiseModelFactor1; + + R measured_; ///< value that is compared with functor return value + SharedNoiseModel noiseModel_; ///< noise model + std::function)> func_; ///< functor instance + + public: + /** default constructor - only use for serialization */ + FunctorizedFactor() {} + + /** Construct with given x and the parameters of the basis + * + * @param key: Factor key + * @param z: Measurement object of same type as that returned by functor + * @param model: Noise model + * @param func: The instance of the functor object + */ + FunctorizedFactor(Key key, const R &z, const SharedNoiseModel &model, + const std::function)> func) + : Base(model, key), measured_(z), noiseModel_(model), func_(func) {} + + virtual ~FunctorizedFactor() {} + + /// @return a deep copy of this factor + NonlinearFactor::shared_ptr clone() const override { + return boost::static_pointer_cast( + NonlinearFactor::shared_ptr(new FunctorizedFactor(*this))); + } + + Vector evaluateError(const T ¶ms, + boost::optional H = boost::none) const override { + R x = func_(params, H); + Vector error = traits::Local(measured_, x); + return error; + } + + /// @name Testable + /// @{ + void print(const std::string &s = "", + const KeyFormatter &keyFormatter = DefaultKeyFormatter) const override { + Base::print(s, keyFormatter); + std::cout << s << (s != "" ? " " : "") << "FunctorizedFactor(" + << keyFormatter(this->key()) << ")" << std::endl; + traits::Print(measured_, " measurement: "); + std::cout << " noise model sigmas: " << noiseModel_->sigmas().transpose() + << std::endl; + } + + bool equals(const NonlinearFactor &other, double tol = 1e-9) const override { + const FunctorizedFactor *e = + dynamic_cast *>(&other); + return e && Base::equals(other, tol) && + traits::Equals(this->measured_, e->measured_, tol); + } + /// @} + + private: + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(ARCHIVE &ar, const unsigned int /*version*/) { + ar &boost::serialization::make_nvp( + "NoiseModelFactor1", boost::serialization::base_object(*this)); + ar &BOOST_SERIALIZATION_NVP(measured_); + ar &BOOST_SERIALIZATION_NVP(func_); + } +}; + +/// traits +template +struct traits> + : public Testable> {}; + +/** + * Helper function to create a functorized factor. + * + * Uses function template deduction to identify return type and functor type, so + * template list only needs the functor argument type. 
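+ *
+ * Example, reusing the MultiplyFunctor from the class documentation above
+ * (key, measurement, model, and multiplier as defined there):
+ *   auto factor = MakeFunctorizedFactor<Matrix>(key, measurement, model,
+ *                                               MultiplyFunctor(multiplier));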
+ */ +template +FunctorizedFactor MakeFunctorizedFactor(Key key, const R &z, + const SharedNoiseModel &model, + const FUNC func) { + return FunctorizedFactor(key, z, model, func); +} + +} // namespace gtsam diff --git a/gtsam/nonlinear/GaussNewtonOptimizer.h b/gtsam/nonlinear/GaussNewtonOptimizer.h index a436597e5..a3923524b 100644 --- a/gtsam/nonlinear/GaussNewtonOptimizer.h +++ b/gtsam/nonlinear/GaussNewtonOptimizer.h @@ -28,6 +28,8 @@ class GaussNewtonOptimizer; * NonlinearOptimizationParams. */ class GTSAM_EXPORT GaussNewtonParams : public NonlinearOptimizerParams { +public: + using OptimizerType = GaussNewtonOptimizer; }; /** diff --git a/gtsam/nonlinear/GncOptimizer.h b/gtsam/nonlinear/GncOptimizer.h new file mode 100644 index 000000000..cd0b4c81a --- /dev/null +++ b/gtsam/nonlinear/GncOptimizer.h @@ -0,0 +1,338 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file GncOptimizer.h + * @brief The GncOptimizer class + * @author Jingnan Shi + * @author Luca Carlone + * @author Frank Dellaert + * + * Implementation of the paper: Yang, Antonante, Tzoumas, Carlone, "Graduated Non-Convexity for Robust Spatial Perception: + * From Non-Minimal Solvers to Global Outlier Rejection", ICRA/RAL, 2020. (arxiv version: https://arxiv.org/pdf/1909.08605.pdf) + * + * See also: + * Antonante, Tzoumas, Yang, Carlone, "Outlier-Robust Estimation: Hardness, Minimally-Tuned Algorithms, and Applications", + * arxiv: https://arxiv.org/pdf/2007.15109.pdf, 2020. + */ + +#pragma once + +#include +#include + +namespace gtsam { + +/* ************************************************************************* */ +template +class GncOptimizer { + public: + /// For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer. + typedef typename GncParameters::OptimizerType BaseOptimizer; + + private: + NonlinearFactorGraph nfg_; ///< Original factor graph to be solved by GNC. + Values state_; ///< Initial values to be used at each iteration by GNC. + GncParameters params_; ///< GNC parameters. + Vector weights_; ///< Weights associated to each factor in GNC (this could be a local variable in optimize, but it is useful to make it accessible from outside). + + public: + /// Constructor. + GncOptimizer(const NonlinearFactorGraph& graph, const Values& initialValues, + const GncParameters& params = GncParameters()) + : state_(initialValues), + params_(params) { + + // make sure all noiseModels are Gaussian or convert to Gaussian + nfg_.resize(graph.size()); + for (size_t i = 0; i < graph.size(); i++) { + if (graph[i]) { + NoiseModelFactor::shared_ptr factor = boost::dynamic_pointer_cast< + NoiseModelFactor>(graph[i]); + auto robust = boost::dynamic_pointer_cast< + noiseModel::Robust>(factor->noiseModel()); + // if the factor has a robust loss, we remove the robust loss + nfg_[i] = robust ? factor-> cloneWithNewNoiseModel(robust->noise()) : factor; + } + } + } + + /// Access a copy of the internal factor graph. + const NonlinearFactorGraph& getFactors() const { return nfg_; } + + /// Access a copy of the internal values. + const Values& getState() const { return state_; } + + /// Access a copy of the parameters. 
+ const GncParameters& getParams() const { return params_;} + + /// Access a copy of the GNC weights. + const Vector& getWeights() const { return weights_;} + + /// Compute optimal solution using graduated non-convexity. + Values optimize() { + // start by assuming all measurements are inliers + weights_ = Vector::Ones(nfg_.size()); + BaseOptimizer baseOptimizer(nfg_, state_); + Values result = baseOptimizer.optimize(); + double mu = initializeMu(); + double prev_cost = nfg_.error(result); + double cost = 0.0; // this will be updated in the main loop + + // handle the degenerate case that corresponds to small + // maximum residual errors at initialization + // For GM: if residual error is small, mu -> 0 + // For TLS: if residual error is small, mu -> -1 + if (mu <= 0) { + if (params_.verbosity >= GncParameters::Verbosity::SUMMARY) { + std::cout << "GNC Optimizer stopped because maximum residual at " + "initialization is small." + << std::endl; + } + if (params_.verbosity >= GncParameters::Verbosity::VALUES) { + result.print("result\n"); + std::cout << "mu: " << mu << std::endl; + } + return result; + } + + size_t iter; + for (iter = 0; iter < params_.maxIterations; iter++) { + + // display info + if (params_.verbosity >= GncParameters::Verbosity::VALUES) { + std::cout << "iter: " << iter << std::endl; + result.print("result\n"); + std::cout << "mu: " << mu << std::endl; + std::cout << "weights: " << weights_ << std::endl; + } + // weights update + weights_ = calculateWeights(result, mu); + + // variable/values update + NonlinearFactorGraph graph_iter = this->makeWeightedGraph(weights_); + BaseOptimizer baseOptimizer_iter(graph_iter, state_); + result = baseOptimizer_iter.optimize(); + + // stopping condition + cost = graph_iter.error(result); + if (checkConvergence(mu, weights_, cost, prev_cost)) { + break; + } + + // update mu + mu = updateMu(mu); + + // get ready for next iteration + prev_cost = cost; + + // display info + if (params_.verbosity >= GncParameters::Verbosity::VALUES) { + std::cout << "previous cost: " << prev_cost << std::endl; + std::cout << "current cost: " << cost << std::endl; + } + } + // display info + if (params_.verbosity >= GncParameters::Verbosity::SUMMARY) { + std::cout << "final iterations: " << iter << std::endl; + std::cout << "final mu: " << mu << std::endl; + std::cout << "final weights: " << weights_ << std::endl; + std::cout << "previous cost: " << prev_cost << std::endl; + std::cout << "current cost: " << cost << std::endl; + } + return result; + } + + /// Initialize the gnc parameter mu such that loss is approximately convex (remark 5 in GNC paper). + double initializeMu() const { + // compute largest error across all factors + double rmax_sq = 0.0; + for (size_t i = 0; i < nfg_.size(); i++) { + if (nfg_[i]) { + rmax_sq = std::max(rmax_sq, nfg_[i]->error(state_)); + } + } + // set initial mu + switch (params_.lossType) { + case GncLossType::GM: + // surrogate cost is convex for large mu + return 2 * rmax_sq / params_.barcSq; // initial mu + case GncLossType::TLS: + /* initialize mu to the value specified in Remark 5 in GNC paper. + surrogate cost is convex for mu close to zero + degenerate case: 2 * rmax_sq - params_.barcSq < 0 (handled in the main loop) + according to remark mu = params_.barcSq / (2 * rmax_sq - params_.barcSq) = params_.barcSq/ excessResidual + however, if the denominator is 0 or negative, we return mu = -1 which leads to termination of the main GNC loop + */ + return + (2 * rmax_sq - params_.barcSq) > 0 ? 
+ params_.barcSq / (2 * rmax_sq - params_.barcSq) : -1; + default: + throw std::runtime_error( + "GncOptimizer::initializeMu: called with unknown loss type."); + } + } + + /// Update the gnc parameter mu to gradually increase nonconvexity. + double updateMu(const double mu) const { + switch (params_.lossType) { + case GncLossType::GM: + // reduce mu, but saturate at 1 (original cost is recovered for mu -> 1) + return std::max(1.0, mu / params_.muStep); + case GncLossType::TLS: + // increases mu at each iteration (original cost is recovered for mu -> inf) + return mu * params_.muStep; + default: + throw std::runtime_error( + "GncOptimizer::updateMu: called with unknown loss type."); + } + } + + /// Check if we have reached the value of mu for which the surrogate loss matches the original loss. + bool checkMuConvergence(const double mu) const { + bool muConverged = false; + switch (params_.lossType) { + case GncLossType::GM: + muConverged = std::fabs(mu - 1.0) < 1e-9; // mu=1 recovers the original GM function + break; + case GncLossType::TLS: + muConverged = false; // for TLS there is no stopping condition on mu (it must tend to infinity) + break; + default: + throw std::runtime_error( + "GncOptimizer::checkMuConvergence: called with unknown loss type."); + } + if (muConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY) + std::cout << "muConverged = true " << std::endl; + return muConverged; + } + + /// Check convergence of relative cost differences. + bool checkCostConvergence(const double cost, const double prev_cost) const { + bool costConverged = std::fabs(cost - prev_cost) / std::max(prev_cost, 1e-7) + < params_.relativeCostTol; + if (costConverged && params_.verbosity >= GncParameters::Verbosity::SUMMARY) + std::cout << "checkCostConvergence = true " << std::endl; + return costConverged; + } + + /// Check convergence of weights to binary values. + bool checkWeightsConvergence(const Vector& weights) const { + bool weightsConverged = false; + switch (params_.lossType) { + case GncLossType::GM: + weightsConverged = false; // for GM, there is no clear binary convergence for the weights + break; + case GncLossType::TLS: + weightsConverged = true; + for (size_t i = 0; i < weights.size(); i++) { + if (std::fabs(weights[i] - std::round(weights[i])) + > params_.weightsTol) { + weightsConverged = false; + break; + } + } + break; + default: + throw std::runtime_error( + "GncOptimizer::checkWeightsConvergence: called with unknown loss type."); + } + if (weightsConverged + && params_.verbosity >= GncParameters::Verbosity::SUMMARY) + std::cout << "weightsConverged = true " << std::endl; + return weightsConverged; + } + + /// Check for convergence between consecutive GNC iterations. + bool checkConvergence(const double mu, const Vector& weights, + const double cost, const double prev_cost) const { + return checkCostConvergence(cost, prev_cost) + || checkWeightsConvergence(weights) || checkMuConvergence(mu); + } + + /// Create a graph where each factor is weighted by the gnc weights. 
+ NonlinearFactorGraph makeWeightedGraph(const Vector& weights) const { + // make sure all noiseModels are Gaussian or convert to Gaussian + NonlinearFactorGraph newGraph; + newGraph.resize(nfg_.size()); + for (size_t i = 0; i < nfg_.size(); i++) { + if (nfg_[i]) { + auto factor = boost::dynamic_pointer_cast< + NoiseModelFactor>(nfg_[i]); + auto noiseModel = + boost::dynamic_pointer_cast( + factor->noiseModel()); + if (noiseModel) { + Matrix newInfo = weights[i] * noiseModel->information(); + auto newNoiseModel = noiseModel::Gaussian::Information(newInfo); + newGraph[i] = factor->cloneWithNewNoiseModel(newNoiseModel); + } else { + throw std::runtime_error( + "GncOptimizer::makeWeightedGraph: unexpected non-Gaussian noise model."); + } + } + } + return newGraph; + } + + /// Calculate gnc weights. + Vector calculateWeights(const Values& currentEstimate, const double mu) { + Vector weights = Vector::Ones(nfg_.size()); + + // do not update the weights that the user has decided are known inliers + std::vector allWeights; + for (size_t k = 0; k < nfg_.size(); k++) { + allWeights.push_back(k); + } + std::vector unknownWeights; + std::set_difference(allWeights.begin(), allWeights.end(), + params_.knownInliers.begin(), + params_.knownInliers.end(), + std::inserter(unknownWeights, unknownWeights.begin())); + + // update weights of known inlier/outlier measurements + switch (params_.lossType) { + case GncLossType::GM: { // use eq (12) in GNC paper + for (size_t k : unknownWeights) { + if (nfg_[k]) { + double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual + weights[k] = std::pow( + (mu * params_.barcSq) / (u2_k + mu * params_.barcSq), 2); + } + } + return weights; + } + case GncLossType::TLS: { // use eq (14) in GNC paper + double upperbound = (mu + 1) / mu * params_.barcSq; + double lowerbound = mu / (mu + 1) * params_.barcSq; + for (size_t k : unknownWeights) { + if (nfg_[k]) { + double u2_k = nfg_[k]->error(currentEstimate); // squared (and whitened) residual + if (u2_k >= upperbound) { + weights[k] = 0; + } else if (u2_k <= lowerbound) { + weights[k] = 1; + } else { + weights[k] = std::sqrt(params_.barcSq * mu * (mu + 1) / u2_k) + - mu; + } + } + } + return weights; + } + default: + throw std::runtime_error( + "GncOptimizer::calculateWeights: called with unknown loss type."); + } + } +}; + +} diff --git a/gtsam/nonlinear/GncParams.h b/gtsam/nonlinear/GncParams.h new file mode 100644 index 000000000..0388a7fd1 --- /dev/null +++ b/gtsam/nonlinear/GncParams.h @@ -0,0 +1,164 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file GncOptimizer.h + * @brief The GncOptimizer class + * @author Jingnan Shi + * @author Luca Carlone + * @author Frank Dellaert + * + * Implementation of the paper: Yang, Antonante, Tzoumas, Carlone, "Graduated Non-Convexity for Robust Spatial Perception: + * From Non-Minimal Solvers to Global Outlier Rejection", ICRA/RAL, 2020. (arxiv version: https://arxiv.org/pdf/1909.08605.pdf) + * + * See also: + * Antonante, Tzoumas, Yang, Carlone, "Outlier-Robust Estimation: Hardness, Minimally-Tuned Algorithms, and Applications", + * arxiv: https://arxiv.org/pdf/2007.15109.pdf, 2020. 
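 + *
 + * Illustrative usage (a sketch, not part of this patch; `graph` and `initial`
 + * stand in for an existing NonlinearFactorGraph and Values):
 + * \code
 + *   GncParams<GaussNewtonParams> gncParams(GaussNewtonParams());
 + *   gncParams.setLossType(GncLossType::TLS);
 + *   GncOptimizer<GncParams<GaussNewtonParams>> gnc(graph, initial, gncParams);
 + *   Values estimate = gnc.optimize();
 + * \endcode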
+ */ + +#pragma once + +#include +#include + +namespace gtsam { + +/* ************************************************************************* */ +/// Choice of robust loss function for GNC. +enum GncLossType { + GM /*Geman McClure*/, + TLS /*Truncated least squares*/ +}; + +template +class GncParams { + public: + /// For each parameter, specify the corresponding optimizer: e.g., GaussNewtonParams -> GaussNewtonOptimizer. + typedef typename BaseOptimizerParameters::OptimizerType OptimizerType; + + /// Verbosity levels + enum Verbosity { + SILENT = 0, + SUMMARY, + VALUES + }; + + /// Constructor. + GncParams(const BaseOptimizerParameters& baseOptimizerParams) + : baseOptimizerParams(baseOptimizerParams) { + } + + /// Default constructor. + GncParams() + : baseOptimizerParams() { + } + + /// GNC parameters. + BaseOptimizerParameters baseOptimizerParams; ///< Optimization parameters used to solve the weighted least squares problem at each GNC iteration + /// any other specific GNC parameters: + GncLossType lossType = TLS; ///< Default loss + size_t maxIterations = 100; ///< Maximum number of iterations + double barcSq = 1.0; ///< A factor is considered an inlier if factor.error() < barcSq. Note that factor.error() whitens by the covariance + double muStep = 1.4; ///< Multiplicative factor to reduce/increase the mu in gnc + double relativeCostTol = 1e-5; ///< If relative cost change is below this threshold, stop iterating + double weightsTol = 1e-4; ///< If the weights are within weightsTol from being binary, stop iterating (only for TLS) + Verbosity verbosity = SILENT; ///< Verbosity level + std::vector knownInliers = std::vector(); ///< Slots in the factor graph corresponding to measurements that we know are inliers + + /// Set the robust loss function to be used in GNC (chosen among the ones in GncLossType). + void setLossType(const GncLossType type) { + lossType = type; + } + + /// Set the maximum number of iterations in GNC (changing the max nr of iters might lead to less accurate solutions and is not recommended). + void setMaxIterations(const size_t maxIter) { + std::cout + << "setMaxIterations: changing the max nr of iters might lead to less accurate solutions and is not recommended! " + << std::endl; + maxIterations = maxIter; + } + + /** Set the maximum weighted residual error for an inlier. For a factor in the form f(x) = 0.5 * || r(x) ||^2_Omega, + * the inlier threshold is the largest value of f(x) for the corresponding measurement to be considered an inlier. + * In other words, an inlier at x is such that 0.5 * || r(x) ||^2_Omega <= barcSq. + * Assuming a isotropic measurement covariance sigma^2 * Identity, the cost becomes: 0.5 * 1/sigma^2 || r(x) ||^2 <= barcSq. + * Hence || r(x) ||^2 <= 2 * barcSq * sigma^2. + * */ + void setInlierCostThreshold(const double inth) { + barcSq = inth; + } + + /// Set the graduated non-convexity step: at each GNC iteration, mu is updated as mu <- mu * muStep. + void setMuStep(const double step) { + muStep = step; + } + + /// Set the maximum relative difference in mu values to stop iterating. + void setRelativeCostTol(double value) { + relativeCostTol = value; + } + + /// Set the maximum difference between the weights and their rounding in {0,1} to stop iterating. + void setWeightsTol(double value) { + weightsTol = value; + } + + /// Set the verbosity level. + void setVerbosityGNC(const Verbosity value) { + verbosity = value; + } + + /** (Optional) Provide a vector of measurements that must be considered inliers. 
The enties in the vector + * corresponds to the slots in the factor graph. For instance, if you have a nonlinear factor graph nfg, + * and you provide knownIn = {0, 2, 15}, GNC will not apply outlier rejection to nfg[0], nfg[2], and nfg[15]. + * This functionality is commonly used in SLAM when one may assume the odometry is outlier free, and + * only apply GNC to prune outliers from the loop closures. + * */ + void setKnownInliers(const std::vector& knownIn) { + for (size_t i = 0; i < knownIn.size(); i++) + knownInliers.push_back(knownIn[i]); + } + + /// Equals. + bool equals(const GncParams& other, double tol = 1e-9) const { + return baseOptimizerParams.equals(other.baseOptimizerParams) + && lossType == other.lossType && maxIterations == other.maxIterations + && std::fabs(barcSq - other.barcSq) <= tol + && std::fabs(muStep - other.muStep) <= tol + && verbosity == other.verbosity && knownInliers == other.knownInliers; + } + + /// Print. + void print(const std::string& str) const { + std::cout << str << "\n"; + switch (lossType) { + case GM: + std::cout << "lossType: Geman McClure" << "\n"; + break; + case TLS: + std::cout << "lossType: Truncated Least-squares" << "\n"; + break; + default: + throw std::runtime_error("GncParams::print: unknown loss type."); + } + std::cout << "maxIterations: " << maxIterations << "\n"; + std::cout << "barcSq: " << barcSq << "\n"; + std::cout << "muStep: " << muStep << "\n"; + std::cout << "relativeCostTol: " << relativeCostTol << "\n"; + std::cout << "weightsTol: " << weightsTol << "\n"; + std::cout << "verbosity: " << verbosity << "\n"; + for (size_t i = 0; i < knownInliers.size(); i++) + std::cout << "knownInliers: " << knownInliers[i] << "\n"; + baseOptimizerParams.print(str); + } +}; + +} diff --git a/gtsam/nonlinear/ISAM2Clique.h b/gtsam/nonlinear/ISAM2Clique.h index 53bdaec81..9b40c02c6 100644 --- a/gtsam/nonlinear/ISAM2Clique.h +++ b/gtsam/nonlinear/ISAM2Clique.h @@ -51,6 +51,7 @@ class GTSAM_EXPORT ISAM2Clique /// Default constructor ISAM2Clique() : Base() {} + virtual ~ISAM2Clique() = default; /// Copy constructor, does *not* copy solution pointers as these are invalid /// in different trees. @@ -85,7 +86,7 @@ class GTSAM_EXPORT ISAM2Clique /** print this node */ void print(const std::string& s = "", - const KeyFormatter& formatter = DefaultKeyFormatter) const; + const KeyFormatter& formatter = DefaultKeyFormatter) const override; void optimizeWildfire(const KeySet& replaced, double threshold, KeySet* changed, VectorValues* delta, diff --git a/gtsam/nonlinear/ISAM2Result.h b/gtsam/nonlinear/ISAM2Result.h index e45b17e4a..b249af5c5 100644 --- a/gtsam/nonlinear/ISAM2Result.h +++ b/gtsam/nonlinear/ISAM2Result.h @@ -176,6 +176,8 @@ struct ISAM2Result { size_t getVariablesRelinearized() const { return variablesRelinearized; } size_t getVariablesReeliminated() const { return variablesReeliminated; } size_t getCliques() const { return cliques; } + double getErrorBefore() const { return errorBefore ? *errorBefore : std::nan(""); } + double getErrorAfter() const { return errorAfter ? *errorAfter : std::nan(""); } }; } // namespace gtsam diff --git a/gtsam/nonlinear/LevenbergMarquardtParams.h b/gtsam/nonlinear/LevenbergMarquardtParams.h index 4962ad366..30e783fc9 100644 --- a/gtsam/nonlinear/LevenbergMarquardtParams.h +++ b/gtsam/nonlinear/LevenbergMarquardtParams.h @@ -25,6 +25,8 @@ namespace gtsam { +class LevenbergMarquardtOptimizer; + /** Parameters for Levenberg-Marquardt optimization. 
Note that this parameters * class inherits from NonlinearOptimizerParams, which specifies the parameters * common to all nonlinear optimization algorithms. This class also contains @@ -40,6 +42,7 @@ class GTSAM_EXPORT LevenbergMarquardtParams: public NonlinearOptimizerParams { static VerbosityLM verbosityLMTranslator(const std::string &s); static std::string verbosityLMTranslator(VerbosityLM value); + using OptimizerType = LevenbergMarquardtOptimizer; public: diff --git a/gtsam/nonlinear/LinearContainerFactor.h b/gtsam/nonlinear/LinearContainerFactor.h index 8a1f600ff..8587e6b91 100644 --- a/gtsam/nonlinear/LinearContainerFactor.h +++ b/gtsam/nonlinear/LinearContainerFactor.h @@ -59,10 +59,10 @@ class LinearContainerFactor : public NonlinearFactor { // Testable /** print */ - GTSAM_EXPORT void print(const std::string& s = "", const KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const; + GTSAM_EXPORT void print(const std::string& s = "", const KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const override; /** Check if two factors are equal */ - GTSAM_EXPORT bool equals(const NonlinearFactor& f, double tol = 1e-9) const; + GTSAM_EXPORT bool equals(const NonlinearFactor& f, double tol = 1e-9) const override; // NonlinearFactor @@ -74,10 +74,10 @@ class LinearContainerFactor : public NonlinearFactor { * * @return nonlinear error if linearizationPoint present, zero otherwise */ - GTSAM_EXPORT double error(const Values& c) const; + GTSAM_EXPORT double error(const Values& c) const override; /** get the dimension of the factor: rows of linear factor */ - GTSAM_EXPORT size_t dim() const; + GTSAM_EXPORT size_t dim() const override; /** Extract the linearization point used in recalculating error */ const boost::optional& linearizationPoint() const { return linearizationPoint_; } @@ -98,7 +98,7 @@ class LinearContainerFactor : public NonlinearFactor { * TODO: better approximation of relinearization * TODO: switchable modes for approximation technique */ - GTSAM_EXPORT GaussianFactor::shared_ptr linearize(const Values& c) const; + GTSAM_EXPORT GaussianFactor::shared_ptr linearize(const Values& c) const override; /** * Creates an anti-factor directly @@ -116,7 +116,7 @@ class LinearContainerFactor : public NonlinearFactor { * * Clones the underlying linear factor */ - NonlinearFactor::shared_ptr clone() const { + NonlinearFactor::shared_ptr clone() const override { return NonlinearFactor::shared_ptr(new LinearContainerFactor(factor_,linearizationPoint_)); } @@ -140,21 +140,14 @@ class LinearContainerFactor : public NonlinearFactor { * Utility function for converting linear graphs to nonlinear graphs * consisting of LinearContainerFactors. 
*/ - GTSAM_EXPORT static NonlinearFactorGraph ConvertLinearGraph(const GaussianFactorGraph& linear_graph, + GTSAM_EXPORT + static NonlinearFactorGraph ConvertLinearGraph(const GaussianFactorGraph& linear_graph, const Values& linearizationPoint = Values()); -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - GTSAM_EXPORT static NonlinearFactorGraph convertLinearGraph(const GaussianFactorGraph& linear_graph, - const Values& linearizationPoint = Values()) { - return ConvertLinearGraph(linear_graph, linearizationPoint); - } -#endif - -protected: - GTSAM_EXPORT void initializeLinearizationPoint(const Values& linearizationPoint); - -private: + protected: + GTSAM_EXPORT void initializeLinearizationPoint(const Values& linearizationPoint); + private: /** Serialization function */ friend class boost::serialization::access; template diff --git a/gtsam/nonlinear/Marginals.h b/gtsam/nonlinear/Marginals.h index 4e201cc38..9935bafdd 100644 --- a/gtsam/nonlinear/Marginals.h +++ b/gtsam/nonlinear/Marginals.h @@ -48,7 +48,7 @@ class GTSAM_EXPORT Marginals { public: - /// Default constructor only for Cython wrapper + /// Default constructor only for wrappers Marginals(){} /** Construct a marginals class from a nonlinear factor graph. @@ -156,7 +156,7 @@ class GTSAM_EXPORT JointMarginal { FastMap indices_; public: - /// Default constructor only for Cython wrapper + /// Default constructor only for wrappers JointMarginal() {} /** Access a block, corresponding to a pair of variables, of the joint diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h index abf6b257a..a85f27425 100644 --- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h +++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h @@ -200,6 +200,10 @@ boost::tuple nonlinearConjugateGradient(const S &system, currentValues = system.advance(prevValues, alpha, direction); currentError = system.error(currentValues); + // User hook: + if (params.iterationHook) + params.iterationHook(iteration, prevError, currentError); + // Maybe show output if (params.verbosity >= NonlinearOptimizerParams::ERROR) std::cout << "iteration: " << iteration << ", currentError: " << currentError << std::endl; diff --git a/gtsam/nonlinear/NonlinearEquality.h b/gtsam/nonlinear/NonlinearEquality.h index d4eb655c3..6286b73da 100644 --- a/gtsam/nonlinear/NonlinearEquality.h +++ b/gtsam/nonlinear/NonlinearEquality.h @@ -107,15 +107,15 @@ class NonlinearEquality: public NoiseModelFactor1 { /// @name Testable /// @{ - virtual void print(const std::string& s = "", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "", + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "Constraint: on [" << keyFormatter(this->key()) << "]\n"; traits::Print(feasible_, "Feasible Point:\n"); std::cout << "Variable Dimension: " << traits::GetDimension(feasible_) << std::endl; } /** Check if two factors are equal */ - virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const { + bool equals(const NonlinearFactor& f, double tol = 1e-9) const override { const This* e = dynamic_cast(&f); return e && Base::equals(f) && traits::Equals(feasible_,e->feasible_, tol) && std::abs(error_gain_ - e->error_gain_) < tol; @@ -126,7 +126,7 @@ class NonlinearEquality: public NoiseModelFactor1 { /// @{ /** actual error function calculation */ - virtual double error(const Values& c) const { + double error(const Values& c) const override { const T& xj = 
c.at(this->key()); Vector e = this->unwhitenedError(c); if (allow_error_ || !compare_(xj, feasible_)) { @@ -138,7 +138,7 @@ class NonlinearEquality: public NoiseModelFactor1 { /** error function */ Vector evaluateError(const T& xj, - boost::optional H = boost::none) const { + boost::optional H = boost::none) const override { const size_t nj = traits::GetDimension(feasible_); if (allow_error_) { if (H) @@ -158,7 +158,7 @@ class NonlinearEquality: public NoiseModelFactor1 { } // Linearize is over-written, because base linearization tries to whiten - virtual GaussianFactor::shared_ptr linearize(const Values& x) const { + GaussianFactor::shared_ptr linearize(const Values& x) const override { const T& xj = x.at(this->key()); Matrix A; Vector b = evaluateError(xj, A); @@ -168,14 +168,14 @@ class NonlinearEquality: public NoiseModelFactor1 { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /// @} - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW private: @@ -242,14 +242,14 @@ class NonlinearEquality1: public NoiseModelFactor1 { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /** g(x) with optional derivative */ Vector evaluateError(const X& x1, - boost::optional H = boost::none) const { + boost::optional H = boost::none) const override { if (H) (*H) = Matrix::Identity(traits::GetDimension(x1),traits::GetDimension(x1)); // manifold equivalent of h(x)-z -> log(z,h(x)) @@ -257,15 +257,15 @@ class NonlinearEquality1: public NoiseModelFactor1 { } /** Print */ - virtual void print(const std::string& s = "", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "", + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << ": NonlinearEquality1(" << keyFormatter(this->key()) << ")," << "\n"; this->noiseModel_->print(); traits::Print(value_, "Value"); } - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW private: @@ -317,21 +317,21 @@ class NonlinearEquality2: public NoiseModelFactor2 { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /** g(x) with optional derivative2 */ Vector evaluateError(const X& x1, const X& x2, boost::optional H1 = - boost::none, boost::optional H2 = boost::none) const { + boost::none, boost::optional H2 = boost::none) const override { static const size_t p = traits::dimension; if (H1) *H1 = -Matrix::Identity(p,p); if (H2) *H2 = Matrix::Identity(p,p); return traits::Local(x1,x2); } - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW private: diff --git a/gtsam/nonlinear/NonlinearFactor.cpp b/gtsam/nonlinear/NonlinearFactor.cpp index ee14e8073..8b8d2da6c 100644 --- a/gtsam/nonlinear/NonlinearFactor.cpp +++ b/gtsam/nonlinear/NonlinearFactor.cpp @@ -76,6 +76,14 @@ bool NoiseModelFactor::equals(const NonlinearFactor& f, double tol) const { && noiseModel_->equals(*e->noiseModel_, tol))); } +/* 
************************************************************************* */ +NoiseModelFactor::shared_ptr NoiseModelFactor::cloneWithNewNoiseModel( + const SharedNoiseModel newNoise) const { + NoiseModelFactor::shared_ptr new_factor = boost::dynamic_pointer_cast(clone()); + new_factor->noiseModel_ = newNoise; + return new_factor; +} + /* ************************************************************************* */ static void check(const SharedNoiseModel& noiseModel, size_t m) { if (noiseModel && m != noiseModel->dim()) @@ -121,7 +129,7 @@ double NoiseModelFactor::error(const Values& c) const { const Vector b = unwhitenedError(c); check(noiseModel_, b.size()); if (noiseModel_) - return 0.5 * noiseModel_->distance(b); + return noiseModel_->loss(noiseModel_->squaredMahalanobisDistance(b)); else return 0.5 * b.squaredNorm(); } else { diff --git a/gtsam/nonlinear/NonlinearFactor.h b/gtsam/nonlinear/NonlinearFactor.h index 63547a248..00311fb87 100644 --- a/gtsam/nonlinear/NonlinearFactor.h +++ b/gtsam/nonlinear/NonlinearFactor.h @@ -29,13 +29,6 @@ #include #include -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -#define ADD_CLONE_NONLINEAR_FACTOR(Derived) \ - virtual gtsam::NonlinearFactor::shared_ptr clone() const { \ - return boost::static_pointer_cast( \ - gtsam::NonlinearFactor::shared_ptr(new Derived(*this))); } -#endif - namespace gtsam { using boost::assign::cref_list_of; @@ -195,14 +188,14 @@ class GTSAM_EXPORT NoiseModelFactor: public NonlinearFactor { public: /** Print */ - virtual void print(const std::string& s = "", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; + void print(const std::string& s = "", + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; /** Check if two factors are equal */ - virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const; + bool equals(const NonlinearFactor& f, double tol = 1e-9) const override; /** get the dimension of the factor (number of rows on linearization) */ - virtual size_t dim() const { + size_t dim() const override { return noiseModel_->dim(); } @@ -242,30 +235,28 @@ class GTSAM_EXPORT NoiseModelFactor: public NonlinearFactor { * In this class, we take the raw prediction error \f$ h(x)-z \f$, ask the noise model * to transform it to \f$ (h(x)-z)^2/\sigma^2 \f$, and then multiply by 0.5. 
*/ - virtual double error(const Values& c) const; + double error(const Values& c) const override; /** * Linearize a non-linearFactorN to get a GaussianFactor, * \f$ Ax-b \approx h(x+\delta x)-z = h(x) + A \delta x - z \f$ * Hence \f$ b = z - h(x) = - \mathtt{error\_vector}(x) \f$ */ - boost::shared_ptr linearize(const Values& x) const; - -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - SharedNoiseModel get_noiseModel() const { return noiseModel_; } - /// @} -#endif + boost::shared_ptr linearize(const Values& x) const override; -private: + /** + * Creates a shared_ptr clone of the + * factor with a new noise model + */ + shared_ptr cloneWithNewNoiseModel(const SharedNoiseModel newNoise) const; + private: /** Serialization function */ friend class boost::serialization::access; template void serialize(ARCHIVE & ar, const unsigned int /*version*/) { ar & boost::serialization::make_nvp("NonlinearFactor", - boost::serialization::base_object(*this)); + boost::serialization::base_object(*this)); ar & BOOST_SERIALIZATION_NVP(noiseModel_); } @@ -296,6 +287,8 @@ class NoiseModelFactor1: public NoiseModelFactor { typedef NoiseModelFactor1 This; public: + /// @name Constructors + /// @{ /** Default constructor for I/O only */ NoiseModelFactor1() {} @@ -309,16 +302,23 @@ class NoiseModelFactor1: public NoiseModelFactor { * @param noiseModel shared pointer to noise model * @param key1 by which to look up X value in Values */ - NoiseModelFactor1(const SharedNoiseModel& noiseModel, Key key1) : - Base(noiseModel, cref_list_of<1>(key1)) {} + NoiseModelFactor1(const SharedNoiseModel &noiseModel, Key key1) + : Base(noiseModel, cref_list_of<1>(key1)) {} - /** Calls the 1-key specific version of evaluateError, which is pure virtual - * so must be implemented in the derived class. - */ - virtual Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const { - if(this->active(x)) { - const X& x1 = x.at(keys_[0]); - if(H) { + /// @} + /// @name NoiseModelFactor methods + /// @{ + + /** + * Calls the 1-key specific version of evaluateError below, which is pure + * virtual so must be implemented in the derived class. + */ + Vector unwhitenedError( + const Values &x, + boost::optional &> H = boost::none) const override { + if (this->active(x)) { + const X &x1 = x.at(keys_[0]); + if (H) { return evaluateError(x1, (*H)[0]); } else { return evaluateError(x1); @@ -328,16 +328,22 @@ class NoiseModelFactor1: public NoiseModelFactor { } } + /// @} + /// @name Virtual methods + /// @{ + /** * Override this method to finish implementing a unary factor. * If the optional Matrix reference argument is specified, it should compute * both the function evaluation and its derivative in X. */ - virtual Vector evaluateError(const X& x, boost::optional H = - boost::none) const = 0; + virtual Vector + evaluateError(const X &x, + boost::optional H = boost::none) const = 0; -private: + /// @} +private: /** Serialization function */ friend class boost::serialization::access; template @@ -389,7 +395,7 @@ class NoiseModelFactor2: public NoiseModelFactor { /** Calls the 2-key specific version of evaluateError, which is pure virtual * so must be implemented in the derived class. 
*/ - virtual Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const { + Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const override { if(this->active(x)) { const X1& x1 = x.at(keys_[0]); const X2& x2 = x.at(keys_[1]); @@ -467,7 +473,7 @@ class NoiseModelFactor3: public NoiseModelFactor { /** Calls the 3-key specific version of evaluateError, which is pure virtual * so must be implemented in the derived class. */ - virtual Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const { + Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const override { if(this->active(x)) { if(H) return evaluateError(x.at(keys_[0]), x.at(keys_[1]), x.at(keys_[2]), (*H)[0], (*H)[1], (*H)[2]); @@ -547,7 +553,7 @@ class NoiseModelFactor4: public NoiseModelFactor { /** Calls the 4-key specific version of evaluateError, which is pure virtual * so must be implemented in the derived class. */ - virtual Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const { + Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const override { if(this->active(x)) { if(H) return evaluateError(x.at(keys_[0]), x.at(keys_[1]), x.at(keys_[2]), x.at(keys_[3]), (*H)[0], (*H)[1], (*H)[2], (*H)[3]); @@ -631,7 +637,7 @@ class NoiseModelFactor5: public NoiseModelFactor { /** Calls the 5-key specific version of evaluateError, which is pure virtual * so must be implemented in the derived class. */ - virtual Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const { + Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const override { if(this->active(x)) { if(H) return evaluateError(x.at(keys_[0]), x.at(keys_[1]), x.at(keys_[2]), x.at(keys_[3]), x.at(keys_[4]), (*H)[0], (*H)[1], (*H)[2], (*H)[3], (*H)[4]); @@ -719,7 +725,7 @@ class NoiseModelFactor6: public NoiseModelFactor { /** Calls the 6-key specific version of evaluateError, which is pure virtual * so must be implemented in the derived class. 
*/ - virtual Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const { + Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const override { if(this->active(x)) { if(H) return evaluateError(x.at(keys_[0]), x.at(keys_[1]), x.at(keys_[2]), x.at(keys_[3]), x.at(keys_[4]), x.at(keys_[5]), (*H)[0], (*H)[1], (*H)[2], (*H)[3], (*H)[4], (*H)[5]); diff --git a/gtsam/nonlinear/NonlinearFactorGraph.cpp b/gtsam/nonlinear/NonlinearFactorGraph.cpp index 5f6cdcc98..6a9e0cd0a 100644 --- a/gtsam/nonlinear/NonlinearFactorGraph.cpp +++ b/gtsam/nonlinear/NonlinearFactorGraph.cpp @@ -34,6 +34,7 @@ #endif #include +#include #include using namespace std; @@ -205,42 +206,49 @@ void NonlinearFactorGraph::saveGraph(std::ostream &stm, const Values& values, // Create factors and variable connections for(size_t i = 0; i < size(); ++i) { const NonlinearFactor::shared_ptr& factor = at(i); - if(formatting.plotFactorPoints) { + // If null pointer, move on to the next + if (!factor) { + continue; + } + + if (formatting.plotFactorPoints) { const KeyVector& keys = factor->keys(); - if (formatting.binaryEdges && keys.size()==2) { - stm << " var" << keys[0] << "--" << "var" << keys[1] << ";\n"; + if (formatting.binaryEdges && keys.size() == 2) { + stm << " var" << keys[0] << "--" + << "var" << keys[1] << ";\n"; } else { // Make each factor a dot stm << " factor" << i << "[label=\"\", shape=point"; { - map::const_iterator pos = formatting.factorPositions.find(i); - if(pos != formatting.factorPositions.end()) - stm << ", pos=\"" << formatting.scale*(pos->second.x() - minX) << "," - << formatting.scale*(pos->second.y() - minY) << "!\""; + map::const_iterator pos = + formatting.factorPositions.find(i); + if (pos != formatting.factorPositions.end()) + stm << ", pos=\"" << formatting.scale * (pos->second.x() - minX) + << "," << formatting.scale * (pos->second.y() - minY) + << "!\""; } stm << "];\n"; // Make factor-variable connections - if(formatting.connectKeysToFactor && factor) { - for(Key key: *factor) { - stm << " var" << key << "--" << "factor" << i << ";\n"; + if (formatting.connectKeysToFactor && factor) { + for (Key key : *factor) { + stm << " var" << key << "--" + << "factor" << i << ";\n"; } } } - } - else { - if(factor) { - Key k; - bool firstTime = true; - for(Key key: *this->at(i)) { - if(firstTime) { - k = key; - firstTime = false; - continue; - } - stm << " var" << key << "--" << "var" << k << ";\n"; + } else { + Key k; + bool firstTime = true; + for (Key key : *this->at(i)) { + if (firstTime) { k = key; + firstTime = false; + continue; } + stm << " var" << key << "--" + << "var" << k << ";\n"; + k = key; } } } @@ -249,6 +257,16 @@ void NonlinearFactorGraph::saveGraph(std::ostream &stm, const Values& values, stm << "}\n"; } +/* ************************************************************************* */ +void NonlinearFactorGraph::saveGraph( + const std::string& file, const Values& values, + const GraphvizFormatting& graphvizFormatting, + const KeyFormatter& keyFormatter) const { + std::ofstream of(file); + saveGraph(of, values, graphvizFormatting, keyFormatter); + of.close(); +} + /* ************************************************************************* */ double NonlinearFactorGraph::error(const Values& values) const { gttic(NonlinearFactorGraph_error); @@ -358,7 +376,7 @@ static Scatter scatterFromValues(const Values& values) { scatter.reserve(values.size()); // use "natural" ordering with keys taken from the initial values - for (const auto& key_value : values) 
{ + for (const auto key_value : values) { scatter.add(key_value.key, key_value.value.dim()); } diff --git a/gtsam/nonlinear/NonlinearFactorGraph.h b/gtsam/nonlinear/NonlinearFactorGraph.h index 0e17700d0..989f493d3 100644 --- a/gtsam/nonlinear/NonlinearFactorGraph.h +++ b/gtsam/nonlinear/NonlinearFactorGraph.h @@ -11,7 +11,7 @@ /** * @file NonlinearFactorGraph.h - * @brief Factor Graph Constsiting of non-linear factors + * @brief Factor Graph consisting of non-linear factors * @author Frank Dellaert * @author Carlos Nieto * @author Christian Potthast @@ -111,11 +111,21 @@ namespace gtsam { /** Test equality */ bool equals(const NonlinearFactorGraph& other, double tol = 1e-9) const; - /** Write the graph in GraphViz format for visualization */ + /// Write the graph in GraphViz format for visualization void saveGraph(std::ostream& stm, const Values& values = Values(), const GraphvizFormatting& graphvizFormatting = GraphvizFormatting(), const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; + /** + * Write the graph in GraphViz format to file for visualization. + * + * This is a wrapper friendly version since wrapped languages don't have + * access to C++ streams. + */ + void saveGraph(const std::string& file, const Values& values = Values(), + const GraphvizFormatting& graphvizFormatting = GraphvizFormatting(), + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; + /** unnormalized error, \f$ 0.5 \sum_i (h_i(X_i)-z)^2/\sigma^2 \f$ in the most common case */ double error(const Values& values) const; diff --git a/gtsam/nonlinear/NonlinearOptimizer.cpp b/gtsam/nonlinear/NonlinearOptimizer.cpp index 328a3facf..0d7e9e17f 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.cpp +++ b/gtsam/nonlinear/NonlinearOptimizer.cpp @@ -88,20 +88,28 @@ void NonlinearOptimizer::defaultOptimize() { } // Iterative loop + double newError = currentError; // used to avoid repeated calls to error() do { // Do next iteration - currentError = error(); + currentError = newError; iterate(); tictoc_finishedIteration(); + // Update newError for either printouts or conditional-end checks: + newError = error(); + + // User hook: + if (params.iterationHook) + params.iterationHook(iterations(), currentError, newError); + // Maybe show output if (params.verbosity >= NonlinearOptimizerParams::VALUES) values().print("newValues"); if (params.verbosity >= NonlinearOptimizerParams::ERROR) - cout << "newError: " << error() << endl; + cout << "newError: " << newError << endl; } while (iterations() < params.maxIterations && !checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, params.errorTol, - currentError, error(), params.verbosity) && std::isfinite(currentError)); + currentError, newError, params.verbosity) && std::isfinite(currentError)); // Printing if verbose if (params.verbosity >= NonlinearOptimizerParams::TERMINATION) { diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h index 103c231be..6fe369dd3 100644 --- a/gtsam/nonlinear/NonlinearOptimizer.h +++ b/gtsam/nonlinear/NonlinearOptimizer.h @@ -81,7 +81,7 @@ class GTSAM_EXPORT NonlinearOptimizer { public: /** A shared pointer to this class */ - typedef boost::shared_ptr shared_ptr; + using shared_ptr = boost::shared_ptr; /// @name Standard interface /// @{ @@ -105,14 +105,17 @@ class GTSAM_EXPORT NonlinearOptimizer { */ const Values& optimizeSafely(); - /// return error + /// return error in current optimizer state double error() const; - /// return number of iterations + /// return number of iterations in 
current optimizer state size_t iterations() const; - /// return values - const Values& values() const; + /// return values in current optimizer state + const Values &values() const; + + /// return the graph with nonlinear factors + const NonlinearFactorGraph &graph() const { return graph_; } /// @} diff --git a/gtsam/nonlinear/NonlinearOptimizerParams.h b/gtsam/nonlinear/NonlinearOptimizerParams.h index 65fdd1c92..218230421 100644 --- a/gtsam/nonlinear/NonlinearOptimizerParams.h +++ b/gtsam/nonlinear/NonlinearOptimizerParams.h @@ -38,21 +38,12 @@ class GTSAM_EXPORT NonlinearOptimizerParams { SILENT, TERMINATION, ERROR, VALUES, DELTA, LINEAR }; - size_t maxIterations; ///< The maximum iterations to stop iterating (default 100) - double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5) - double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5) - double errorTol; ///< The maximum total error to stop iterating (default 0.0) - Verbosity verbosity; ///< The printing verbosity during optimization (default SILENT) - Ordering::OrderingType orderingType; ///< The method of ordering use during variable elimination (default COLAMD) - - NonlinearOptimizerParams() : - maxIterations(100), relativeErrorTol(1e-5), absoluteErrorTol(1e-5), errorTol( - 0.0), verbosity(SILENT), orderingType(Ordering::COLAMD), - linearSolverType(MULTIFRONTAL_CHOLESKY) {} - - virtual ~NonlinearOptimizerParams() { - } - virtual void print(const std::string& str = "") const; + size_t maxIterations = 100; ///< The maximum iterations to stop iterating (default 100) + double relativeErrorTol = 1e-5; ///< The maximum relative error decrease to stop iterating (default 1e-5) + double absoluteErrorTol = 1e-5; ///< The maximum absolute error decrease to stop iterating (default 1e-5) + double errorTol = 0.0; ///< The maximum total error to stop iterating (default 0.0) + Verbosity verbosity = SILENT; ///< The printing verbosity during optimization (default SILENT) + Ordering::OrderingType orderingType = Ordering::COLAMD; ///< The method of ordering use during variable elimination (default COLAMD) size_t getMaxIterations() const { return maxIterations; } double getRelativeErrorTol() const { return relativeErrorTol; } @@ -71,6 +62,37 @@ class GTSAM_EXPORT NonlinearOptimizerParams { static Verbosity verbosityTranslator(const std::string &s) ; static std::string verbosityTranslator(Verbosity value) ; + /** Type for an optional user-provided hook to be called after each + * internal optimizer iteration. See iterationHook below. */ + using IterationHook = std::function< + void(size_t /*iteration*/, double/*errorBefore*/, double/*errorAfter*/)>; + + /** Optional user-provided iteration hook to be called after each + * optimization iteration (Default: none). + * Note that `IterationHook` is defined as a std::function<> with this + * signature: + * \code + * void(size_t iteration, double errorBefore, double errorAfter) + * \endcode + * which allows binding by means of a reference to a regular function: + * \code + * void foo(size_t iteration, double errorBefore, double errorAfter); + * // ... + * lmOpts.iterationHook = &foo; + * \endcode + * or to a C++11 lambda (preferred if you need to capture additional + * context variables, such that the optimizer object itself, the factor graph, + * etc.): + * \code + * lmOpts.iterationHook = [&](size_t iter, double oldError, double newError) + * { + * // ... 
+ * }; + * \endcode + * or to the result of a properly-formed `std::bind` call. + */ + IterationHook iterationHook; + /** See NonlinearOptimizerParams::linearSolverType */ enum LinearSolverType { MULTIFRONTAL_CHOLESKY, @@ -81,10 +103,27 @@ class GTSAM_EXPORT NonlinearOptimizerParams { CHOLMOD, /* Experimental Flag */ }; - LinearSolverType linearSolverType; ///< The type of linear solver to use in the nonlinear optimizer + LinearSolverType linearSolverType = MULTIFRONTAL_CHOLESKY; ///< The type of linear solver to use in the nonlinear optimizer boost::optional ordering; ///< The optional variable elimination ordering, or empty to use COLAMD (default: empty) IterativeOptimizationParameters::shared_ptr iterativeParams; ///< The container for iterativeOptimization parameters. used in CG Solvers. + NonlinearOptimizerParams() = default; + virtual ~NonlinearOptimizerParams() { + } + + virtual void print(const std::string& str = "") const; + + bool equals(const NonlinearOptimizerParams& other, double tol = 1e-9) const { + return maxIterations == other.getMaxIterations() + && std::abs(relativeErrorTol - other.getRelativeErrorTol()) <= tol + && std::abs(absoluteErrorTol - other.getAbsoluteErrorTol()) <= tol + && std::abs(errorTol - other.getErrorTol()) <= tol + && verbosityTranslator(verbosity) == other.getVerbosity(); + // && orderingType.equals(other.getOrderingType()_; + // && linearSolverType == other.getLinearSolverType(); + // TODO: check ordering, iterativeParams, and iterationsHook + } + inline bool isMultifrontal() const { return (linearSolverType == MULTIFRONTAL_CHOLESKY) || (linearSolverType == MULTIFRONTAL_QR); diff --git a/gtsam/nonlinear/PriorFactor.h b/gtsam/nonlinear/PriorFactor.h index 8d8c67d5c..191a7eeaa 100644 --- a/gtsam/nonlinear/PriorFactor.h +++ b/gtsam/nonlinear/PriorFactor.h @@ -65,15 +65,15 @@ namespace gtsam { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /** implement functions needed for Testable */ /** print */ - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const { + void print(const std::string& s, + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "PriorFactor on " << keyFormatter(this->key()) << "\n"; traits::Print(prior_, " prior mean: "); if (this->noiseModel_) @@ -83,7 +83,7 @@ namespace gtsam { } /** equals */ - virtual bool equals(const NonlinearFactor& expected, double tol=1e-9) const { + bool equals(const NonlinearFactor& expected, double tol=1e-9) const override { const This* e = dynamic_cast (&expected); return e != nullptr && Base::equals(*e, tol) && traits::Equals(prior_, e->prior_, tol); } @@ -91,7 +91,7 @@ namespace gtsam { /** implement functions needed to derive from Factor */ /** vector of errors */ - Vector evaluateError(const T& x, boost::optional H = boost::none) const { + Vector evaluateError(const T& x, boost::optional H = boost::none) const override { if (H) (*H) = Matrix::Identity(traits::GetDimension(x),traits::GetDimension(x)); // manifold equivalent of z-x -> Local(x,z) // TODO(ASL) Add Jacobians. 
@@ -114,7 +114,12 @@ namespace gtsam { // Alignment, see https://eigen.tuxfamily.org/dox/group__TopicStructHavingEigenMembers.html enum { NeedsToAlign = (sizeof(T) % 16) == 0 }; public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) + GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) }; + /// traits + template + struct traits > : public Testable > {}; + + } /// namespace gtsam diff --git a/gtsam/nonlinear/Values-inl.h b/gtsam/nonlinear/Values-inl.h index d0f8a4790..19813adba 100644 --- a/gtsam/nonlinear/Values-inl.h +++ b/gtsam/nonlinear/Values-inl.h @@ -217,7 +217,7 @@ namespace gtsam { /** Constructor from a Filtered view copies out all values */ template Values::Values(const Values::Filtered& view) { - for(const typename Filtered::KeyValuePair& key_value: view) { + for(const auto key_value: view) { Key key = key_value.key; insert(key, static_cast(key_value.value)); } @@ -226,7 +226,7 @@ namespace gtsam { /* ************************************************************************* */ template Values::Values(const Values::ConstFiltered& view) { - for(const typename ConstFiltered::KeyValuePair& key_value: view) { + for(const auto key_value: view) { Key key = key_value.key; insert(key, static_cast(key_value.value)); } @@ -338,19 +338,18 @@ namespace gtsam { } // internal /* ************************************************************************* */ - template - ValueType Values::at(Key j) const { + template + const ValueType Values::at(Key j) const { // Find the item KeyValueMap::const_iterator item = values_.find(j); // Throw exception if it does not exist - if(item == values_.end()) - throw ValuesKeyDoesNotExist("at", j); + if (item == values_.end()) throw ValuesKeyDoesNotExist("at", j); - // Check the type and throw exception if incorrect - // h() split in two lines to avoid internal compiler error (MSVC2017) - auto h = internal::handle(); - return h(j,item->second); + // Check the type and throw exception if incorrect + // h() split in two lines to avoid internal compiler error (MSVC2017) + auto h = internal::handle(); + return h(j, item->second); } /* ************************************************************************* */ diff --git a/gtsam/nonlinear/Values.cpp b/gtsam/nonlinear/Values.cpp index 98790ccd9..89a4206ee 100644 --- a/gtsam/nonlinear/Values.cpp +++ b/gtsam/nonlinear/Values.cpp @@ -52,6 +52,12 @@ namespace gtsam { Values::Values(Values&& other) : values_(std::move(other.values_)) { } + /* ************************************************************************* */ + Values::Values(std::initializer_list init) { + for (const auto &kv : init) + insert(kv.key, kv.value); + } + /* ************************************************************************* */ Values::Values(const Values& other, const VectorValues& delta) { for (const_iterator key_value = other.begin(); key_value != other.end(); ++key_value) { @@ -69,7 +75,8 @@ namespace gtsam { /* ************************************************************************* */ void Values::print(const string& str, const KeyFormatter& keyFormatter) const { - cout << str << "Values with " << size() << " values:" << endl; + cout << str << (str == "" ? 
"" : "\n"); + cout << "Values with " << size() << " values:\n"; for(const_iterator key_value = begin(); key_value != end(); ++key_value) { cout << "Value " << keyFormatter(key_value->key) << ": "; key_value->value.print(""); @@ -199,7 +206,7 @@ namespace gtsam { /* ************************************************************************* */ size_t Values::dim() const { size_t result = 0; - for(const ConstKeyValuePair& key_value: *this) { + for(const auto key_value: *this) { result += key_value.value.dim(); } return result; @@ -208,13 +215,13 @@ namespace gtsam { /* ************************************************************************* */ VectorValues Values::zeroVectors() const { VectorValues result; - for(const ConstKeyValuePair& key_value: *this) + for(const auto key_value: *this) result.insert(key_value.key, Vector::Zero(key_value.value.dim())); return result; } /* ************************************************************************* */ - const char* ValuesKeyAlreadyExists::what() const throw() { + const char* ValuesKeyAlreadyExists::what() const noexcept { if(message_.empty()) message_ = "Attempting to add a key-value pair with key \"" + DefaultKeyFormatter(key_) + "\", key already exists."; @@ -222,7 +229,7 @@ namespace gtsam { } /* ************************************************************************* */ - const char* ValuesKeyDoesNotExist::what() const throw() { + const char* ValuesKeyDoesNotExist::what() const noexcept { if(message_.empty()) message_ = "Attempting to " + std::string(operation_) + " the key \"" + @@ -231,7 +238,7 @@ namespace gtsam { } /* ************************************************************************* */ - const char* ValuesIncorrectType::what() const throw() { + const char* ValuesIncorrectType::what() const noexcept { if(message_.empty()) { std::string storedTypeName = demangle(storedTypeId_.name()); std::string requestedTypeName = demangle(requestedTypeId_.name()); @@ -251,7 +258,7 @@ namespace gtsam { } /* ************************************************************************* */ - const char* NoMatchFoundForFixed::what() const throw() { + const char* NoMatchFoundForFixed::what() const noexcept { if(message_.empty()) { ostringstream oss; oss diff --git a/gtsam/nonlinear/Values.h b/gtsam/nonlinear/Values.h index 041aa3441..120c8839c 100644 --- a/gtsam/nonlinear/Values.h +++ b/gtsam/nonlinear/Values.h @@ -149,6 +149,13 @@ namespace gtsam { /** Move constructor */ Values(Values&& other); + + /** Constructor from initializer list. Example usage: + * \code + * Values v = {{k1, genericValue(pose1)}, {k2, genericValue(point2)}}; + * \endcode + */ + Values(std::initializer_list init); /** Construct from a Values and an update vector: identical to other.retract(delta) */ Values(const Values& other, const VectorValues& delta); @@ -180,8 +187,8 @@ namespace gtsam { * Dynamic matrices/vectors can be retrieved as fixed-size, but not vice-versa. 
* @return The stored value */ - template - ValueType at(Key j) const; + template + const ValueType at(Key j) const; /// version for double double atDouble(size_t key) const { return at(key);} @@ -390,7 +397,7 @@ namespace gtsam { template size_t count() const { size_t i = 0; - for (const auto& key_value : *this) { + for (const auto key_value : *this) { if (dynamic_cast*>(&key_value.value)) ++i; } @@ -432,16 +439,16 @@ namespace gtsam { public: /// Construct with the key-value pair attempted to be added - ValuesKeyAlreadyExists(Key key) throw() : + ValuesKeyAlreadyExists(Key key) noexcept : key_(key) {} - virtual ~ValuesKeyAlreadyExists() throw() {} + virtual ~ValuesKeyAlreadyExists() noexcept {} /// The duplicate key that was attempted to be added - Key key() const throw() { return key_; } + Key key() const noexcept { return key_; } /// The message to be displayed to the user - GTSAM_EXPORT virtual const char* what() const throw(); + GTSAM_EXPORT const char* what() const noexcept override; }; /* ************************************************************************* */ @@ -455,16 +462,16 @@ namespace gtsam { public: /// Construct with the key that does not exist in the values - ValuesKeyDoesNotExist(const char* operation, Key key) throw() : + ValuesKeyDoesNotExist(const char* operation, Key key) noexcept : operation_(operation), key_(key) {} - virtual ~ValuesKeyDoesNotExist() throw() {} + virtual ~ValuesKeyDoesNotExist() noexcept {} /// The key that was attempted to be accessed that does not exist - Key key() const throw() { return key_; } + Key key() const noexcept { return key_; } /// The message to be displayed to the user - GTSAM_EXPORT virtual const char* what() const throw(); + GTSAM_EXPORT const char* what() const noexcept override; }; /* ************************************************************************* */ @@ -480,13 +487,13 @@ namespace gtsam { public: /// Construct with the key that does not exist in the values ValuesIncorrectType(Key key, - const std::type_info& storedTypeId, const std::type_info& requestedTypeId) throw() : + const std::type_info& storedTypeId, const std::type_info& requestedTypeId) noexcept : key_(key), storedTypeId_(storedTypeId), requestedTypeId_(requestedTypeId) {} - virtual ~ValuesIncorrectType() throw() {} + virtual ~ValuesIncorrectType() noexcept {} /// The key that was attempted to be accessed that does not exist - Key key() const throw() { return key_; } + Key key() const noexcept { return key_; } /// The typeid of the value stores in the Values const std::type_info& storedTypeId() const { return storedTypeId_; } @@ -495,18 +502,18 @@ namespace gtsam { const std::type_info& requestedTypeId() const { return requestedTypeId_; } /// The message to be displayed to the user - GTSAM_EXPORT virtual const char* what() const throw(); + GTSAM_EXPORT const char* what() const noexcept override; }; /* ************************************************************************* */ class DynamicValuesMismatched : public std::exception { public: - DynamicValuesMismatched() throw() {} + DynamicValuesMismatched() noexcept {} - virtual ~DynamicValuesMismatched() throw() {} + virtual ~DynamicValuesMismatched() noexcept {} - virtual const char* what() const throw() { + const char* what() const noexcept override { return "The Values 'this' and the argument passed to Values::localCoordinates have mismatched keys and values"; } }; @@ -522,14 +529,14 @@ namespace gtsam { mutable std::string message_; public: - NoMatchFoundForFixed(size_t M1, size_t N1, size_t M2, size_t 
N2) throw () : + NoMatchFoundForFixed(size_t M1, size_t N1, size_t M2, size_t N2) noexcept : M1_(M1), N1_(N1), M2_(M2), N2_(N2) { } - virtual ~NoMatchFoundForFixed() throw () { + virtual ~NoMatchFoundForFixed() noexcept { } - GTSAM_EXPORT virtual const char* what() const throw (); + GTSAM_EXPORT const char* what() const noexcept override; }; /* ************************************************************************* */ diff --git a/gtsam/nonlinear/WhiteNoiseFactor.h b/gtsam/nonlinear/WhiteNoiseFactor.h index 634736800..974998830 100644 --- a/gtsam/nonlinear/WhiteNoiseFactor.h +++ b/gtsam/nonlinear/WhiteNoiseFactor.h @@ -109,7 +109,7 @@ namespace gtsam { /// Print void print(const std::string& p = "WhiteNoiseFactor", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(p, keyFormatter); std::cout << p + ".z: " << z_ << std::endl; } @@ -119,12 +119,12 @@ namespace gtsam { /// @{ /// get the dimension of the factor (number of rows on linearization) - virtual size_t dim() const { + size_t dim() const override { return 2; } /// Calculate the error of the factor, typically equal to log-likelihood - inline double error(const Values& x) const { + double error(const Values& x) const override { return f(z_, x.at(meanKey_), x.at(precisionKey_)); } @@ -153,7 +153,7 @@ namespace gtsam { /// @{ /// linearize returns a Hessianfactor that is an approximation of error(p) - virtual boost::shared_ptr linearize(const Values& x) const { + boost::shared_ptr linearize(const Values& x) const override { double u = x.at(meanKey_); double p = x.at(precisionKey_); Key j1 = meanKey_; @@ -163,7 +163,7 @@ namespace gtsam { // TODO: Frank commented this out for now, can it go? // /// @return a deep copy of this factor - // virtual gtsam::NonlinearFactor::shared_ptr clone() const { + // gtsam::NonlinearFactor::shared_ptr clone() const override { // return boost::static_pointer_cast( // gtsam::NonlinearFactor::shared_ptr(new This(*this))); } diff --git a/gtsam/nonlinear/internal/CallRecord.h b/gtsam/nonlinear/internal/CallRecord.h index 8bdc0652e..707c617ee 100644 --- a/gtsam/nonlinear/internal/CallRecord.h +++ b/gtsam/nonlinear/internal/CallRecord.h @@ -144,43 +144,43 @@ struct CallRecordImplementor: public CallRecord { return static_cast(*this); } - virtual void _print(const std::string& indent) const { + void _print(const std::string& indent) const override { derived().print(indent); } // Called from base class non-virtual inline method startReverseAD2 // Calls non-virtual function startReverseAD4, implemented in Derived (ExpressionNode::Record) - virtual void _startReverseAD3(JacobianMap& jacobians) const { + void _startReverseAD3(JacobianMap& jacobians) const override { derived().startReverseAD4(jacobians); } - virtual void _reverseAD3(const Matrix & dFdT, JacobianMap& jacobians) const { + void _reverseAD3(const Matrix & dFdT, JacobianMap& jacobians) const override { derived().reverseAD4(dFdT, jacobians); } - virtual void _reverseAD3( + void _reverseAD3( const Eigen::Matrix & dFdT, - JacobianMap& jacobians) const { + JacobianMap& jacobians) const override { derived().reverseAD4(dFdT, jacobians); } - virtual void _reverseAD3(const Eigen::Matrix & dFdT, - JacobianMap& jacobians) const { + void _reverseAD3(const Eigen::Matrix & dFdT, + JacobianMap& jacobians) const override { derived().reverseAD4(dFdT, jacobians); } - virtual void _reverseAD3(const Eigen::Matrix & dFdT, - JacobianMap& jacobians) const { + void 
_reverseAD3(const Eigen::Matrix & dFdT, + JacobianMap& jacobians) const override { derived().reverseAD4(dFdT, jacobians); } - virtual void _reverseAD3(const Eigen::Matrix & dFdT, - JacobianMap& jacobians) const { + void _reverseAD3(const Eigen::Matrix & dFdT, + JacobianMap& jacobians) const override { derived().reverseAD4(dFdT, jacobians); } - virtual void _reverseAD3(const Eigen::Matrix & dFdT, - JacobianMap& jacobians) const { + void _reverseAD3(const Eigen::Matrix & dFdT, + JacobianMap& jacobians) const override { derived().reverseAD4(dFdT, jacobians); } - virtual void _reverseAD3(const Eigen::Matrix & dFdT, - JacobianMap& jacobians) const { + void _reverseAD3(const Eigen::Matrix & dFdT, + JacobianMap& jacobians) const override { derived().reverseAD4(dFdT, jacobians); } }; diff --git a/gtsam/nonlinear/internal/ExecutionTrace.h b/gtsam/nonlinear/internal/ExecutionTrace.h index ace0aaea8..2943b5e68 100644 --- a/gtsam/nonlinear/internal/ExecutionTrace.h +++ b/gtsam/nonlinear/internal/ExecutionTrace.h @@ -169,6 +169,12 @@ class ExecutionTrace { content.ptr->reverseAD2(dTdA, jacobians); } + ~ExecutionTrace() { + if (kind == Function) { + content.ptr->~CallRecord(); + } + } + /// Define type so we can apply it as a meta-function typedef ExecutionTrace type; }; diff --git a/gtsam/nonlinear/internal/ExpressionNode.h b/gtsam/nonlinear/internal/ExpressionNode.h index 0011efb74..ee3dc8929 100644 --- a/gtsam/nonlinear/internal/ExpressionNode.h +++ b/gtsam/nonlinear/internal/ExpressionNode.h @@ -135,22 +135,22 @@ class ConstantExpression: public ExpressionNode { } /// Print - virtual void print(const std::string& indent = "") const { + void print(const std::string& indent = "") const override { std::cout << indent << "Constant" << std::endl; } /// Return value - virtual T value(const Values& values) const { + T value(const Values& values) const override { return constant_; } /// Construct an execution trace for reverse AD - virtual T traceExecution(const Values& values, ExecutionTrace& trace, - ExecutionTraceStorage* traceStorage) const { + T traceExecution(const Values& values, ExecutionTrace& trace, + ExecutionTraceStorage* traceStorage) const override { return constant_; } - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; //----------------------------------------------------------------------------- @@ -176,30 +176,30 @@ class LeafExpression: public ExpressionNode { } /// Print - virtual void print(const std::string& indent = "") const { + void print(const std::string& indent = "") const override { std::cout << indent << "Leaf, key = " << DefaultKeyFormatter(key_) << std::endl; } /// Return keys that play in this expression - virtual std::set keys() const { + std::set keys() const override { std::set keys; keys.insert(key_); return keys; } /// Return dimensions for each argument - virtual void dims(std::map& map) const { + void dims(std::map& map) const override { map[key_] = traits::dimension; } /// Return value - virtual T value(const Values& values) const { + T value(const Values& values) const override { return values.at(key_); } /// Construct an execution trace for reverse AD - virtual T traceExecution(const Values& values, ExecutionTrace& trace, - ExecutionTraceStorage* traceStorage) const { + T traceExecution(const Values& values, ExecutionTrace& trace, + ExecutionTraceStorage* traceStorage) const override { trace.setLeaf(key_); return values.at(key_); } @@ -248,23 +248,23 @@ class UnaryExpression: public ExpressionNode { } /// Print - virtual void print(const 
std::string& indent = "") const { + void print(const std::string& indent = "") const override { std::cout << indent << "UnaryExpression" << std::endl; expression1_->print(indent + " "); } /// Return value - virtual T value(const Values& values) const { + T value(const Values& values) const override { return function_(expression1_->value(values), boost::none); } /// Return keys that play in this expression - virtual std::set keys() const { + std::set keys() const override { return expression1_->keys(); } /// Return dimensions for each argument - virtual void dims(std::map& map) const { + void dims(std::map& map) const override { expression1_->dims(map); } @@ -307,8 +307,8 @@ class UnaryExpression: public ExpressionNode { }; /// Construct an execution trace for reverse AD - virtual T traceExecution(const Values& values, ExecutionTrace& trace, - ExecutionTraceStorage* ptr) const { + T traceExecution(const Values& values, ExecutionTrace& trace, + ExecutionTraceStorage* ptr) const override { assert(reinterpret_cast(ptr) % TraceAlignment == 0); // Create a Record in the memory pointed to by ptr @@ -357,21 +357,21 @@ class BinaryExpression: public ExpressionNode { } /// Print - virtual void print(const std::string& indent = "") const { + void print(const std::string& indent = "") const override { std::cout << indent << "BinaryExpression" << std::endl; expression1_->print(indent + " "); expression2_->print(indent + " "); } /// Return value - virtual T value(const Values& values) const { + T value(const Values& values) const override { using boost::none; return function_(expression1_->value(values), expression2_->value(values), none, none); } /// Return keys that play in this expression - virtual std::set keys() const { + std::set keys() const override { std::set keys = expression1_->keys(); std::set myKeys = expression2_->keys(); keys.insert(myKeys.begin(), myKeys.end()); @@ -379,7 +379,7 @@ class BinaryExpression: public ExpressionNode { } /// Return dimensions for each argument - virtual void dims(std::map& map) const { + void dims(std::map& map) const override { expression1_->dims(map); expression2_->dims(map); } @@ -426,8 +426,8 @@ class BinaryExpression: public ExpressionNode { }; /// Construct an execution trace for reverse AD, see UnaryExpression for explanation - virtual T traceExecution(const Values& values, ExecutionTrace& trace, - ExecutionTraceStorage* ptr) const { + T traceExecution(const Values& values, ExecutionTrace& trace, + ExecutionTraceStorage* ptr) const override { assert(reinterpret_cast(ptr) % TraceAlignment == 0); Record* record = new (ptr) Record(values, *expression1_, *expression2_, ptr); trace.setFunction(record); @@ -464,7 +464,7 @@ class TernaryExpression: public ExpressionNode { } /// Print - virtual void print(const std::string& indent = "") const { + void print(const std::string& indent = "") const override { std::cout << indent << "TernaryExpression" << std::endl; expression1_->print(indent + " "); expression2_->print(indent + " "); @@ -472,14 +472,14 @@ class TernaryExpression: public ExpressionNode { } /// Return value - virtual T value(const Values& values) const { + T value(const Values& values) const override { using boost::none; return function_(expression1_->value(values), expression2_->value(values), expression3_->value(values), none, none, none); } /// Return keys that play in this expression - virtual std::set keys() const { + std::set keys() const override { std::set keys = expression1_->keys(); std::set myKeys = expression2_->keys(); 
keys.insert(myKeys.begin(), myKeys.end()); @@ -489,7 +489,7 @@ class TernaryExpression: public ExpressionNode { } /// Return dimensions for each argument - virtual void dims(std::map& map) const { + void dims(std::map& map) const override { expression1_->dims(map); expression2_->dims(map); expression3_->dims(map); @@ -544,8 +544,8 @@ class TernaryExpression: public ExpressionNode { }; /// Construct an execution trace for reverse AD, see UnaryExpression for explanation - virtual T traceExecution(const Values& values, ExecutionTrace& trace, - ExecutionTraceStorage* ptr) const { + T traceExecution(const Values& values, ExecutionTrace& trace, + ExecutionTraceStorage* ptr) const override { assert(reinterpret_cast(ptr) % TraceAlignment == 0); Record* record = new (ptr) Record(values, *expression1_, *expression2_, *expression3_, ptr); trace.setFunction(record); @@ -574,23 +574,23 @@ class ScalarMultiplyNode : public ExpressionNode { virtual ~ScalarMultiplyNode() {} /// Print - virtual void print(const std::string& indent = "") const { + void print(const std::string& indent = "") const override { std::cout << indent << "ScalarMultiplyNode" << std::endl; expression_->print(indent + " "); } /// Return value - virtual T value(const Values& values) const { + T value(const Values& values) const override { return scalar_ * expression_->value(values); } /// Return keys that play in this expression - virtual std::set keys() const { + std::set keys() const override { return expression_->keys(); } /// Return dimensions for each argument - virtual void dims(std::map& map) const { + void dims(std::map& map) const override { expression_->dims(map); } @@ -624,8 +624,8 @@ class ScalarMultiplyNode : public ExpressionNode { }; /// Construct an execution trace for reverse AD - virtual T traceExecution(const Values& values, ExecutionTrace& trace, - ExecutionTraceStorage* ptr) const { + T traceExecution(const Values& values, ExecutionTrace& trace, + ExecutionTraceStorage* ptr) const override { assert(reinterpret_cast(ptr) % TraceAlignment == 0); Record* record = new (ptr) Record(); ptr += upAligned(sizeof(Record)); @@ -662,19 +662,19 @@ class BinarySumNode : public ExpressionNode { virtual ~BinarySumNode() {} /// Print - virtual void print(const std::string& indent = "") const { + void print(const std::string& indent = "") const override { std::cout << indent << "BinarySumNode" << std::endl; expression1_->print(indent + " "); expression2_->print(indent + " "); } /// Return value - virtual T value(const Values& values) const { + T value(const Values& values) const override { return expression1_->value(values) + expression2_->value(values); } /// Return keys that play in this expression - virtual std::set keys() const { + std::set keys() const override { std::set keys = expression1_->keys(); std::set myKeys = expression2_->keys(); keys.insert(myKeys.begin(), myKeys.end()); @@ -682,7 +682,7 @@ class BinarySumNode : public ExpressionNode { } /// Return dimensions for each argument - virtual void dims(std::map& map) const { + void dims(std::map& map) const override { expression1_->dims(map); expression2_->dims(map); } @@ -717,8 +717,8 @@ class BinarySumNode : public ExpressionNode { }; /// Construct an execution trace for reverse AD - virtual T traceExecution(const Values& values, ExecutionTrace& trace, - ExecutionTraceStorage* ptr) const { + T traceExecution(const Values& values, ExecutionTrace& trace, + ExecutionTraceStorage* ptr) const override { assert(reinterpret_cast(ptr) % TraceAlignment == 0); Record* record = new 
(ptr) Record(); trace.setFunction(record); diff --git a/gtsam/nonlinear/internal/LevenbergMarquardtState.h b/gtsam/nonlinear/internal/LevenbergMarquardtState.h index 8ab2e7466..cee839540 100644 --- a/gtsam/nonlinear/internal/LevenbergMarquardtState.h +++ b/gtsam/nonlinear/internal/LevenbergMarquardtState.h @@ -126,7 +126,7 @@ struct LevenbergMarquardtState : public NonlinearOptimizerState { noiseModelCache.resize(0); // for each of the variables, add a prior damped.reserve(damped.size() + values.size()); - for (const auto& key_value : values) { + for (const auto key_value : values) { const Key key = key_value.key; const size_t dim = key_value.value.dim(); const CachedModel* item = getCachedModel(dim); diff --git a/gtsam/nonlinear/nonlinearExceptions.h b/gtsam/nonlinear/nonlinearExceptions.h index 8b32b6fdc..7b704ac39 100644 --- a/gtsam/nonlinear/nonlinearExceptions.h +++ b/gtsam/nonlinear/nonlinearExceptions.h @@ -35,11 +35,11 @@ namespace gtsam { KeyFormatter formatter_; mutable std::string what_; public: - MarginalizeNonleafException(Key key, KeyFormatter formatter = DefaultKeyFormatter) throw() : + MarginalizeNonleafException(Key key, KeyFormatter formatter = DefaultKeyFormatter) noexcept : key_(key), formatter_(formatter) {} - virtual ~MarginalizeNonleafException() throw() {} + virtual ~MarginalizeNonleafException() noexcept {} Key key() const { return key_; } - virtual const char* what() const throw() { + const char* what() const noexcept override { if(what_.empty()) what_ = "\nRequested to marginalize out variable " + formatter_(key_) + ", but this variable\n\ diff --git a/gtsam/nonlinear/tests/testAdaptAutoDiff.cpp b/gtsam/nonlinear/tests/testAdaptAutoDiff.cpp index a85118c00..1423b473e 100644 --- a/gtsam/nonlinear/tests/testAdaptAutoDiff.cpp +++ b/gtsam/nonlinear/tests/testAdaptAutoDiff.cpp @@ -41,7 +41,7 @@ struct Cal3Bundler0 : public Cal3Bundler { double v0 = 0) : Cal3Bundler(f, k1, k2, u0, v0) {} Cal3Bundler0 retract(const Vector& d) const { - return Cal3Bundler0(fx() + d(0), k1() + d(1), k2() + d(2), u0(), v0()); + return Cal3Bundler0(fx() + d(0), k1() + d(1), k2() + d(2), px(), py()); } Vector3 localCoordinates(const Cal3Bundler0& T2) const { return T2.vector() - vector(); @@ -168,7 +168,11 @@ Camera camera(Pose3(Rot3().retract(Vector3(0.1, 0.2, 0.3)), Point3(0, 5, 0)), Point3 point(10, 0, -5); // negative Z-axis convention of Snavely! Vector9 P = Camera().localCoordinates(camera); Vector3 X = point; +#ifdef GTSAM_POSE3_EXPMAP +Vector2 expectedMeasurement(1.3124675, 1.2057287); +#else Vector2 expectedMeasurement(1.2431567, 1.2525694); +#endif Matrix E1 = numericalDerivative21(adapted, P, X); Matrix E2 = numericalDerivative22(adapted, P, X); } @@ -177,7 +181,11 @@ Matrix E2 = numericalDerivative22(adapted, P, X); // Check that Local worked as expected TEST(AdaptAutoDiff, Local) { using namespace example; +#ifdef GTSAM_POSE3_EXPMAP + Vector9 expectedP = (Vector9() << 0.1, 0.2, 0.3, 0.7583528428, 4.9582357859, -0.224941471539, 1, 0, 0).finished(); +#else Vector9 expectedP = (Vector9() << 0.1, 0.2, 0.3, 0, 5, 0, 1, 0, 0).finished(); +#endif EXPECT(equal_with_abs_tol(expectedP, P)); Vector3 expectedX(10, 0, -5); // negative Z-axis convention of Snavely! 
EXPECT(equal_with_abs_tol(expectedX, X)); diff --git a/gtsam/nonlinear/tests/testCallRecord.cpp b/gtsam/nonlinear/tests/testCallRecord.cpp index c5ccc0f52..66c56e696 100644 --- a/gtsam/nonlinear/tests/testCallRecord.cpp +++ b/gtsam/nonlinear/tests/testCallRecord.cpp @@ -98,7 +98,8 @@ struct Record: public internal::CallRecordImplementor { friend struct internal::CallRecordImplementor; }; -internal::JacobianMap & NJM= *static_cast(nullptr); +internal::JacobianMap* NJM_ptr = static_cast(nullptr); +internal::JacobianMap & NJM = *NJM_ptr; /* ************************************************************************* */ typedef Eigen::Matrix DynRowMat; diff --git a/gtsam/nonlinear/tests/testExecutionTrace.cpp b/gtsam/nonlinear/tests/testExecutionTrace.cpp index c2b245780..58f76089a 100644 --- a/gtsam/nonlinear/tests/testExecutionTrace.cpp +++ b/gtsam/nonlinear/tests/testExecutionTrace.cpp @@ -16,6 +16,7 @@ * @brief unit tests for Expression internals */ +#include #include #include diff --git a/gtsam/nonlinear/tests/testFunctorizedFactor.cpp b/gtsam/nonlinear/tests/testFunctorizedFactor.cpp new file mode 100644 index 000000000..cb91320cf --- /dev/null +++ b/gtsam/nonlinear/tests/testFunctorizedFactor.cpp @@ -0,0 +1,176 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------1------------------------------------------- + */ + +/** + * @file testFunctorizedFactor.cpp + * @date May 31, 2020 + * @author Varun Agrawal + * @brief unit tests for FunctorizedFactor class + */ + +#include +#include +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; + +Key key = Symbol('X', 0); +auto model = noiseModel::Isotropic::Sigma(9, 1); + +/// Functor that takes a matrix and multiplies every element by m +class MultiplyFunctor { + double m_; ///< simple multiplier + + public: + MultiplyFunctor(double m) : m_(m) {} + + Matrix operator()(const Matrix &X, + OptionalJacobian<-1, -1> H = boost::none) const { + if (H) *H = m_ * Matrix::Identity(X.rows() * X.cols(), X.rows() * X.cols()); + return m_ * X; + } +}; + +/* ************************************************************************* */ +// Test identity operation for FunctorizedFactor. +TEST(FunctorizedFactor, Identity) { + Matrix X = Matrix::Identity(3, 3), measurement = Matrix::Identity(3, 3); + + double multiplier = 1.0; + auto functor = MultiplyFunctor(multiplier); + auto factor = MakeFunctorizedFactor(key, measurement, model, functor); + + Vector error = factor.evaluateError(X); + + EXPECT(assert_equal(Vector::Zero(9), error, 1e-9)); +} + +/* ************************************************************************* */ +// Test FunctorizedFactor with multiplier value of 2. +TEST(FunctorizedFactor, Multiply2) { + double multiplier = 2.0; + Matrix X = Matrix::Identity(3, 3); + Matrix measurement = multiplier * Matrix::Identity(3, 3); + + auto factor = MakeFunctorizedFactor(key, measurement, model, + MultiplyFunctor(multiplier)); + + Vector error = factor.evaluateError(X); + + EXPECT(assert_equal(Vector::Zero(9), error, 1e-9)); +} + +/* ************************************************************************* */ +// Test equality function for FunctorizedFactor. 
+TEST(FunctorizedFactor, Equality) { + Matrix measurement = Matrix::Identity(2, 2); + + double multiplier = 2.0; + + auto factor1 = MakeFunctorizedFactor(key, measurement, model, + MultiplyFunctor(multiplier)); + auto factor2 = MakeFunctorizedFactor(key, measurement, model, + MultiplyFunctor(multiplier)); + + EXPECT(factor1.equals(factor2)); +} + +/* *************************************************************************** */ +// Test Jacobians of FunctorizedFactor. +TEST(FunctorizedFactor, Jacobians) { + Matrix X = Matrix::Identity(3, 3); + Matrix actualH; + + double multiplier = 2.0; + + auto factor = + MakeFunctorizedFactor(key, X, model, MultiplyFunctor(multiplier)); + + Values values; + values.insert(key, X); + + // Check Jacobians + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); +} + +/* ************************************************************************* */ +// Test print result of FunctorizedFactor. +TEST(FunctorizedFactor, Print) { + Matrix X = Matrix::Identity(2, 2); + + double multiplier = 2.0; + + auto factor = + MakeFunctorizedFactor(key, X, model, MultiplyFunctor(multiplier)); + + string expected = + " keys = { X0 }\n" + " noise model: unit (9) \n" + "FunctorizedFactor(X0)\n" + " measurement: [\n" + " 1, 0;\n" + " 0, 1\n" + "]\n" + " noise model sigmas: 1 1 1 1 1 1 1 1 1\n"; + + EXPECT(assert_print_equal(expected, factor)); +} + +/* ************************************************************************* */ +// Test FunctorizedFactor using a std::function type. +TEST(FunctorizedFactor, Functional) { + double multiplier = 2.0; + Matrix X = Matrix::Identity(3, 3); + Matrix measurement = multiplier * Matrix::Identity(3, 3); + + std::function)> functional = + MultiplyFunctor(multiplier); + auto factor = + MakeFunctorizedFactor(key, measurement, model, functional); + + Vector error = factor.evaluateError(X); + + EXPECT(assert_equal(Vector::Zero(9), error, 1e-9)); +} + +/* ************************************************************************* */ +// Test FunctorizedFactor with a lambda function. 
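// Illustrative sketch: using MakeFunctorizedFactor inside a NonlinearFactorGraph.
// The ScaleFunctor type, key, and values are assumptions chosen for this example,
// mirroring the MultiplyFunctor used in the tests above.
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/FunctorizedFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

namespace functorized_sketch {
using namespace gtsam;

// Functor that scales a matrix by a fixed multiplier.
struct ScaleFunctor {
  double m;
  explicit ScaleFunctor(double m_) : m(m_) {}
  Matrix operator()(const Matrix& X,
                    OptionalJacobian<-1, -1> H = boost::none) const {
    if (H) *H = m * Matrix::Identity(X.size(), X.size());
    return m * X;
  }
};

inline void run() {
  const Key key = Symbol('X', 0);
  auto model = noiseModel::Isotropic::Sigma(9, 1);
  const double multiplier = 2.0;
  const Matrix measurement = multiplier * Matrix::Identity(3, 3);

  NonlinearFactorGraph graph;
  graph.add(MakeFunctorizedFactor<Matrix>(key, measurement, model,
                                          ScaleFunctor(multiplier)));

  Values values;
  values.insert(key, Matrix(Matrix::Identity(3, 3)));
  const double error = graph.error(values);  // 0, since 2 * I == measurement
  (void)error;
}
}  // namespace functorized_sketch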
+TEST(FunctorizedFactor, Lambda) { + double multiplier = 2.0; + Matrix X = Matrix::Identity(3, 3); + Matrix measurement = multiplier * Matrix::Identity(3, 3); + + auto lambda = [multiplier](const Matrix &X, + OptionalJacobian<-1, -1> H = boost::none) { + if (H) + *H = multiplier * + Matrix::Identity(X.rows() * X.cols(), X.rows() * X.cols()); + return multiplier * X; + }; + // FunctorizedFactor factor(key, measurement, model, lambda); + auto factor = MakeFunctorizedFactor(key, measurement, model, lambda); + + Vector error = factor.evaluateError(X); + + EXPECT(assert_equal(Vector::Zero(9), error, 1e-9)); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/nonlinear/tests/testValues.cpp b/gtsam/nonlinear/tests/testValues.cpp index 2f624f527..aacceb276 100644 --- a/gtsam/nonlinear/tests/testValues.cpp +++ b/gtsam/nonlinear/tests/testValues.cpp @@ -175,10 +175,11 @@ TEST(Values, basic_functions) { Values values; const Values& values_c = values; - values.insert(2, Vector3()); - values.insert(4, Vector3()); - values.insert(6, Matrix23()); - values.insert(8, Matrix23()); + Matrix23 M1 = Matrix23::Zero(), M2 = Matrix23::Zero(); + values.insert(2, Vector3(0, 0, 0)); + values.insert(4, Vector3(0, 0, 0)); + values.insert(6, M1); + values.insert(8, M2); // find EXPECT_LONGS_EQUAL(4, values.find(4)->key); @@ -335,7 +336,7 @@ TEST(Values, filter) { int i = 0; Values::Filtered filtered = values.filter(boost::bind(std::greater_equal(), _1, 2)); EXPECT_LONGS_EQUAL(2, (long)filtered.size()); - for(const Values::Filtered<>::KeyValuePair& key_value: filtered) { + for(const auto key_value: filtered) { if(i == 0) { LONGS_EQUAL(2, (long)key_value.key); try {key_value.value.cast();} catch (const std::bad_cast& e) { FAIL("can't cast Value to Pose2");} @@ -370,7 +371,7 @@ TEST(Values, filter) { i = 0; Values::ConstFiltered pose_filtered = values.filter(); EXPECT_LONGS_EQUAL(2, (long)pose_filtered.size()); - for(const Values::ConstFiltered::KeyValuePair& key_value: pose_filtered) { + for(const auto key_value: pose_filtered) { if(i == 0) { EXPECT_LONGS_EQUAL(1, (long)key_value.key); EXPECT(assert_equal(pose1, key_value.value)); @@ -408,7 +409,7 @@ TEST(Values, Symbol_filter) { values.insert(Symbol('y', 3), pose3); int i = 0; - for(const Values::Filtered::KeyValuePair& key_value: values.filter(Symbol::ChrTest('y'))) { + for(const auto key_value: values.filter(Symbol::ChrTest('y'))) { if(i == 0) { LONGS_EQUAL(Symbol('y', 1), (long)key_value.key); EXPECT(assert_equal(pose1, key_value.value.cast())); @@ -593,19 +594,41 @@ TEST(Values, Demangle) { Values values; Matrix13 v; v << 5.0, 6.0, 7.0; values.insert(key1, v); - string expected = "Values with 1 values:\nValue v1: (Eigen::Matrix) [\n 5, 6, 7\n]\n\n"; + string expected = "Values with 1 values:\nValue v1: (Eigen::Matrix)\n[\n 5, 6, 7\n]\n\n"; - stringstream buffer; - streambuf * old = cout.rdbuf(buffer.rdbuf()); - - values.print(); + EXPECT(assert_print_equal(expected, values)); +} - string actual = buffer.str(); - cout.rdbuf(old); +/* ************************************************************************* */ +TEST(Values, brace_initializer) { + const Pose2 poseA(1.0, 2.0, 0.3), poseC(.0, .0, .0); + const Pose3 poseB(Pose2(0.1, 0.2, 0.3)); - EXPECT(assert_equal(expected, actual)); + { + Values values; + EXPECT_LONGS_EQUAL(0, values.size()); + values = { {key1, 
genericValue(1.0)} }; + EXPECT_LONGS_EQUAL(1, values.size()); + CHECK(values.at(key1) == 1.0); + } + { + Values values = { {key1, genericValue(poseA)}, {key2, genericValue(poseB)} }; + EXPECT_LONGS_EQUAL(2, values.size()); + EXPECT(assert_equal(values.at(key1), poseA)); + EXPECT(assert_equal(values.at(key2), poseB)); + } + // Test exception: duplicated key: + { + Values values; + CHECK_EXCEPTION((values = { + {key1, genericValue(poseA)}, + {key2, genericValue(poseB)}, + {key1, genericValue(poseC)} + }), std::exception); + } } + /* ************************************************************************* */ int main() { TestResult tr; return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ diff --git a/gtsam/nonlinear/utilities.h b/gtsam/nonlinear/utilities.h index a78f7a2d0..ffc279872 100644 --- a/gtsam/nonlinear/utilities.h +++ b/gtsam/nonlinear/utilities.h @@ -107,6 +107,11 @@ Matrix extractPoint3(const Values& values) { return result; } +/// Extract all Pose3 values +Values allPose2s(const Values& values) { + return values.filter(); +} + /// Extract all Pose2 values into a single matrix [x y theta] Matrix extractPose2(const Values& values) { Values::ConstFiltered poses = values.filter(); diff --git a/gtsam/sam/BearingFactor.h b/gtsam/sam/BearingFactor.h index a9ed5ef4b..7076708db 100644 --- a/gtsam/sam/BearingFactor.h +++ b/gtsam/sam/BearingFactor.h @@ -34,8 +34,8 @@ struct Bearing; */ template ::result_type> -struct BearingFactor : public ExpressionFactor2 { - typedef ExpressionFactor2 Base; +struct BearingFactor : public ExpressionFactorN { + typedef ExpressionFactorN Base; /// default constructor BearingFactor() {} @@ -43,23 +43,38 @@ struct BearingFactor : public ExpressionFactor2 { /// primary constructor BearingFactor(Key key1, Key key2, const T& measured, const SharedNoiseModel& model) - : Base(key1, key2, model, measured) { - this->initialize(expression(key1, key2)); + : Base({key1, key2}, model, measured) { + this->initialize(expression({key1, key2})); } // Return measurement expression - virtual Expression expression(Key key1, Key key2) const { - Expression a1_(key1); - Expression a2_(key2); + Expression expression(const typename Base::ArrayNKeys &keys) const override { + Expression a1_(keys[0]); + Expression a2_(keys[1]); return Expression(Bearing(), a1_, a2_); } /// print void print(const std::string& s = "", - const KeyFormatter& kf = DefaultKeyFormatter) const { + const KeyFormatter& kf = DefaultKeyFormatter) const override { std::cout << s << "BearingFactor" << std::endl; Base::print(s, kf); } + + Vector evaluateError(const A1& a1, const A2& a2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const + { + std::vector Hs(2); + const auto &keys = Factor::keys(); + const Vector error = unwhitenedError( + {{keys[0], genericValue(a1)}, {keys[1], genericValue(a2)}}, + Hs); + if (H1) *H1 = Hs[0]; + if (H2) *H2 = Hs[1]; + return error; + } + private: friend class boost::serialization::access; diff --git a/gtsam/sam/BearingRangeFactor.h b/gtsam/sam/BearingRangeFactor.h index 44740f8ff..482dbb974 100644 --- a/gtsam/sam/BearingRangeFactor.h +++ b/gtsam/sam/BearingRangeFactor.h @@ -33,42 +33,63 @@ template ::result_type, typename R = typename Range::result_type> class BearingRangeFactor - : public ExpressionFactor2, A1, A2> { + : public ExpressionFactorN, A1, A2> { private: typedef BearingRange T; - typedef ExpressionFactor2 Base; + typedef ExpressionFactorN Base; typedef BearingRangeFactor This; 
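// Illustrative usage sketch of BearingFactor after the switch to
// ExpressionFactorN: expression() now takes its keys as an array, while
// evaluateError() is kept and routed through unwhitenedError(). The keys,
// measurement, and geometry below are assumptions chosen for this example.
#include <gtsam/geometry/Pose2.h>
#include <gtsam/sam/BearingFactor.h>

inline void bearingFactorSketch() {
  using namespace gtsam;
  typedef BearingFactor<Pose2, Point2> BearingFactor2D;
  const Key poseKey = 1, pointKey = 2;
  auto model = noiseModel::Isotropic::Sigma(1, 0.1);
  BearingFactor2D factor(poseKey, pointKey, Rot2::fromDegrees(30), model);

  // The measurement expression is built from an array of keys.
  Expression<Rot2> h = factor.expression({poseKey, pointKey});

  // evaluateError still takes the values directly, with optional Jacobians.
  Matrix H1, H2;
  const Vector error = factor.evaluateError(Pose2(1.0, 2.0, 0.3),
                                            Point2(-4.0, 11.0), H1, H2);
  (void)h;
  (void)error;
}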
public: typedef boost::shared_ptr shared_ptr; - /// default constructor + /// Default constructor BearingRangeFactor() {} - /// primary constructor - BearingRangeFactor(Key key1, Key key2, const B& measuredBearing, - const R& measuredRange, const SharedNoiseModel& model) - : Base(key1, key2, model, T(measuredBearing, measuredRange)) { - this->initialize(expression(key1, key2)); + /// Construct from BearingRange instance + BearingRangeFactor(Key key1, Key key2, const T &bearingRange, + const SharedNoiseModel &model) + : Base({{key1, key2}}, model, T(bearingRange)) { + this->initialize(expression({{key1, key2}})); + } + + /// Construct from separate bearing and range + BearingRangeFactor(Key key1, Key key2, const B &measuredBearing, + const R &measuredRange, const SharedNoiseModel &model) + : Base({{key1, key2}}, model, T(measuredBearing, measuredRange)) { + this->initialize(expression({{key1, key2}})); } virtual ~BearingRangeFactor() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } // Return measurement expression - virtual Expression expression(Key key1, Key key2) const { - return Expression(T::Measure, Expression(key1), - Expression(key2)); + Expression expression(const typename Base::ArrayNKeys& keys) const override { + return Expression(T::Measure, Expression(keys[0]), + Expression(keys[1])); + } + + Vector evaluateError(const A1& a1, const A2& a2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const + { + std::vector Hs(2); + const auto &keys = Factor::keys(); + const Vector error = unwhitenedError( + {{keys[0], genericValue(a1)}, {keys[1], genericValue(a2)}}, + Hs); + if (H1) *H1 = Hs[0]; + if (H2) *H2 = Hs[1]; + return error; } /// print - virtual void print(const std::string& s = "", - const KeyFormatter& kf = DefaultKeyFormatter) const { + void print(const std::string& s = "", + const KeyFormatter& kf = DefaultKeyFormatter) const override { std::cout << s << "BearingRangeFactor" << std::endl; Base::print(s, kf); } diff --git a/gtsam/sam/RangeFactor.h b/gtsam/sam/RangeFactor.h index 40a9cf758..d9890d2ef 100644 --- a/gtsam/sam/RangeFactor.h +++ b/gtsam/sam/RangeFactor.h @@ -32,36 +32,50 @@ struct Range; * @addtogroup SAM */ template -class RangeFactor : public ExpressionFactor2 { +class RangeFactor : public ExpressionFactorN { private: typedef RangeFactor This; - typedef ExpressionFactor2 Base; + typedef ExpressionFactorN Base; public: /// default constructor RangeFactor() {} RangeFactor(Key key1, Key key2, T measured, const SharedNoiseModel& model) - : Base(key1, key2, model, measured) { - this->initialize(expression(key1, key2)); + : Base({key1, key2}, model, measured) { + this->initialize(expression({key1, key2})); } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } // Return measurement expression - virtual Expression expression(Key key1, Key key2) const { - Expression a1_(key1); - Expression a2_(key2); + Expression expression(const typename Base::ArrayNKeys& keys) const override { + Expression a1_(keys[0]); + Expression a2_(keys[1]); return Expression(Range(), a1_, a2_); } + + Vector evaluateError(const A1& a1, const A2& a2, + boost::optional H1 = 
boost::none, + boost::optional H2 = boost::none) const + { + std::vector Hs(2); + const auto &keys = Factor::keys(); + const Vector error = Base::unwhitenedError( + {{keys[0], genericValue(a1)}, {keys[1], genericValue(a2)}}, + Hs); + if (H1) *H1 = Hs[0]; + if (H2) *H2 = Hs[1]; + return error; + } /// print void print(const std::string& s = "", - const KeyFormatter& kf = DefaultKeyFormatter) const { + const KeyFormatter& kf = DefaultKeyFormatter) const override { std::cout << s << "RangeFactor" << std::endl; Base::print(s, kf); } @@ -86,10 +100,10 @@ struct traits > */ template ::result_type> -class RangeFactorWithTransform : public ExpressionFactor2 { +class RangeFactorWithTransform : public ExpressionFactorN { private: typedef RangeFactorWithTransform This; - typedef ExpressionFactor2 Base; + typedef ExpressionFactorN Base; A1 body_T_sensor_; ///< The pose of the sensor in the body frame @@ -100,31 +114,45 @@ class RangeFactorWithTransform : public ExpressionFactor2 { RangeFactorWithTransform(Key key1, Key key2, T measured, const SharedNoiseModel& model, const A1& body_T_sensor) - : Base(key1, key2, model, measured), body_T_sensor_(body_T_sensor) { - this->initialize(expression(key1, key2)); + : Base({key1, key2}, model, measured), body_T_sensor_(body_T_sensor) { + this->initialize(expression({key1, key2})); } virtual ~RangeFactorWithTransform() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } // Return measurement expression - virtual Expression expression(Key key1, Key key2) const { + Expression expression(const typename Base::ArrayNKeys& keys) const override { Expression body_T_sensor__(body_T_sensor_); - Expression nav_T_body_(key1); + Expression nav_T_body_(keys[0]); Expression nav_T_sensor_(traits::Compose, nav_T_body_, body_T_sensor__); - Expression a2_(key2); + Expression a2_(keys[1]); return Expression(Range(), nav_T_sensor_, a2_); } + Vector evaluateError(const A1& a1, const A2& a2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const + { + std::vector Hs(2); + const auto &keys = Factor::keys(); + const Vector error = Base::unwhitenedError( + {{keys[0], genericValue(a1)}, {keys[1], genericValue(a2)}}, + Hs); + if (H1) *H1 = Hs[0]; + if (H2) *H2 = Hs[1]; + return error; + } + /** print contents */ void print(const std::string& s = "", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "RangeFactorWithTransform" << std::endl; this->body_T_sensor_.print(" sensor pose in body frame: "); Base::print(s, keyFormatter); @@ -135,9 +163,12 @@ class RangeFactorWithTransform : public ExpressionFactor2 { friend class boost::serialization::access; template void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + // **IMPORTANT** We need to (de)serialize parameters before the base class, + // since it calls expression() and we need all parameters ready at that + // point. 
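// Illustrative usage sketch of RangeFactorWithTransform, where the range is
// measured by a sensor mounted at a fixed offset body_T_sensor from the body
// frame. All keys and numeric values are assumptions chosen for this example.
#include <cmath>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/sam/RangeFactor.h>

inline void rangeWithTransformSketch() {
  using namespace gtsam;
  typedef RangeFactorWithTransform<Pose3, Point3> RangeFactorWithTransform3D;
  const Key poseKey = 1, pointKey = 2;
  auto model = noiseModel::Isotropic::Sigma(1, 0.25);
  const Pose3 body_T_sensor(Rot3::RzRyRx(-M_PI_2, 0.0, -M_PI_2),
                            Point3(0.25, -0.10, 1.0));
  RangeFactorWithTransform3D factor(poseKey, pointKey, 10.0, model,
                                    body_T_sensor);

  // Error and Jacobians are computed through the expression machinery.
  Matrix H1, H2;
  const Vector error =
      factor.evaluateError(Pose3(), Point3(-2.0, 11.0, 1.0), H1, H2);
  (void)error;
}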
+ ar& BOOST_SERIALIZATION_NVP(body_T_sensor_); ar& boost::serialization::make_nvp( "Base", boost::serialization::base_object(*this)); - ar& BOOST_SERIALIZATION_NVP(body_T_sensor_); } }; // \ RangeFactorWithTransform diff --git a/gtsam/sam/tests/testBearingFactor.cpp b/gtsam/sam/tests/testBearingFactor.cpp index 12635a7e5..17a049a1d 100644 --- a/gtsam/sam/tests/testBearingFactor.cpp +++ b/gtsam/sam/tests/testBearingFactor.cpp @@ -68,7 +68,7 @@ TEST(BearingFactor, 2D) { values.insert(poseKey, Pose2(1.0, 2.0, 0.57)); values.insert(pointKey, Point2(-4.0, 11.0)); - EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression(poseKey, pointKey), + EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression({poseKey, pointKey}), values, 1e-7, 1e-5); EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); } @@ -104,7 +104,7 @@ TEST(BearingFactor, Serialization3D) { // values.insert(poseKey, Pose3()); // values.insert(pointKey, Point3(1, 0, 0)); // -// EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression(poseKey, pointKey), +// EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression({poseKey, pointKey}), // values, 1e-7, 1e-5); // EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); //} diff --git a/gtsam/sam/tests/testBearingRangeFactor.cpp b/gtsam/sam/tests/testBearingRangeFactor.cpp index 4c7a9ab91..735358d89 100644 --- a/gtsam/sam/tests/testBearingRangeFactor.cpp +++ b/gtsam/sam/tests/testBearingRangeFactor.cpp @@ -67,7 +67,7 @@ TEST(BearingRangeFactor, 2D) { values.insert(poseKey, Pose2(1.0, 2.0, 0.57)); values.insert(pointKey, Point2(-4.0, 11.0)); - EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression(poseKey, pointKey), + EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression({poseKey, pointKey}), values, 1e-7, 1e-5); EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); } @@ -95,7 +95,7 @@ TEST(BearingRangeFactor, Serialization3D) { // values.insert(poseKey, Pose3()); // values.insert(pointKey, Point3(1, 0, 0)); // -// EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression(poseKey, pointKey), +// EXPECT_CORRECT_EXPRESSION_JACOBIANS(factor.expression({poseKey, pointKey}), // values, 1e-7, 1e-5); // EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); //} diff --git a/gtsam/sam/tests/testRangeFactor.cpp b/gtsam/sam/tests/testRangeFactor.cpp index 54d4a43c0..8ae3d818b 100644 --- a/gtsam/sam/tests/testRangeFactor.cpp +++ b/gtsam/sam/tests/testRangeFactor.cpp @@ -141,6 +141,27 @@ TEST( RangeFactor, EqualsWithTransform ) { body_P_sensor_3D); CHECK(assert_equal(factor3D_1, factor3D_2)); } +/* ************************************************************************* */ +TEST( RangeFactor, EqualsAfterDeserializing) { + // Check that the same results are obtained after deserializing: + Pose3 body_P_sensor_3D(Rot3::RzRyRx(-M_PI_2, 0.0, -M_PI_2), + Point3(0.25, -0.10, 1.0)); + + RangeFactorWithTransform3D factor3D_1(poseKey, pointKey, measurement, model, + body_P_sensor_3D), factor3D_2; + + // check with Equal() trait: + gtsam::serializationTestHelpers::roundtripXML(factor3D_1, factor3D_2); + CHECK(assert_equal(factor3D_1, factor3D_2)); + + const Pose3 pose(Rot3::RzRyRx(0.2, -0.3, 1.75), Point3(1.0, 2.0, -3.0)); + const Point3 point(-2.0, 11.0, 1.0); + const Values values = {{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}}; + + const Vector error_1 = factor3D_1.unwhitenedError(values); + const Vector error_2 = factor3D_2.unwhitenedError(values); + CHECK(assert_equal(error_1, error_2)); +} /* ************************************************************************* */ TEST( 
RangeFactor, Error2D ) { @@ -152,7 +173,7 @@ TEST( RangeFactor, Error2D ) { Point2 point(-4.0, 11.0); // Use the factor to calculate the error - Vector actualError(factor.evaluateError(pose, point)); + Vector actualError(factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}})); // The expected error is ||(5.0, 9.0)|| - 10.0 = 0.295630141 meter / UnitCovariance Vector expectedError = (Vector(1) << 0.295630141).finished(); @@ -175,7 +196,7 @@ TEST( RangeFactor, Error2DWithTransform ) { Point2 point(-4.0, 11.0); // Use the factor to calculate the error - Vector actualError(factor.evaluateError(pose, point)); + Vector actualError(factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}})); // The expected error is ||(5.0, 9.0)|| - 10.0 = 0.295630141 meter / UnitCovariance Vector expectedError = (Vector(1) << 0.295630141).finished(); @@ -194,7 +215,7 @@ TEST( RangeFactor, Error3D ) { Point3 point(-2.0, 11.0, 1.0); // Use the factor to calculate the error - Vector actualError(factor.evaluateError(pose, point)); + Vector actualError(factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}})); // The expected error is ||(3.0, 9.0, 4.0)|| - 10.0 = 0.295630141 meter / UnitCovariance Vector expectedError = (Vector(1) << 0.295630141).finished(); @@ -218,7 +239,7 @@ TEST( RangeFactor, Error3DWithTransform ) { Point3 point(-2.0, 11.0, 1.0); // Use the factor to calculate the error - Vector actualError(factor.evaluateError(pose, point)); + Vector actualError(factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}})); // The expected error is ||(3.0, 9.0, 4.0)|| - 10.0 = 0.295630141 meter / UnitCovariance Vector expectedError = (Vector(1) << 0.295630141).finished(); @@ -266,8 +287,10 @@ TEST( RangeFactor, Jacobian2DWithTransform ) { Point2 point(-4.0, 11.0); // Use the factor to calculate the Jacobians - Matrix H1Actual, H2Actual; - factor.evaluateError(pose, point, H1Actual, H2Actual); + std::vector actualHs(2); + factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}}, actualHs); + const Matrix& H1Actual = actualHs.at(0); + const Matrix& H2Actual = actualHs.at(1); // Use numerical derivatives to calculate the Jacobians Matrix H1Expected, H2Expected; @@ -291,8 +314,10 @@ TEST( RangeFactor, Jacobian3D ) { Point3 point(-2.0, 11.0, 1.0); // Use the factor to calculate the Jacobians - Matrix H1Actual, H2Actual; - factor.evaluateError(pose, point, H1Actual, H2Actual); + std::vector actualHs(2); + factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}}, actualHs); + const Matrix& H1Actual = actualHs.at(0); + const Matrix& H2Actual = actualHs.at(1); // Use numerical derivatives to calculate the Jacobians Matrix H1Expected, H2Expected; @@ -321,8 +346,10 @@ TEST( RangeFactor, Jacobian3DWithTransform ) { Point3 point(-2.0, 11.0, 1.0); // Use the factor to calculate the Jacobians - Matrix H1Actual, H2Actual; - factor.evaluateError(pose, point, H1Actual, H2Actual); + std::vector actualHs(2); + factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}}, actualHs); + const Matrix& H1Actual = actualHs.at(0); + const Matrix& H2Actual = actualHs.at(1); // Use numerical derivatives to calculate the Jacobians Matrix H1Expected, H2Expected; @@ -350,7 +377,7 @@ TEST(RangeFactor, Point3) { Vector expectedError = (Vector(1) << 0.295630141).finished(); // Verify we get the expected error - 
CHECK(assert_equal(expectedError, factor.evaluateError(pose, point), 1e-9)); + CHECK(assert_equal(expectedError, factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}}), 1e-9)); } /* ************************************************************************* */ @@ -393,7 +420,7 @@ TEST(RangeFactor, NonGTSAM) { Vector expectedError = (Vector(1) << 0.295630141).finished(); // Verify we get the expected error - CHECK(assert_equal(expectedError, factor.evaluateError(pose, point), 1e-9)); + CHECK(assert_equal(expectedError, factor.unwhitenedError({{poseKey, genericValue(pose)}, {pointKey, genericValue(point)}}), 1e-9)); } /* ************************************************************************* */ diff --git a/gtsam/sfm/BinaryMeasurement.h b/gtsam/sfm/BinaryMeasurement.h new file mode 100644 index 000000000..99e553f7a --- /dev/null +++ b/gtsam/sfm/BinaryMeasurement.h @@ -0,0 +1,84 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +#pragma once + +/** + * @file BinaryMeasurement.h + * @author Akshay Krishnan + * @date July 2020 + * @brief Binary measurement represents a measurement between two keys in a + * graph. A binary measurement is similar to a BetweenFactor, except that it + * does not contain an error function. It is a measurement (along with a noise + * model) from one key to another. Note that the direction is important. A + * measurement from key1 to key2 is not the same as the same measurement from + * key2 to key1. 
+ */ + +#include +#include +#include +#include + +#include +#include + +namespace gtsam { + +template class BinaryMeasurement : public Factor { + // Check that T type is testable + BOOST_CONCEPT_ASSERT((IsTestable)); + +public: + // shorthand for a smart pointer to a measurement + using shared_ptr = typename boost::shared_ptr; + +private: + T measured_; ///< The measurement + SharedNoiseModel noiseModel_; ///< Noise model + + public: + BinaryMeasurement(Key key1, Key key2, const T &measured, + const SharedNoiseModel &model = nullptr) + : Factor(std::vector({key1, key2})), + measured_(measured), + noiseModel_(model) {} + + /// @name Standard Interface + /// @{ + + Key key1() const { return keys_[0]; } + Key key2() const { return keys_[1]; } + const T &measured() const { return measured_; } + const SharedNoiseModel &noiseModel() const { return noiseModel_; } + + /// @} + /// @name Testable + /// @{ + + void print(const std::string &s, + const KeyFormatter &keyFormatter = DefaultKeyFormatter) const { + std::cout << s << "BinaryMeasurement(" << keyFormatter(this->key1()) << "," + << keyFormatter(this->key2()) << ")\n"; + traits::Print(measured_, " measured: "); + this->noiseModel_->print(" noise model: "); + } + + bool equals(const BinaryMeasurement &expected, double tol = 1e-9) const { + const BinaryMeasurement *e = + dynamic_cast *>(&expected); + return e != nullptr && Factor::equals(*e) && + traits::Equals(this->measured_, e->measured_, tol) && + noiseModel_->equals(*expected.noiseModel()); + } + /// @} +}; +} // namespace gtsam diff --git a/gtsam/sfm/MFAS.cpp b/gtsam/sfm/MFAS.cpp new file mode 100644 index 000000000..913752d8a --- /dev/null +++ b/gtsam/sfm/MFAS.cpp @@ -0,0 +1,171 @@ +/** + * @file MFAS.cpp + * @brief Source file for the MFAS class + * @author Akshay Krishnan + * @date July 2020 + */ + +#include + +#include +#include +#include +#include +#include + +using namespace gtsam; +using std::map; +using std::pair; +using std::unordered_map; +using std::vector; +using std::unordered_set; + +// A node in the graph. +struct GraphNode { + double inWeightSum; // Sum of absolute weights of incoming edges + double outWeightSum; // Sum of absolute weights of outgoing edges + unordered_set inNeighbors; // Nodes from which there is an incoming edge + unordered_set outNeighbors; // Nodes to which there is an outgoing edge + + // Heuristic for the node that is to select nodes in MFAS. + double heuristic() const { return (outWeightSum + 1) / (inWeightSum + 1); } +}; + +// A graph is a map from key to GraphNode. This function returns the graph from +// the edgeWeights between keys. +unordered_map graphFromEdges( + const map& edgeWeights) { + unordered_map graph; + + for (const auto& edgeWeight : edgeWeights) { + // The weights can be either negative or positive. The direction of the edge + // is the direction of positive weight. This means that the edges is from + // edge.first -> edge.second if weight is positive and edge.second -> + // edge.first if weight is negative. + const MFAS::KeyPair& edge = edgeWeight.first; + const double& weight = edgeWeight.second; + + Key edgeSource = weight >= 0 ? edge.first : edge.second; + Key edgeDest = weight >= 0 ? edge.second : edge.first; + + // Update the in weight and neighbors for the destination. + graph[edgeDest].inWeightSum += std::abs(weight); + graph[edgeDest].inNeighbors.insert(edgeSource); + + // Update the out weight and neighbors for the source. 
+ graph[edgeSource].outWeightSum += std::abs(weight); + graph[edgeSource].outNeighbors.insert(edgeDest); + } + return graph; +} + +// Selects the next node in the ordering from the graph. +Key selectNextNodeInOrdering(const unordered_map& graph) { + // Find the root nodes in the graph. + for (const auto& keyNode : graph) { + // It is a root node if the inWeightSum is close to zero. + if (keyNode.second.inWeightSum < 1e-8) { + // TODO(akshay-krishnan) if there are multiple roots, it is better to + // choose the one with highest heuristic. This is missing in the 1dsfm + // solution. + return keyNode.first; + } + } + // If there are no root nodes, return the node with the highest heuristic. + return std::max_element(graph.begin(), graph.end(), + [](const std::pair& keyNode1, + const std::pair& keyNode2) { + return keyNode1.second.heuristic() < + keyNode2.second.heuristic(); + }) + ->first; +} + +// Returns the absolute weight of the edge between node1 and node2. +double absWeightOfEdge(const Key key1, const Key key2, + const map& edgeWeights) { + // Check the direction of the edge before returning. + return edgeWeights.find(MFAS::KeyPair(key1, key2)) != edgeWeights.end() + ? std::abs(edgeWeights.at(MFAS::KeyPair(key1, key2))) + : std::abs(edgeWeights.at(MFAS::KeyPair(key2, key1))); +} + +// Removes a node from the graph and updates edge weights of its neighbors. +void removeNodeFromGraph(const Key node, + const map edgeWeights, + unordered_map& graph) { + // Update the outweights and outNeighbors of node's inNeighbors + for (const Key neighbor : graph[node].inNeighbors) { + // the edge could be either (*it, choice) with a positive weight or + // (choice, *it) with a negative weight + graph[neighbor].outWeightSum -= + absWeightOfEdge(node, neighbor, edgeWeights); + graph[neighbor].outNeighbors.erase(node); + } + // Update the inWeights and inNeighbors of node's outNeighbors + for (const Key neighbor : graph[node].outNeighbors) { + graph[neighbor].inWeightSum -= absWeightOfEdge(node, neighbor, edgeWeights); + graph[neighbor].inNeighbors.erase(node); + } + // Erase node. + graph.erase(node); +} + +MFAS::MFAS(const TranslationEdges& relativeTranslations, + const Unit3& projectionDirection) { + // Iterate over edges, obtain weights by projecting + // their relativeTranslations along the projection direction + for (const auto& measurement : relativeTranslations) { + edgeWeights_[std::make_pair(measurement.key1(), measurement.key2())] = + measurement.measured().dot(projectionDirection); + } +} + +KeyVector MFAS::computeOrdering() const { + KeyVector ordering; // Nodes in MFAS order (result). + + // A graph is an unordered map from keys to nodes. Each node contains a list + // of its adjacent nodes. Create the graph from the edgeWeights. + unordered_map graph = graphFromEdges(edgeWeights_); + + // In each iteration, one node is removed from the graph and appended to the + // ordering. + while (!graph.empty()) { + Key selection = selectNextNodeInOrdering(graph); + removeNodeFromGraph(selection, edgeWeights_, graph); + ordering.push_back(selection); + } + return ordering; +} + +map MFAS::computeOutlierWeights() const { + // Find the ordering. + KeyVector ordering = computeOrdering(); + + // Create a map from the node key to its position in the ordering. This makes + // it easier to lookup positions of different nodes. 
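// Illustrative end-to-end sketch of MFAS for outlier rejection in translation
// averaging: project the relative translation directions onto an axis, compute
// the node ordering, then read off per-edge outlier weights. The keys,
// directions, and projection axis are assumptions made up for this example.
#include <map>
#include <gtsam/geometry/Unit3.h>
#include <gtsam/inference/Key.h>
#include <gtsam/sfm/BinaryMeasurement.h>
#include <gtsam/sfm/MFAS.h>

inline void mfasSketch() {
  using namespace gtsam;
  // Unit translation directions between cameras 0, 1 and 2, in the world frame.
  MFAS::TranslationEdges relativeTranslations;
  relativeTranslations.emplace_back(0, 1, Unit3(1, 0, 0));
  relativeTranslations.emplace_back(1, 2, Unit3(1, 0, 0));
  relativeTranslations.emplace_back(0, 2, Unit3(-1, 0, 0));  // forms a cycle when projected

  // Project all edges onto the x-axis to obtain signed 1D edge weights.
  MFAS mfas(relativeTranslations, Unit3(1, 0, 0));

  const KeyVector ordering = mfas.computeOrdering();
  const std::map<MFAS::KeyPair, double> outlierWeights =
      mfas.computeOutlierWeights();
  // Edges whose direction disagrees with the ordering get a non-zero weight.
  (void)ordering;
  (void)outlierWeights;
}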
+ unordered_map orderingPositions; + for (size_t i = 0; i < ordering.size(); i++) { + orderingPositions[ordering[i]] = i; + } + + map outlierWeights; + // Check if the direction of each edge is consistent with the ordering. + for (const auto& edgeWeight : edgeWeights_) { + // Find edge source and destination. + Key source = edgeWeight.first.first; + Key dest = edgeWeight.first.second; + if (edgeWeight.second < 0) { + std::swap(source, dest); + } + + // If the direction is not consistent with the ordering (i.e dest occurs + // before src), it is an outlier edge, and has non-zero outlier weight. + if (orderingPositions.at(dest) < orderingPositions.at(source)) { + outlierWeights[edgeWeight.first] = std::abs(edgeWeight.second); + } else { + outlierWeights[edgeWeight.first] = 0; + } + } + return outlierWeights; +} diff --git a/gtsam/sfm/MFAS.h b/gtsam/sfm/MFAS.h new file mode 100644 index 000000000..decfbed0f --- /dev/null +++ b/gtsam/sfm/MFAS.h @@ -0,0 +1,101 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +#pragma once + +/** + * @file MFAS.h + * @brief MFAS class to solve Minimum Feedback Arc Set graph problem + * @author Akshay Krishnan + * @date September 2020 + */ + +#include +#include +#include + +#include +#include +#include + +namespace gtsam { + +/** + The MFAS class to solve a Minimum feedback arc set (MFAS) + problem. We implement the solution from: + Kyle Wilson and Noah Snavely, "Robust Global Translations with 1DSfM", + Proceedings of the European Conference on Computer Vision, ECCV 2014 + + Given a weighted directed graph, the objective in a Minimum feedback arc set + problem is to obtain a directed acyclic graph by removing + edges such that the total weight of removed edges is minimum. + + Although MFAS is a general graph problem and can be applied in many areas, + this classed was designed for the purpose of outlier rejection in a + translation averaging for SfM setting. For more details, refer to the above + paper. The nodes of the graph in this context represents cameras in 3D and the + edges between them represent unit translations in the world coordinate frame, + i.e w_aZb is the unit translation from a to b expressed in the world + coordinate frame. The weights for the edges are obtained by projecting the + unit translations in a projection direction. + @addtogroup SFM +*/ +class MFAS { + public: + // used to represent edges between two nodes in the graph. When used in + // translation averaging for global SfM + using KeyPair = std::pair; + using TranslationEdges = std::vector>; + + private: + // edges with a direction such that all weights are positive + // i.e, edges that originally had negative weights are flipped + std::map edgeWeights_; + + public: + /** + * @brief Construct from the weighted directed edges + * between the nodes. Each node is identified by a Key. + * @param edgeWeights: weights of edges in the graph + */ + MFAS(const std::map &edgeWeights) + : edgeWeights_(edgeWeights) {} + + /** + * @brief Constructor to be used in the context of translation averaging. + * Here, the nodes of the graph are cameras in 3D and the edges have a unit + * translation direction between them. 
The weights of the edges is computed by + * projecting them along a projection direction. + * @param relativeTranslations translation directions between the cameras + * @param projectionDirection direction in which edges are to be projected + */ + MFAS(const TranslationEdges &relativeTranslations, + const Unit3 &projectionDirection); + + /** + * @brief Computes the 1D MFAS ordering of nodes in the graph + * @return orderedNodes: vector of nodes in the obtained order + */ + KeyVector computeOrdering() const; + + /** + * @brief Computes the outlier weights of the graph. We define the outlier + * weight of a edge to be zero if the edge is an inlier and the magnitude of + * its edgeWeight if it is an outlier. This function internally calls + * computeOrdering and uses the obtained ordering to identify outlier edges. + * @return outlierWeights: map from an edge to its outlier weight. + */ + std::map computeOutlierWeights() const; +}; + +typedef std::map, double> KeyPairDoubleMap; + +} // namespace gtsam diff --git a/gtsam/sfm/ShonanAveraging.cpp b/gtsam/sfm/ShonanAveraging.cpp new file mode 100644 index 000000000..5957047a3 --- /dev/null +++ b/gtsam/sfm/ShonanAveraging.cpp @@ -0,0 +1,905 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanAveraging.cpp + * @date March 2019 - August 2020 + * @author Frank Dellaert, David Rosen, and Jing Wu + * @brief Shonan Averaging algorithm + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace gtsam { + +// In Wrappers we have no access to this so have a default ready +static std::mt19937 kRandomNumberGenerator(42); + +using Sparse = Eigen::SparseMatrix; + +/* ************************************************************************* */ +template +ShonanAveragingParameters::ShonanAveragingParameters( + const LevenbergMarquardtParams &_lm, const std::string &method, + double optimalityThreshold, double alpha, double beta, double gamma) + : lm(_lm), + optimalityThreshold(optimalityThreshold), + alpha(alpha), + beta(beta), + gamma(gamma), + useHuber(false), + certifyOptimality(true) { + // By default, we will do conjugate gradient + lm.linearSolverType = LevenbergMarquardtParams::Iterative; + + // Create subgraph builder parameters + SubgraphBuilderParameters builderParameters; + builderParameters.skeletonType = SubgraphBuilderParameters::KRUSKAL; + builderParameters.skeletonWeight = SubgraphBuilderParameters::EQUAL; + builderParameters.augmentationWeight = SubgraphBuilderParameters::SKELETON; + builderParameters.augmentationFactor = 0.0; + + auto pcg = boost::make_shared(); + + // Choose optimization method + if (method == "SUBGRAPH") { + lm.iterativeParams = + boost::make_shared(builderParameters); + } else if (method == "SGPC") { + pcg->preconditioner_ = + boost::make_shared(builderParameters); + lm.iterativeParams = pcg; + } else if (method == "JACOBI") { + pcg->preconditioner_ = + boost::make_shared(); + lm.iterativeParams = pcg; + } else if (method == "QR") { + lm.setLinearSolverType("MULTIFRONTAL_QR"); + } else if (method == 
"CHOLESKY") { + lm.setLinearSolverType("MULTIFRONTAL_CHOLESKY"); + } else { + throw std::invalid_argument("ShonanAveragingParameters: unknown method"); + } +} + +/* ************************************************************************* */ +// Explicit instantiation for d=2 and d=3 +template struct ShonanAveragingParameters<2>; +template struct ShonanAveragingParameters<3>; + +/* ************************************************************************* */ +// Calculate number of unknown rotations referenced by measurements +// Throws exception of keys are not numbered as expected (may change in future). +template +static size_t NrUnknowns( + const typename ShonanAveraging::Measurements &measurements) { + Key maxKey = 0; + std::set keys; + for (const auto &measurement : measurements) { + for (const Key &key : measurement.keys()) { + maxKey = std::max(key, maxKey); + keys.insert(key); + } + } + size_t N = keys.size(); + if (maxKey != N - 1) { + throw std::invalid_argument( + "As of now, ShonanAveraging expects keys numbered 0..N-1"); + } + return N; +} + +/* ************************************************************************* */ +template +ShonanAveraging::ShonanAveraging(const Measurements &measurements, + const Parameters ¶meters) + : parameters_(parameters), + measurements_(measurements), + nrUnknowns_(NrUnknowns(measurements)) { + for (const auto &measurement : measurements_) { + const auto &model = measurement.noiseModel(); + if (model && model->dim() != SO::dimension) { + measurement.print("Factor with incorrect noise model:\n"); + throw std::invalid_argument( + "ShonanAveraging: measurements passed to " + "constructor have incorrect dimension."); + } + } + Q_ = buildQ(); + D_ = buildD(); + L_ = D_ - Q_; +} + +/* ************************************************************************* */ +template +NonlinearFactorGraph ShonanAveraging::buildGraphAt(size_t p) const { + NonlinearFactorGraph graph; + auto G = boost::make_shared(SO<-1>::VectorizedGenerators(p)); + + for (const auto &measurement : measurements_) { + const auto &keys = measurement.keys(); + const auto &Rij = measurement.measured(); + const auto &model = measurement.noiseModel(); + graph.emplace_shared>(keys[0], keys[1], Rij, p, model, G); + } + // Possibly add Karcher prior + if (parameters_.beta > 0) { + const size_t dim = SOn::Dimension(p); + graph.emplace_shared>(graph.keys(), dim); + } + // Possibly add gauge factors - they are probably useless as gradient is zero + if (parameters_.gamma > 0 && p > d + 1) { + for (auto key : graph.keys()) + graph.emplace_shared(key, p, d, parameters_.gamma); + } + return graph; +} + +/* ************************************************************************* */ +template +double ShonanAveraging::costAt(size_t p, const Values &values) const { + const NonlinearFactorGraph graph = buildGraphAt(p); + return graph.error(values); +} + +/* ************************************************************************* */ +template +boost::shared_ptr +ShonanAveraging::createOptimizerAt(size_t p, const Values &initial) const { + // Build graph + NonlinearFactorGraph graph = buildGraphAt(p); + + // Anchor prior is added here as depends on initial value (and cost is zero) + if (parameters_.alpha > 0) { + size_t i; + Rot value; + const size_t dim = SOn::Dimension(p); + std::tie(i, value) = parameters_.anchor; + auto model = noiseModel::Isotropic::Precision(dim, parameters_.alpha); + graph.emplace_shared>(i, SOn::Lift(p, value.matrix()), + model); + } + // Optimize + return 
boost::make_shared(graph, initial, + parameters_.lm); +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::tryOptimizingAt(size_t p, + const Values &initial) const { + auto lm = createOptimizerAt(p, initial); + return lm->optimize(); +} + +/* ************************************************************************* */ +// Project to pxdN Stiefel manifold +template +Matrix ShonanAveraging::StiefelElementMatrix(const Values &values) { + const size_t N = values.size(); + const size_t p = values.at(0).rows(); + Matrix S(p, N * d); + for (const auto it : values.filter()) { + S.middleCols(it.key * d) = + it.value.matrix().leftCols(); // project Qj to Stiefel + } + return S; +} + +/* ************************************************************************* */ +template <> +Values ShonanAveraging<2>::projectFrom(size_t p, const Values &values) const { + Values result; + for (const auto it : values.filter()) { + assert(it.value.rows() == p); + const auto &M = it.value.matrix(); + const Rot2 R = Rot2::atan2(M(1, 0), M(0, 0)); + result.insert(it.key, R); + } + return result; +} + +template <> +Values ShonanAveraging<3>::projectFrom(size_t p, const Values &values) const { + Values result; + for (const auto it : values.filter()) { + assert(it.value.rows() == p); + const auto &M = it.value.matrix(); + const Rot3 R = Rot3::ClosestTo(M.topLeftCorner<3, 3>()); + result.insert(it.key, R); + } + return result; +} + +/* ************************************************************************* */ +template +static Matrix RoundSolutionS(const Matrix &S) { + const size_t N = S.cols() / d; + // First, compute a thin SVD of S + Eigen::JacobiSVD svd(S, Eigen::ComputeThinV); + const Vector sigmas = svd.singularValues(); + + // Construct a diagonal matrix comprised of the first d singular values + using DiagonalMatrix = Eigen::DiagonalMatrix; + DiagonalMatrix Sigma_d; + Sigma_d.diagonal() = sigmas.head(); + + // Now, construct a rank-d truncated singular value decomposition for S + Matrix R = Sigma_d * svd.matrixV().leftCols().transpose(); + + // Count the number of blocks whose determinants have positive sign + size_t numPositiveBlocks = 0; + for (size_t i = 0; i < N; ++i) { + // Compute the determinant of the ith dxd block of R + double determinant = R.middleCols(d * i).determinant(); + if (determinant > 0) ++numPositiveBlocks; + } + + if (numPositiveBlocks < N / 2) { + // Less than half of the total number of blocks have the correct sign. + // To reverse their orientations, multiply with a reflection matrix. 
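// Illustration (editorial): for d = 3 the reflector below is diag(1, 1, -1);
// left-multiplying R negates the last row of every 3x3 block, so each block
// determinant flips sign and the majority of blocks end up with positive
// determinant, i.e. closer to proper rotations.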
+ DiagonalMatrix reflector; + reflector.setIdentity(); + reflector.diagonal()(d - 1) = -1; + R = reflector * R; + } + + return R; +} + +/* ************************************************************************* */ +template <> +Values ShonanAveraging<2>::roundSolutionS(const Matrix &S) const { + // Round to a 2*2N matrix + Matrix R = RoundSolutionS<2>(S); + + // Finally, project each dxd rotation block to SO(2) + Values values; + for (size_t j = 0; j < nrUnknowns(); ++j) { + const Rot2 Ri = Rot2::atan2(R(1, 2 * j), R(0, 2 * j)); + values.insert(j, Ri); + } + return values; +} + +template <> +Values ShonanAveraging<3>::roundSolutionS(const Matrix &S) const { + // Round to a 3*3N matrix + Matrix R = RoundSolutionS<3>(S); + + // Finally, project each dxd rotation block to SO(3) + Values values; + for (size_t j = 0; j < nrUnknowns(); ++j) { + const Rot3 Ri = Rot3::ClosestTo(R.middleCols<3>(3 * j)); + values.insert(j, Ri); + } + return values; +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::roundSolution(const Values &values) const { + // Project to pxdN Stiefel manifold... + Matrix S = StiefelElementMatrix(values); + // ...and call version above. + return roundSolutionS(S); +} + +/* ************************************************************************* */ +template +double ShonanAveraging::cost(const Values &values) const { + NonlinearFactorGraph graph; + for (const auto &measurement : measurements_) { + const auto &keys = measurement.keys(); + const auto &Rij = measurement.measured(); + const auto &model = measurement.noiseModel(); + graph.emplace_shared>>( + keys[0], keys[1], SO(Rij.matrix()), model); + } + // Finally, project each dxd rotation block to SO(d) + Values result; + for (const auto it : values.filter()) { + result.insert(it.key, SO(it.value.matrix())); + } + return graph.error(result); +} + +/* ************************************************************************* */ +// Get kappa from noise model +template +static double Kappa(const BinaryMeasurement &measurement, + const ShonanAveragingParameters ¶meters) { + const auto &isotropic = boost::dynamic_pointer_cast( + measurement.noiseModel()); + double sigma; + if (isotropic) { + sigma = isotropic->sigma(); + } else { + const auto &robust = boost::dynamic_pointer_cast( + measurement.noiseModel()); + // Check if noise model is robust + if (robust) { + // If robust, check if optimality certificate is expected + if (parameters.getCertifyOptimality()) { + throw std::invalid_argument( + "Certification of optimality does not work with robust cost."); + } else { + // Optimality certificate not required, so setting default sigma + sigma = 1; + } + } else { + throw std::invalid_argument( + "Shonan averaging noise models must be isotropic (but robust losses " + "are allowed)."); + } + } + return 1.0 / (sigma * sigma); +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::buildD() const { + // Each measurement contributes 2*d elements along the diagonal of the + // degree matrix. 
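// Numeric example (editorial): for d = 3, an isotropic model with
// sigma = 0.1 gives kappa = 1 / sigma^2 = 100; a measurement between keys
// 0 and 2 then adds 100 to diagonal entries (0,0), (1,1), (2,2) of block 0
// and to (6,6), (7,7), (8,8) of block 2.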
+ static constexpr size_t stride = 2 * d; + + // Reserve space for triplets + std::vector> triplets; + triplets.reserve(stride * measurements_.size()); + + for (const auto &measurement : measurements_) { + // Get pose keys + const auto &keys = measurement.keys(); + + // Get kappa from noise model + double kappa = Kappa(measurement, parameters_); + + const size_t di = d * keys[0], dj = d * keys[1]; + for (size_t k = 0; k < d; k++) { + // Elements of ith block-diagonal + triplets.emplace_back(di + k, di + k, kappa); + // Elements of jth block-diagonal + triplets.emplace_back(dj + k, dj + k, kappa); + } + } + + // Construct and return a sparse matrix from these triplets + const size_t dN = d * nrUnknowns(); + Sparse D(dN, dN); + D.setFromTriplets(triplets.begin(), triplets.end()); + + return D; +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::buildQ() const { + // Each measurement contributes 2*d^2 elements on a pair of symmetric + // off-diagonal blocks + static constexpr size_t stride = 2 * d * d; + + // Reserve space for triplets + std::vector> triplets; + triplets.reserve(stride * measurements_.size()); + + for (const auto &measurement : measurements_) { + // Get pose keys + const auto &keys = measurement.keys(); + + // Extract rotation measurement + const auto Rij = measurement.measured().matrix(); + + // Get kappa from noise model + double kappa = Kappa(measurement, parameters_); + + const size_t di = d * keys[0], dj = d * keys[1]; + for (size_t r = 0; r < d; r++) { + for (size_t c = 0; c < d; c++) { + // Elements of ij block + triplets.emplace_back(di + r, dj + c, kappa * Rij(r, c)); + // Elements of ji block + triplets.emplace_back(dj + r, di + c, kappa * Rij(c, r)); + } + } + } + + // Construct and return a sparse matrix from these triplets + const size_t dN = d * nrUnknowns(); + Sparse Q(dN, dN); + Q.setFromTriplets(triplets.begin(), triplets.end()); + + return Q; +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::computeLambda(const Matrix &S) const { + // Each pose contributes 2*d elements along the diagonal of Lambda + static constexpr size_t stride = d * d; + + // Reserve space for triplets + const size_t N = nrUnknowns(); + std::vector> triplets; + triplets.reserve(stride * N); + + // Do sparse-dense multiply to get Q*S' + auto QSt = Q_ * S.transpose(); + + for (size_t j = 0; j < N; j++) { + // Compute B, the building block for the j^th diagonal block of Lambda + const size_t dj = d * j; + Matrix B = QSt.middleRows(dj, d) * S.middleCols(dj); + + // Elements of jth block-diagonal + for (size_t r = 0; r < d; r++) + for (size_t c = 0; c < d; c++) + triplets.emplace_back(dj + r, dj + c, 0.5 * (B(r, c) + B(c, r))); + } + + // Construct and return a sparse matrix from these triplets + Sparse Lambda(d * N, d * N); + Lambda.setFromTriplets(triplets.begin(), triplets.end()); + return Lambda; +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::computeLambda(const Values &values) const { + // Project to pxdN Stiefel manifold... + Matrix S = StiefelElementMatrix(values); + // ...and call version above. 
+ return computeLambda(S); +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::computeA(const Values &values) const { + assert(values.size() == nrUnknowns()); + const Matrix S = StiefelElementMatrix(values); + auto Lambda = computeLambda(S); + return Lambda - Q_; +} + +/* ************************************************************************* */ +/// MINIMUM EIGENVALUE COMPUTATIONS + +/** This is a lightweight struct used in conjunction with Spectra to compute + * the minimum eigenvalue and eigenvector of a sparse matrix A; it has a single + * nontrivial function, perform_op(x,y), that computes and returns the product + * y = (A + sigma*I) x */ +struct MatrixProdFunctor { + // Const reference to an externally-held matrix whose minimum-eigenvalue we + // want to compute + const Sparse &A_; + + // Spectral shift + double sigma_; + + // Constructor + explicit MatrixProdFunctor(const Sparse &A, double sigma = 0) + : A_(A), sigma_(sigma) {} + + int rows() const { return A_.rows(); } + int cols() const { return A_.cols(); } + + // Matrix-vector multiplication operation + void perform_op(const double *x, double *y) const { + // Wrap the raw arrays as Eigen Vector types + Eigen::Map X(x, rows()); + Eigen::Map Y(y, rows()); + + // Do the multiplication using wrapped Eigen vectors + Y = A_ * X + sigma_ * X; + } +}; + +/// Function to compute the minimum eigenvalue of A using Lanczos in Spectra. +/// This does 2 things: +/// +/// (1) Quick (coarse) eigenvalue computation to estimate the largest-magnitude +/// eigenvalue (2) A second eigenvalue computation applied to A-sigma*I, where +/// sigma is chosen to make the minimum eigenvalue of A the extremal eigenvalue +/// of A-sigma*I +/// +/// Upon completion, this returns a boolean value indicating whether the minimum +/// eigenvalue was computed to the required precision -- if so, its sets the +/// values of minEigenValue and minEigenVector appropriately + +/// Note that in the following function signature, S is supposed to be the +/// block-row-matrix that is a critical point for the optimization algorithm; +/// either S (Stiefel manifold) or R (block rotations). We use this to +/// construct a starting vector v for the Lanczos process that will be close to +/// the minimum eigenvector we're looking for whenever the relaxation is exact +/// -- this is a key feature that helps to make this method fast. Note that +/// instead of passing in all of S, it would be enough to pass in one of S's +/// *rows*, if that's more convenient. + +// For the defaults, David Rosen says: +// - maxIterations refers to the max number of Lanczos iterations to run; +// ~1000 should be sufficiently large +// - We've been using 10^-4 for the nonnegativity tolerance +// - for numLanczosVectors, 20 is a good default value + +static bool SparseMinimumEigenValue( + const Sparse &A, const Matrix &S, double *minEigenValue, + Vector *minEigenVector = 0, size_t *numIterations = 0, + size_t maxIterations = 1000, + double minEigenvalueNonnegativityTolerance = 10e-4, + Eigen::Index numLanczosVectors = 20) { + // a. 
Estimate the largest-magnitude eigenvalue of this matrix using Lanczos + MatrixProdFunctor lmOperator(A); + Spectra::SymEigsSolver + lmEigenValueSolver(&lmOperator, 1, std::min(numLanczosVectors, A.rows())); + lmEigenValueSolver.init(); + + const int lmConverged = lmEigenValueSolver.compute( + maxIterations, 1e-4, Spectra::SELECT_EIGENVALUE::LARGEST_MAGN); + + // Check convergence and bail out if necessary + if (lmConverged != 1) return false; + + const double lmEigenValue = lmEigenValueSolver.eigenvalues()(0); + + if (lmEigenValue < 0) { + // The largest-magnitude eigenvalue is negative, and therefore also the + // minimum eigenvalue, so just return this solution + *minEigenValue = lmEigenValue; + if (minEigenVector) { + *minEigenVector = lmEigenValueSolver.eigenvectors(1).col(0); + minEigenVector->normalize(); // Ensure that this is a unit vector + } + return true; + } + + // The largest-magnitude eigenvalue is positive, and is therefore the + // maximum eigenvalue. Therefore, after shifting the spectrum of A + // by -2*lmEigenValue (by forming A - 2*lambda_max*I), the shifted + // spectrum will lie in the interval [minEigenValue(A) - 2* lambda_max(A), + // -lambda_max*A]; in particular, the largest-magnitude eigenvalue of + // A - 2*lambda_max*I is minEigenValue - 2*lambda_max, with corresponding + // eigenvector v_min + + MatrixProdFunctor minShiftedOperator(A, -2 * lmEigenValue); + + Spectra::SymEigsSolver + minEigenValueSolver(&minShiftedOperator, 1, + std::min(numLanczosVectors, A.rows())); + + // If S is a critical point of F, then S^T is also in the null space of S - + // Lambda(S) (cf. Lemma 6 of the tech report), and therefore its rows are + // eigenvectors corresponding to the eigenvalue 0. In the case that the + // relaxation is exact, this is the *minimum* eigenvalue, and therefore the + // rows of S are exactly the eigenvectors that we're looking for. On the + // other hand, if the relaxation is *not* exact, then S - Lambda(S) has at + // least one strictly negative eigenvalue, and the rows of S are *unstable + // fixed points* for the Lanczos iterations. Thus, we will take a slightly + // "fuzzed" version of the first row of S as an initialization for the + // Lanczos iterations; this allows for rapid convergence in the case that + // the relaxation is exact (since are starting close to a solution), while + // simultaneously allowing the iterations to escape from this fixed point in + // the case that the relaxation is not exact. 
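// Numeric illustration of the spectral shift described above (editorial):
// if lambda_max(A) = 10 and lambda_min(A) = -0.5, the spectrum of
// A - 2*lambda_max*I lies in [-20.5, -10]; its largest-magnitude eigenvalue
// is -20.5, and adding back 2*lambda_max recovers
// lambda_min = -20.5 + 20 = -0.5, which is how minEigenValue is
// reconstructed below.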
+ Vector v0 = S.row(0).transpose(); + Vector perturbation(v0.size()); + perturbation.setRandom(); + perturbation.normalize(); + Vector xinit = v0 + (.03 * v0.norm()) * perturbation; // Perturb v0 by ~3% + + // Use this to initialize the eigensolver + minEigenValueSolver.init(xinit.data()); + + // Now determine the relative precision required in the Lanczos method in + // order to be able to estimate the smallest eigenvalue within an *absolute* + // tolerance of 'minEigenvalueNonnegativityTolerance' + const int minConverged = minEigenValueSolver.compute( + maxIterations, minEigenvalueNonnegativityTolerance / lmEigenValue, + Spectra::SELECT_EIGENVALUE::LARGEST_MAGN); + + if (minConverged != 1) return false; + + *minEigenValue = minEigenValueSolver.eigenvalues()(0) + 2 * lmEigenValue; + if (minEigenVector) { + *minEigenVector = minEigenValueSolver.eigenvectors(1).col(0); + minEigenVector->normalize(); // Ensure that this is a unit vector + } + if (numIterations) *numIterations = minEigenValueSolver.num_iterations(); + return true; +} + +/* ************************************************************************* */ +template +Sparse ShonanAveraging::computeA(const Matrix &S) const { + auto Lambda = computeLambda(S); + return Lambda - Q_; +} + +/* ************************************************************************* */ +template +double ShonanAveraging::computeMinEigenValue(const Values &values, + Vector *minEigenVector) const { + assert(values.size() == nrUnknowns()); + const Matrix S = StiefelElementMatrix(values); + auto A = computeA(S); + + double minEigenValue; + bool success = SparseMinimumEigenValue(A, S, &minEigenValue, minEigenVector); + if (!success) { + throw std::runtime_error( + "SparseMinimumEigenValue failed to compute minimum eigenvalue."); + } + return minEigenValue; +} + +/* ************************************************************************* */ +template +std::pair ShonanAveraging::computeMinEigenVector( + const Values &values) const { + Vector minEigenVector; + double minEigenValue = computeMinEigenValue(values, &minEigenVector); + return std::make_pair(minEigenValue, minEigenVector); +} + +/* ************************************************************************* */ +template +bool ShonanAveraging::checkOptimality(const Values &values) const { + double minEigenValue = computeMinEigenValue(values); + return minEigenValue > parameters_.optimalityThreshold; +} + +/* ************************************************************************* */ +template +VectorValues ShonanAveraging::TangentVectorValues(size_t p, + const Vector &v) { + VectorValues delta; + // Create a tangent direction xi with eigenvector segment v_i + const size_t dimension = SOn::Dimension(p); + double sign0 = pow(-1.0, round((p + 1) / 2) + 1); + for (size_t i = 0; i < v.size() / d; i++) { + // Assumes key is 0-based integer + const auto v_i = v.segment(d * i); + Vector xi = Vector::Zero(dimension); + double sign = sign0; + for (size_t j = 0; j < d; j++) { + xi(j + p - d - 1) = sign * v_i(d - j - 1); + sign = -sign; + } + delta.insert(i, xi); + } + return delta; +} + +/* ************************************************************************* */ +template +Matrix ShonanAveraging::riemannianGradient(size_t p, + const Values &values) const { + Matrix S_dot = StiefelElementMatrix(values); + // calculate the gradient of F(Q_dot) at Q_dot + Matrix euclideanGradient = 2 * (L_ * (S_dot.transpose())).transpose(); + // cout << "euclidean gradient rows and cols" << euclideanGradient.rows() << + // "\t" << 
euclideanGradient.cols() << endl; + + // project the gradient onto the entire euclidean space + Matrix symBlockDiagProduct(p, d * nrUnknowns()); + for (size_t i = 0; i < nrUnknowns(); i++) { + // Compute block product + const size_t di = d * i; + const Matrix P = S_dot.middleCols(di).transpose() * + euclideanGradient.middleCols(di); + // Symmetrize this block + Matrix S = .5 * (P + P.transpose()); + // Compute S_dot * S and set corresponding block + symBlockDiagProduct.middleCols(di) = S_dot.middleCols(di) * S; + } + Matrix riemannianGradient = euclideanGradient - symBlockDiagProduct; + return riemannianGradient; +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::LiftwithDescent(size_t p, const Values &values, + const Vector &minEigenVector) { + Values lifted = LiftTo(p, values); + VectorValues delta = TangentVectorValues(p, minEigenVector); + return lifted.retract(delta); +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::initializeWithDescent( + size_t p, const Values &values, const Vector &minEigenVector, + double minEigenValue, double gradienTolerance, + double preconditionedGradNormTolerance) const { + double funcVal = costAt(p - 1, values); + double alphaMin = 1e-2; + double alpha = + std::max(1024 * alphaMin, 10 * gradienTolerance / fabs(minEigenValue)); + vector alphas; + vector fvals; + // line search + while ((alpha >= alphaMin)) { + Values Qplus = LiftwithDescent(p, values, alpha * minEigenVector); + double funcValTest = costAt(p, Qplus); + Matrix gradTest = riemannianGradient(p, Qplus); + double gradTestNorm = gradTest.norm(); + // Record alpha and funcVal + alphas.push_back(alpha); + fvals.push_back(funcValTest); + if ((funcVal > funcValTest) && (gradTestNorm > gradienTolerance)) { + return Qplus; + } + alpha /= 2; + } + + auto fminIter = min_element(fvals.begin(), fvals.end()); + auto minIdx = distance(fvals.begin(), fminIter); + double fMin = fvals[minIdx]; + double aMin = alphas[minIdx]; + if (fMin < funcVal) { + Values Qplus = LiftwithDescent(p, values, aMin * minEigenVector); + return Qplus; + } + + return LiftwithDescent(p, values, alpha * minEigenVector); +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::initializeRandomly(std::mt19937 &rng) const { + Values initial; + for (size_t j = 0; j < nrUnknowns(); j++) { + initial.insert(j, Rot::Random(rng)); + } + return initial; +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::initializeRandomly() const { + return initializeRandomly(kRandomNumberGenerator); +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::initializeRandomlyAt(size_t p, + std::mt19937 &rng) const { + const Values randomRotations = initializeRandomly(rng); + return LiftTo(p, randomRotations); // lift to p! +} + +/* ************************************************************************* */ +template +Values ShonanAveraging::initializeRandomlyAt(size_t p) const { + return initializeRandomlyAt(p, kRandomNumberGenerator); +} + +/* ************************************************************************* */ +template +std::pair ShonanAveraging::run(const Values &initialEstimate, + size_t pMin, + size_t pMax) const { + Values Qstar; + Values initialSOp = LiftTo(pMin, initialEstimate); // lift to pMin! 
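// Editorial note: LiftTo embeds each d x d rotation in the top-left block of
// a pMin x pMin identity via SOn::Lift, e.g. a Rot3 lifted to p = 5 keeps its
// 3x3 block and is padded with a 2x2 identity. The staircase below then
// climbs p = pMin, pMin+1, ... until the optimality certificate holds or
// pMax is reached.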
+ for (size_t p = pMin; p <= pMax; p++) { + // Optimize until convergence at this level + Qstar = tryOptimizingAt(p, initialSOp); + if (parameters_.getUseHuber() || !parameters_.getCertifyOptimality()) { + // in this case, there is no optimality certification + if (pMin != pMax) { + throw std::runtime_error( + "When using robust norm, Shonan only tests a single rank. Set pMin = pMax"); + } + const Values SO3Values = roundSolution(Qstar); + return std::make_pair(SO3Values, 0); + } else { + // Check certificate of global optimality + Vector minEigenVector; + double minEigenValue = computeMinEigenValue(Qstar, &minEigenVector); + if (minEigenValue > parameters_.optimalityThreshold) { + // If at global optimum, round and return solution + const Values SO3Values = roundSolution(Qstar); + return std::make_pair(SO3Values, minEigenValue); + } + + // Not at global optimimum yet, so check whether we will go to next level + if (p != pMax) { + // Calculate initial estimate for next level by following minEigenVector + initialSOp = + initializeWithDescent(p + 1, Qstar, minEigenVector, minEigenValue); + } + } + } + throw std::runtime_error("Shonan::run did not converge for given pMax"); +} + +/* ************************************************************************* */ +// Explicit instantiation for d=2 +template class ShonanAveraging<2>; + +ShonanAveraging2::ShonanAveraging2(const Measurements &measurements, + const Parameters ¶meters) + : ShonanAveraging<2>(maybeRobust(measurements, parameters.getUseHuber()), + parameters) {} + +ShonanAveraging2::ShonanAveraging2(string g2oFile, const Parameters ¶meters) + : ShonanAveraging<2>(maybeRobust(parseMeasurements(g2oFile), + parameters.getUseHuber()), + parameters) {} + +/* ************************************************************************* */ +// Explicit instantiation for d=3 +template class ShonanAveraging<3>; + +ShonanAveraging3::ShonanAveraging3(const Measurements &measurements, + const Parameters ¶meters) + : ShonanAveraging<3>(maybeRobust(measurements, parameters.getUseHuber()), + parameters) {} + +ShonanAveraging3::ShonanAveraging3(string g2oFile, const Parameters ¶meters) + : ShonanAveraging<3>(maybeRobust(parseMeasurements(g2oFile), + parameters.getUseHuber()), + parameters) {} + +// TODO(frank): Deprecate after we land pybind wrapper + +// Extract Rot3 measurement from Pose3 betweenfactors +// Modeled after similar function in dataset.cpp +static BinaryMeasurement convert( + const BetweenFactor::shared_ptr &f) { + auto gaussian = + boost::dynamic_pointer_cast(f->noiseModel()); + if (!gaussian) + throw std::invalid_argument( + "parseMeasurements can only convert Pose3 measurements " + "with Gaussian noise models."); + const Matrix6 M = gaussian->covariance(); + auto model = noiseModel::Robust::Create( + noiseModel::mEstimator::Huber::Create(1.345), + noiseModel::Gaussian::Covariance(M.block<3, 3>(3, 3))); + return BinaryMeasurement(f->key1(), f->key2(), f->measured().rotation(), + model); +} + +static ShonanAveraging3::Measurements extractRot3Measurements( + const BetweenFactorPose3s &factors) { + ShonanAveraging3::Measurements result; + result.reserve(factors.size()); + for (auto f : factors) result.push_back(convert(f)); + return result; +} + +ShonanAveraging3::ShonanAveraging3(const BetweenFactorPose3s &factors, + const Parameters ¶meters) + : ShonanAveraging<3>(maybeRobust(extractRot3Measurements(factors), + parameters.getUseHuber()), + parameters) {} + +/* ************************************************************************* */ +} // 
namespace gtsam diff --git a/gtsam/sfm/ShonanAveraging.h b/gtsam/sfm/ShonanAveraging.h new file mode 100644 index 000000000..b1f26acb8 --- /dev/null +++ b/gtsam/sfm/ShonanAveraging.h @@ -0,0 +1,436 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanAveraging.h + * @date March 2019 - August 2020 + * @author Frank Dellaert, David Rosen, and Jing Wu + * @brief Shonan Averaging algorithm + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace gtsam { +class NonlinearFactorGraph; +class LevenbergMarquardtOptimizer; + +/// Parameters governing optimization etc. +template +struct GTSAM_EXPORT ShonanAveragingParameters { + // Select Rot2 or Rot3 interface based template parameter d + using Rot = typename std::conditional::type; + using Anchor = std::pair; + + // Parameters themselves: + LevenbergMarquardtParams lm; ///< LM parameters + double optimalityThreshold; ///< threshold used in checkOptimality + Anchor anchor; ///< pose to use as anchor if not Karcher + double alpha; ///< weight of anchor-based prior (default 0) + double beta; ///< weight of Karcher-based prior (default 1) + double gamma; ///< weight of gauge-fixing factors (default 0) + /// if enabled, the Huber loss is used (default false) + bool useHuber; + /// if enabled solution optimality is certified (default true) + bool certifyOptimality; + + ShonanAveragingParameters(const LevenbergMarquardtParams &lm = + LevenbergMarquardtParams::CeresDefaults(), + const std::string &method = "JACOBI", + double optimalityThreshold = -1e-4, + double alpha = 0.0, double beta = 1.0, + double gamma = 0.0); + + LevenbergMarquardtParams getLMParams() const { return lm; } + + void setOptimalityThreshold(double value) { optimalityThreshold = value; } + double getOptimalityThreshold() const { return optimalityThreshold; } + + void setAnchor(size_t index, const Rot &value) { anchor = {index, value}; } + std::pair getAnchor() const { return anchor; } + + void setAnchorWeight(double value) { alpha = value; } + double getAnchorWeight() const { return alpha; } + + void setKarcherWeight(double value) { beta = value; } + double getKarcherWeight() const { return beta; } + + void setGaugesWeight(double value) { gamma = value; } + double getGaugesWeight() const { return gamma; } + + void setUseHuber(bool value) { useHuber = value; } + bool getUseHuber() const { return useHuber; } + + void setCertifyOptimality(bool value) { certifyOptimality = value; } + bool getCertifyOptimality() const { return certifyOptimality; } + + /// Print the parameters and flags used for rotation averaging. 
+ void print() const { + std::cout << " ShonanAveragingParameters: " << std::endl; + std::cout << " alpha: " << alpha << std::endl; + std::cout << " beta: " << beta << std::endl; + std::cout << " gamma: " << gamma << std::endl; + std::cout << " useHuber: " << useHuber << std::endl; + } +}; + +using ShonanAveragingParameters2 = ShonanAveragingParameters<2>; +using ShonanAveragingParameters3 = ShonanAveragingParameters<3>; + +/** + * Class that implements Shonan Averaging from our ECCV'20 paper. + * Note: The "basic" API uses all Rot values (Rot2 or Rot3, depending on value + * of d), whereas the different levels and "advanced" API at SO(p) needs Values + * of type SOn. + * + * The template parameter d can be 2 or 3. + * Both are specialized in the .cpp file. + * + * If you use this code in your work, please consider citing our paper: + * Shonan Rotation Averaging, Global Optimality by Surfing SO(p)^n + * Frank Dellaert, David M. Rosen, Jing Wu, Robert Mahony, and Luca Carlone, + * European Computer Vision Conference, 2020. + * You can view our ECCV spotlight video at https://youtu.be/5ppaqMyHtE0 + */ +template +class GTSAM_EXPORT ShonanAveraging { + public: + using Sparse = Eigen::SparseMatrix; + + // Define the Parameters type and use its typedef of the rotation type: + using Parameters = ShonanAveragingParameters; + using Rot = typename Parameters::Rot; + + // We store SO(d) BetweenFactors to get noise model + using Measurements = std::vector>; + + private: + Parameters parameters_; + Measurements measurements_; + size_t nrUnknowns_; + Sparse D_; // Sparse (diagonal) degree matrix + Sparse Q_; // Sparse measurement matrix, == \tilde{R} in Eriksson18cvpr + Sparse L_; // connection Laplacian L = D - Q, needed for optimality check + + /** + * Build 3Nx3N sparse matrix consisting of rotation measurements, arranged as + * (i,j) and (j,i) blocks within a sparse matrix. + */ + Sparse buildQ() const; + + /// Build 3Nx3N sparse degree matrix D + Sparse buildD() const; + + public: + /// @name Standard Constructors + /// @{ + + /// Construct from set of relative measurements (given as BetweenFactor + /// for now) NoiseModel *must* be isotropic. + ShonanAveraging(const Measurements &measurements, + const Parameters ¶meters = Parameters()); + + /// @} + /// @name Query properties + /// @{ + + /// Return number of unknowns + size_t nrUnknowns() const { return nrUnknowns_; } + + /// Return number of measurements + size_t nrMeasurements() const { return measurements_.size(); } + + /// k^th binary measurement + const BinaryMeasurement &measurement(size_t k) const { + return measurements_[k]; + } + + /** + * Update factors to use robust Huber loss. + * + * @param measurements Vector of BinaryMeasurements. + * @param k Huber noise model threshold. + */ + Measurements makeNoiseModelRobust(const Measurements &measurements, + double k = 1.345) const { + Measurements robustMeasurements; + for (auto &measurement : measurements) { + auto model = measurement.noiseModel(); + const auto &robust = + boost::dynamic_pointer_cast(model); + + SharedNoiseModel robust_model; + // Check if the noise model is already robust + if (robust) { + robust_model = model; + } else { + // make robust + robust_model = noiseModel::Robust::Create( + noiseModel::mEstimator::Huber::Create(k), model); + } + BinaryMeasurement meas(measurement.key1(), measurement.key2(), + measurement.measured(), robust_model); + robustMeasurements.push_back(meas); + } + return robustMeasurements; + } + + /// k^th measurement, as a Rot. 
+ const Rot &measured(size_t k) const { return measurements_[k].measured(); } + + /// Keys for k^th measurement, as a vector of Key values. + const KeyVector &keys(size_t k) const { return measurements_[k].keys(); } + + /// @} + /// @name Matrix API (advanced use, debugging) + /// @{ + + Sparse D() const { return D_; } ///< Sparse version of D + Matrix denseD() const { return Matrix(D_); } ///< Dense version of D + Sparse Q() const { return Q_; } ///< Sparse version of Q + Matrix denseQ() const { return Matrix(Q_); } ///< Dense version of Q + Sparse L() const { return L_; } ///< Sparse version of L + Matrix denseL() const { return Matrix(L_); } ///< Dense version of L + + /// Version that takes pxdN Stiefel manifold elements + Sparse computeLambda(const Matrix &S) const; + + /// Dense versions of computeLambda for wrapper/testing + Matrix computeLambda_(const Values &values) const { + return Matrix(computeLambda(values)); + } + + /// Dense versions of computeLambda for wrapper/testing + Matrix computeLambda_(const Matrix &S) const { + return Matrix(computeLambda(S)); + } + + /// Compute A matrix whose Eigenvalues we will examine + Sparse computeA(const Values &values) const; + + /// Version that takes pxdN Stiefel manifold elements + Sparse computeA(const Matrix &S) const; + + /// Dense version of computeA for wrapper/testing + Matrix computeA_(const Values &values) const { + return Matrix(computeA(values)); + } + + /// Project to pxdN Stiefel manifold + static Matrix StiefelElementMatrix(const Values &values); + + /** + * Compute minimum eigenvalue for optimality check. + * @param values: should be of type SOn + */ + double computeMinEigenValue(const Values &values, + Vector *minEigenVector = nullptr) const; + + /// Project pxdN Stiefel manifold matrix S to Rot3^N + Values roundSolutionS(const Matrix &S) const; + + /// Create a VectorValues with eigenvector v_i + static VectorValues TangentVectorValues(size_t p, const Vector &v); + + /// Calculate the riemannian gradient of F(values) at values + Matrix riemannianGradient(size_t p, const Values &values) const; + + /** + * Lift up the dimension of values in type SO(p-1) with descent direction + * provided by minEigenVector and return new values in type SO(p) + */ + static Values LiftwithDescent(size_t p, const Values &values, + const Vector &minEigenVector); + + /** + * Given some values at p-1, return new values at p, by doing a line search + * along the descent direction, computed from the minimum eigenvector at p-1. + * @param values should be of type SO(p-1) + * @param minEigenVector corresponding to minEigenValue at level p-1 + * @return values of type SO(p) + */ + Values initializeWithDescent( + size_t p, const Values &values, const Vector &minEigenVector, + double minEigenValue, double gradienTolerance = 1e-2, + double preconditionedGradNormTolerance = 1e-4) const; + /// @} + /// @name Advanced API + /// @{ + + /** + * Build graph for SO(p) + * @param p the dimensionality of the rotation manifold to optimize over + */ + NonlinearFactorGraph buildGraphAt(size_t p) const; + + /** + * Create initial Values of type SO(p) + * @param p the dimensionality of the rotation manifold + */ + Values initializeRandomlyAt(size_t p, std::mt19937 &rng) const; + + /// Version of initializeRandomlyAt with fixed random seed. 
+ Values initializeRandomlyAt(size_t p) const; + + /** + * Calculate cost for SO(p) + * Values should be of type SO(p) + */ + double costAt(size_t p, const Values &values) const; + + /** + * Given an estimated local minimum Yopt for the (possibly lifted) + * relaxation, this function computes and returns the block-diagonal elements + * of the corresponding Lagrange multiplier. + */ + Sparse computeLambda(const Values &values) const; + + /** + * Compute minimum eigenvalue for optimality check. + * @param values: should be of type SOn + * @return minEigenVector and minEigenValue + */ + std::pair computeMinEigenVector(const Values &values) const; + + /** + * Check optimality + * @param values: should be of type SOn + */ + bool checkOptimality(const Values &values) const; + + /** + * Try to create optimizer at SO(p) + * @param p the dimensionality of the rotation manifold to optimize over + * @param initial initial SO(p) values + * @return lm optimizer + */ + boost::shared_ptr createOptimizerAt( + size_t p, const Values &initial) const; + + /** + * Try to optimize at SO(p) + * @param p the dimensionality of the rotation manifold to optimize over + * @param initial initial SO(p) values + * @return SO(p) values + */ + Values tryOptimizingAt(size_t p, const Values &initial) const; + + /** + * Project from SO(p) to Rot2 or Rot3 + * Values should be of type SO(p) + */ + Values projectFrom(size_t p, const Values &values) const; + + /** + * Project from SO(p)^N to Rot2^N or Rot3^N + * Values should be of type SO(p) + */ + Values roundSolution(const Values &values) const; + + /// Lift Values of type T to SO(p) + template + static Values LiftTo(size_t p, const Values &values) { + Values result; + for (const auto it : values.filter()) { + result.insert(it.key, SOn::Lift(p, it.value.matrix())); + } + return result; + } + + /// @} + /// @name Basic API + /// @{ + + /** + * Calculate cost for SO(3) + * Values should be of type Rot3 + */ + double cost(const Values &values) const; + + /** + * Initialize randomly at SO(d) + * @param rng random number generator + * Example: + * std::mt19937 rng(42); + * Values initial = initializeRandomly(rng, p); + */ + Values initializeRandomly(std::mt19937 &rng) const; + + /// Random initialization for wrapper, fixed random seed. + Values initializeRandomly() const; + + /** + * Optimize at different values of p until convergence. + * @param initial initial Rot3 values + * @param pMin value of p to start Riemanian staircase at (default: d). + * @param pMax maximum value of p to try (default: 10) + * @return (Rot3 values, minimum eigenvalue) + */ + std::pair run(const Values &initialEstimate, size_t pMin = d, + size_t pMax = 10) const; + /// @} + + /** + * Helper function to convert measurements to robust noise model + * if flag is set. + * + * @tparam T the type of measurement, e.g. Rot3. + * @param measurements vector of BinaryMeasurements of type T. + * @param useRobustModel flag indicating whether use robust noise model + * instead. + */ + template + inline std::vector> maybeRobust( + const std::vector> &measurements, + bool useRobustModel = false) const { + return useRobustModel ? makeNoiseModelRobust(measurements) : measurements; + } +}; + +// Subclasses for d=2 and d=3 that explicitly instantiate, as well as provide a +// convenience interface with file access. 
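A minimal usage sketch (editorial) of the convenience subclass declared just below. It relies only on the constructor, initializeRandomly, and run signatures shown in this header; the g2o file name is hypothetical, and pMin = 3, pMax = 10 simply match the defaults documented above.

#include <gtsam/sfm/ShonanAveraging.h>
#include <tuple>

using namespace gtsam;

int main() {
  // Default parameters: Ceres-style LM defaults with the JACOBI preconditioner.
  ShonanAveragingParameters3 parameters;
  ShonanAveraging3 shonan("pose3_graph.g2o", parameters);  // hypothetical file

  // Random Rot3 initial values, then run the Riemannian staircase over SO(p).
  Values initial = shonan.initializeRandomly();
  Values rotations;
  double minEigenValue;
  std::tie(rotations, minEigenValue) = shonan.run(initial, 3, 10);

  // A minimum eigenvalue above the optimality threshold certifies that the
  // rounded Rot3 solution is globally optimal.
  return 0;
}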
+ +class ShonanAveraging2 : public ShonanAveraging<2> { + public: + ShonanAveraging2(const Measurements &measurements, + const Parameters ¶meters = Parameters()); + explicit ShonanAveraging2(std::string g2oFile, + const Parameters ¶meters = Parameters()); +}; + +class ShonanAveraging3 : public ShonanAveraging<3> { + public: + ShonanAveraging3(const Measurements &measurements, + const Parameters ¶meters = Parameters()); + explicit ShonanAveraging3(std::string g2oFile, + const Parameters ¶meters = Parameters()); + + // TODO(frank): Deprecate after we land pybind wrapper + ShonanAveraging3(const BetweenFactorPose3s &factors, + const Parameters ¶meters = Parameters()); +}; +} // namespace gtsam diff --git a/gtsam/sfm/ShonanFactor.cpp b/gtsam/sfm/ShonanFactor.cpp new file mode 100644 index 000000000..b911fb5a4 --- /dev/null +++ b/gtsam/sfm/ShonanFactor.cpp @@ -0,0 +1,141 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file FrobeniusFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Various factors that minimize some Frobenius norm + */ + +#include + +#include +#include +#include + +#include +#include +#include + +using namespace std; + +namespace gtsam { + +//****************************************************************************** +template +ShonanFactor::ShonanFactor(Key j1, Key j2, const Rot &R12, size_t p, + const SharedNoiseModel &model, + const boost::shared_ptr &G) + : NoiseModelFactor2(ConvertNoiseModel(model, p * d), j1, j2), + M_(R12.matrix()), // d*d in all cases + p_(p), // 4 for SO(4) + pp_(p * p), // 16 for SO(4) + G_(G) { + if (noiseModel()->dim() != d * p_) + throw std::invalid_argument( + "ShonanFactor: model with incorrect dimension."); + if (!G) { + G_ = boost::make_shared(); + *G_ = SOn::VectorizedGenerators(p); // expensive! 
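    // Dimension check example (editorial): for p = 4, the generator matrix G_
    // is pp_ x SOn::Dimension(p) = 16 x 6, i.e. one vectorized 4x4 Lie-algebra
    // generator of so(4) per column.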
+ } + if (static_cast(G_->rows()) != pp_ || + static_cast(G_->cols()) != SOn::Dimension(p)) + throw std::invalid_argument("ShonanFactor: passed in generators " + "of incorrect dimension."); +} + +//****************************************************************************** +template +void ShonanFactor::print(const std::string &s, + const KeyFormatter &keyFormatter) const { + std::cout << s << "ShonanFactor<" << p_ << ">(" << keyFormatter(key1()) << "," + << keyFormatter(key2()) << ")\n"; + traits::Print(M_, " M: "); + noiseModel_->print(" noise model: "); +} + +//****************************************************************************** +template +bool ShonanFactor::equals(const NonlinearFactor &expected, + double tol) const { + auto e = dynamic_cast(&expected); + return e != nullptr && NoiseModelFactor2::equals(*e, tol) && + p_ == e->p_ && M_ == e->M_; +} + +//****************************************************************************** +template +void ShonanFactor::fillJacobians(const Matrix &M1, const Matrix &M2, + boost::optional H1, + boost::optional H2) const { + gttic(ShonanFactor_Jacobians); + const size_t dim = p_ * d; // Stiefel manifold dimension + + if (H1) { + // If asked, calculate Jacobian H1 as as (M' \otimes M1) * G + // M' = dxd, M1 = pxp, G = (p*p)xDim(p), result should be dim x Dim(p) + // (M' \otimes M1) is dim*dim, but last pp-dim columns are zero + *H1 = Matrix::Zero(dim, G_->cols()); + for (size_t j = 0; j < d; j++) { + auto MG_j = M1 * G_->middleRows(j * p_, p_); // p_ * Dim(p) + for (size_t i = 0; i < d; i++) { + H1->middleRows(i * p_, p_) -= M_(j, i) * MG_j; + } + } + } + if (H2) { + // If asked, calculate Jacobian H2 as as (I_d \otimes M2) * G + // I_d = dxd, M2 = pxp, G = (p*p)xDim(p), result should be dim x Dim(p) + // (I_d \otimes M2) is dim*dim, but again last pp-dim columns are zero + H2->resize(dim, G_->cols()); + for (size_t i = 0; i < d; i++) { + H2->middleRows(i * p_, p_) = M2 * G_->middleRows(i * p_, p_); + } + } +} + +//****************************************************************************** +template +Vector ShonanFactor::evaluateError(const SOn &Q1, const SOn &Q2, + boost::optional H1, + boost::optional H2) const { + gttic(ShonanFactor_evaluateError); + + const Matrix &M1 = Q1.matrix(); + const Matrix &M2 = Q2.matrix(); + if (M1.rows() != static_cast(p_) || M2.rows() != static_cast(p_)) + throw std::invalid_argument("Invalid dimension SOn values passed to " + "ShonanFactor::evaluateError"); + + const size_t dim = p_ * d; // Stiefel manifold dimension + Vector fQ2(dim), hQ1(dim); + + // Vectorize and extract only d leftmost columns, i.e. 
vec(M2*P) + fQ2 << Eigen::Map(M2.data(), dim, 1); + + // Vectorize M1*P*R12 + const Matrix Q1PR12 = M1.leftCols() * M_; + hQ1 << Eigen::Map(Q1PR12.data(), dim, 1); + + this->fillJacobians(M1, M2, H1, H2); + + return fQ2 - hQ1; +} + +/* ************************************************************************* */ +// Explicit instantiation for d=2 and d=3 +template class ShonanFactor<2>; +template class ShonanFactor<3>; + +//****************************************************************************** + +} // namespace gtsam diff --git a/gtsam/sfm/ShonanFactor.h b/gtsam/sfm/ShonanFactor.h new file mode 100644 index 000000000..3c43c2c52 --- /dev/null +++ b/gtsam/sfm/ShonanFactor.h @@ -0,0 +1,91 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanFactor.h + * @date March 2019 + * @author Frank Dellaert + * @brief Main factor type in Shonan averaging, on SO(n) pairs + */ + +#pragma once + +#include +#include +#include +#include + +#include + +namespace gtsam { + +/** + * ShonanFactor is a BetweenFactor that moves in SO(p), but will + * land on the SO(d) sub-manifold of SO(p) at the global minimum. It projects + * the SO(p) matrices down to a Stiefel manifold of p*d matrices. + */ +template +class GTSAM_EXPORT ShonanFactor : public NoiseModelFactor2 { + Matrix M_; ///< measured rotation between R1 and R2 + size_t p_, pp_; ///< dimensionality constants + boost::shared_ptr G_; ///< matrix of vectorized generators + + // Select Rot2 or Rot3 interface based template parameter d + using Rot = typename std::conditional::type; + +public: + /// @name Constructor + /// @{ + + /// Constructor. Note we convert to d*p-dimensional noise model. + /// To save memory and mallocs, pass in the vectorized Lie algebra generators: + /// G = boost::make_shared(SOn::VectorizedGenerators(p)); + ShonanFactor(Key j1, Key j2, const Rot &R12, size_t p, + const SharedNoiseModel &model = nullptr, + const boost::shared_ptr &G = nullptr); + + /// @} + /// @name Testable + /// @{ + + /// print with optional string + void + print(const std::string &s, + const KeyFormatter &keyFormatter = DefaultKeyFormatter) const override; + + /// assert equality up to a tolerance + bool equals(const NonlinearFactor &expected, + double tol = 1e-9) const override; + + /// @} + /// @name NoiseModelFactor2 methods + /// @{ + + /// Error is Frobenius norm between Q1*P*R12 and Q2*P, where P=[I_3x3;0] + /// projects down from SO(p) to the Stiefel manifold of px3 matrices. 
+ Vector + evaluateError(const SOn &Q1, const SOn &Q2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const override; + /// @} + +private: + /// Calculate Jacobians if asked, Only implemented for d=2 and 3 in .cpp + void fillJacobians(const Matrix &M1, const Matrix &M2, + boost::optional H1, + boost::optional H2) const; +}; + +// Explicit instantiation for d=2 and d=3 in .cpp file: +using ShonanFactor2 = ShonanFactor<2>; +using ShonanFactor3 = ShonanFactor<3>; + +} // namespace gtsam diff --git a/gtsam/sfm/ShonanGaugeFactor.h b/gtsam/sfm/ShonanGaugeFactor.h new file mode 100644 index 000000000..4847c5d58 --- /dev/null +++ b/gtsam/sfm/ShonanGaugeFactor.h @@ -0,0 +1,108 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file ShonanGaugeFactor.h + * @date March 2019 + * @author Frank Dellaert + * @brief Factor used in Shonan Averaging to clamp down gauge freedom + */ + +#pragma once + +#include +#include +#include + +#include + +namespace gtsam { +/** + * The ShonanGaugeFactor creates a constraint on a single SO(n) to avoid moving + * in the stabilizer. + * + * Details: SO(p) contains the p*3 Stiefel matrices of orthogonal frames: we + * take those to be the 3 columns on the left. + * The P*P skew-symmetric matrices associated with so(p) can be partitioned as + * (Appendix B in the ECCV paper): + * | [w] -K' | + * | K [g] | + * where w is the SO(3) space, K are the extra Stiefel diemnsions (wormhole!) + * and (g)amma are extra dimensions in SO(p) that do not modify the cost + * function. The latter corresponds to rotations SO(p-d), and so the first few + * values of p-d for d==3 with their corresponding dimensionality are {0:0, 1:0, + * 2:1, 3:3, ...} + * + * The ShonanGaugeFactor adds a unit Jacobian to these extra dimensions, + * essentially restricting the optimization to the Stiefel manifold. + */ +class GTSAM_EXPORT ShonanGaugeFactor : public NonlinearFactor { + // Row dimension, equal to the dimensionality of SO(p-d) + size_t rows_; + + /// Constant Jacobian + boost::shared_ptr whitenedJacobian_; + +public: + /** + * Construct from key for an SO(p) matrix, for base dimension d (2 or 3) + * If parameter gamma is given, it acts as a precision = 1/sigma^2, and + * the Jacobian will be multiplied with 1/sigma = sqrt(gamma). + */ + ShonanGaugeFactor(Key key, size_t p, size_t d = 3, + boost::optional gamma = boost::none) + : NonlinearFactor(boost::assign::cref_list_of<1>(key)) { + if (p < d) { + throw std::invalid_argument("ShonanGaugeFactor must have p>=d."); + } + // Calculate dimensions + size_t q = p - d; + size_t P = SOn::Dimension(p); // dimensionality of SO(p) + rows_ = SOn::Dimension(q); // dimensionality of SO(q), the gauge + + // Create constant Jacobian as a rows_*P matrix: there are rows_ penalized + // dimensions, but it is a bit tricky to find them among the P columns. + // The key is to look at how skew-symmetric matrices are laid out in SOn.h: + // the first tangent dimension will always be included, but beyond that we + // have to be careful. We always need to skip the d top-rows of the skew- + // symmetric matrix as they below to K, part of the Stiefel manifold. 
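    // Worked example (editorial): for p = 5, d = 3 we have q = 2, so
    // rows_ = dim SO(2) = 1 and P = dim SO(5) = 10; the loop below then places
    // a single invSigma entry at A(0, 0), i.e. only the first tangent
    // dimension is penalized. For p = 6, d = 3, rows_ = 3 and the penalized
    // columns are {0, 1} and, after skipping the d = 3 Stiefel columns, {5}.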
+ Matrix A(rows_, P); + A.setZero(); + double invSigma = gamma ? std::sqrt(*gamma) : 1.0; + size_t i = 0, j = 0, n = p - 1 - d; + while (i < rows_) { + A.block(i, j, n, n) = invSigma * Matrix::Identity(n, n); + i += n; + j += n + d; // skip d columns + n -= 1; + } + // TODO(frank): assign the right one in the right columns + whitenedJacobian_ = + boost::make_shared(key, A, Vector::Zero(rows_)); + } + + /// Destructor + virtual ~ShonanGaugeFactor() {} + + /// Calculate the error of the factor: always zero + double error(const Values &c) const override { return 0; } + + /// get the dimension of the factor (number of rows on linearization) + size_t dim() const override { return rows_; } + + /// linearize to a GaussianFactor + boost::shared_ptr linearize(const Values &c) const override { + return whitenedJacobian_; + } +}; +// \ShonanGaugeFactor + +} // namespace gtsam \ No newline at end of file diff --git a/gtsam/sfm/TranslationFactor.h b/gtsam/sfm/TranslationFactor.h new file mode 100644 index 000000000..d1f85cf01 --- /dev/null +++ b/gtsam/sfm/TranslationFactor.h @@ -0,0 +1,86 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +#pragma once + +/** + * @file TranslationFactor.h + * @author Frank Dellaert + * @date March 2020 + * @brief Binary factor for a relative translation direction measurement. + */ + +#include +#include +#include +#include + +namespace gtsam { + +/** + * Binary factor for a relative translation direction measurement + * w_aZb. The measurement equation is + * w_aZb = Unit3(Tb - Ta) + * i.e., w_aZb is the translation direction from frame A to B, in world + * coordinates. Although Unit3 instances live on a manifold, following + * Wilson14eccv_1DSfM.pdf error we compute the *chordal distance* in the + * ambient world coordinate frame: + * normalized(Tb - Ta) - w_aZb.point3() + * + * @addtogroup SFM + * + * + */ +class TranslationFactor : public NoiseModelFactor2 { + private: + typedef NoiseModelFactor2 Base; + Point3 measured_w_aZb_; + + public: + /// default constructor + TranslationFactor() {} + + TranslationFactor(Key a, Key b, const Unit3& w_aZb, + const SharedNoiseModel& noiseModel) + : Base(noiseModel, a, b), measured_w_aZb_(w_aZb.point3()) {} + + /** + * @brief Caclulate error: (norm(Tb - Ta) - measurement) + * where Tb and Ta are Point3 translations and measurement is + * the Unit3 translation direction from a to b. + * + * @param Ta translation for key a + * @param Tb translation for key b + * @param H1 optional jacobian in Ta + * @param H2 optional jacobian in Tb + * @return * Vector + */ + Vector evaluateError( + const Point3& Ta, const Point3& Tb, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const override { + const Point3 dir = Tb - Ta; + Matrix33 H_predicted_dir; + const Point3 predicted = normalize(dir, H1 || H2 ? 
&H_predicted_dir : nullptr); + if (H1) *H1 = -H_predicted_dir; + if (H2) *H2 = H_predicted_dir; + return predicted - measured_w_aZb_; + } + + private: + friend class boost::serialization::access; + template + void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + ar& boost::serialization::make_nvp( + "Base", boost::serialization::base_object(*this)); + } +}; // \ TranslationFactor +} // namespace gtsam diff --git a/gtsam/sfm/TranslationRecovery.cpp b/gtsam/sfm/TranslationRecovery.cpp new file mode 100644 index 000000000..d4100b00a --- /dev/null +++ b/gtsam/sfm/TranslationRecovery.cpp @@ -0,0 +1,143 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file TranslationRecovery.cpp + * @author Frank Dellaert, Akshay Krishnan + * @date March 2020 + * @brief Source code for recovering translations when rotations are given + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +using namespace gtsam; +using namespace std; + +TranslationRecovery::TranslationRecovery( + const TranslationRecovery::TranslationEdges &relativeTranslations, + const LevenbergMarquardtParams &lmParams) + : params_(lmParams) { + // Some relative translations may be zero. We treat nodes that have a zero + // relativeTranslation as a single node. + + // A DSFMap is used to find sets of nodes that have a zero relative + // translation. Add the nodes in each edge to the DSFMap, and merge nodes that + // are connected by a zero relative translation. + DSFMap sameTranslationDSF; + for (const auto &edge : relativeTranslations) { + Key key1 = sameTranslationDSF.find(edge.key1()); + Key key2 = sameTranslationDSF.find(edge.key2()); + if (key1 != key2 && edge.measured().equals(Unit3(0.0, 0.0, 0.0))) { + sameTranslationDSF.merge(key1, key2); + } + } + // Use only those edges for which two keys have a distinct root in the DSFMap. + for (const auto &edge : relativeTranslations) { + Key key1 = sameTranslationDSF.find(edge.key1()); + Key key2 = sameTranslationDSF.find(edge.key2()); + if (key1 == key2) continue; + relativeTranslations_.emplace_back(key1, key2, edge.measured(), + edge.noiseModel()); + } + // Store the DSF map for post-processing results. + sameTranslationNodes_ = sameTranslationDSF.sets(); +} + +NonlinearFactorGraph TranslationRecovery::buildGraph() const { + NonlinearFactorGraph graph; + + // Add all relative translation edges + for (auto edge : relativeTranslations_) { + graph.emplace_shared(edge.key1(), edge.key2(), + edge.measured(), edge.noiseModel()); + } + + return graph; +} + +void TranslationRecovery::addPrior( + const double scale, NonlinearFactorGraph *graph, + const SharedNoiseModel &priorNoiseModel) const { + auto edge = relativeTranslations_.begin(); + graph->emplace_shared >(edge->key1(), Point3(0, 0, 0), + priorNoiseModel); + graph->emplace_shared >( + edge->key2(), scale * edge->measured().point3(), edge->noiseModel()); +} + +Values TranslationRecovery::initalizeRandomly() const { + // Create a lambda expression that checks whether value exists and randomly + // initializes if not. 
+ Values initial; + auto insert = [&initial](Key j) { + if (!initial.exists(j)) { + initial.insert(j, Vector3::Random()); + } + }; + + // Loop over measurements and add a random translation + for (auto edge : relativeTranslations_) { + insert(edge.key1()); + insert(edge.key2()); + } + return initial; +} + +Values TranslationRecovery::run(const double scale) const { + auto graph = buildGraph(); + addPrior(scale, &graph); + const Values initial = initalizeRandomly(); + LevenbergMarquardtOptimizer lm(graph, initial, params_); + Values result = lm.optimize(); + + // Nodes that were not optimized are stored in sameTranslationNodes_ as a map + // from a key that was optimized to keys that were not optimized. Iterate over + // map and add results for keys not optimized. + for (const auto &optimizedAndDuplicateKeys : sameTranslationNodes_) { + Key optimizedKey = optimizedAndDuplicateKeys.first; + std::set duplicateKeys = optimizedAndDuplicateKeys.second; + // Add the result for the duplicate key if it does not already exist. + for (const Key duplicateKey : duplicateKeys) { + if (result.exists(duplicateKey)) continue; + result.insert(duplicateKey, result.at(optimizedKey)); + } + } + return result; +} + +TranslationRecovery::TranslationEdges TranslationRecovery::SimulateMeasurements( + const Values &poses, const vector &edges) { + auto edgeNoiseModel = noiseModel::Isotropic::Sigma(3, 0.01); + TranslationEdges relativeTranslations; + for (auto edge : edges) { + Key a, b; + tie(a, b) = edge; + const Pose3 wTa = poses.at(a), wTb = poses.at(b); + const Point3 Ta = wTa.translation(), Tb = wTb.translation(); + const Unit3 w_aZb(Tb - Ta); + relativeTranslations.emplace_back(a, b, w_aZb, edgeNoiseModel); + } + return relativeTranslations; +} diff --git a/gtsam/sfm/TranslationRecovery.h b/gtsam/sfm/TranslationRecovery.h new file mode 100644 index 000000000..c99836853 --- /dev/null +++ b/gtsam/sfm/TranslationRecovery.h @@ -0,0 +1,127 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file TranslationRecovery.h + * @author Frank Dellaert + * @date March 2020 + * @brief Recovering translations in an epipolar graph when rotations are given. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace gtsam { + +// Set up an optimization problem for the unknown translations Ti in the world +// coordinate frame, given the known camera attitudes wRi with respect to the +// world frame, and a set of (noisy) translation directions of type Unit3, +// w_aZb. The measurement equation is +// w_aZb = Unit3(Tb - Ta) (1) +// i.e., w_aZb is the translation direction from frame A to B, in world +// coordinates. Although Unit3 instances live on a manifold, following +// Wilson14eccv_1DSfM.pdf error we compute the *chordal distance* in the +// ambient world coordinate frame. +// +// It is clear that we cannot recover the scale, nor the absolute position, +// so the gauge freedom in this case is 3 + 1 = 4. We fix these by taking fixing +// the translations Ta and Tb associated with the first measurement w_aZb, +// clamping them to their initial values as given to this method. 
If no initial +// values are given, we use the origin for Tb and set Tb to make (1) come +// through, i.e., +// Tb = s * wRa * Point3(w_aZb) (2) +// where s is an arbitrary scale that can be supplied, default 1.0. Hence, two +// versions are supplied below corresponding to whether we have initial values +// or not. +class TranslationRecovery { + public: + using KeyPair = std::pair; + using TranslationEdges = std::vector>; + + private: + // Translation directions between camera pairs. + TranslationEdges relativeTranslations_; + + // Parameters used by the LM Optimizer. + LevenbergMarquardtParams params_; + + // Map from a key in the graph to a set of keys that share the same + // translation. + std::map> sameTranslationNodes_; + + public: + /** + * @brief Construct a new Translation Recovery object + * + * @param relativeTranslations the relative translations, in world coordinate + * frames, vector of BinaryMeasurements of Unit3, where each key of a + * measurement is a point in 3D. + * @param lmParams (optional) gtsam::LavenbergMarquardtParams that can be + * used to modify the parameters for the LM optimizer. By default, uses the + * default LM parameters. + */ + TranslationRecovery( + const TranslationEdges &relativeTranslations, + const LevenbergMarquardtParams &lmParams = LevenbergMarquardtParams()); + + /** + * @brief Build the factor graph to do the optimization. + * + * @return NonlinearFactorGraph + */ + NonlinearFactorGraph buildGraph() const; + + /** + * @brief Add priors on ednpoints of first measurement edge. + * + * @param scale scale for first relative translation which fixes gauge. + * @param graph factor graph to which prior is added. + * @param priorNoiseModel the noise model to use with the prior. + */ + void addPrior(const double scale, NonlinearFactorGraph *graph, + const SharedNoiseModel &priorNoiseModel = + noiseModel::Isotropic::Sigma(3, 0.01)) const; + + /** + * @brief Create random initial translations. + * + * @return Values + */ + Values initalizeRandomly() const; + + /** + * @brief Build and optimize factor graph. + * + * @param scale scale for first relative translation which fixes gauge. + * @return Values + */ + Values run(const double scale = 1.0) const; + + /** + * @brief Simulate translation direction measurements + * + * @param poses SE(3) ground truth poses stored as Values + * @param edges pairs (a,b) for which a measurement w_aZb will be generated. + * @return TranslationEdges vector of binary measurements where the keys are + * the cameras and the measurement is the simulated Unit3 translation + * direction between the cameras. + */ + static TranslationEdges SimulateMeasurements( + const Values &poses, const std::vector &edges); +}; +} // namespace gtsam diff --git a/gtsam/sfm/tests/testBinaryMeasurement.cpp b/gtsam/sfm/tests/testBinaryMeasurement.cpp new file mode 100644 index 000000000..ae13e54c4 --- /dev/null +++ b/gtsam/sfm/tests/testBinaryMeasurement.cpp @@ -0,0 +1,87 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testBinaryMeasurement.cpp + * @brief Unit tests for BinaryMeasurement class + * @author Akshay Krishnan + * @date July 2020 + */ + +#include + +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +static const Key kKey1(2), kKey2(1); + +// Create noise models for unit3 and rot3 +static SharedNoiseModel unit3_model(noiseModel::Isotropic::Sigma(2, 0.05)); +static SharedNoiseModel rot3_model(noiseModel::Isotropic::Sigma(3, 0.05)); + +const Unit3 unit3Measured(Vector3(1, 1, 1)); +const Rot3 rot3Measured; + +/* ************************************************************************* */ +TEST(BinaryMeasurement, Unit3) { + BinaryMeasurement unit3Measurement(kKey1, kKey2, unit3Measured, + unit3_model); + EXPECT_LONGS_EQUAL(unit3Measurement.key1(), kKey1); + EXPECT_LONGS_EQUAL(unit3Measurement.key2(), kKey2); + EXPECT(unit3Measurement.measured().equals(unit3Measured)); + + BinaryMeasurement unit3MeasurementCopy(kKey1, kKey2, unit3Measured, + unit3_model); + EXPECT(unit3Measurement.equals(unit3MeasurementCopy)); +} + +/* ************************************************************************* */ +TEST(BinaryMeasurement, Rot3) { + // testing the accessors + BinaryMeasurement rot3Measurement(kKey1, kKey2, rot3Measured, + rot3_model); + EXPECT_LONGS_EQUAL(rot3Measurement.key1(), kKey1); + EXPECT_LONGS_EQUAL(rot3Measurement.key2(), kKey2); + EXPECT(rot3Measurement.measured().equals(rot3Measured)); + + // testing the equals function + BinaryMeasurement rot3MeasurementCopy(kKey1, kKey2, rot3Measured, + rot3_model); + EXPECT(rot3Measurement.equals(rot3MeasurementCopy)); +} + +/* ************************************************************************* */ +TEST(BinaryMeasurement, Rot3MakeRobust) { + auto huber_model = noiseModel::Robust::Create( + noiseModel::mEstimator::Huber::Create(1.345), rot3_model); + BinaryMeasurement rot3Measurement(kKey1, kKey2, rot3Measured, + huber_model); + + EXPECT_LONGS_EQUAL(rot3Measurement.key1(), kKey1); + EXPECT_LONGS_EQUAL(rot3Measurement.key2(), kKey2); + EXPECT(rot3Measurement.measured().equals(rot3Measured)); + const auto &robust = boost::dynamic_pointer_cast( + rot3Measurement.noiseModel()); + EXPECT(robust); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/sfm/tests/testMFAS.cpp b/gtsam/sfm/tests/testMFAS.cpp new file mode 100644 index 000000000..a1f6dfa5f --- /dev/null +++ b/gtsam/sfm/tests/testMFAS.cpp @@ -0,0 +1,104 @@ +/** + * @file testMFAS.cpp + * @brief Unit tests for the MFAS class + * @author Akshay Krishnan + * @date July 2020 + */ + +#include + +#include + +using namespace std; +using namespace gtsam; + +/** + * We (partially) use the example from the paper on 1dsfm + * (https://research.cs.cornell.edu/1dsfm/docs/1DSfM_ECCV14.pdf, Fig 1, Page 5) + * for the unit tests here. The only change is that we leave out node 4 and use + * only nodes 0-3. This makes the test easier to understand and also + * avoids an ambiguity in the ground truth ordering that arises due to + * insufficient edges in the geaph when using the 4th node. 
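+ *
+ * For reference: MFAS refers to the minimum feedback arc set problem used in 1DSfM.
+ * Given directed edges weighted by projecting translation directions onto a chosen
+ * direction, computeOrdering() returns a node ordering that minimizes the weight of
+ * edges pointing against it, and computeOutlierWeights() reports how much weight
+ * each edge contributes against that ordering (zero for consistent edges).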
+ */ + +// edges in the graph - last edge from node 3 to 0 is an outlier +vector edges = {make_pair(3, 2), make_pair(0, 1), make_pair(3, 1), + make_pair(1, 2), make_pair(0, 2), make_pair(3, 0)}; +// nodes in the graph +KeyVector nodes = {Key(0), Key(1), Key(2), Key(3)}; +// weights from projecting in direction-1 (bad direction, outlier accepted) +vector weights1 = {2, 1.5, 0.5, 0.25, 1, 0.75}; +// weights from projecting in direction-2 (good direction, outlier rejected) +vector weights2 = {0.5, 0.75, -0.25, 0.75, 1, 0.5}; + +// helper function to obtain map from keypairs to weights from the +// vector representations +map getEdgeWeights(const vector &edges, + const vector &weights) { + map edgeWeights; + for (size_t i = 0; i < edges.size(); i++) { + edgeWeights[edges[i]] = weights[i]; + } + return edgeWeights; +} + +// test the ordering and the outlierWeights function using weights2 - outlier +// edge is rejected when projected in a direction that gives weights2 +TEST(MFAS, OrderingWeights2) { + MFAS mfas_obj(getEdgeWeights(edges, weights2)); + + KeyVector ordered_nodes = mfas_obj.computeOrdering(); + + // ground truth (expected) ordering in this example + KeyVector gt_ordered_nodes = {0, 1, 3, 2}; + + // check if the expected ordering is obtained + for (size_t i = 0; i < ordered_nodes.size(); i++) { + EXPECT_LONGS_EQUAL(gt_ordered_nodes[i], ordered_nodes[i]); + } + + map outlier_weights = mfas_obj.computeOutlierWeights(); + + // since edge between 3 and 0 is inconsistent with the ordering, it must have + // positive outlier weight, other outlier weights must be zero + for (auto &edge : edges) { + if (edge == make_pair(Key(3), Key(0)) || + edge == make_pair(Key(0), Key(3))) { + EXPECT_DOUBLES_EQUAL(outlier_weights[edge], 0.5, 1e-6); + } else { + EXPECT_DOUBLES_EQUAL(outlier_weights[edge], 0, 1e-6); + } + } +} + +// test the ordering function and the outlierWeights method using +// weights1 (outlier edge is accepted when projected in a direction that +// produces weights1) +TEST(MFAS, OrderingWeights1) { + MFAS mfas_obj(getEdgeWeights(edges, weights1)); + + KeyVector ordered_nodes = mfas_obj.computeOrdering(); + + // "ground truth" expected ordering in this example + KeyVector gt_ordered_nodes = {3, 0, 1, 2}; + + // check if the expected ordering is obtained + for (size_t i = 0; i < ordered_nodes.size(); i++) { + EXPECT_LONGS_EQUAL(gt_ordered_nodes[i], ordered_nodes[i]); + } + + map outlier_weights = mfas_obj.computeOutlierWeights(); + + // since edge between 3 and 0 is inconsistent with the ordering, it must have + // positive outlier weight, other outlier weights must be zero + for (auto &edge : edges) { + EXPECT_DOUBLES_EQUAL(outlier_weights[edge], 0, 1e-6); + } +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/sfm/tests/testShonanAveraging.cpp b/gtsam/sfm/tests/testShonanAveraging.cpp new file mode 100644 index 000000000..06e909c61 --- /dev/null +++ b/gtsam/sfm/tests/testShonanAveraging.cpp @@ -0,0 +1,418 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testShonanAveraging.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Unit tests for Shonan Averaging algorithm + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; + +template +using Rot = typename std::conditional::type; + +template +using Pose = typename std::conditional::type; + +ShonanAveraging3 fromExampleName( + const std::string &name, + ShonanAveraging3::Parameters parameters = ShonanAveraging3::Parameters()) { + string g2oFile = findExampleDataFile(name); + return ShonanAveraging3(g2oFile, parameters); +} + +static const ShonanAveraging3 kShonan = fromExampleName("toyExample.g2o"); + +static std::mt19937 kRandomNumberGenerator(42); + +/* ************************************************************************* */ +TEST(ShonanAveraging3, checkConstructor) { + EXPECT_LONGS_EQUAL(5, kShonan.nrUnknowns()); + + EXPECT_LONGS_EQUAL(15, kShonan.D().rows()); + EXPECT_LONGS_EQUAL(15, kShonan.D().cols()); + auto D = kShonan.denseD(); + EXPECT_LONGS_EQUAL(15, D.rows()); + EXPECT_LONGS_EQUAL(15, D.cols()); + + EXPECT_LONGS_EQUAL(15, kShonan.Q().rows()); + EXPECT_LONGS_EQUAL(15, kShonan.Q().cols()); + auto Q = kShonan.denseQ(); + EXPECT_LONGS_EQUAL(15, Q.rows()); + EXPECT_LONGS_EQUAL(15, Q.cols()); + + EXPECT_LONGS_EQUAL(15, kShonan.L().rows()); + EXPECT_LONGS_EQUAL(15, kShonan.L().cols()); + auto L = kShonan.denseL(); + EXPECT_LONGS_EQUAL(15, L.rows()); + EXPECT_LONGS_EQUAL(15, L.cols()); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, buildGraphAt) { + auto graph = kShonan.buildGraphAt(5); + EXPECT_LONGS_EQUAL(7, graph.size()); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, checkOptimality) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values random = ShonanAveraging3::LiftTo(4, randomRotations); // lift to 4! 
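+  // A random lift to p=4 should not be optimal: the certificate computed below has
+  // a clearly negative minimum eigenvalue, so checkOptimality() must return false.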
+ auto Lambda = kShonan.computeLambda(random); + EXPECT_LONGS_EQUAL(15, Lambda.rows()); + EXPECT_LONGS_EQUAL(15, Lambda.cols()); + EXPECT_LONGS_EQUAL(45, Lambda.nonZeros()); + auto lambdaMin = kShonan.computeMinEigenValue(random); + // EXPECT_DOUBLES_EQUAL(-5.2964625490657866, lambdaMin, + // 1e-4); // Regression test + EXPECT_DOUBLES_EQUAL(-414.87376657555996, lambdaMin, + 1e-4); // Regression test + EXPECT(!kShonan.checkOptimality(random)); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, checkSubgraph) { + // Create parameter with solver set to SUBGRAPH + auto params = ShonanAveragingParameters3( + gtsam::LevenbergMarquardtParams::CeresDefaults(), "SUBGRAPH"); + ShonanAveraging3::Measurements measurements; + + // The toyExample.g2o has 5 vertices, from 0-4 + // The edges are: 1-2, 2-3, 3-4, 3-1, 1-4, 0-1, + // which can build a connected graph + auto subgraphShonan = fromExampleName("toyExample.g2o", params); + + // Create initial random estimation + Values initial; + initial = subgraphShonan.initializeRandomly(kRandomNumberGenerator); + + // Run Shonan with SUBGRAPH solver + auto result = subgraphShonan.run(initial, 3, 3); + EXPECT_DOUBLES_EQUAL(1e-11, subgraphShonan.cost(result.first), 1e-4); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, tryOptimizingAt3) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values initial = ShonanAveraging3::LiftTo(3, randomRotations); // convert to SOn + EXPECT(!kShonan.checkOptimality(initial)); + const Values result = kShonan.tryOptimizingAt(3, initial); + EXPECT(kShonan.checkOptimality(result)); + auto lambdaMin = kShonan.computeMinEigenValue(result); + EXPECT_DOUBLES_EQUAL(-5.427688831332745e-07, lambdaMin, + 1e-4); // Regression test + EXPECT_DOUBLES_EQUAL(0, kShonan.costAt(3, result), 1e-4); + const Values SO3Values = kShonan.roundSolution(result); + EXPECT_DOUBLES_EQUAL(0, kShonan.cost(SO3Values), 1e-4); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, tryOptimizingAt4) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values random = ShonanAveraging3::LiftTo(4, randomRotations); // lift to 4! 
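+  // Starting from the random lift, optimizing at p=4 should reach a certifiably
+  // optimal solution whose rounded SO(3) values have (numerically) zero cost.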
+ const Values result = kShonan.tryOptimizingAt(4, random); + EXPECT(kShonan.checkOptimality(result)); + EXPECT_DOUBLES_EQUAL(0, kShonan.costAt(4, result), 1e-3); + auto lambdaMin = kShonan.computeMinEigenValue(result); + EXPECT_DOUBLES_EQUAL(-5.427688831332745e-07, lambdaMin, + 1e-4); // Regression test + const Values SO3Values = kShonan.roundSolution(result); + EXPECT_DOUBLES_EQUAL(0, kShonan.cost(SO3Values), 1e-4); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, TangentVectorValues) { + Vector9 v; + v << 1, 2, 3, 4, 5, 6, 7, 8, 9; + Vector expected0(10), expected1(10), expected2(10); + expected0 << 0, 3, -2, 1, 0, 0, 0, 0, 0, 0; + expected1 << 0, 6, -5, 4, 0, 0, 0, 0, 0, 0; + expected2 << 0, 9, -8, 7, 0, 0, 0, 0, 0, 0; + const VectorValues xi = ShonanAveraging3::TangentVectorValues(5, v); + EXPECT(assert_equal(expected0, xi[0])); + EXPECT(assert_equal(expected1, xi[1])); + EXPECT(assert_equal(expected2, xi[2])); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, LiftTo) { + auto I = genericValue(Rot3()); + Values initial{{0, I}, {1, I}, {2, I}}; + Values lifted = ShonanAveraging3::LiftTo(5, initial); + EXPECT(assert_equal(SOn(5), lifted.at(0))); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, CheckWithEigen) { + // control randomness + static std::mt19937 rng(0); + Vector descentDirection = Vector::Random(15); // for use below + const Values randomRotations = kShonan.initializeRandomly(rng); + Values random = ShonanAveraging3::LiftTo(3, randomRotations); + + // Optimize + const Values Qstar3 = kShonan.tryOptimizingAt(3, random); + + // Compute Eigenvalue with Spectra solver + double lambda = kShonan.computeMinEigenValue(Qstar3); + + // Check Eigenvalue with slow Eigen version, converts matrix A to dense matrix! + const Matrix S = ShonanAveraging3::StiefelElementMatrix(Qstar3); + auto A = kShonan.computeA(S); + bool computeEigenvectors = false; + Eigen::EigenSolver eigenSolver(Matrix(A), computeEigenvectors); + auto lambdas = eigenSolver.eigenvalues().real(); + double minEigenValue = lambdas(0); + for (int i = 1; i < lambdas.size(); i++) + minEigenValue = min(lambdas(i), minEigenValue); + + // Actual check + EXPECT_DOUBLES_EQUAL(0, lambda, 1e-11); + EXPECT_DOUBLES_EQUAL(0, minEigenValue, 1e-11); + + // Construct test descent direction (as minEigenVector is not predictable + // across platforms, being one from a basically flat 3d- subspace) + + // Check descent + Values initialQ4 = + ShonanAveraging3::LiftwithDescent(4, Qstar3, descentDirection); + EXPECT_LONGS_EQUAL(5, initialQ4.size()); + + // TODO(frank): uncomment this regression test: currently not repeatable + // across platforms. 
+ // Matrix expected(4, 4); + // expected << 0.0459224, -0.688689, -0.216922, 0.690321, // + // 0.92381, 0.191931, 0.255854, 0.21042, // + // -0.376669, 0.301589, 0.687953, 0.542111, // + // -0.0508588, 0.630804, -0.643587, 0.43046; + // EXPECT(assert_equal(SOn(expected), initialQ4.at(0), 1e-5)); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, initializeWithDescent) { + const Values randomRotations = kShonan.initializeRandomly(kRandomNumberGenerator); + Values random = ShonanAveraging3::LiftTo(3, randomRotations); + const Values Qstar3 = kShonan.tryOptimizingAt(3, random); + Vector minEigenVector; + double lambdaMin = kShonan.computeMinEigenValue(Qstar3, &minEigenVector); + Values initialQ4 = + kShonan.initializeWithDescent(4, Qstar3, minEigenVector, lambdaMin); + EXPECT_LONGS_EQUAL(5, initialQ4.size()); +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, run) { + auto initial = kShonan.initializeRandomly(kRandomNumberGenerator); + auto result = kShonan.run(initial, 5); + EXPECT_DOUBLES_EQUAL(0, kShonan.cost(result.first), 1e-3); + EXPECT_DOUBLES_EQUAL(-5.427688831332745e-07, result.second, + 1e-4); // Regression test +} + +/* ************************************************************************* */ +namespace klaus { +// The data in the file is the Colmap solution +const Rot3 wR0(0.9992281076190063, -0.02676080288219576, -0.024497002638379624, + -0.015064701622500615); +const Rot3 wR1(0.998239108728862, -0.049543805396343954, -0.03232420352077356, + -0.004386230477751116); +const Rot3 wR2(0.9925378735259738, -0.07993768981394891, 0.0825062894866454, + -0.04088089479075661); +} // namespace klaus + +TEST(ShonanAveraging3, runKlaus) { + using namespace klaus; + + // Initialize a Shonan instance without the Karcher mean + ShonanAveraging3::Parameters parameters; + parameters.setKarcherWeight(0); + + // Load 3 pose example taken in Klaus by Shicong + static const ShonanAveraging3 shonan = + fromExampleName("Klaus3.g2o", parameters); + + // Check nr poses + EXPECT_LONGS_EQUAL(3, shonan.nrUnknowns()); + + // Colmap uses the Y-down vision frame, and the first 3 rotations are close to + // identity. We check that below. Note tolerance is quite high. + static const Rot3 identity; + EXPECT(assert_equal(identity, wR0, 0.2)); + EXPECT(assert_equal(identity, wR1, 0.2)); + EXPECT(assert_equal(identity, wR2, 0.2)); + + // Get measurements + const Rot3 R01 = shonan.measured(0); + const Rot3 R12 = shonan.measured(1); + const Rot3 R02 = shonan.measured(2); + + // Regression test to make sure data did not change. + EXPECT(assert_equal(Rot3(0.9995433591728293, -0.022048798853273946, + -0.01796327847857683, 0.010210006313668573), + R01)); + + // Check Colmap solution agrees OK with relative rotation measurements. 
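+  // (Recall between(a, b) = a.inverse() * b, so R01 should be close to
+  // wR0.inverse() * wR1, and similarly for R12 and R02.)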
+ EXPECT(assert_equal(R01, wR0.between(wR1), 0.1)); + EXPECT(assert_equal(R12, wR1.between(wR2), 0.1)); + EXPECT(assert_equal(R02, wR0.between(wR2), 0.1)); + + // Run Shonan (with prior on first rotation) + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 5); + EXPECT_DOUBLES_EQUAL(0, shonan.cost(result.first), 1e-2); + EXPECT_DOUBLES_EQUAL(-9.2259161494467889e-05, result.second, + 1e-4); // Regression + + // Get Shonan solution in new frame R (R for result) + const Rot3 rR0 = result.first.at(0); + const Rot3 rR1 = result.first.at(1); + const Rot3 rR2 = result.first.at(2); + + // rR0 = rRw * wR0 => rRw = rR0 * wR0.inverse() + // rR1 = rRw * wR1 + // rR2 = rRw * wR2 + + const Rot3 rRw = rR0 * wR0.inverse(); + EXPECT(assert_equal(rRw * wR1, rR1, 0.1)) + EXPECT(assert_equal(rRw * wR2, rR2, 0.1)) +} + +/* ************************************************************************* */ +TEST(ShonanAveraging3, runKlausKarcher) { + using namespace klaus; + + // Load 3 pose example taken in Klaus by Shicong + static const ShonanAveraging3 shonan = fromExampleName("Klaus3.g2o"); + + // Run Shonan (with Karcher mean prior) + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 5); + EXPECT_DOUBLES_EQUAL(0, shonan.cost(result.first), 1e-2); + EXPECT_DOUBLES_EQUAL(-1.361402670507772e-05, result.second, + 1e-4); // Regression test + + // Get Shonan solution in new frame R (R for result) + const Rot3 rR0 = result.first.at(0); + const Rot3 rR1 = result.first.at(1); + const Rot3 rR2 = result.first.at(2); + + const Rot3 rRw = rR0 * wR0.inverse(); + EXPECT(assert_equal(rRw * wR1, rR1, 0.1)) + EXPECT(assert_equal(rRw * wR2, rR2, 0.1)) +} + +/* ************************************************************************* */ +TEST(ShonanAveraging2, noisyToyGraph) { + // Load 2D toy example + auto lmParams = LevenbergMarquardtParams::CeresDefaults(); + // lmParams.setVerbosityLM("SUMMARY"); + string g2oFile = findExampleDataFile("noisyToyGraph.txt"); + ShonanAveraging2::Parameters parameters(lmParams); + auto measurements = parseMeasurements(g2oFile); + ShonanAveraging2 shonan(measurements, parameters); + EXPECT_LONGS_EQUAL(4, shonan.nrUnknowns()); + + // Check graph building + NonlinearFactorGraph graph = shonan.buildGraphAt(2); + EXPECT_LONGS_EQUAL(6, graph.size()); + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 2); + EXPECT_DOUBLES_EQUAL(0.0008211, shonan.cost(result.first), 1e-6); + EXPECT_DOUBLES_EQUAL(0, result.second, 1e-10); // certificate! 
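+  // The certificate above is the minimum eigenvalue returned by run(); a
+  // (numerically) non-negative value certifies global optimality of the solution.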
+} + +/* ************************************************************************* */ +TEST(ShonanAveraging2, noisyToyGraphWithHuber) { + // Load 2D toy example + auto lmParams = LevenbergMarquardtParams::CeresDefaults(); + string g2oFile = findExampleDataFile("noisyToyGraph.txt"); + ShonanAveraging2::Parameters parameters(lmParams); + auto measurements = parseMeasurements(g2oFile); + parameters.setUseHuber(true); + parameters.setCertifyOptimality(false); + + string parameters_print = + " ShonanAveragingParameters: \n alpha: 0\n beta: 1\n gamma: 0\n " + "useHuber: 1\n"; + assert_print_equal(parameters_print, parameters); + + ShonanAveraging2 shonan(measurements, parameters); + EXPECT_LONGS_EQUAL(4, shonan.nrUnknowns()); + + // Check graph building + NonlinearFactorGraph graph = shonan.buildGraphAt(2); + EXPECT_LONGS_EQUAL(6, graph.size()); + + // test that each factor is actually robust + for (size_t i=0; i<=4; i++) { // note: last is the Gauge factor and is not robust + const auto &robust = boost::dynamic_pointer_cast( + boost::dynamic_pointer_cast(graph[i])->noiseModel()); + EXPECT(robust); // we expect the factors to be use a robust noise model (in particular, Huber) + } + + // test result + auto initial = shonan.initializeRandomly(kRandomNumberGenerator); + auto result = shonan.run(initial, 2,2); + EXPECT_DOUBLES_EQUAL(0.0008211, shonan.cost(result.first), 1e-6); + EXPECT_DOUBLES_EQUAL(0, result.second, 1e-10); // certificate! +} + +/* ************************************************************************* */ +// Test alpha/beta/gamma prior weighting. +TEST(ShonanAveraging3, PriorWeights) { + auto lmParams = LevenbergMarquardtParams::CeresDefaults(); + ShonanAveraging3::Parameters params(lmParams); + EXPECT_DOUBLES_EQUAL(0, params.alpha, 1e-9); + EXPECT_DOUBLES_EQUAL(1, params.beta, 1e-9); + EXPECT_DOUBLES_EQUAL(0, params.gamma, 1e-9); + double alpha = 100.0, beta = 200.0, gamma = 300.0; + params.setAnchorWeight(alpha); + params.setKarcherWeight(beta); + params.setGaugesWeight(gamma); + EXPECT_DOUBLES_EQUAL(alpha, params.alpha, 1e-9); + EXPECT_DOUBLES_EQUAL(beta, params.beta, 1e-9); + EXPECT_DOUBLES_EQUAL(gamma, params.gamma, 1e-9); + params.setKarcherWeight(0); + static const ShonanAveraging3 shonan = fromExampleName("Klaus3.g2o", params); + for (auto i : {0,1,2}) { + const auto& m = shonan.measurement(i); + auto isotropic = + boost::static_pointer_cast(m.noiseModel()); + CHECK(isotropic != nullptr); + EXPECT_LONGS_EQUAL(3, isotropic->dim()); + EXPECT_DOUBLES_EQUAL(0.2, isotropic->sigma(), 1e-9); + } + auto I = genericValue(Rot3()); + Values initial{{0, I}, {1, I}, {2, I}}; + EXPECT_DOUBLES_EQUAL(3.0756, shonan.cost(initial), 1e-4); + auto result = shonan.run(initial, 3, 3); + EXPECT_DOUBLES_EQUAL(0.0015, shonan.cost(result.first), 1e-4); +} +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/sfm/tests/testShonanFactor.cpp b/gtsam/sfm/tests/testShonanFactor.cpp new file mode 100644 index 000000000..ef94c5cf4 --- /dev/null +++ b/gtsam/sfm/tests/testShonanFactor.cpp @@ -0,0 +1,121 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * testFrobeniusFactor.cpp + * + * @file testFrobeniusFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Check evaluateError for various Frobenius norm + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +//****************************************************************************** +namespace so3 { +SO3 id; +Vector3 v1 = (Vector(3) << 0.1, 0, 0).finished(); +SO3 R1 = SO3::Expmap(v1); +Vector3 v2 = (Vector(3) << 0.01, 0.02, 0.03).finished(); +SO3 R2 = SO3::Expmap(v2); +SO3 R12 = R1.between(R2); +} // namespace so3 + +//****************************************************************************** +namespace submanifold { +SO4 id; +Vector6 v1 = (Vector(6) << 0, 0, 0, 0.1, 0, 0).finished(); +SO3 R1 = SO3::Expmap(v1.tail<3>()); +SO4 Q1 = SO4::Expmap(v1); +Vector6 v2 = (Vector(6) << 0, 0, 0, 0.01, 0.02, 0.03).finished(); +SO3 R2 = SO3::Expmap(v2.tail<3>()); +SO4 Q2 = SO4::Expmap(v2); +SO3 R12 = R1.between(R2); +} // namespace submanifold + +/* ************************************************************************* */ +TEST(ShonanFactor3, evaluateError) { + auto model = noiseModel::Isotropic::Sigma(3, 1.2); + for (const size_t p : {5, 4, 3}) { + Matrix M = Matrix::Identity(p, p); + M.topLeftCorner(3, 3) = submanifold::R1.matrix(); + SOn Q1(M); + M.topLeftCorner(3, 3) = submanifold::R2.matrix(); + SOn Q2(M); + auto factor = ShonanFactor3(1, 2, Rot3(::so3::R12.matrix()), p, model); + Matrix H1, H2; + factor.evaluateError(Q1, Q2, H1, H2); + + // Test derivatives + Values values; + values.insert(1, Q1); + values.insert(2, Q2); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); + } +} + +/* ************************************************************************* */ +TEST(ShonanFactor3, equivalenceToSO3) { + using namespace ::submanifold; + auto R12 = ::so3::R12.retract(Vector3(0.1, 0.2, -0.1)); + auto model = noiseModel::Isotropic::Sigma(6, 1.2); // wrong dimension + auto factor3 = FrobeniusBetweenFactor(1, 2, R12, model); + auto factor4 = ShonanFactor3(1, 2, Rot3(R12.matrix()), 4, model); + const Matrix3 E3(factor3.evaluateError(R1, R2).data()); + const Matrix43 E4( + factor4.evaluateError(SOn(Q1.matrix()), SOn(Q2.matrix())).data()); + EXPECT(assert_equal((Matrix)E4.topLeftCorner<3, 3>(), E3, 1e-9)); + EXPECT(assert_equal((Matrix)E4.row(3), Matrix13::Zero(), 1e-9)); +} + +/* ************************************************************************* */ +TEST(ShonanFactor2, evaluateError) { + auto model = noiseModel::Isotropic::Sigma(1, 1.2); + const Rot2 R1(0.3), R2(0.5), R12(0.2); + for (const size_t p : {5, 4, 3, 2}) { + Matrix M = Matrix::Identity(p, p); + M.topLeftCorner(2, 2) = R1.matrix(); + SOn Q1(M); + M.topLeftCorner(2, 2) = R2.matrix(); + SOn Q2(M); + auto factor = ShonanFactor2(1, 2, R12, p, model); + Matrix H1, H2; + factor.evaluateError(Q1, Q2, H1, H2); + + // Test derivatives + Values values; + values.insert(1, Q1); + values.insert(2, Q2); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); + } +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git 
a/gtsam/sfm/tests/testShonanGaugeFactor.cpp b/gtsam/sfm/tests/testShonanGaugeFactor.cpp new file mode 100644 index 000000000..344394b9c --- /dev/null +++ b/gtsam/sfm/tests/testShonanGaugeFactor.cpp @@ -0,0 +1,106 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testShonanGaugeFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Unit tests for ShonanGaugeFactor class + */ + +#include + +#include + +#include +#include + +using namespace std; +using namespace gtsam; + +/* ************************************************************************* */ +// Check dimensions of all low-dim GaugeFactors +TEST(ShonanAveraging, GaugeFactorLows) { + constexpr Key key(123); + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 2, 2).dim()); + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 3, 2).dim()); + EXPECT_LONGS_EQUAL(1, ShonanGaugeFactor(key, 4, 2).dim()); // SO(4-2) -> 1 + EXPECT_LONGS_EQUAL(3, ShonanGaugeFactor(key, 5, 2).dim()); // SO(5-2) -> 3 + + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 3, 3).dim()); + EXPECT_LONGS_EQUAL(0, ShonanGaugeFactor(key, 4, 3).dim()); + EXPECT_LONGS_EQUAL(1, ShonanGaugeFactor(key, 5, 3).dim()); // SO(5-3) -> 1 +} + +/* ************************************************************************* */ +// Check ShonanGaugeFactor for SO(6) +TEST(ShonanAveraging, GaugeFactorSO6) { + constexpr Key key(666); + ShonanGaugeFactor factor(key, 6); // For SO(6) + Matrix A = Matrix::Zero(3, 15); // SO(6-3) = SO(3) == 3-dimensional gauge + A(0, 0) = 1; // first 2 of 6^th skew column, which has 5 non-zero entries + A(1, 1) = 1; // then we skip 3 tangent dimensions + A(2, 5) = 1; // first of 5th skew colum, which has 4 non-zero entries above + // diagonal. + JacobianFactor linearized(key, A, Vector::Zero(3)); + Values values; + EXPECT_LONGS_EQUAL(3, factor.dim()); + EXPECT(assert_equal(linearized, *boost::dynamic_pointer_cast( + factor.linearize(values)))); +} + +/* ************************************************************************* */ +// Check ShonanGaugeFactor for SO(7) +TEST(ShonanAveraging, GaugeFactorSO7) { + constexpr Key key(777); + ShonanGaugeFactor factor(key, 7); // For SO(7) + Matrix A = Matrix::Zero(6, 21); // SO(7-3) = SO(4) == 6-dimensional gauge + A(0, 0) = 1; // first 3 of 7^th skew column, which has 6 non-zero entries + A(1, 1) = 1; + A(2, 2) = 1; // then we skip 3 tangent dimensions + A(3, 6) = 1; // first 2 of 6^th skew column, which has 5 non-zero entries + A(4, 7) = 1; // then we skip 3 tangent dimensions + A(5, 11) = 1; // first of 5th skew colum, which has 4 non-zero entries above + // diagonal. 
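+  // (Equivalently: the constructor loop places identity blocks of sizes 3, 2, 1 at
+  // column offsets 0, 6, and 11, which is exactly the pattern set above.)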
+ JacobianFactor linearized(key, A, Vector::Zero(6)); + Values values; + EXPECT_LONGS_EQUAL(6, factor.dim()); + EXPECT(assert_equal(linearized, *boost::dynamic_pointer_cast( + factor.linearize(values)))); +} + +/* ************************************************************************* */ +// Check ShonanGaugeFactor for SO(6), with base SO(2) +TEST(ShonanAveraging, GaugeFactorSO6over2) { + constexpr Key key(602); + double gamma = 4; + ShonanGaugeFactor factor(key, 6, 2, gamma); // For SO(6), base SO(2) + Matrix A = Matrix::Zero(6, 15); // SO(6-2) = SO(4) == 6-dimensional gauge + A(0, 0) = 2; // first 3 of 6^th skew column, which has 5 non-zero entries + A(1, 1) = 2; + A(2, 2) = 2; // then we skip only 2 tangent dimensions + A(3, 5) = 2; // first 2 of 5^th skew column, which has 4 non-zero entries + A(4, 6) = 2; // then we skip only 2 tangent dimensions + A(5, 9) = 2; // first of 4th skew colum, which has 3 non-zero entries above + // diagonal. + JacobianFactor linearized(key, A, Vector::Zero(6)); + Values values; + EXPECT_LONGS_EQUAL(6, factor.dim()); + EXPECT(assert_equal(linearized, *boost::dynamic_pointer_cast( + factor.linearize(values)))); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/sfm/tests/testTranslationFactor.cpp b/gtsam/sfm/tests/testTranslationFactor.cpp new file mode 100644 index 000000000..3ff76ac5c --- /dev/null +++ b/gtsam/sfm/tests/testTranslationFactor.cpp @@ -0,0 +1,106 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testTranslationFactor.cpp + * @brief Unit tests for TranslationFactor Class + * @author Frank dellaert + * @date March 2020 + */ + +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +// Create a noise model for the chordal error +static SharedNoiseModel model(noiseModel::Isotropic::Sigma(3, 0.05)); + +// Keys are deliberately *not* in sorted order to test that case. 
+static const Key kKey1(2), kKey2(1); +static const Unit3 kMeasured(1, 0, 0); + +/* ************************************************************************* */ +TEST(TranslationFactor, Constructor) { + TranslationFactor factor(kKey1, kKey2, kMeasured, model); +} + +/* ************************************************************************* */ +TEST(TranslationFactor, ZeroError) { + // Create a factor + TranslationFactor factor(kKey1, kKey2, kMeasured, model); + + // Set the linearization + Point3 T1(1, 0, 0), T2(2, 0, 0); + + // Use the factor to calculate the error + Vector actualError(factor.evaluateError(T1, T2)); + + // Verify we get the expected error + Vector expected = (Vector3() << 0, 0, 0).finished(); + EXPECT(assert_equal(expected, actualError, 1e-9)); +} + +/* ************************************************************************* */ +TEST(TranslationFactor, NonZeroError) { + // create a factor + TranslationFactor factor(kKey1, kKey2, kMeasured, model); + + // set the linearization + Point3 T1(0, 1, 1), T2(0, 2, 2); + + // use the factor to calculate the error + Vector actualError(factor.evaluateError(T1, T2)); + + // verify we get the expected error + Vector expected = (Vector3() << -1, 1 / sqrt(2), 1 / sqrt(2)).finished(); + EXPECT(assert_equal(expected, actualError, 1e-9)); +} + +/* ************************************************************************* */ +Vector factorError(const Point3 &T1, const Point3 &T2, + const TranslationFactor &factor) { + return factor.evaluateError(T1, T2); +} + +TEST(TranslationFactor, Jacobian) { + // Create a factor + TranslationFactor factor(kKey1, kKey2, kMeasured, model); + + // Set the linearization + Point3 T1(1, 0, 0), T2(2, 0, 0); + + // Use the factor to calculate the Jacobians + Matrix H1Actual, H2Actual; + factor.evaluateError(T1, T2, H1Actual, H2Actual); + + // Use numerical derivatives to calculate the Jacobians + Matrix H1Expected, H2Expected; + H1Expected = numericalDerivative11( + boost::bind(&factorError, _1, T2, factor), T1); + H2Expected = numericalDerivative11( + boost::bind(&factorError, T1, _1, factor), T2); + + // Verify the Jacobians are correct + EXPECT(assert_equal(H1Expected, H1Actual, 1e-9)); + EXPECT(assert_equal(H2Expected, H2Actual, 1e-9)); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/slam/AntiFactor.h b/gtsam/slam/AntiFactor.h index 9e4f7db5a..277080b6b 100644 --- a/gtsam/slam/AntiFactor.h +++ b/gtsam/slam/AntiFactor.h @@ -52,20 +52,20 @@ namespace gtsam { virtual ~AntiFactor() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /** implement functions needed for Testable */ /** print */ - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "AntiFactor version of:" << std::endl; factor_->print(s, keyFormatter); } /** equals */ - virtual bool equals(const NonlinearFactor& expected, double tol=1e-9) const { + bool equals(const NonlinearFactor& expected, double tol=1e-9) const override { const This *e = dynamic_cast 
(&expected); return e != nullptr && Base::equals(*e, tol) && this->factor_->equals(*e->factor_, tol); } @@ -77,16 +77,16 @@ namespace gtsam { * For the AntiFactor, this will have the same magnitude of the original factor, * but the opposite sign. */ - double error(const Values& c) const { return -factor_->error(c); } + double error(const Values& c) const override { return -factor_->error(c); } /** get the dimension of the factor (same as the original factor) */ - size_t dim() const { return factor_->dim(); } + size_t dim() const override { return factor_->dim(); } /** * Checks whether this factor should be used based on a set of values. * The AntiFactor will have the same 'active' profile as the original factor. */ - bool active(const Values& c) const { return factor_->active(c); } + bool active(const Values& c) const override { return factor_->active(c); } /** * Linearize to a GaussianFactor. The AntiFactor always returns a Hessian Factor @@ -94,7 +94,7 @@ namespace gtsam { * returns a Jacobian instead of a full Hessian), but with the opposite sign. This * effectively cancels the effect of the original factor on the factor graph. */ - boost::shared_ptr linearize(const Values& c) const { + boost::shared_ptr linearize(const Values& c) const override { // Generate the linearized factor from the contained nonlinear factor GaussianFactor::shared_ptr gaussianFactor = factor_->linearize(c); diff --git a/gtsam/slam/BetweenFactor.h b/gtsam/slam/BetweenFactor.h index 23138b9e6..a594e95d5 100644 --- a/gtsam/slam/BetweenFactor.h +++ b/gtsam/slam/BetweenFactor.h @@ -21,6 +21,14 @@ #include #include +#ifdef _WIN32 +#define BETWEENFACTOR_VISIBILITY +#else +// This will trigger a LNKxxxx on MSVC, so disable for MSVC build +// Please refer to https://github.com/borglab/gtsam/blob/develop/Using-GTSAM-EXPORT.md +#define BETWEENFACTOR_VISIBILITY GTSAM_EXPORT +#endif + namespace gtsam { /** @@ -63,14 +71,16 @@ namespace gtsam { virtual ~BetweenFactor() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } - /** implement functions needed for Testable */ + /// @} + /// @name Testable + /// @{ - /** print */ - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + /// print with optional string + void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "BetweenFactor(" << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << ")\n"; @@ -78,17 +88,19 @@ namespace gtsam { this->noiseModel_->print(" noise model: "); } - /** equals */ - virtual bool equals(const NonlinearFactor& expected, double tol=1e-9) const { + /// assert equality up to a tolerance + bool equals(const NonlinearFactor& expected, double tol=1e-9) const override { const This *e = dynamic_cast (&expected); return e != nullptr && Base::equals(*e, tol) && traits::Equals(this->measured_, e->measured_, tol); } - /** implement functions needed to derive from Factor */ + /// @} + /// @name NoiseModelFactor2 methods + /// @{ - /** vector of errors */ - Vector evaluateError(const T& p1, const T& p2, boost::optional H1 = - boost::none, boost::optional H2 = boost::none) const { + /// evaluate error, returns vector of errors size of tangent space + Vector evaluateError(const T& p1, const T& p2, boost::optional H1 = + boost::none, 
boost::optional H2 = boost::none) const override { T hx = traits::Between(p1, p2, H1, H2); // h(x) // manifold equivalent of h(x)-z -> log(z,h(x)) #ifdef SLOW_BUT_CORRECT_BETWEENFACTOR @@ -102,15 +114,15 @@ namespace gtsam { #endif } - /** return the measured */ + /// @} + /// @name Standard interface + /// @{ + + /// return the measurement const VALUE& measured() const { return measured_; } - - /** number of variables attached to this factor */ - std::size_t size() const { - return 2; - } + /// @} private: @@ -126,7 +138,7 @@ namespace gtsam { // Alignment, see https://eigen.tuxfamily.org/dox/group__TopicStructHavingEigenMembers.html enum { NeedsToAlign = (sizeof(VALUE) % 16) == 0 }; public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) + GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) }; // \class BetweenFactor /// traits diff --git a/gtsam/slam/BoundingConstraint.h b/gtsam/slam/BoundingConstraint.h index f6561807e..43cc6d292 100644 --- a/gtsam/slam/BoundingConstraint.h +++ b/gtsam/slam/BoundingConstraint.h @@ -58,14 +58,14 @@ struct BoundingConstraint1: public NoiseModelFactor1 { boost::none) const = 0; /** active when constraint *NOT* met */ - bool active(const Values& c) const { + bool active(const Values& c) const override { // note: still active at equality to avoid zigzagging double x = value(c.at(this->key())); return (isGreaterThan_) ? x <= threshold_ : x >= threshold_; } Vector evaluateError(const X& x, boost::optional H = - boost::none) const { + boost::none) const override { Matrix D; double error = value(x, D) - threshold_; if (H) { @@ -126,7 +126,7 @@ struct BoundingConstraint2: public NoiseModelFactor2 { boost::optional H2 = boost::none) const = 0; /** active when constraint *NOT* met */ - bool active(const Values& c) const { + bool active(const Values& c) const override { // note: still active at equality to avoid zigzagging double x = value(c.at(this->key1()), c.at(this->key2())); return (isGreaterThan_) ? 
x <= threshold_ : x >= threshold_; @@ -134,7 +134,7 @@ struct BoundingConstraint2: public NoiseModelFactor2 { Vector evaluateError(const X1& x1, const X2& x2, boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const { + boost::optional H2 = boost::none) const override { Matrix D1, D2; double error = value(x1, x2, D1, D2) - threshold_; if (H1) { diff --git a/gtsam/slam/EssentialMatrixConstraint.h b/gtsam/slam/EssentialMatrixConstraint.h index 179200fe1..d21ead31f 100644 --- a/gtsam/slam/EssentialMatrixConstraint.h +++ b/gtsam/slam/EssentialMatrixConstraint.h @@ -61,7 +61,7 @@ class GTSAM_EXPORT EssentialMatrixConstraint: public NoiseModelFactor2( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } @@ -69,29 +69,24 @@ class GTSAM_EXPORT EssentialMatrixConstraint: public NoiseModelFactor2 Hp1 = boost::none, // - boost::optional Hp2 = boost::none) const; + boost::optional Hp2 = boost::none) const override; /** return the measured */ const EssentialMatrix& measured() const { return measuredE_; } - /** number of variables attached to this factor */ - std::size_t size() const { - return 2; - } - private: /** Serialization function */ @@ -105,7 +100,7 @@ class GTSAM_EXPORT EssentialMatrixConstraint: public NoiseModelFactor2 { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /// print - virtual void print(const std::string& s = "", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "", + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s); std::cout << " EssentialMatrixFactor with measurements\n (" << vA_.transpose() << ")' and (" << vB_.transpose() << ")'" @@ -74,14 +74,14 @@ class EssentialMatrixFactor: public NoiseModelFactor1 { /// vector of errors returns 1D vector Vector evaluateError(const EssentialMatrix& E, boost::optional H = - boost::none) const { + boost::none) const override { Vector error(1); error << E.error(vA_, vB_, H); return error; } public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; /** @@ -131,14 +131,14 @@ class EssentialMatrixFactor2: public NoiseModelFactor2 } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /// print - virtual void print(const std::string& s = "", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "", + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s); std::cout << " EssentialMatrixFactor2 with measurements\n (" << dP1_.transpose() << ")' and (" << pn_.transpose() @@ -152,7 +152,7 @@ class EssentialMatrixFactor2: public NoiseModelFactor2 */ Vector evaluateError(const EssentialMatrix& E, const double& d, boost::optional DE = boost::none, boost::optional Dd = - boost::none) const { + boost::none) const override { // We have point x,y in image 1 // Given a depth Z, the corresponding 3D point P1 = Z*(x,y,1) = (x,y,1)/d @@ -201,7 +201,7 @@ class EssentialMatrixFactor2: public NoiseModelFactor2 } public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; // EssentialMatrixFactor2 @@ -250,14 +250,14 @@ class 
EssentialMatrixFactor3: public EssentialMatrixFactor2 { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /// print - virtual void print(const std::string& s = "", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "", + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s); std::cout << " EssentialMatrixFactor3 with rotation " << cRb_ << std::endl; } @@ -269,7 +269,7 @@ class EssentialMatrixFactor3: public EssentialMatrixFactor2 { */ Vector evaluateError(const EssentialMatrix& E, const double& d, boost::optional DE = boost::none, boost::optional Dd = - boost::none) const { + boost::none) const override { if (!DE) { // Convert E from body to camera frame EssentialMatrix cameraE = cRb_ * E; @@ -286,7 +286,7 @@ class EssentialMatrixFactor3: public EssentialMatrixFactor2 { } public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; // EssentialMatrixFactor3 diff --git a/gtsam/slam/FrobeniusFactor.cpp b/gtsam/slam/FrobeniusFactor.cpp new file mode 100644 index 000000000..8c0baaf38 --- /dev/null +++ b/gtsam/slam/FrobeniusFactor.cpp @@ -0,0 +1,70 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file FrobeniusFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Various factors that minimize some Frobenius norm + */ + +#include + +using namespace std; + +namespace gtsam { + +//****************************************************************************** +SharedNoiseModel +ConvertNoiseModel(const SharedNoiseModel &model, size_t d, bool defaultToUnit) { + double sigma = 1.0; + + if (model != nullptr) { + const auto &robust = boost::dynamic_pointer_cast(model); + Vector sigmas; + if (robust) { + sigmas = robust->noise()->sigmas(); + } else { + sigmas = model->sigmas(); + } + + size_t n = sigmas.size(); + if (n == 1) { + sigma = sigmas(0); // Rot2 + goto exit; + } + else if (n == 3 || n == 6) { + sigma = sigmas(2); // Pose2, Rot3, or Pose3 + if (sigmas(0) != sigma || sigmas(1) != sigma) { + if (!defaultToUnit) { + throw std::runtime_error("Can only convert isotropic rotation noise"); + } + } + goto exit; + } + if (!defaultToUnit) { + throw std::runtime_error("Can only convert Pose2/Pose3 noise models"); + } + } + exit: + auto isoModel = noiseModel::Isotropic::Sigma(d, sigma); + const auto &robust = boost::dynamic_pointer_cast(model); + if (robust) { + return noiseModel::Robust::Create( + noiseModel::mEstimator::Huber::Create(1.345), isoModel); + } else { + return isoModel; + } +} + +//****************************************************************************** + +} // namespace gtsam diff --git a/gtsam/slam/FrobeniusFactor.h b/gtsam/slam/FrobeniusFactor.h new file mode 100644 index 000000000..f17a9e421 --- /dev/null +++ b/gtsam/slam/FrobeniusFactor.h @@ -0,0 +1,160 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research 
Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file FrobeniusFactor.h + * @date March 2019 + * @author Frank Dellaert + * @brief Various factors that minimize some Frobenius norm + */ + +#pragma once + +#include +#include +#include +#include + +namespace gtsam { + +/** + * When creating (any) FrobeniusFactor we can convert a Rot/Pose BetweenFactor + * noise model into a n-dimensional isotropic noise + * model used to weight the Frobenius norm. + * If the noise model passed is null we return a n-dimensional isotropic noise + * model with sigma=1.0. + * If not, we we check if the d-dimensional noise model on rotations is + * isotropic. If it is, we extend to 'n' dimensions, otherwise we throw an + * error. + * If the noise model is a robust error model, we use the sigmas of the + * underlying noise model. + * + * If defaultToUnit == false throws an exception on unexepcted input. + */ +GTSAM_EXPORT SharedNoiseModel +ConvertNoiseModel(const SharedNoiseModel &model, size_t n, + bool defaultToUnit = true); + +/** + * FrobeniusPrior calculates the Frobenius norm between a given matrix and an + * element of SO(3) or SO(4). + */ +template +class GTSAM_EXPORT FrobeniusPrior : public NoiseModelFactor1 { + enum { Dim = Rot::VectorN2::RowsAtCompileTime }; + using MatrixNN = typename Rot::MatrixNN; + Eigen::Matrix vecM_; ///< vectorized matrix to approximate + + public: + /// Constructor + FrobeniusPrior(Key j, const MatrixNN& M, + const SharedNoiseModel& model = nullptr) + : NoiseModelFactor1(ConvertNoiseModel(model, Dim), j) { + vecM_ << Eigen::Map(M.data(), Dim, 1); + } + + /// Error is just Frobenius norm between Rot element and vectorized matrix M. + Vector evaluateError(const Rot& R, + boost::optional H = boost::none) const override { + return R.vec(H) - vecM_; // Jacobian is computed only when needed. + } +}; + +/** + * FrobeniusFactor calculates the Frobenius norm between rotation matrices. + * The template argument can be any fixed-size SO. + */ +template +class GTSAM_EXPORT FrobeniusFactor : public NoiseModelFactor2 { + enum { Dim = Rot::VectorN2::RowsAtCompileTime }; + + public: + /// Constructor + FrobeniusFactor(Key j1, Key j2, const SharedNoiseModel& model = nullptr) + : NoiseModelFactor2(ConvertNoiseModel(model, Dim), j1, + j2) {} + + /// Error is just Frobenius norm between rotation matrices. + Vector evaluateError(const Rot& R1, const Rot& R2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const override { + Vector error = R2.vec(H2) - R1.vec(H1); + if (H1) *H1 = -*H1; + return error; + } +}; + +/** + * FrobeniusBetweenFactor is a BetweenFactor that evaluates the Frobenius norm + * of the rotation error between measured and predicted (rather than the + * Logmap of the error). This factor is only defined for fixed-dimension types, + * and in fact only SO3 and SO4 really work, as we need SO::AdjointMap. 
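 *
 * A minimal usage sketch (illustrative only; the graph, keys, and measured
 * rotation R12 below are assumptions, not part of this file):
 *
 *   NonlinearFactorGraph graph;
 *   SO3 R12;  // identity here, stands in for the measured rotation
 *   graph.emplace_shared<FrobeniusBetweenFactor<SO3>>(1, 2, R12);
 *
 * With a null noise model, ConvertNoiseModel (declared above) supplies a unit
 * isotropic model of the vectorized dimension.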
+ */ +template +class GTSAM_EXPORT FrobeniusBetweenFactor : public NoiseModelFactor2 { + Rot R12_; ///< measured rotation between R1 and R2 + Eigen::Matrix + R2hat_H_R1_; ///< fixed derivative of R2hat wrpt R1 + enum { Dim = Rot::VectorN2::RowsAtCompileTime }; + + public: + /// @name Constructor + /// @{ + + /// Construct from two keys and measured rotation + FrobeniusBetweenFactor(Key j1, Key j2, const Rot& R12, + const SharedNoiseModel& model = nullptr) + : NoiseModelFactor2( + ConvertNoiseModel(model, Dim), j1, j2), + R12_(R12), + R2hat_H_R1_(R12.inverse().AdjointMap()) {} + + /// @} + /// @name Testable + /// @{ + + /// print with optional string + void + print(const std::string &s, + const KeyFormatter &keyFormatter = DefaultKeyFormatter) const override { + std::cout << s << "FrobeniusBetweenFactor<" << demangle(typeid(Rot).name()) + << ">(" << keyFormatter(this->key1()) << "," + << keyFormatter(this->key2()) << ")\n"; + traits::Print(R12_, " R12: "); + this->noiseModel_->print(" noise model: "); + } + + /// assert equality up to a tolerance + bool equals(const NonlinearFactor &expected, + double tol = 1e-9) const override { + auto e = dynamic_cast(&expected); + return e != nullptr && NoiseModelFactor2::equals(*e, tol) && + traits::Equals(this->R12_, e->R12_, tol); + } + + /// @} + /// @name NoiseModelFactor2 methods + /// @{ + + /// Error is Frobenius norm between R1*R12 and R2. + Vector evaluateError(const Rot& R1, const Rot& R2, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const override { + const Rot R2hat = R1.compose(R12_); + Eigen::Matrix vec_H_R2hat; + Vector error = R2.vec(H2) - R2hat.vec(H1 ? &vec_H_R2hat : nullptr); + if (H1) *H1 = -vec_H_R2hat * R2hat_H_R1_; + return error; + } + /// @} +}; + +} // namespace gtsam diff --git a/gtsam/slam/GeneralSFMFactor.h b/gtsam/slam/GeneralSFMFactor.h index 0eb489f35..c9639d4d5 100644 --- a/gtsam/slam/GeneralSFMFactor.h +++ b/gtsam/slam/GeneralSFMFactor.h @@ -86,17 +86,20 @@ class GeneralSFMFactor: public NoiseModelFactor2 { * @param cameraKey is the index of the camera * @param landmarkKey is the index of the landmark */ - GeneralSFMFactor(const Point2& measured, const SharedNoiseModel& model, Key cameraKey, Key landmarkKey) : - Base(model, cameraKey, landmarkKey), measured_(measured) {} + GeneralSFMFactor(const Point2& measured, const SharedNoiseModel& model, + Key cameraKey, Key landmarkKey) + : Base(model, cameraKey, landmarkKey), measured_(measured) {} - GeneralSFMFactor():measured_(0.0,0.0) {} ///< default constructor - GeneralSFMFactor(const Point2 & p):measured_(p) {} ///< constructor that takes a Point2 - GeneralSFMFactor(double x, double y):measured_(x,y) {} ///< constructor that takes doubles x,y to make a Point2 + GeneralSFMFactor() : measured_(0.0, 0.0) {} ///< default constructor + ///< constructor that takes a Point2 + GeneralSFMFactor(const Point2& p) : measured_(p) {} + ///< constructor that takes doubles x,y to make a Point2 + GeneralSFMFactor(double x, double y) : measured_(x, y) {} virtual ~GeneralSFMFactor() {} ///< destructor /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this)));} @@ -105,7 +108,7 @@ class GeneralSFMFactor: public NoiseModelFactor2 { * @param s optional string naming the factor * @param keyFormatter optional formatter for printing out Symbols */ - void print(const std::string& s 
= "SFMFactor", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "SFMFactor", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s, keyFormatter); traits::Print(measured_, s + ".z"); } @@ -113,27 +116,27 @@ class GeneralSFMFactor: public NoiseModelFactor2 { /** * equals */ - bool equals(const NonlinearFactor &p, double tol = 1e-9) const { + bool equals(const NonlinearFactor &p, double tol = 1e-9) const override { const This* e = dynamic_cast(&p); return e && Base::equals(p, tol) && traits::Equals(this->measured_, e->measured_, tol); } /** h(x)-z */ Vector evaluateError(const CAMERA& camera, const LANDMARK& point, - boost::optional H1=boost::none, boost::optional H2=boost::none) const { + boost::optional H1=boost::none, boost::optional H2=boost::none) const override { try { return camera.project2(point,H1,H2) - measured_; } catch( CheiralityException& e) { if (H1) *H1 = JacobianC::Zero(); if (H2) *H2 = JacobianL::Zero(); - // TODO warn if verbose output asked for + //TODO Print the exception via logging return Z_2x1; } } /// Linearize using fixed-size matrices - boost::shared_ptr linearize(const Values& values) const { + boost::shared_ptr linearize(const Values& values) const override { // Only linearize if the factor is active if (!this->active(values)) return boost::shared_ptr(); @@ -149,7 +152,7 @@ class GeneralSFMFactor: public NoiseModelFactor2 { H1.setZero(); H2.setZero(); b.setZero(); - // TODO warn if verbose output asked for + //TODO Print the exception via logging } // Whiten the system if needed @@ -230,7 +233,7 @@ class GeneralSFMFactor2: public NoiseModelFactor3 { virtual ~GeneralSFMFactor2() {} ///< destructor /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this)));} @@ -239,7 +242,7 @@ class GeneralSFMFactor2: public NoiseModelFactor3 { * @param s optional string naming the factor * @param keyFormatter optional formatter useful for printing Symbols */ - void print(const std::string& s = "SFMFactor2", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "SFMFactor2", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s, keyFormatter); traits::Print(measured_, s + ".z"); } @@ -247,7 +250,7 @@ class GeneralSFMFactor2: public NoiseModelFactor3 { /** * equals */ - bool equals(const NonlinearFactor &p, double tol = 1e-9) const { + bool equals(const NonlinearFactor &p, double tol = 1e-9) const override { const This* e = dynamic_cast(&p); return e && Base::equals(p, tol) && traits::Equals(this->measured_, e->measured_, tol); } @@ -256,7 +259,7 @@ class GeneralSFMFactor2: public NoiseModelFactor3 { Vector evaluateError(const Pose3& pose3, const Point3& point, const CALIBRATION &calib, boost::optional H1=boost::none, boost::optional H2=boost::none, - boost::optional H3=boost::none) const + boost::optional H3=boost::none) const override { try { Camera camera(pose3,calib); diff --git a/gtsam/slam/InitializePose.h b/gtsam/slam/InitializePose.h new file mode 100644 index 000000000..f9b99e47e --- /dev/null +++ b/gtsam/slam/InitializePose.h @@ -0,0 +1,99 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All 
Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file InitializePose.h + * @author Frank Dellaert + * @date August, 2020 + * @brief common code between lago.* (2D) and InitializePose3.* (3D) + */ + +#pragma once + +#include +#include +#include +#include +#include + +namespace gtsam { +namespace initialize { + +static constexpr Key kAnchorKey = 99999999; + +/** + * Select the subgraph of betweenFactors and transforms priors into between + * wrt a fictitious node + */ +template +static NonlinearFactorGraph buildPoseGraph(const NonlinearFactorGraph& graph) { + NonlinearFactorGraph poseGraph; + + for (const auto& factor : graph) { + // recast to a between on Pose + if (auto between = + boost::dynamic_pointer_cast >(factor)) + poseGraph.add(between); + + // recast PriorFactor to BetweenFactor + if (auto prior = boost::dynamic_pointer_cast >(factor)) + poseGraph.emplace_shared >( + kAnchorKey, prior->keys()[0], prior->prior(), prior->noiseModel()); + } + return poseGraph; +} + +/** + * Use Gauss-Newton optimizer to optimize for poses given rotation estimates + */ +template +static Values computePoses(const Values& initialRot, + NonlinearFactorGraph* posegraph, + bool singleIter = true) { + const auto origin = Pose().translation(); + + // Upgrade rotations to full poses + Values initialPose; + for (const auto key_value : initialRot) { + Key key = key_value.key; + const auto& rot = initialRot.at(key); + Pose initializedPose = Pose(rot, origin); + initialPose.insert(key, initializedPose); + } + + // add prior on dummy node + auto priorModel = noiseModel::Unit::Create(Pose::dimension); + initialPose.insert(kAnchorKey, Pose()); + posegraph->emplace_shared >(kAnchorKey, Pose(), priorModel); + + // Create optimizer + GaussNewtonParams params; + if (singleIter) { + params.maxIterations = 1; + } else { + params.setVerbosity("TERMINATION"); + } + GaussNewtonOptimizer optimizer(*posegraph, initialPose, params); + Values GNresult = optimizer.optimize(); + + // put into Values structure + Values estimate; + for (const auto key_value : GNresult) { + Key key = key_value.key; + if (key != kAnchorKey) { + const Pose& pose = GNresult.at(key); + estimate.insert(key, pose); + } + } + return estimate; +} +} // namespace initialize +} // namespace gtsam diff --git a/gtsam/slam/InitializePose3.cpp b/gtsam/slam/InitializePose3.cpp index 3d4a4d40d..43404df53 100644 --- a/gtsam/slam/InitializePose3.cpp +++ b/gtsam/slam/InitializePose3.cpp @@ -10,28 +10,31 @@ * -------------------------------------------------------------------------- */ /** - * @file InitializePose3.h + * @file InitializePose3.cpp * @author Luca Carlone * @author Frank Dellaert * @date August, 2014 */ #include + +#include #include #include #include #include +#include #include #include #include +#include + using namespace std; namespace gtsam { -static const Key kAnchorKey = symbol('Z', 9999999); - /* ************************************************************************* */ GaussianFactorGraph InitializePose3::buildLinearOrientationGraph(const NonlinearFactorGraph& g) { @@ -62,7 +65,7 @@ GaussianFactorGraph InitializePose3::buildLinearOrientationGraph(const Nonlinear } // prior on the anchor orientation linearGraph.add( - kAnchorKey, I_9x9, + initialize::kAnchorKey, I_9x9, (Vector(9) << 1.0, 0.0, 0.0, /* */ 0.0, 1.0, 0.0, /* */ 0.0, 0.0, 1.0) .finished(), 
noiseModel::Isotropic::Precision(9, 1)); @@ -78,7 +81,7 @@ Values InitializePose3::normalizeRelaxedRotations( Values validRot3; for(const auto& it: relaxedRot3) { Key key = it.first; - if (key != kAnchorKey) { + if (key != initialize::kAnchorKey) { Matrix3 M; M << Eigen::Map(it.second.data()); // Recover M from vectorized @@ -91,24 +94,10 @@ Values InitializePose3::normalizeRelaxedRotations( } /* ************************************************************************* */ -NonlinearFactorGraph InitializePose3::buildPose3graph(const NonlinearFactorGraph& graph) { +NonlinearFactorGraph InitializePose3::buildPose3graph( + const NonlinearFactorGraph& graph) { gttic(InitializePose3_buildPose3graph); - NonlinearFactorGraph pose3Graph; - - for(const auto& factor: graph) { - - // recast to a between on Pose3 - const auto pose3Between = boost::dynamic_pointer_cast >(factor); - if (pose3Between) - pose3Graph.add(pose3Between); - - // recast PriorFactor to BetweenFactor - const auto pose3Prior = boost::dynamic_pointer_cast >(factor); - if (pose3Prior) - pose3Graph.emplace_shared >(kAnchorKey, pose3Prior->keys()[0], - pose3Prior->prior(), pose3Prior->noiseModel()); - } - return pose3Graph; + return initialize::buildPoseGraph(graph); } /* ************************************************************************* */ @@ -134,8 +123,8 @@ Values InitializePose3::computeOrientationsGradient( // this works on the inverse rotations, according to Tron&Vidal,2011 Values inverseRot; - inverseRot.insert(kAnchorKey, Rot3()); - for(const auto& key_value: givenGuess) { + inverseRot.insert(initialize::kAnchorKey, Rot3()); + for(const auto key_value: givenGuess) { Key key = key_value.key; const Pose3& pose = givenGuess.at(key); inverseRot.insert(key, pose.rotation().inverse()); @@ -145,12 +134,12 @@ Values InitializePose3::computeOrientationsGradient( KeyVectorMap adjEdgesMap; KeyRotMap factorId2RotMap; - createSymbolicGraph(adjEdgesMap, factorId2RotMap, pose3Graph); + createSymbolicGraph(pose3Graph, &adjEdgesMap, &factorId2RotMap); // calculate max node degree & allocate gradient size_t maxNodeDeg = 0; VectorValues grad; - for(const auto& key_value: inverseRot) { + for(const auto key_value: inverseRot) { Key key = key_value.key; grad.insert(key,Z_3x1); size_t currNodeDeg = (adjEdgesMap.at(key)).size(); @@ -173,7 +162,7 @@ Values InitializePose3::computeOrientationsGradient( ////////////////////////////////////////////////////////////////////////// // compute the gradient at each node maxGrad = 0; - for (const auto& key_value : inverseRot) { + for (const auto key_value : inverseRot) { Key key = key_value.key; Vector gradKey = Z_3x1; // collect the gradient for each edge incident on key @@ -212,11 +201,11 @@ Values InitializePose3::computeOrientationsGradient( } // enf of gradient iterations // Return correct rotations - const Rot3& Rref = inverseRot.at(kAnchorKey); // This will be set to the identity as so far we included no prior + const Rot3& Rref = inverseRot.at(initialize::kAnchorKey); // This will be set to the identity as so far we included no prior Values estimateRot; - for(const auto& key_value: inverseRot) { + for(const auto key_value: inverseRot) { Key key = key_value.key; - if (key != kAnchorKey) { + if (key != initialize::kAnchorKey) { const Rot3& R = inverseRot.at(key); if(setRefFrame) estimateRot.insert(key, Rref.compose(R.inverse())); @@ -228,32 +217,34 @@ Values InitializePose3::computeOrientationsGradient( } /* ************************************************************************* */ -void 
InitializePose3::createSymbolicGraph(KeyVectorMap& adjEdgesMap, KeyRotMap& factorId2RotMap, - const NonlinearFactorGraph& pose3Graph) { +void InitializePose3::createSymbolicGraph( + const NonlinearFactorGraph& pose3Graph, KeyVectorMap* adjEdgesMap, + KeyRotMap* factorId2RotMap) { size_t factorId = 0; - for(const auto& factor: pose3Graph) { - auto pose3Between = boost::dynamic_pointer_cast >(factor); - if (pose3Between){ + for (const auto& factor : pose3Graph) { + auto pose3Between = + boost::dynamic_pointer_cast >(factor); + if (pose3Between) { Rot3 Rij = pose3Between->measured().rotation(); - factorId2RotMap.insert(pair(factorId,Rij)); + factorId2RotMap->emplace(factorId, Rij); Key key1 = pose3Between->key1(); - if (adjEdgesMap.find(key1) != adjEdgesMap.end()){ // key is already in - adjEdgesMap.at(key1).push_back(factorId); - }else{ + if (adjEdgesMap->find(key1) != adjEdgesMap->end()) { // key is already in + adjEdgesMap->at(key1).push_back(factorId); + } else { vector edge_id; edge_id.push_back(factorId); - adjEdgesMap.insert(pair >(key1, edge_id)); + adjEdgesMap->emplace(key1, edge_id); } Key key2 = pose3Between->key2(); - if (adjEdgesMap.find(key2) != adjEdgesMap.end()){ // key is already in - adjEdgesMap.at(key2).push_back(factorId); - }else{ + if (adjEdgesMap->find(key2) != adjEdgesMap->end()) { // key is already in + adjEdgesMap->at(key2).push_back(factorId); + } else { vector edge_id; edge_id.push_back(factorId); - adjEdgesMap.insert(pair >(key2, edge_id)); + adjEdgesMap->emplace(key2, edge_id); } - }else{ + } else { cout << "Error in createSymbolicGraph" << endl; } factorId++; @@ -293,50 +284,16 @@ Values InitializePose3::initializeOrientations(const NonlinearFactorGraph& graph } ///* ************************************************************************* */ -Values InitializePose3::computePoses(NonlinearFactorGraph& pose3graph, Values& initialRot) { +Values InitializePose3::computePoses(const Values& initialRot, + NonlinearFactorGraph* posegraph, + bool singleIter) { gttic(InitializePose3_computePoses); - - // put into Values structure - Values initialPose; - for (const auto& key_value : initialRot) { - Key key = key_value.key; - const Rot3& rot = initialRot.at(key); - Pose3 initializedPose = Pose3(rot, Point3(0, 0, 0)); - initialPose.insert(key, initializedPose); - } - - // add prior - noiseModel::Unit::shared_ptr priorModel = noiseModel::Unit::Create(6); - initialPose.insert(kAnchorKey, Pose3()); - pose3graph.emplace_shared >(kAnchorKey, Pose3(), priorModel); - - // Create optimizer - GaussNewtonParams params; - bool singleIter = true; - if (singleIter) { - params.maxIterations = 1; - } else { - cout << " \n\n\n\n performing more than 1 GN iterations \n\n\n" << endl; - params.setVerbosity("TERMINATION"); - } - GaussNewtonOptimizer optimizer(pose3graph, initialPose, params); - Values GNresult = optimizer.optimize(); - - // put into Values structure - Values estimate; - for (const auto& key_value : GNresult) { - Key key = key_value.key; - if (key != kAnchorKey) { - const Pose3& pose = GNresult.at(key); - estimate.insert(key, pose); - } - } - return estimate; + return initialize::computePoses(initialRot, posegraph, singleIter); } /* ************************************************************************* */ -Values InitializePose3::initialize(const NonlinearFactorGraph& graph, const Values& givenGuess, - bool useGradient) { +Values InitializePose3::initialize(const NonlinearFactorGraph& graph, + const Values& givenGuess, bool useGradient) { gttic(InitializePose3_initialize); 
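  // Overview of the steps below:
  //  1. buildPose3graph extracts the Pose3 between-factors and converts Pose3
  //     priors into betweens w.r.t. the fictitious anchor node
  //     (initialize::buildPoseGraph).
  //  2. Orientations are estimated either with the Tron & Vidal (2011)
  //     gradient method (useGradient) or by chordal relaxation.
  //  3. The rotations are upgraded to full poses with a single Gauss-Newton
  //     iteration via initialize::computePoses.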
Values initialValues; @@ -352,7 +309,7 @@ Values InitializePose3::initialize(const NonlinearFactorGraph& graph, const Valu orientations = computeOrientationsChordal(pose3Graph); // Compute the full poses (1 GN iteration on full poses) - return computePoses(pose3Graph, orientations); + return computePoses(orientations, &pose3Graph); } /* ************************************************************************* */ diff --git a/gtsam/slam/InitializePose3.h b/gtsam/slam/InitializePose3.h index ce1b28854..bf86ab6f2 100644 --- a/gtsam/slam/InitializePose3.h +++ b/gtsam/slam/InitializePose3.h @@ -26,6 +26,9 @@ #include #include +#include +#include + namespace gtsam { typedef std::map > KeyVectorMap; @@ -50,9 +53,9 @@ struct GTSAM_EXPORT InitializePose3 { const NonlinearFactorGraph& pose3Graph, const Values& givenGuess, size_t maxIter = 10000, const bool setRefFrame = true); - static void createSymbolicGraph(KeyVectorMap& adjEdgesMap, - KeyRotMap& factorId2RotMap, - const NonlinearFactorGraph& pose3Graph); + static void createSymbolicGraph(const NonlinearFactorGraph& pose3Graph, + KeyVectorMap* adjEdgesMap, + KeyRotMap* factorId2RotMap); static Vector3 gradientTron(const Rot3& R1, const Rot3& R2, const double a, const double b); @@ -64,8 +67,12 @@ struct GTSAM_EXPORT InitializePose3 { static NonlinearFactorGraph buildPose3graph( const NonlinearFactorGraph& graph); - static Values computePoses(NonlinearFactorGraph& pose3graph, - Values& initialRot); + /** + * Use Gauss-Newton optimizer to optimize for poses given rotation estimates + */ + static Values computePoses(const Values& initialRot, + NonlinearFactorGraph* poseGraph, + bool singleIter = true); /** * "extract" the Pose3 subgraph of the original graph, get orientations from diff --git a/gtsam/slam/KarcherMeanFactor-inl.h b/gtsam/slam/KarcherMeanFactor-inl.h index f10cc7e42..c81a9adc5 100644 --- a/gtsam/slam/KarcherMeanFactor-inl.h +++ b/gtsam/slam/KarcherMeanFactor-inl.h @@ -58,20 +58,22 @@ T FindKarcherMean(std::initializer_list&& rotations) { template template -KarcherMeanFactor::KarcherMeanFactor(const CONTAINER& keys, int d) - : NonlinearFactor(keys) { +KarcherMeanFactor::KarcherMeanFactor(const CONTAINER &keys, int d, + boost::optional beta) + : NonlinearFactor(keys), d_(static_cast(d)) { if (d <= 0) { throw std::invalid_argument( "KarcherMeanFactor needs dimension for dynamic types."); } - // Create the constant Jacobian made of D*D identity matrices, - // where D is the dimensionality of the manifold. - const auto I = Eigen::MatrixXd::Identity(d, d); + // Create the constant Jacobian made of d*d identity matrices, + // where d is the dimensionality of the manifold. + Matrix A = Matrix::Identity(d, d); + if (beta) A *= std::sqrt(*beta); std::map terms; for (Key j : keys) { - terms[j] = I; + terms[j] = A; } - jacobian_ = - boost::make_shared(terms, Eigen::VectorXd::Zero(d)); + whitenedJacobian_ = + boost::make_shared(terms, Vector::Zero(d)); } } // namespace gtsam \ No newline at end of file diff --git a/gtsam/slam/KarcherMeanFactor.h b/gtsam/slam/KarcherMeanFactor.h index 54b3930d4..b7cd3b11a 100644 --- a/gtsam/slam/KarcherMeanFactor.h +++ b/gtsam/slam/KarcherMeanFactor.h @@ -30,44 +30,51 @@ namespace gtsam { * PriorFactors. 
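 *
 * A small usage sketch (illustrative; R1, R2, R3 are assumed Rot3 values):
 *
 *   Rot3 mean = FindKarcherMean<Rot3>({R1, R2, R3});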
*/ template -T FindKarcherMean(const std::vector>& rotations); +T FindKarcherMean(const std::vector> &rotations); -template -T FindKarcherMean(std::initializer_list&& rotations); +template T FindKarcherMean(std::initializer_list &&rotations); /** * The KarcherMeanFactor creates a constraint on all SO(n) variables with * given keys that the Karcher mean (see above) will stay the same. Note the * mean itself is irrelevant to the constraint and is not a parameter: the * constraint is implemented as enforcing that the sum of local updates is - * equal to zero, hence creating a rank-dim constraint. Note it is implemented as - * a soft constraint, as typically it is used to fix a gauge freedom. + * equal to zero, hence creating a rank-dim constraint. Note it is implemented + * as a soft constraint, as typically it is used to fix a gauge freedom. * */ -template -class KarcherMeanFactor : public NonlinearFactor { - /// Constant Jacobian made of d*d identity matrices - boost::shared_ptr jacobian_; +template class KarcherMeanFactor : public NonlinearFactor { + // Compile time dimension: can be -1 + enum { D = traits::dimension }; - enum {D = traits::dimension}; + // Runtime dimension: always >=0 + size_t d_; - public: - /// Construct from given keys. + /// Constant Jacobian made of d*d identity matrices + boost::shared_ptr whitenedJacobian_; + +public: + /** + * Construct from given keys. + * If parameter beta is given, it acts as a precision = 1/sigma^2, and + * the Jacobian will be multiplied with 1/sigma = sqrt(beta). + */ template - KarcherMeanFactor(const CONTAINER& keys, int d=D); + KarcherMeanFactor(const CONTAINER &keys, int d = D, + boost::optional beta = boost::none); /// Destructor virtual ~KarcherMeanFactor() {} /// Calculate the error of the factor: always zero - double error(const Values& c) const override { return 0; } + double error(const Values &c) const override { return 0; } /// get the dimension of the factor (number of rows on linearization) - size_t dim() const override { return D; } + size_t dim() const override { return d_; } /// linearize to a GaussianFactor - boost::shared_ptr linearize(const Values& c) const override { - return jacobian_; + boost::shared_ptr linearize(const Values &c) const override { + return whitenedJacobian_; } }; // \KarcherMeanFactor -} // namespace gtsam +} // namespace gtsam diff --git a/gtsam/slam/OrientedPlane3Factor.h b/gtsam/slam/OrientedPlane3Factor.h index 1f56adc45..e83464b1e 100644 --- a/gtsam/slam/OrientedPlane3Factor.h +++ b/gtsam/slam/OrientedPlane3Factor.h @@ -41,13 +41,13 @@ class OrientedPlane3Factor: public NoiseModelFactor2 { } /// print - virtual void print(const std::string& s = "OrientedPlane3Factor", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; + void print(const std::string& s = "OrientedPlane3Factor", + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; /// evaluateError - virtual Vector evaluateError(const Pose3& pose, const OrientedPlane3& plane, + Vector evaluateError(const Pose3& pose, const OrientedPlane3& plane, boost::optional H1 = boost::none, boost::optional H2 = - boost::none) const { + boost::none) const override { OrientedPlane3 predicted_plane = OrientedPlane3::Transform(plane, pose, H1, H2); Vector err(3); @@ -78,14 +78,14 @@ class OrientedPlane3DirectionPrior: public NoiseModelFactor1 { } /// print - virtual void print(const std::string& s = "OrientedPlane3DirectionPrior", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; + void print(const std::string& s = 
"OrientedPlane3DirectionPrior", + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; /// equals - virtual bool equals(const NonlinearFactor& expected, double tol = 1e-9) const; + bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; - virtual Vector evaluateError(const OrientedPlane3& plane, - boost::optional H = boost::none) const; + Vector evaluateError(const OrientedPlane3& plane, + boost::optional H = boost::none) const override; }; } // gtsam diff --git a/gtsam/slam/PoseRotationPrior.h b/gtsam/slam/PoseRotationPrior.h index f4ce1520a..7e8bdaa46 100644 --- a/gtsam/slam/PoseRotationPrior.h +++ b/gtsam/slam/PoseRotationPrior.h @@ -50,7 +50,7 @@ class PoseRotationPrior : public NoiseModelFactor1 { virtual ~PoseRotationPrior() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } @@ -60,19 +60,19 @@ class PoseRotationPrior : public NoiseModelFactor1 { // testable /** equals specialized to this factor */ - virtual bool equals(const NonlinearFactor& expected, double tol=1e-9) const { + bool equals(const NonlinearFactor& expected, double tol=1e-9) const override { const This *e = dynamic_cast (&expected); return e != nullptr && Base::equals(*e, tol) && measured_.equals(e->measured_, tol); } /** print contents */ - void print(const std::string& s="", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s="", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s + "PoseRotationPrior", keyFormatter); measured_.print("Measured Rotation"); } /** h(x)-z */ - Vector evaluateError(const Pose& pose, boost::optional H = boost::none) const { + Vector evaluateError(const Pose& pose, boost::optional H = boost::none) const override { const Rotation& newR = pose.rotation(); if (H) { *H = Matrix::Zero(rDim, xDim); diff --git a/gtsam/slam/PoseTranslationPrior.h b/gtsam/slam/PoseTranslationPrior.h index 516582d83..0c029c501 100644 --- a/gtsam/slam/PoseTranslationPrior.h +++ b/gtsam/slam/PoseTranslationPrior.h @@ -54,12 +54,12 @@ class PoseTranslationPrior : public NoiseModelFactor1 { const Translation& measured() const { return measured_; } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /** h(x)-z */ - Vector evaluateError(const Pose& pose, boost::optional H = boost::none) const { + Vector evaluateError(const Pose& pose, boost::optional H = boost::none) const override { const Translation& newTrans = pose.translation(); const Rotation& R = pose.rotation(); const int tDim = traits::GetDimension(newTrans); @@ -74,13 +74,13 @@ class PoseTranslationPrior : public NoiseModelFactor1 { } /** equals specialized to this factor */ - virtual bool equals(const NonlinearFactor& expected, double tol=1e-9) const { + bool equals(const NonlinearFactor& expected, double tol=1e-9) const override { const This *e = dynamic_cast (&expected); return e != nullptr && Base::equals(*e, tol) && traits::Equals(measured_, e->measured_, tol); } /** print contents */ - void print(const std::string& s="", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s="", const 
KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s + "PoseTranslationPrior", keyFormatter); traits::Print(measured_, "Measured Translation"); } diff --git a/gtsam/slam/ProjectionFactor.h b/gtsam/slam/ProjectionFactor.h index 856913aae..266037469 100644 --- a/gtsam/slam/ProjectionFactor.h +++ b/gtsam/slam/ProjectionFactor.h @@ -100,7 +100,7 @@ namespace gtsam { virtual ~GenericProjectionFactor() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } @@ -109,7 +109,7 @@ namespace gtsam { * @param s optional string naming the factor * @param keyFormatter optional formatter useful for printing Symbols */ - void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "GenericProjectionFactor, z = "; traits::Print(measured_); if(this->body_P_sensor_) @@ -118,7 +118,7 @@ namespace gtsam { } /// equals - virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const This *e = dynamic_cast(&p); return e && Base::equals(p, tol) @@ -129,7 +129,7 @@ namespace gtsam { /// Evaluate error h(x)-z and optionally derivatives Vector evaluateError(const Pose3& pose, const Point3& point, - boost::optional H1 = boost::none, boost::optional H2 = boost::none) const { + boost::optional H1 = boost::none, boost::optional H2 = boost::none) const override { try { if(body_P_sensor_) { if(H1) { @@ -189,7 +189,7 @@ namespace gtsam { } public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; /// traits diff --git a/gtsam/slam/ReferenceFrameFactor.h b/gtsam/slam/ReferenceFrameFactor.h index 30f761934..e41b5f908 100644 --- a/gtsam/slam/ReferenceFrameFactor.h +++ b/gtsam/slam/ReferenceFrameFactor.h @@ -89,23 +89,23 @@ class ReferenceFrameFactor : public NoiseModelFactor3 { virtual ~ReferenceFrameFactor(){} - virtual NonlinearFactor::shared_ptr clone() const { + NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( NonlinearFactor::shared_ptr(new This(*this))); } /** Combined cost and derivative function using boost::optional */ - virtual Vector evaluateError(const Point& global, const Transform& trans, const Point& local, + Vector evaluateError(const Point& global, const Transform& trans, const Point& local, boost::optional Dforeign = boost::none, boost::optional Dtrans = boost::none, - boost::optional Dlocal = boost::none) const { + boost::optional Dlocal = boost::none) const override { Point newlocal = transform_point(trans, global, Dtrans, Dforeign); if (Dlocal) *Dlocal = -1* Matrix::Identity(traits::dimension, traits::dimension); return traits::Local(local,newlocal); } - virtual void print(const std::string& s="", - const gtsam::KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s="", + const gtsam::KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << ": ReferenceFrameFactor(" << "Global: " << keyFormatter(this->key1()) << "," << " Transform: " << keyFormatter(this->key2()) << "," diff --git a/gtsam/slam/RegularImplicitSchurFactor.h b/gtsam/slam/RegularImplicitSchurFactor.h index f14df9658..71474a8ab 100644 --- 
a/gtsam/slam/RegularImplicitSchurFactor.h +++ b/gtsam/slam/RegularImplicitSchurFactor.h @@ -10,7 +10,11 @@ #include #include #include + #include +#include +#include +#include namespace gtsam { @@ -76,7 +80,7 @@ class RegularImplicitSchurFactor: public GaussianFactor { /// print void print(const std::string& s = "", const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const { + DefaultKeyFormatter) const override { std::cout << " RegularImplicitSchurFactor " << std::endl; Factor::print(s); for (size_t pos = 0; pos < size(); ++pos) { @@ -88,7 +92,7 @@ class RegularImplicitSchurFactor: public GaussianFactor { } /// equals - bool equals(const GaussianFactor& lf, double tol) const { + bool equals(const GaussianFactor& lf, double tol) const override { const This* f = dynamic_cast(&lf); if (!f) return false; @@ -104,37 +108,36 @@ class RegularImplicitSchurFactor: public GaussianFactor { } /// Degrees of freedom of camera - virtual DenseIndex getDim(const_iterator variable) const { + DenseIndex getDim(const_iterator variable) const override { return D; } - virtual void updateHessian(const KeyVector& keys, - SymmetricBlockMatrix* info) const { + void updateHessian(const KeyVector& keys, + SymmetricBlockMatrix* info) const override { throw std::runtime_error( "RegularImplicitSchurFactor::updateHessian non implemented"); } - virtual Matrix augmentedJacobian() const { + Matrix augmentedJacobian() const override { throw std::runtime_error( "RegularImplicitSchurFactor::augmentedJacobian non implemented"); return Matrix(); } - virtual std::pair jacobian() const { + std::pair jacobian() const override { throw std::runtime_error( "RegularImplicitSchurFactor::jacobian non implemented"); return std::make_pair(Matrix(), Vector()); } /// *Compute* full augmented information matrix - virtual Matrix augmentedInformation() const { - + Matrix augmentedInformation() const override { // Do the Schur complement - SymmetricBlockMatrix augmentedHessian = // + SymmetricBlockMatrix augmentedHessian = Set::SchurComplement(FBlocks_, E_, b_); return augmentedHessian.selfadjointView(); } /// *Compute* full information matrix - virtual Matrix information() const { + Matrix information() const override { Matrix augmented = augmentedInformation(); int m = this->keys_.size(); size_t M = D * m; @@ -145,7 +148,7 @@ class RegularImplicitSchurFactor: public GaussianFactor { using GaussianFactor::hessianDiagonal; /// Add the diagonal of the Hessian for this factor to existing VectorValues - virtual void hessianDiagonalAdd(VectorValues &d) const override { + void hessianDiagonalAdd(VectorValues &d) const override { // diag(Hessian) = diag(F' * (I - E * PointCov * E') * F); for (size_t k = 0; k < size(); ++k) { // for each camera Key j = keys_[k]; @@ -176,7 +179,7 @@ class RegularImplicitSchurFactor: public GaussianFactor { * @brief add the contribution of this factor to the diagonal of the hessian * d(output) = d(input) + deltaHessianFactor */ - virtual void hessianDiagonal(double* d) const { + void hessianDiagonal(double* d) const override { // diag(Hessian) = diag(F' * (I - E * PointCov * E') * F); // Use eigen magic to access raw memory typedef Eigen::Matrix DVector; @@ -202,7 +205,7 @@ class RegularImplicitSchurFactor: public GaussianFactor { } /// Return the block diagonal of the Hessian for this factor - virtual std::map hessianBlockDiagonal() const { + std::map hessianBlockDiagonal() const override { std::map blocks; // F'*(I - E*P*E')*F for (size_t pos = 0; pos < size(); ++pos) { @@ -227,17 +230,18 @@ class 
RegularImplicitSchurFactor: public GaussianFactor { return blocks; } - virtual GaussianFactor::shared_ptr clone() const { + GaussianFactor::shared_ptr clone() const override { return boost::make_shared >(keys_, FBlocks_, PointCovariance_, E_, b_); throw std::runtime_error( "RegularImplicitSchurFactor::clone non implemented"); } - virtual bool empty() const { + + bool empty() const override { return false; } - virtual GaussianFactor::shared_ptr negate() const { + GaussianFactor::shared_ptr negate() const override { return boost::make_shared >(keys_, FBlocks_, PointCovariance_, E_, b_); throw std::runtime_error( @@ -288,7 +292,7 @@ class RegularImplicitSchurFactor: public GaussianFactor { * f = nonlinear error * (x'*H*x - 2*x'*eta + f) = x'*F'*Q*F*x - 2*x'*F'*Q *b + f = x'*F'*Q*(F*x - 2*b) + f */ - virtual double error(const VectorValues& x) const { + double error(const VectorValues& x) const override { // resize does not do malloc if correct size e1.resize(size()); @@ -383,13 +387,12 @@ class RegularImplicitSchurFactor: public GaussianFactor { void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector keys) const { } - ; /** * @brief Hessian-vector multiply, i.e. y += F'*alpha*(I - E*P*E')*F*x */ void multiplyHessianAdd(double alpha, const VectorValues& x, - VectorValues& y) const { + VectorValues& y) const override { // resize does not do malloc if correct size e1.resize(size()); @@ -432,7 +435,7 @@ class RegularImplicitSchurFactor: public GaussianFactor { /** * Calculate gradient, which is -F'Q*b, see paper */ - VectorValues gradientAtZero() const { + VectorValues gradientAtZero() const override { // calculate Q*b e1.resize(size()); e2.resize(size()); @@ -454,7 +457,7 @@ class RegularImplicitSchurFactor: public GaussianFactor { /** * Calculate gradient, which is -F'Q*b, see paper - RAW MEMORY ACCESS */ - virtual void gradientAtZero(double* d) const { + void gradientAtZero(double* d) const override { // Use eigen magic to access raw memory typedef Eigen::Matrix DVector; @@ -474,7 +477,7 @@ class RegularImplicitSchurFactor: public GaussianFactor { } /// Gradient wrt a key at any values - Vector gradient(Key key, const VectorValues& x) const { + Vector gradient(Key key, const VectorValues& x) const override { throw std::runtime_error( "gradient for RegularImplicitSchurFactor is not implemented yet"); } diff --git a/gtsam/slam/RotateFactor.h b/gtsam/slam/RotateFactor.h index b6ccc36a2..9e4091111 100644 --- a/gtsam/slam/RotateFactor.h +++ b/gtsam/slam/RotateFactor.h @@ -36,13 +36,13 @@ class RotateFactor: public NoiseModelFactor1 { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /// print - virtual void print(const std::string& s = "", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "", + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s); std::cout << "RotateFactor:]\n"; std::cout << "p: " << p_.transpose() << std::endl; @@ -51,7 +51,7 @@ class RotateFactor: public NoiseModelFactor1 { /// vector of errors returns 2D vector Vector evaluateError(const Rot3& R, - boost::optional H = boost::none) const { + boost::optional H = boost::none) const override { // predict p_ as q = R*z_, derivative H will be filled if not none Point3 q = R.rotate(z_,H); // error is just difference, and note 
derivative of that wrpt q is I3 @@ -88,13 +88,13 @@ class RotateDirectionsFactor: public NoiseModelFactor1 { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /// print - virtual void print(const std::string& s = "", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "", + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s); std::cout << "RotateDirectionsFactor:" << std::endl; i_p_.print("p"); @@ -102,7 +102,7 @@ class RotateDirectionsFactor: public NoiseModelFactor1 { } /// vector of errors returns 2D vector - Vector evaluateError(const Rot3& iRc, boost::optional H = boost::none) const { + Vector evaluateError(const Rot3& iRc, boost::optional H = boost::none) const override { Unit3 i_q = iRc * c_z_; Vector error = i_p_.error(i_q, H); if (H) { @@ -113,7 +113,7 @@ class RotateDirectionsFactor: public NoiseModelFactor1 { return error; } - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW }; } // namespace gtsam diff --git a/gtsam/slam/SmartFactorBase.h b/gtsam/slam/SmartFactorBase.h index 34f9b9e9f..d9f2b9c3d 100644 --- a/gtsam/slam/SmartFactorBase.h +++ b/gtsam/slam/SmartFactorBase.h @@ -81,7 +81,7 @@ class SmartFactorBase: public NonlinearFactor { mutable FBlocks Fs; public: - EIGEN_MAKE_ALIGNED_OPERATOR_NEW + GTSAM_MAKE_ALIGNED_OPERATOR_NEW /// shorthand for a smart pointer to a factor typedef boost::shared_ptr shared_ptr; @@ -150,7 +150,7 @@ class SmartFactorBase: public NonlinearFactor { } /// get the dimension (number of rows!) of the factor - virtual size_t dim() const { + size_t dim() const override { return ZDim * this->measured_.size(); } @@ -173,7 +173,7 @@ class SmartFactorBase: public NonlinearFactor { * @param keyFormatter optional formatter useful for printing Symbols */ void print(const std::string& s = "", const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const { + DefaultKeyFormatter) const override { std::cout << s << "SmartFactorBase, z = \n"; for (size_t k = 0; k < measured_.size(); ++k) { std::cout << "measurement, p = " << measured_[k] << "\t"; @@ -185,7 +185,7 @@ class SmartFactorBase: public NonlinearFactor { } /// equals - virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const This *e = dynamic_cast(&p); bool areMeasurementsEqual = true; @@ -207,10 +207,18 @@ class SmartFactorBase: public NonlinearFactor { Vector ue = cameras.reprojectionError(point, measured_, Fs, E); if (body_P_sensor_ && Fs) { const Pose3 sensor_P_body = body_P_sensor_->inverse(); + constexpr int camera_dim = traits::dimension; + constexpr int pose_dim = traits::dimension; + for (size_t i = 0; i < Fs->size(); i++) { - const Pose3 w_Pose_body = cameras[i].pose() * sensor_P_body; - Matrix J(6, 6); - const Pose3 world_P_body = w_Pose_body.compose(*body_P_sensor_, J); + const Pose3 world_P_body = cameras[i].pose() * sensor_P_body; + Eigen::Matrix J; + J.setZero(); + Eigen::Matrix H; + // Call compose to compute Jacobian for camera extrinsics + world_P_body.compose(*body_P_sensor_, H); + // Assign extrinsics part of the Jacobian + J.template block(0, 0) = H; Fs->at(i) = Fs->at(i) * J; } } diff --git a/gtsam/slam/SmartProjectionFactor.h b/gtsam/slam/SmartProjectionFactor.h index 
15d632cda..bc01f5d85 100644 --- a/gtsam/slam/SmartProjectionFactor.h +++ b/gtsam/slam/SmartProjectionFactor.h @@ -99,7 +99,7 @@ class SmartProjectionFactor: public SmartFactorBase { * @param keyFormatter optional formatter useful for printing Symbols */ void print(const std::string& s = "", const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const { + DefaultKeyFormatter) const override { std::cout << s << "SmartProjectionFactor\n"; std::cout << "linearizationMode:\n" << params_.linearizationMode << std::endl; @@ -110,7 +110,7 @@ class SmartProjectionFactor: public SmartFactorBase { } /// equals - virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const This *e = dynamic_cast(&p); return e && params_.linearizationMode == e->params_.linearizationMode && Base::equals(p, tol); @@ -305,8 +305,8 @@ class SmartProjectionFactor: public SmartFactorBase { } /// linearize - virtual boost::shared_ptr linearize( - const Values& values) const { + boost::shared_ptr linearize( + const Values& values) const override { return linearizeDamped(values); } @@ -409,7 +409,7 @@ class SmartProjectionFactor: public SmartFactorBase { } /// Calculate total reprojection error - virtual double error(const Values& values) const { + double error(const Values& values) const override { if (this->active(values)) { return totalReprojectionError(Base::cameras(values)); } else { // else of active flag @@ -443,25 +443,6 @@ class SmartProjectionFactor: public SmartFactorBase { /** return the farPoint state */ bool isFarPoint() const { return result_.farPoint(); } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - /// @name Deprecated - /// @{ - // It does not make sense to optimize for a camera where the pose would not be - // the actual pose of the camera. An unfortunate consequence of deprecating - // this constructor means that we cannot optimize for calibration when the - // camera is offset from the body pose. That would need a new factor with - // (body) pose and calibration as variables. However, that use case is - // unlikely: when a global offset is know, calibration is typically known. - SmartProjectionFactor( - const SharedNoiseModel& sharedNoiseModel, - const boost::optional body_P_sensor, - const SmartProjectionParams& params = SmartProjectionParams()) - : Base(sharedNoiseModel, body_P_sensor), - params_(params), - result_(TriangulationResult::Degenerate()) {} - /// @} -#endif - private: /// Serialization function diff --git a/gtsam/slam/SmartProjectionPoseFactor.h b/gtsam/slam/SmartProjectionPoseFactor.h index b5be46258..cccdf20d6 100644 --- a/gtsam/slam/SmartProjectionPoseFactor.h +++ b/gtsam/slam/SmartProjectionPoseFactor.h @@ -103,13 +103,13 @@ class SmartProjectionPoseFactor: public SmartProjectionFactor< * @param keyFormatter optional formatter useful for printing Symbols */ void print(const std::string& s = "", const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const { + DefaultKeyFormatter) const override { std::cout << s << "SmartProjectionPoseFactor, z = \n "; Base::print("", keyFormatter); } /// equals - virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const This *e = dynamic_cast(&p); return e && Base::equals(p, tol); } @@ -117,7 +117,7 @@ class SmartProjectionPoseFactor: public SmartProjectionFactor< /** * error calculates the error of the factor. 
*/ - virtual double error(const Values& values) const { + double error(const Values& values) const override { if (this->active(values)) { return this->totalReprojectionError(cameras(values)); } else { // else of active flag @@ -136,7 +136,7 @@ class SmartProjectionPoseFactor: public SmartProjectionFactor< * to keys involved in this factor * @return vector of Values */ - typename Base::Cameras cameras(const Values& values) const { + typename Base::Cameras cameras(const Values& values) const override { typename Base::Cameras cameras; for (const Key& k : this->keys_) { const Pose3 world_P_sensor_k = diff --git a/gtsam/slam/StereoFactor.h b/gtsam/slam/StereoFactor.h index 8828f5de7..6556723bd 100644 --- a/gtsam/slam/StereoFactor.h +++ b/gtsam/slam/StereoFactor.h @@ -91,7 +91,7 @@ class GenericStereoFactor: public NoiseModelFactor2 { virtual ~GenericStereoFactor() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } @@ -100,7 +100,7 @@ class GenericStereoFactor: public NoiseModelFactor2 { * @param s optional string naming the factor * @param keyFormatter optional formatter useful for printing Symbols */ - void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s, keyFormatter); measured_.print(s + ".z"); if(this->body_P_sensor_) @@ -110,7 +110,7 @@ class GenericStereoFactor: public NoiseModelFactor2 { /** * equals */ - virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const { + bool equals(const NonlinearFactor& f, double tol = 1e-9) const override { const GenericStereoFactor* e = dynamic_cast (&f); return e && Base::equals(f) @@ -120,7 +120,7 @@ class GenericStereoFactor: public NoiseModelFactor2 { /** h(x)-z */ Vector evaluateError(const Pose3& pose, const Point3& point, - boost::optional H1 = boost::none, boost::optional H2 = boost::none) const { + boost::optional H1 = boost::none, boost::optional H2 = boost::none) const override { try { if(body_P_sensor_) { if(H1) { diff --git a/gtsam/slam/TriangulationFactor.h b/gtsam/slam/TriangulationFactor.h index 811d92fbc..9d02ad321 100644 --- a/gtsam/slam/TriangulationFactor.h +++ b/gtsam/slam/TriangulationFactor.h @@ -90,7 +90,7 @@ class TriangulationFactor: public NoiseModelFactor1 { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } @@ -101,7 +101,7 @@ class TriangulationFactor: public NoiseModelFactor1 { * @param keyFormatter optional formatter useful for printing Symbols */ void print(const std::string& s = "", const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const { + DefaultKeyFormatter) const override { std::cout << s << "TriangulationFactor,"; camera_.print("camera"); traits::Print(measured_, "z"); @@ -109,7 +109,7 @@ class TriangulationFactor: public NoiseModelFactor1 { } /// equals - virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const This *e = dynamic_cast(&p); return e && Base::equals(p, tol) && this->camera_.equals(e->camera_, tol) && 
traits::Equals(this->measured_, e->measured_, tol); @@ -117,7 +117,7 @@ class TriangulationFactor: public NoiseModelFactor1 { /// Evaluate error h(x)-z and optionally derivatives Vector evaluateError(const Point3& point, boost::optional H2 = - boost::none) const { + boost::none) const override { try { return traits::Local(measured_, camera_.project2(point, boost::none, H2)); } catch (CheiralityException& e) { @@ -143,7 +143,7 @@ class TriangulationFactor: public NoiseModelFactor1 { * \f$ Ax-b \approx h(x+\delta x)-z = h(x) + A \delta x - z \f$ * Hence \f$ b = z - h(x) = - \mathtt{error\_vector}(x) \f$ */ - boost::shared_ptr linearize(const Values& x) const { + boost::shared_ptr linearize(const Values& x) const override { // Only linearize if the factor is active if (!this->active(x)) return boost::shared_ptr(); diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index 052bb3343..c8a8b15c5 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -11,32 +11,40 @@ /** * @file dataset.cpp * @date Jan 22, 2010 - * @author Kai Ni, Luca Carlone, Frank Dellaert + * @author Kai Ni + * @author Luca Carlone + * @author Frank Dellaert + * @author Varun Agrawal * @brief utility functions for loading datasets */ #include #include #include + #include #include #include -#include -#include + #include -#include #include + #include + +#include +#include + #include #include #include -#include #include #include +#include #include #include #include +#include #include #include @@ -45,14 +53,16 @@ using namespace std; namespace fs = boost::filesystem; -using namespace gtsam::symbol_shorthand; +using gtsam::symbol_shorthand::L; +using gtsam::symbol_shorthand::P; +using gtsam::symbol_shorthand::X; #define LINESIZE 81920 namespace gtsam { /* ************************************************************************* */ -string findExampleDataFile(const string& name) { +string findExampleDataFile(const string &name) { // Search source tree and installed location vector rootsToSearch; @@ -67,10 +77,11 @@ string findExampleDataFile(const string& name) { namesToSearch.push_back(name + ".txt"); namesToSearch.push_back(name + ".out"); namesToSearch.push_back(name + ".xml"); + namesToSearch.push_back(name + ".g2o"); // Find first name that exists - for(const fs::path& root: rootsToSearch) { - for(const fs::path& name: namesToSearch) { + for (const fs::path root : rootsToSearch) { + for (const fs::path name : namesToSearch) { if (fs::is_regular_file(root / name)) return (root / name).string(); } @@ -78,79 +89,167 @@ string findExampleDataFile(const string& name) { // If we did not return already, then we did not find the file throw invalid_argument( - "gtsam::findExampleDataFile could not find a matching file in\n" - GTSAM_SOURCE_TREE_DATASET_DIR " or\n" - GTSAM_INSTALLED_DATASET_DIR " named\n" + name + ", " + name - + ".graph, or " + name + ".txt"); + "gtsam::findExampleDataFile could not find a matching " + "file in\n" GTSAM_SOURCE_TREE_DATASET_DIR + " or\n" GTSAM_INSTALLED_DATASET_DIR " named\n" + + name + ", " + name + ".g2o, " + ", " + name + ".graph, or " + name + + ".txt"); } /* ************************************************************************* */ -string createRewrittenFileName(const string& name) { +string createRewrittenFileName(const string &name) { // Search source tree and installed location if (!exists(fs::path(name))) { throw invalid_argument( - "gtsam::createRewrittenFileName could not find a matching file in\n" - + name); + "gtsam::createRewrittenFileName could not find a matching 
file in\n" + + name); } fs::path p(name); - fs::path newpath = fs::path(p.parent_path().string()) - / fs::path(p.stem().string() + "-rewritten.txt"); + fs::path newpath = fs::path(p.parent_path().string()) / + fs::path(p.stem().string() + "-rewritten.txt"); return newpath.string(); } /* ************************************************************************* */ -GraphAndValues load2D(pair dataset, int maxID, - bool addNoise, bool smart, NoiseFormat noiseFormat, - KernelFunctionType kernelFunctionType) { - return load2D(dataset.first, dataset.second, maxID, addNoise, smart, - noiseFormat, kernelFunctionType); +// Type for parser functions used in parseLines below. +template +using Parser = + std::function(istream &is, const string &tag)>; + +// Parse a file by calling the parse(is, tag) function for every line. +// The result of calling the function is ignored, so typically parse function +// works via a side effect, like adding a factor into a graph etc. +template +static void parseLines(const string &filename, Parser parse) { + ifstream is(filename.c_str()); + if (!is) + throw invalid_argument("parse: can not find file " + filename); + string tag; + while (is >> tag) { + parse(is, tag); // ignore return value + is.ignore(LINESIZE, '\n'); + } +} + +/* ************************************************************************* */ +// Parse types T into a size_t-indexed map +template +map parseToMap(const string &filename, Parser> parse, + size_t maxIndex) { + map result; + Parser> emplace = [&](istream &is, const string &tag) { + if (auto t = parse(is, tag)) { + if (!maxIndex || t->first <= maxIndex) + result.emplace(*t); + } + return boost::none; + }; + parseLines(filename, emplace); + return result; } /* ************************************************************************* */ -// Read noise parameters and interpret them according to flags -static SharedNoiseModel readNoiseModel(ifstream& is, bool smart, - NoiseFormat noiseFormat, KernelFunctionType kernelFunctionType) { - double v1, v2, v3, v4, v5, v6; - is >> v1 >> v2 >> v3 >> v4 >> v5 >> v6; +// Parse a file and push results on a vector +template +static vector parseToVector(const string &filename, Parser parse) { + vector result; + Parser add = [&result, parse](istream &is, const string &tag) { + if (auto t = parse(is, tag)) + result.push_back(*t); + return boost::none; + }; + parseLines(filename, add); + return result; +} + +/* ************************************************************************* */ +boost::optional parseVertexPose(istream &is, const string &tag) { + if ((tag == "VERTEX2") || (tag == "VERTEX_SE2") || (tag == "VERTEX")) { + size_t id; + double x, y, yaw; + if (!(is >> id >> x >> y >> yaw)) { + throw std::runtime_error("parseVertexPose encountered malformed line"); + } + return IndexedPose(id, Pose2(x, y, yaw)); + } else { + return boost::none; + } +} + +template <> +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return parseToMap(filename, parseVertexPose, maxIndex); +} + +/* ************************************************************************* */ +boost::optional parseVertexLandmark(istream &is, + const string &tag) { + if (tag == "VERTEX_XY") { + size_t id; + double x, y; + if (!(is >> id >> x >> y)) { + throw std::runtime_error( + "parseVertexLandmark encountered malformed line"); + } + return IndexedLandmark(id, Point2(x, y)); + } else { + return boost::none; + } +} + +template <> +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return 
parseToMap(filename, parseVertexLandmark, maxIndex); +} +/* ************************************************************************* */ +// Interpret noise parameters according to flags +static SharedNoiseModel +createNoiseModel(const Vector6 v, bool smart, NoiseFormat noiseFormat, + KernelFunctionType kernelFunctionType) { if (noiseFormat == NoiseFormatAUTO) { // Try to guess covariance matrix layout - if (v1 != 0.0 && v2 == 0.0 && v3 != 0.0 && v4 != 0.0 && v5 == 0.0 - && v6 == 0.0) { - // NoiseFormatGRAPH + if (v(0) != 0.0 && v(1) == 0.0 && v(2) != 0.0 && // + v(3) != 0.0 && v(4) == 0.0 && v(5) == 0.0) { noiseFormat = NoiseFormatGRAPH; - } else if (v1 != 0.0 && v2 == 0.0 && v3 == 0.0 && v4 != 0.0 && v5 == 0.0 - && v6 != 0.0) { - // NoiseFormatCOV + } else if (v(0) != 0.0 && v(1) == 0.0 && v(2) == 0.0 && // + v(3) != 0.0 && v(4) == 0.0 && v(5) != 0.0) { noiseFormat = NoiseFormatCOV; } else { throw std::invalid_argument( - "load2D: unrecognized covariance matrix format in dataset file. Please specify the noise format."); + "load2D: unrecognized covariance matrix format in dataset file. " + "Please specify the noise format."); } } // Read matrix and check that diagonal entries are non-zero - Matrix M(3, 3); + Matrix3 M; switch (noiseFormat) { case NoiseFormatG2O: case NoiseFormatCOV: - // i.e., [ v1 v2 v3; v2' v4 v5; v3' v5' v6 ] - if (v1 == 0.0 || v4 == 0.0 || v6 == 0.0) + // i.e., [v(0) v(1) v(2); + // v(1)' v(3) v(4); + // v(2)' v(4)' v(5) ] + if (v(0) == 0.0 || v(3) == 0.0 || v(5) == 0.0) throw runtime_error( "load2D::readNoiseModel looks like this is not G2O matrix order"); - M << v1, v2, v3, v2, v4, v5, v3, v5, v6; + M << v(0), v(1), v(2), v(1), v(3), v(4), v(2), v(4), v(5); break; case NoiseFormatTORO: case NoiseFormatGRAPH: // http://www.openslam.org/toro.html // inf_ff inf_fs inf_ss inf_rr inf_fr inf_sr - // i.e., [ v1 v2 v5; v2' v3 v6; v5' v6' v4 ] - if (v1 == 0.0 || v3 == 0.0 || v4 == 0.0) + // i.e., [v(0) v(1) v(4); + // v(1)' v(2) v(5); + // v(4)' v(5)' v(3) ] + if (v(0) == 0.0 || v(2) == 0.0 || v(3) == 0.0) throw invalid_argument( "load2D::readNoiseModel looks like this is not TORO matrix order"); - M << v1, v2, v5, v2, v3, v6, v5, v6, v4; + M << v(0), v(1), v(4), v(1), v(2), v(5), v(4), v(5), v(3); break; default: throw runtime_error("load2D: invalid noise format"); @@ -192,315 +291,444 @@ static SharedNoiseModel readNoiseModel(ifstream& is, bool smart, } /* ************************************************************************* */ -boost::optional parseVertex(istream& is, const string& tag) { - if ((tag == "VERTEX2") || (tag == "VERTEX_SE2") || (tag == "VERTEX")) { - Key id; +boost::optional parseEdge(istream &is, const string &tag) { + if ((tag == "EDGE2") || (tag == "EDGE") || (tag == "EDGE_SE2") || + (tag == "ODOMETRY")) { + + size_t id1, id2; double x, y, yaw; - is >> id >> x >> y >> yaw; - return IndexedPose(id, Pose2(x, y, yaw)); + if (!(is >> id1 >> id2 >> x >> y >> yaw)) { + throw std::runtime_error("parseEdge encountered malformed line"); + } + return IndexedEdge({id1, id2}, Pose2(x, y, yaw)); } else { return boost::none; } } /* ************************************************************************* */ -boost::optional parseEdge(istream& is, const string& tag) { - if ((tag == "EDGE2") || (tag == "EDGE") || (tag == "EDGE_SE2") - || (tag == "ODOMETRY")) { +// Measurement parsers are implemented as a functor, as they have several +// options to filter and create the measurement model. 
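// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the patch. It shows how
// parseEdge and the noise-format conventions handled by createNoiseModel fit
// together; the function name and the literal values are made up.
#include <sstream>
#include <string>
#include <gtsam/slam/dataset.h>

void sketchParseEdge() {
  // One "EDGE_SE2 id1 id2 x y yaw I11 I12 I13 I22 I23 I33" line, with the tag
  // already consumed (as parseLines does before handing off to a parser):
  std::istringstream is("0 1 2.0 0.0 0.1 500 0 0 500 0 5000");
  const std::string tag = "EDGE_SE2";
  if (auto edge = gtsam::parseEdge(is, tag)) {
    // edge->first  is the (id1, id2) index pair {0, 1}
    // edge->second is the relative pose Pose2(2.0, 0.0, 0.1)
    // The six remaining entries are read separately and interpreted as the
    // upper triangle of the information matrix:
    //   NoiseFormatG2O/COV   : [ v0 v1 v2 ; v1 v3 v4 ; v2 v4 v5 ]
    //   NoiseFormatTORO/GRAPH: [ v0 v1 v4 ; v1 v2 v5 ; v4 v5 v3 ]
  }
}
// ---------------------------------------------------------------------------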
+template struct ParseMeasurement; - Key id1, id2; - double x, y, yaw; - is >> id1 >> id2 >> x >> y >> yaw; - return IndexedEdge(pair(id1, id2), Pose2(x, y, yaw)); - } else { - return boost::none; +/* ************************************************************************* */ +// Converting from Measurement to BetweenFactor is generic +template struct ParseFactor : ParseMeasurement { + ParseFactor(const ParseMeasurement &parent) + : ParseMeasurement(parent) {} + + // We parse a measurement then convert + typename boost::optional::shared_ptr> + operator()(istream &is, const string &tag) { + if (auto m = ParseMeasurement::operator()(is, tag)) + return boost::make_shared>( + m->key1(), m->key2(), m->measured(), m->noiseModel()); + else + return boost::none; } -} +}; /* ************************************************************************* */ -GraphAndValues load2D(const string& filename, SharedNoiseModel model, Key maxID, - bool addNoise, bool smart, NoiseFormat noiseFormat, - KernelFunctionType kernelFunctionType) { - - ifstream is(filename.c_str()); - if (!is) - throw invalid_argument("load2D: can not find file " + filename); - - Values::shared_ptr initial(new Values); - NonlinearFactorGraph::shared_ptr graph(new NonlinearFactorGraph); - - string tag; - - // load the poses - while (!is.eof()) { - if (!(is >> tag)) - break; - - const auto indexed_pose = parseVertex(is, tag); - if (indexed_pose) { - Key id = indexed_pose->first; - - // optional filter - if (maxID && id >= maxID) - continue; +// Pose2 measurement parser +template <> struct ParseMeasurement { + // The arguments + boost::shared_ptr sampler; + size_t maxIndex; + + // For noise model creation + bool smart; + NoiseFormat noiseFormat; + KernelFunctionType kernelFunctionType; + + // If this is not null, will use instead of parsed model: + SharedNoiseModel model; - initial->insert(id, indexed_pose->second); - } - is.ignore(LINESIZE, '\n'); + // The actual parser + boost::optional> operator()(istream &is, + const string &tag) { + auto edge = parseEdge(is, tag); + if (!edge) + return boost::none; + + // parse noise model + Vector6 v; + is >> v(0) >> v(1) >> v(2) >> v(3) >> v(4) >> v(5); + + // optional filter + size_t id1, id2; + tie(id1, id2) = edge->first; + if (maxIndex && (id1 > maxIndex || id2 > maxIndex)) + return boost::none; + + // Get pose and optionally add noise + Pose2 &pose = edge->second; + if (sampler) + pose = pose.retract(sampler->sample()); + + // emplace measurement + auto modelFromFile = + createNoiseModel(v, smart, noiseFormat, kernelFunctionType); + return BinaryMeasurement(id1, id2, pose, + model ? 
model : modelFromFile); } - is.clear(); /* clears the end-of-file and error flags */ - is.seekg(0, ios::beg); +}; - // If asked, create a sampler with random number generator - std::unique_ptr sampler; - if (addNoise) { - noiseModel::Diagonal::shared_ptr noise; - if (model) - noise = boost::dynamic_pointer_cast(model); - if (!noise) - throw invalid_argument( - "gtsam::load2D: invalid noise model for adding noise" - "(current version assumes diagonal noise model)!"); - sampler.reset(new Sampler(noise)); - } +/* ************************************************************************* */ +// Create a sampler to corrupt a measurement +boost::shared_ptr createSampler(const SharedNoiseModel &model) { + auto noise = boost::dynamic_pointer_cast(model); + if (!noise) + throw invalid_argument("gtsam::load: invalid noise model for adding noise" + "(current version assumes diagonal noise model)!"); + return boost::shared_ptr(new Sampler(noise)); +} + +/* ************************************************************************* */ +// Implementation of parseMeasurements for Pose2 +template <> +std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseMeasurement parse{model ? createSampler(model) : nullptr, + maxIndex, true, NoiseFormatAUTO, + KernelFunctionTypeNONE}; + return parseToVector>(filename, parse); +} - // Parse the pose constraints - Key id1, id2; - bool haveLandmark = false; - const bool useModelInFile = !model; - while (!is.eof()) { - if (!(is >> tag)) - break; +/* ************************************************************************* */ +// Implementation of parseMeasurements for Rot2, which converts from Pose2 + +// Extract Rot2 measurement from Pose2 measurement +static BinaryMeasurement convert(const BinaryMeasurement &p) { + auto gaussian = + boost::dynamic_pointer_cast(p.noiseModel()); + if (!gaussian) + throw std::invalid_argument( + "parseMeasurements can only convert Pose2 measurements " + "with Gaussian noise models."); + const Matrix3 M = gaussian->covariance(); + return BinaryMeasurement(p.key1(), p.key2(), p.measured().rotation(), + noiseModel::Isotropic::Variance(1, M(2, 2))); +} - auto between_pose = parseEdge(is, tag); - if (between_pose) { - std::tie(id1, id2) = between_pose->first; - Pose2& l1Xl2 = between_pose->second; +template <> +std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + auto poses = parseMeasurements(filename, model, maxIndex); + std::vector> result; + result.reserve(poses.size()); + for (const auto &p : poses) + result.push_back(convert(p)); + return result; +} - // read noise model - SharedNoiseModel modelInFile = readNoiseModel(is, smart, noiseFormat, - kernelFunctionType); +/* ************************************************************************* */ +// Implementation of parseFactors for Pose2 +template <> +std::vector::shared_ptr> +parseFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseFactor parse({model ? 
createSampler(model) : nullptr, maxIndex, + true, NoiseFormatAUTO, KernelFunctionTypeNONE, + nullptr}); + return parseToVector::shared_ptr>(filename, parse); +} - // optional filter - if (maxID && (id1 >= maxID || id2 >= maxID)) - continue; +/* ************************************************************************* */ +using BearingRange2D = BearingRange; - if (useModelInFile) - model = modelInFile; +template <> struct ParseMeasurement { + // arguments + size_t maxIndex; - if (addNoise) - l1Xl2 = l1Xl2.retract(sampler->sample()); + // The actual parser + boost::optional> + operator()(istream &is, const string &tag) { + if (tag != "BR" && tag != "LANDMARK") + return boost::none; - // Insert vertices if pure odometry file - if (!initial->exists(id1)) - initial->insert(id1, Pose2()); - if (!initial->exists(id2)) - initial->insert(id2, initial->at(id1) * l1Xl2); - - NonlinearFactor::shared_ptr factor( - new BetweenFactor(id1, id2, l1Xl2, model)); - graph->push_back(factor); - } - // Parse measurements + size_t id1, id2; + is >> id1 >> id2; double bearing, range, bearing_std, range_std; - // A bearing-range measurement if (tag == "BR") { - is >> id1 >> id2 >> bearing >> range >> bearing_std >> range_std; + is >> bearing >> range >> bearing_std >> range_std; } - // A landmark measurement, TODO Frank says: don't know why is converted to bearing-range + // A landmark measurement, converted to bearing-range if (tag == "LANDMARK") { double lmx, lmy; double v1, v2, v3; - is >> id1 >> id2 >> lmx >> lmy >> v1 >> v2 >> v3; + is >> lmx >> lmy >> v1 >> v2 >> v3; // Convert x,y to bearing,range bearing = atan2(lmy, lmx); range = sqrt(lmx * lmx + lmy * lmy); - // In our experience, the x-y covariance on landmark sightings is not very good, so assume - // it describes the uncertainty at a range of 10m, and convert that to bearing/range uncertainty. + // In our experience, the x-y covariance on landmark sightings is not + // very good, so assume it describes the uncertainty at a range of 10m, + // and convert that to bearing/range uncertainty. if (std::abs(v1 - v3) < 1e-4) { bearing_std = sqrt(v1 / 10.0); range_std = sqrt(v1); } else { + // TODO(frank): we are ignoring the non-uniform covariance bearing_std = 1; range_std = 1; - if (!haveLandmark) { - cout - << "Warning: load2D is a very simple dataset loader and is ignoring the\n" - "non-uniform covariance on LANDMARK measurements in this file." - << endl; - haveLandmark = true; - } } } - // Do some common stuff for bearing-range measurements - if (tag == "LANDMARK" || tag == "BR") { + // optional filter + if (maxIndex && id1 > maxIndex) + return boost::none; - // optional filter - if (maxID && id1 >= maxID) - continue; + // Create noise model + auto measurementNoise = noiseModel::Diagonal::Sigmas( + (Vector(2) << bearing_std, range_std).finished()); - // Create noise model - noiseModel::Diagonal::shared_ptr measurementNoise = - noiseModel::Diagonal::Sigmas((Vector(2) << bearing_std, range_std).finished()); + return BinaryMeasurement( + id1, L(id2), BearingRange2D(bearing, range), measurementNoise); + } +}; + +/* ************************************************************************* */ +GraphAndValues load2D(const string &filename, SharedNoiseModel model, + size_t maxIndex, bool addNoise, bool smart, + NoiseFormat noiseFormat, + KernelFunctionType kernelFunctionType) { + + // Single pass for poses and landmarks. 
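// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the patch. Typical use of
// the templated parsers defined above; the function name and file name are
// placeholders, and only the types instantiated in this file (Pose2, Rot2,
// Pose3, Rot3 for measurements; Pose2, Pose3 for factors) may be requested.
#include <string>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/slam/dataset.h>

void sketchTemplatedParsers(const std::string &g2oFile) {
  using namespace gtsam;
  // All Pose2-Pose2 relative measurements, keeping only vertex indices <= 100:
  auto poseMeasurements = parseMeasurements<Pose2>(g2oFile, nullptr, 100);
  // The same measurements reduced to their rotation part:
  auto rotMeasurements = parseMeasurements<Rot2>(g2oFile);
  // Ready-made BetweenFactor<Pose2> shared pointers, pushed onto a graph:
  NonlinearFactorGraph graph;
  for (const auto &factor : parseFactors<Pose2>(g2oFile))
    graph.push_back(factor);
}
// ---------------------------------------------------------------------------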
+ auto initial = boost::make_shared(); + Parser insert = [maxIndex, &initial](istream &is, const string &tag) { + if (auto indexedPose = parseVertexPose(is, tag)) { + if (!maxIndex || indexedPose->first <= maxIndex) + initial->insert(indexedPose->first, indexedPose->second); + } else if (auto indexedLandmark = parseVertexLandmark(is, tag)) { + if (!maxIndex || indexedLandmark->first <= maxIndex) + initial->insert(L(indexedLandmark->first), indexedLandmark->second); + } + return 0; + }; + parseLines(filename, insert); + + // Single pass for Pose2 and bearing-range factors. + auto graph = boost::make_shared(); + + // Instantiate factor parser + ParseFactor parseBetweenFactor( + {addNoise ? createSampler(model) : nullptr, maxIndex, smart, noiseFormat, + kernelFunctionType, model}); - // Add to graph - *graph += BearingRangeFactor(id1, L(id2), bearing, range, - measurementNoise); + // Instantiate bearing-range parser + ParseMeasurement parseBearingRange{maxIndex}; + + // Combine in a single parser that adds factors to `graph`, but also inserts + // new variables into `initial` when needed. + Parser parse = [&](istream &is, const string &tag) { + if (auto f = parseBetweenFactor(is, tag)) { + graph->push_back(*f); + + // Insert vertices if pure odometry file + Key key1 = (*f)->key1(), key2 = (*f)->key2(); + if (!initial->exists(key1)) + initial->insert(key1, Pose2()); + if (!initial->exists(key2)) + initial->insert(key2, initial->at(key1) * (*f)->measured()); + } else if (auto m = parseBearingRange(is, tag)) { + Key key1 = m->key1(), key2 = m->key2(); + BearingRange2D br = m->measured(); + graph->emplace_shared>(key1, key2, br, + m->noiseModel()); // Insert poses or points if they do not exist yet - if (!initial->exists(id1)) - initial->insert(id1, Pose2()); - if (!initial->exists(L(id2))) { - Pose2 pose = initial->at(id1); - Point2 local(cos(bearing) * range, sin(bearing) * range); + if (!initial->exists(key1)) + initial->insert(key1, Pose2()); + if (!initial->exists(key2)) { + Pose2 pose = initial->at(key1); + Point2 local = br.bearing() * Point2(br.range(), 0); Point2 global = pose.transformFrom(local); - initial->insert(L(id2), global); + initial->insert(key2, global); } } - is.ignore(LINESIZE, '\n'); - } + return 0; + }; + + parseLines(filename, parse); return make_pair(graph, initial); } /* ************************************************************************* */ -GraphAndValues load2D_robust(const string& filename, - noiseModel::Base::shared_ptr& model, int maxID) { - return load2D(filename, model, maxID); +GraphAndValues load2D(pair dataset, size_t maxIndex, + bool addNoise, bool smart, NoiseFormat noiseFormat, + KernelFunctionType kernelFunctionType) { + return load2D(dataset.first, dataset.second, maxIndex, addNoise, smart, + noiseFormat, kernelFunctionType); } /* ************************************************************************* */ -void save2D(const NonlinearFactorGraph& graph, const Values& config, - const noiseModel::Diagonal::shared_ptr model, const string& filename) { +GraphAndValues load2D_robust(const string &filename, + const noiseModel::Base::shared_ptr &model, + size_t maxIndex) { + return load2D(filename, model, maxIndex); +} + +/* ************************************************************************* */ +void save2D(const NonlinearFactorGraph &graph, const Values &config, + const noiseModel::Diagonal::shared_ptr model, + const string &filename) { fstream stream(filename.c_str(), fstream::out); // save poses - - for(const Values::ConstKeyValuePair& key_value: 
config) { - const Pose2& pose = key_value.value.cast(); + for (const auto key_value : config) { + const Pose2 &pose = key_value.value.cast(); stream << "VERTEX2 " << key_value.key << " " << pose.x() << " " << pose.y() - << " " << pose.theta() << endl; + << " " << pose.theta() << endl; } // save edges - Matrix R = model->R(); - Matrix RR = trans(R) * R; //prod(trans(R),R); - for(boost::shared_ptr factor_: graph) { - boost::shared_ptr > factor = - boost::dynamic_pointer_cast >(factor_); + // TODO(frank): why don't we use model in factor? + Matrix3 R = model->R(); + Matrix3 RR = R.transpose() * R; + for (auto f : graph) { + auto factor = boost::dynamic_pointer_cast>(f); if (!factor) continue; - Pose2 pose = factor->measured().inverse(); + const Pose2 pose = factor->measured().inverse(); stream << "EDGE2 " << factor->key2() << " " << factor->key1() << " " - << pose.x() << " " << pose.y() << " " << pose.theta() << " " << RR(0, 0) - << " " << RR(0, 1) << " " << RR(1, 1) << " " << RR(2, 2) << " " - << RR(0, 2) << " " << RR(1, 2) << endl; + << pose.x() << " " << pose.y() << " " << pose.theta() << " " + << RR(0, 0) << " " << RR(0, 1) << " " << RR(1, 1) << " " << RR(2, 2) + << " " << RR(0, 2) << " " << RR(1, 2) << endl; } stream.close(); } /* ************************************************************************* */ -GraphAndValues readG2o(const string& g2oFile, const bool is3D, +GraphAndValues readG2o(const string &g2oFile, const bool is3D, KernelFunctionType kernelFunctionType) { if (is3D) { return load3D(g2oFile); } else { // just call load2D - int maxID = 0; + size_t maxIndex = 0; bool addNoise = false; bool smart = true; - return load2D(g2oFile, SharedNoiseModel(), maxID, addNoise, smart, + return load2D(g2oFile, SharedNoiseModel(), maxIndex, addNoise, smart, NoiseFormatG2O, kernelFunctionType); } } /* ************************************************************************* */ -void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, - const string& filename) { +void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, + const string &filename) { fstream stream(filename.c_str(), fstream::out); - // save 2D & 3D poses - for (const auto& key_value : estimate) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - const Pose2& pose = p->value(); - stream << "VERTEX_SE2 " << key_value.key << " " << pose.x() << " " - << pose.y() << " " << pose.theta() << endl; + // Use a lambda here to more easily modify behavior in future. 
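// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the patch. A round trip
// through the loaders above; "input.g2o" and "output.g2o" are placeholder
// file names, and boost::tie is used here exactly as in the unit tests.
#include <boost/tuple/tuple.hpp>
#include <gtsam/slam/dataset.h>

void sketchG2oRoundTrip() {
  using namespace gtsam;
  NonlinearFactorGraph::shared_ptr graph;
  Values::shared_ptr initial;
  boost::tie(graph, initial) = readG2o("input.g2o", false /*is3D*/);
  // ... optimize here if desired, then write the estimate back out.
  // Note that writeG2o now writes Symbol indices, so landmarks stored under
  // L(index) in `initial` come out as plain indices in the file.
  writeG2o(*graph, *initial, "output.g2o");
}
// ---------------------------------------------------------------------------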
+ auto index = [](gtsam::Key key) { return Symbol(key).index(); }; + + // save 2D poses + for (const auto key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Pose2 &pose = p->value(); + stream << "VERTEX_SE2 " << index(key_value.key) << " " << pose.x() << " " + << pose.y() << " " << pose.theta() << endl; } - for(const auto& key_value: estimate) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - const Pose3& pose = p->value(); - Point3 t = pose.translation(); - Rot3 R = pose.rotation(); - stream << "VERTEX_SE3:QUAT " << key_value.key << " " << t.x() << " " << t.y() << " " << t.z() - << " " << R.toQuaternion().x() << " " << R.toQuaternion().y() << " " << R.toQuaternion().z() - << " " << R.toQuaternion().w() << endl; + // save 3D poses + for (const auto key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Pose3 &pose = p->value(); + const Point3 t = pose.translation(); + const auto q = pose.rotation().toQuaternion(); + stream << "VERTEX_SE3:QUAT " << index(key_value.key) << " " << t.x() << " " + << t.y() << " " << t.z() << " " << q.x() << " " << q.y() << " " + << q.z() << " " << q.w() << endl; + } + + // save 2D landmarks + for (const auto key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Point2 &point = p->value(); + stream << "VERTEX_XY " << index(key_value.key) << " " << point.x() << " " + << point.y() << endl; + } + + // save 3D landmarks + for (const auto key_value : estimate) { + auto p = dynamic_cast *>(&key_value.value); + if (!p) + continue; + const Point3 &point = p->value(); + stream << "VERTEX_TRACKXYZ " << index(key_value.key) << " " << point.x() + << " " << point.y() << " " << point.z() << endl; } // save edges (2D or 3D) - for(const auto& factor_: graph) { - boost::shared_ptr > factor = - boost::dynamic_pointer_cast >(factor_); - if (factor){ + for (const auto &factor_ : graph) { + auto factor = boost::dynamic_pointer_cast>(factor_); + if (factor) { SharedNoiseModel model = factor->noiseModel(); - boost::shared_ptr gaussianModel = + auto gaussianModel = boost::dynamic_pointer_cast(model); - if (!gaussianModel){ + if (!gaussianModel) { model->print("model\n"); throw invalid_argument("writeG2o: invalid noise model!"); } - Matrix Info = gaussianModel->R().transpose() * gaussianModel->R(); + Matrix3 Info = gaussianModel->R().transpose() * gaussianModel->R(); Pose2 pose = factor->measured(); //.inverse(); - stream << "EDGE_SE2 " << factor->key1() << " " << factor->key2() << " " - << pose.x() << " " << pose.y() << " " << pose.theta(); - for (int i = 0; i < 3; i++){ - for (int j = i; j < 3; j++){ + stream << "EDGE_SE2 " << index(factor->key1()) << " " + << index(factor->key2()) << " " << pose.x() << " " << pose.y() + << " " << pose.theta(); + for (size_t i = 0; i < 3; i++) { + for (size_t j = i; j < 3; j++) { stream << " " << Info(i, j); } } stream << endl; } - boost::shared_ptr< BetweenFactor > factor3D = - boost::dynamic_pointer_cast< BetweenFactor >(factor_); + auto factor3D = boost::dynamic_pointer_cast>(factor_); - if (factor3D){ + if (factor3D) { SharedNoiseModel model = factor3D->noiseModel(); boost::shared_ptr gaussianModel = boost::dynamic_pointer_cast(model); - if (!gaussianModel){ + if (!gaussianModel) { model->print("model\n"); throw invalid_argument("writeG2o: invalid noise model!"); } - Matrix Info = gaussianModel->R().transpose() * gaussianModel->R(); - Pose3 pose3D = factor3D->measured(); - Point3 p = 
pose3D.translation(); - Rot3 R = pose3D.rotation(); - - stream << "EDGE_SE3:QUAT " << factor3D->key1() << " " << factor3D->key2() << " " - << p.x() << " " << p.y() << " " << p.z() << " " << R.toQuaternion().x() - << " " << R.toQuaternion().y() << " " << R.toQuaternion().z() << " " << R.toQuaternion().w(); - - Matrix InfoG2o = I_6x6; - InfoG2o.block(0,0,3,3) = Info.block(3,3,3,3); // cov translation - InfoG2o.block(3,3,3,3) = Info.block(0,0,3,3); // cov rotation - InfoG2o.block(0,3,3,3) = Info.block(0,3,3,3); // off diagonal - InfoG2o.block(3,0,3,3) = Info.block(3,0,3,3); // off diagonal - - for (int i = 0; i < 6; i++){ - for (int j = i; j < 6; j++){ + Matrix6 Info = gaussianModel->R().transpose() * gaussianModel->R(); + const Pose3 pose3D = factor3D->measured(); + const Point3 p = pose3D.translation(); + const auto q = pose3D.rotation().toQuaternion(); + stream << "EDGE_SE3:QUAT " << index(factor3D->key1()) << " " + << index(factor3D->key2()) << " " << p.x() << " " << p.y() << " " + << p.z() << " " << q.x() << " " << q.y() << " " << q.z() << " " + << q.w(); + + Matrix6 InfoG2o = I_6x6; + InfoG2o.block<3, 3>(0, 0) = Info.block<3, 3>(3, 3); // cov translation + InfoG2o.block<3, 3>(3, 3) = Info.block<3, 3>(0, 0); // cov rotation + InfoG2o.block<3, 3>(0, 3) = Info.block<3, 3>(0, 3); // off diagonal + InfoG2o.block<3, 3>(3, 0) = Info.block<3, 3>(3, 0); // off diagonal + + for (size_t i = 0; i < 6; i++) { + for (size_t j = i; j < 6; j++) { stream << " " << InfoG2o(i, j); } } @@ -511,106 +739,215 @@ void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, } /* ************************************************************************* */ -std::map parse3DPoses(const string& filename) { - ifstream is(filename.c_str()); - if (!is) - throw invalid_argument("parse3DPoses: can not find file " + filename); - - std::map poses; - while (!is.eof()) { - char buf[LINESIZE]; - is.getline(buf, LINESIZE); - istringstream ls(buf); - string tag; - ls >> tag; - - if (tag == "VERTEX3") { - Key id; - double x, y, z, roll, pitch, yaw; - ls >> id >> x >> y >> z >> roll >> pitch >> yaw; - poses.emplace(id, Pose3(Rot3::Ypr(yaw, pitch, roll), {x, y, z})); - } - if (tag == "VERTEX_SE3:QUAT") { - Key id; - double x, y, z, qx, qy, qz, qw; - ls >> id >> x >> y >> z >> qx >> qy >> qz >> qw; - poses.emplace(id, Pose3(Rot3::Quaternion(qw, qx, qy, qz), {x, y, z})); - } - } - return poses; +// parse quaternion in x,y,z,w order, and normalize to unit length +istream &operator>>(istream &is, Quaternion &q) { + double x, y, z, w; + is >> x >> y >> z >> w; + const double norm = sqrt(w * w + x * x + y * y + z * z), f = 1.0 / norm; + q = Quaternion(f * w, f * x, f * y, f * z); + return is; } /* ************************************************************************* */ -BetweenFactorPose3s parse3DFactors(const string& filename) { - ifstream is(filename.c_str()); - if (!is) throw invalid_argument("parse3DFactors: can not find file " + filename); +// parse Rot3 from roll, pitch, yaw +istream &operator>>(istream &is, Rot3 &R) { + double yaw, pitch, roll; + is >> roll >> pitch >> yaw; // notice order ! 
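  // Editor's note: TORO VERTEX3/EDGE3 lines store the rotation as
  // "roll pitch yaw", while Rot3::Ypr takes its arguments as (yaw, pitch,
  // roll), so the variables read above are deliberately reversed below.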
+ R = Rot3::Ypr(yaw, pitch, roll); + return is; +} - std::vector::shared_ptr> factors; - while (!is.eof()) { - char buf[LINESIZE]; - is.getline(buf, LINESIZE); - istringstream ls(buf); - string tag; - ls >> tag; +/* ************************************************************************* */ +boost::optional> parseVertexPose3(istream &is, + const string &tag) { + if (tag == "VERTEX3") { + size_t id; + double x, y, z; + Rot3 R; + is >> id >> x >> y >> z >> R; + return make_pair(id, Pose3(R, {x, y, z})); + } else if (tag == "VERTEX_SE3:QUAT") { + size_t id; + double x, y, z; + Quaternion q; + is >> id >> x >> y >> z >> q; + return make_pair(id, Pose3(q, {x, y, z})); + } else + return boost::none; +} - if (tag == "EDGE3") { - Key id1, id2; - double x, y, z, roll, pitch, yaw; - ls >> id1 >> id2 >> x >> y >> z >> roll >> pitch >> yaw; - Matrix m(6, 6); - for (size_t i = 0; i < 6; i++) - for (size_t j = i; j < 6; j++) ls >> m(i, j); - SharedNoiseModel model = noiseModel::Gaussian::Information(m); - factors.emplace_back(new BetweenFactor( - id1, id2, Pose3(Rot3::Ypr(yaw, pitch, roll), {x, y, z}), model)); - } - if (tag == "EDGE_SE3:QUAT") { - Key id1, id2; - double x, y, z, qx, qy, qz, qw; - ls >> id1 >> id2 >> x >> y >> z >> qx >> qy >> qz >> qw; - Matrix m(6, 6); - for (size_t i = 0; i < 6; i++) { - for (size_t j = i; j < 6; j++) { - double mij; - ls >> mij; - m(i, j) = mij; - m(j, i) = mij; - } - } - Matrix mgtsam(6, 6); +template <> +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return parseToMap(filename, parseVertexPose3, maxIndex); +} - mgtsam.block<3, 3>(0, 0) = m.block<3, 3>(3, 3); // cov rotation - mgtsam.block<3, 3>(3, 3) = m.block<3, 3>(0, 0); // cov translation - mgtsam.block<3, 3>(0, 3) = m.block<3, 3>(0, 3); // off diagonal - mgtsam.block<3, 3>(3, 0) = m.block<3, 3>(3, 0); // off diagonal +/* ************************************************************************* */ +boost::optional> parseVertexPoint3(istream &is, + const string &tag) { + if (tag == "VERTEX_TRACKXYZ") { + size_t id; + double x, y, z; + is >> id >> x >> y >> z; + return make_pair(id, Point3(x, y, z)); + } else + return boost::none; +} - SharedNoiseModel model = noiseModel::Gaussian::Information(mgtsam); - factors.emplace_back(new BetweenFactor( - id1, id2, Pose3(Rot3::Quaternion(qw, qx, qy, qz), {x, y, z}), model)); +template <> +std::map parseVariables(const std::string &filename, + size_t maxIndex) { + return parseToMap(filename, parseVertexPoint3, maxIndex); +} + +/* ************************************************************************* */ +// Parse a symmetric covariance matrix (onlyupper-triangular part is stored) +istream &operator>>(istream &is, Matrix6 &m) { + for (size_t i = 0; i < 6; i++) + for (size_t j = i; j < 6; j++) { + is >> m(i, j); + m(j, i) = m(i, j); } - } - return factors; + return is; } /* ************************************************************************* */ -GraphAndValues load3D(const string& filename) { - const auto factors = parse3DFactors(filename); - NonlinearFactorGraph::shared_ptr graph(new NonlinearFactorGraph); - for (const auto& factor : factors) { - graph->push_back(factor); - } +// Pose3 measurement parser +template <> struct ParseMeasurement { + // The arguments + boost::shared_ptr sampler; + size_t maxIndex; + + // The actual parser + boost::optional> operator()(istream &is, + const string &tag) { + if (tag != "EDGE3" && tag != "EDGE_SE3:QUAT") + return boost::none; + + // parse ids and optionally filter + size_t id1, id2; + is >> id1 >> 
id2; + if (maxIndex && (id1 > maxIndex || id2 > maxIndex)) + return boost::none; + + Matrix6 m; + if (tag == "EDGE3") { + double x, y, z; + Rot3 R; + is >> x >> y >> z >> R >> m; + Pose3 T12(R, {x, y, z}); + // optionally add noise + if (sampler) + T12 = T12.retract(sampler->sample()); + + return BinaryMeasurement(id1, id2, T12, + noiseModel::Gaussian::Information(m)); + } else if (tag == "EDGE_SE3:QUAT") { + double x, y, z; + Quaternion q; + is >> x >> y >> z >> q >> m; + + Pose3 T12(q, {x, y, z}); + // optionally add noise + if (sampler) + T12 = T12.retract(sampler->sample()); + + // EDGE_SE3:QUAT stores covariance in t,R order, unlike GTSAM: + Matrix6 mgtsam; + mgtsam.block<3, 3>(0, 0) = m.block<3, 3>(3, 3); // cov rotation + mgtsam.block<3, 3>(3, 3) = m.block<3, 3>(0, 0); // cov translation + mgtsam.block<3, 3>(0, 3) = m.block<3, 3>(0, 3); // off diagonal + mgtsam.block<3, 3>(3, 0) = m.block<3, 3>(3, 0); // off diagonal + SharedNoiseModel model = noiseModel::Gaussian::Information(mgtsam); - const auto poses = parse3DPoses(filename); - Values::shared_ptr initial(new Values); - for (const auto& key_pose : poses) { - initial->insert(key_pose.first, key_pose.second); + return BinaryMeasurement( + id1, id2, T12, noiseModel::Gaussian::Information(mgtsam)); + } else + return boost::none; } +}; + +/* ************************************************************************* */ +// Implementation of parseMeasurements for Pose3 +template <> +std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseMeasurement parse{model ? createSampler(model) : nullptr, + maxIndex}; + return parseToVector>(filename, parse); +} + +/* ************************************************************************* */ +// Implementation of parseMeasurements for Rot3, which converts from Pose3 + +// Extract Rot3 measurement from Pose3 measurement +static BinaryMeasurement convert(const BinaryMeasurement &p) { + auto gaussian = + boost::dynamic_pointer_cast(p.noiseModel()); + if (!gaussian) + throw std::invalid_argument( + "parseMeasurements can only convert Pose3 measurements " + "with Gaussian noise models."); + const Matrix6 M = gaussian->covariance(); + return BinaryMeasurement( + p.key1(), p.key2(), p.measured().rotation(), + noiseModel::Gaussian::Covariance(M.block<3, 3>(0, 0), true)); +} + +template <> +std::vector> +parseMeasurements(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + auto poses = parseMeasurements(filename, model, maxIndex); + std::vector> result; + result.reserve(poses.size()); + for (const auto &p : poses) + result.push_back(convert(p)); + return result; +} + +/* ************************************************************************* */ +// Implementation of parseFactors for Pose3 +template <> +std::vector::shared_ptr> +parseFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, + size_t maxIndex) { + ParseFactor parse({model ? createSampler(model) : nullptr, maxIndex}); + return parseToVector::shared_ptr>(filename, parse); +} + +/* ************************************************************************* */ +GraphAndValues load3D(const string &filename) { + auto graph = boost::make_shared(); + auto initial = boost::make_shared(); + + // Instantiate factor parser. maxIndex is always zero for load3D. + ParseFactor parseFactor({nullptr, 0}); + + // Single pass for variables and factors. 
Unlike 2D version, does *not* insert + // variables into `initial` if referenced but not present. + Parser parse = [&](istream &is, const string &tag) { + if (auto indexedPose = parseVertexPose3(is, tag)) { + initial->insert(indexedPose->first, indexedPose->second); + } else if (auto indexedLandmark = parseVertexPoint3(is, tag)) { + initial->insert(L(indexedLandmark->first), indexedLandmark->second); + } else if (auto factor = parseFactor(is, tag)) { + graph->push_back(*factor); + } + return 0; + }; + parseLines(filename, parse); return make_pair(graph, initial); } /* ************************************************************************* */ -Rot3 openGLFixedRotation() { // this is due to different convention for cameras in gtsam and openGL +Rot3 openGLFixedRotation() { // this is due to different convention for + // cameras in gtsam and openGL /* R = [ 1 0 0 * 0 -1 0 * 0 0 -1] @@ -623,7 +960,7 @@ Rot3 openGLFixedRotation() { // this is due to different convention for cameras } /* ************************************************************************* */ -Pose3 openGL2gtsam(const Rot3& R, double tx, double ty, double tz) { +Pose3 openGL2gtsam(const Rot3 &R, double tx, double ty, double tz) { Rot3 R90 = openGLFixedRotation(); Rot3 wRc = (R.inverse()).compose(R90); @@ -632,7 +969,7 @@ Pose3 openGL2gtsam(const Rot3& R, double tx, double ty, double tz) { } /* ************************************************************************* */ -Pose3 gtsam2openGL(const Rot3& R, double tx, double ty, double tz) { +Pose3 gtsam2openGL(const Rot3 &R, double tx, double ty, double tz) { Rot3 R90 = openGLFixedRotation(); Rot3 cRw_openGL = R90.compose(R.inverse()); Point3 t_openGL = cRw_openGL.rotate(Point3(-tx, -ty, -tz)); @@ -640,13 +977,13 @@ Pose3 gtsam2openGL(const Rot3& R, double tx, double ty, double tz) { } /* ************************************************************************* */ -Pose3 gtsam2openGL(const Pose3& PoseGTSAM) { +Pose3 gtsam2openGL(const Pose3 &PoseGTSAM) { return gtsam2openGL(PoseGTSAM.rotation(), PoseGTSAM.x(), PoseGTSAM.y(), - PoseGTSAM.z()); + PoseGTSAM.z()); } /* ************************************************************************* */ -bool readBundler(const string& filename, SfmData &data) { +bool readBundler(const string &filename, SfmData &data) { // Load the data file ifstream is(filename.c_str(), ifstream::in); if (!is) { @@ -681,7 +1018,7 @@ bool readBundler(const string& filename, SfmData &data) { // Check for all-zero R, in which case quit if (r11 == 0 && r12 == 0 && r13 == 0) { cout << "Error in readBundler: zero rotation matrix for pose " << i - << endl; + << endl; return false; } @@ -733,7 +1070,7 @@ bool readBundler(const string& filename, SfmData &data) { } /* ************************************************************************* */ -bool readBAL(const string& filename, SfmData &data) { +bool readBAL(const string &filename, SfmData &data) { // Load the data file ifstream is(filename.c_str(), ifstream::in); if (!is) { @@ -781,7 +1118,7 @@ bool readBAL(const string& filename, SfmData &data) { // Get the 3D position float x, y, z; is >> x >> y >> z; - SfmTrack& track = data.tracks[j]; + SfmTrack &track = data.tracks[j]; track.p = Point3(x, y, z); track.r = 0.4f; track.g = 0.4f; @@ -793,7 +1130,14 @@ bool readBAL(const string& filename, SfmData &data) { } /* ************************************************************************* */ -bool writeBAL(const string& filename, SfmData &data) { +SfmData readBal(const string &filename) { + SfmData data; + 
readBAL(filename, data); + return data; +} + +/* ************************************************************************* */ +bool writeBAL(const string &filename, SfmData &data) { // Open the output file ofstream os; os.open(filename.c_str()); @@ -811,29 +1155,32 @@ bool writeBAL(const string& filename, SfmData &data) { // Write observations os << data.number_cameras() << " " << data.number_tracks() << " " - << nrObservations << endl; + << nrObservations << endl; os << endl; for (size_t j = 0; j < data.number_tracks(); j++) { // for each 3D point j - const SfmTrack& track = data.tracks[j]; + const SfmTrack &track = data.tracks[j]; - for (size_t k = 0; k < track.number_measurements(); k++) { // for each observation of the 3D point j + for (size_t k = 0; k < track.number_measurements(); + k++) { // for each observation of the 3D point j size_t i = track.measurements[k].first; // camera id - double u0 = data.cameras[i].calibration().u0(); - double v0 = data.cameras[i].calibration().v0(); + double u0 = data.cameras[i].calibration().px(); + double v0 = data.cameras[i].calibration().py(); if (u0 != 0 || v0 != 0) { - cout - << "writeBAL has not been tested for calibration with nonzero (u0,v0)" - << endl; + cout << "writeBAL has not been tested for calibration with nonzero " + "(u0,v0)" + << endl; } - double pixelBALx = track.measurements[k].second.x() - u0; // center of image is the origin - double pixelBALy = -(track.measurements[k].second.y() - v0); // center of image is the origin + double pixelBALx = track.measurements[k].second.x() - + u0; // center of image is the origin + double pixelBALy = -(track.measurements[k].second.y() - + v0); // center of image is the origin Point2 pixelMeasurement(pixelBALx, pixelBALy); - os << i /*camera id*/<< " " << j /*point id*/<< " " - << pixelMeasurement.x() /*u of the pixel*/<< " " - << pixelMeasurement.y() /*v of the pixel*/<< endl; + os << i /*camera id*/ << " " << j /*point id*/ << " " + << pixelMeasurement.x() /*u of the pixel*/ << " " + << pixelMeasurement.y() /*v of the pixel*/ << endl; } } os << endl; @@ -866,46 +1213,47 @@ bool writeBAL(const string& filename, SfmData &data) { return true; } -bool writeBALfromValues(const string& filename, const SfmData &data, - Values& values) { +bool writeBALfromValues(const string &filename, const SfmData &data, + Values &values) { using Camera = PinholeCamera; SfmData dataValues = data; // Store poses or cameras in SfmData size_t nrPoses = values.count(); - if (nrPoses == dataValues.number_cameras()) { // we only estimated camera poses - for (size_t i = 0; i < dataValues.number_cameras(); i++) { // for each camera - Key poseKey = symbol('x', i); - Pose3 pose = values.at(poseKey); + if (nrPoses == + dataValues.number_cameras()) { // we only estimated camera poses + for (size_t i = 0; i < dataValues.number_cameras(); + i++) { // for each camera + Pose3 pose = values.at(X(i)); Cal3Bundler K = dataValues.cameras[i].calibration(); Camera camera(pose, K); dataValues.cameras[i] = camera; } } else { size_t nrCameras = values.count(); - if (nrCameras == dataValues.number_cameras()) { // we only estimated camera poses and calibration - for (size_t i = 0; i < nrCameras; i++) { // for each camera - Key cameraKey = i; // symbol('c',i); + if (nrCameras == dataValues.number_cameras()) { // we only estimated camera + // poses and calibration + for (size_t i = 0; i < nrCameras; i++) { // for each camera + Key cameraKey = i; // symbol('c',i); Camera camera = values.at(cameraKey); dataValues.cameras[i] = camera; } } else { - 
cout - << "writeBALfromValues: different number of cameras in SfM_dataValues (#cameras " - << dataValues.number_cameras() << ") and values (#cameras " - << nrPoses << ", #poses " << nrCameras << ")!!" - << endl; + cout << "writeBALfromValues: different number of cameras in " + "SfM_dataValues (#cameras " + << dataValues.number_cameras() << ") and values (#cameras " + << nrPoses << ", #poses " << nrCameras << ")!!" << endl; return false; } } // Store 3D points in SfmData - size_t nrPoints = values.count(), nrTracks = dataValues.number_tracks(); + size_t nrPoints = values.count(), + nrTracks = dataValues.number_tracks(); if (nrPoints != nrTracks) { - cout - << "writeBALfromValues: different number of points in SfM_dataValues (#points= " - << nrTracks << ") and values (#points " - << nrPoints << ")!!" << endl; + cout << "writeBALfromValues: different number of points in " + "SfM_dataValues (#points= " + << nrTracks << ") and values (#points " << nrPoints << ")!!" << endl; } for (size_t j = 0; j < nrTracks; j++) { // for each point @@ -917,7 +1265,7 @@ bool writeBALfromValues(const string& filename, const SfmData &data, dataValues.tracks[j].r = 1.0; dataValues.tracks[j].g = 0.0; dataValues.tracks[j].b = 0.0; - dataValues.tracks[j].p = Point3(0,0,0); + dataValues.tracks[j].p = Point3(0, 0, 0); } } @@ -925,22 +1273,46 @@ bool writeBALfromValues(const string& filename, const SfmData &data, return writeBAL(filename, dataValues); } -Values initialCamerasEstimate(const SfmData& db) { +Values initialCamerasEstimate(const SfmData &db) { Values initial; size_t i = 0; // NO POINTS: j = 0; - for(const SfmCamera& camera: db.cameras) + for (const SfmCamera &camera : db.cameras) initial.insert(i++, camera); return initial; } -Values initialCamerasAndPointsEstimate(const SfmData& db) { +Values initialCamerasAndPointsEstimate(const SfmData &db) { Values initial; size_t i = 0, j = 0; - for(const SfmCamera& camera: db.cameras) + for (const SfmCamera &camera : db.cameras) initial.insert((i++), camera); - for(const SfmTrack& track: db.tracks) + for (const SfmTrack &track : db.tracks) initial.insert(P(j++), track.p); return initial; } -} // \namespace gtsam +// Wrapper-friendly versions of parseFactors and parseFactors +BetweenFactorPose2s +parse2DFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, size_t maxIndex) { + return parseFactors(filename, model, maxIndex); +} + +BetweenFactorPose3s +parse3DFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model, size_t maxIndex) { + return parseFactors(filename, model, maxIndex); +} + +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 +std::map parse3DPoses(const std::string &filename, + size_t maxIndex) { + return parseVariables(filename, maxIndex); +} + +std::map parse3DLandmarks(const std::string &filename, + size_t maxIndex) { + return parseVariables(filename, maxIndex); +} +#endif +} // namespace gtsam diff --git a/gtsam/slam/dataset.h b/gtsam/slam/dataset.h index 3ab199bab..ec5d6dce9 100644 --- a/gtsam/slam/dataset.h +++ b/gtsam/slam/dataset.h @@ -20,16 +20,17 @@ #pragma once +#include #include #include #include -#include -#include +#include #include -#include #include #include #include +#include +#include #include #include @@ -74,16 +75,57 @@ enum KernelFunctionType { KernelFunctionTypeNONE, KernelFunctionTypeHUBER, KernelFunctionTypeTUKEY }; +/** + * Parse variables in a line-based text format (like g2o) into a map. + * Instantiated in .cpp Pose2, Point2, Pose3, and Point3. 
+ * Note the map keys are integer indices, *not* gtsam::Keys. This is
+ * different from below, where landmarks will use L(index) symbols.
+ */
+template <typename T>
+GTSAM_EXPORT std::map<size_t, T> parseVariables(const std::string &filename,
+                                                size_t maxIndex = 0);
+
+/**
+ * Parse binary measurements in a line-based text format (like g2o) into a
+ * vector. Instantiated in .cpp for Pose2, Rot2, Pose3, and Rot3. The rotation
+ * versions parse poses and extract only the rotation part, using the marginal
+ * covariance as noise model.
+ */
+template <typename T>
+GTSAM_EXPORT std::vector<BinaryMeasurement<T>>
+parseMeasurements(const std::string &filename,
+                  const noiseModel::Diagonal::shared_ptr &model = nullptr,
+                  size_t maxIndex = 0);
+
+/**
+ * Parse BetweenFactors in a line-based text format (like g2o) into a vector of
+ * shared pointers. Instantiated in .cpp for T equal to Pose2 and Pose3.
+ */
+template <typename T>
+GTSAM_EXPORT std::vector<typename BetweenFactor<T>::shared_ptr>
+parseFactors(const std::string &filename,
+             const noiseModel::Diagonal::shared_ptr &model = nullptr,
+             size_t maxIndex = 0);
+
 /// Return type for auxiliary functions
-typedef std::pair<Key, Pose2> IndexedPose;
-typedef std::pair<std::pair<Key, Key>, Pose2> IndexedEdge;
+typedef std::pair<size_t, Pose2> IndexedPose;
+typedef std::pair<size_t, Point2> IndexedLandmark;
+typedef std::pair<std::pair<size_t, size_t>, Pose2> IndexedEdge;
 
 /**
  * Parse TORO/G2O vertex "id x y yaw"
  * @param is input stream
  * @param tag string parsed from input stream, will only parse if vertex type
  */
-GTSAM_EXPORT boost::optional<IndexedPose> parseVertex(std::istream& is,
+GTSAM_EXPORT boost::optional<IndexedPose> parseVertexPose(std::istream& is,
+    const std::string& tag);
+
+/**
+ * Parse G2O landmark vertex "id x y"
+ * @param is input stream
+ * @param tag string parsed from input stream, will only parse if vertex type
+ */
+GTSAM_EXPORT boost::optional<IndexedLandmark> parseVertexLandmark(std::istream& is,
     const std::string& tag);
 
 /**
@@ -94,18 +136,21 @@ GTSAM_EXPORT boost::optional<IndexedPose> parseVertex(std::istream& is,
 GTSAM_EXPORT boost::optional<IndexedEdge> parseEdge(std::istream& is,
     const std::string& tag);
 
-/// Return type for load functions
-typedef std::pair<NonlinearFactorGraph::shared_ptr, Values::shared_ptr> GraphAndValues;
+/// Return type for load functions, which return a graph and initial values. For
+/// landmarks, the gtsam::Symbol L(index) is used to insert into the Values.
+/// Bearing-range measurements also refer to landmarks with L(index).
+using GraphAndValues = + std::pair; /** * Load TORO 2D Graph * @param dataset/model pair as constructed by [dataset] - * @param maxID if non-zero cut out vertices >= maxID + * @param maxIndex if non-zero cut out vertices >= maxIndex * @param addNoise add noise to the edges * @param smart try to reduce complexity of covariance to cheapest model */ GTSAM_EXPORT GraphAndValues load2D( - std::pair dataset, int maxID = 0, + std::pair dataset, size_t maxIndex = 0, bool addNoise = false, bool smart = true, // NoiseFormat noiseFormat = NoiseFormatAUTO, @@ -115,7 +160,7 @@ GTSAM_EXPORT GraphAndValues load2D( * Load TORO/G2O style graph files * @param filename * @param model optional noise model to use instead of one specified by file - * @param maxID if non-zero cut out vertices >= maxID + * @param maxIndex if non-zero cut out vertices >= maxIndex * @param addNoise add noise to the edges * @param smart try to reduce complexity of covariance to cheapest model * @param noiseFormat how noise parameters are stored @@ -123,13 +168,13 @@ GTSAM_EXPORT GraphAndValues load2D( * @return graph and initial values */ GTSAM_EXPORT GraphAndValues load2D(const std::string& filename, - SharedNoiseModel model = SharedNoiseModel(), Key maxID = 0, bool addNoise = + SharedNoiseModel model = SharedNoiseModel(), size_t maxIndex = 0, bool addNoise = false, bool smart = true, NoiseFormat noiseFormat = NoiseFormatAUTO, // KernelFunctionType kernelFunctionType = KernelFunctionTypeNONE); /// @deprecated load2D now allows for arbitrary models and wrapping a robust kernel GTSAM_EXPORT GraphAndValues load2D_robust(const std::string& filename, - noiseModel::Base::shared_ptr& model, int maxID = 0); + const noiseModel::Base::shared_ptr& model, size_t maxIndex = 0); /** save 2d graph */ GTSAM_EXPORT void save2D(const NonlinearFactorGraph& graph, @@ -153,33 +198,37 @@ GTSAM_EXPORT GraphAndValues readG2o(const std::string& g2oFile, const bool is3D * @param filename The name of the g2o file to write * @param graph NonlinearFactor graph storing the measurements * @param estimate Values + * + * Note:behavior change in PR #471: to be consistent with load2D and load3D, we + * write the *indices* to file and not the full Keys. This change really only + * affects landmarks, which get read as indices but stored in values with the + * symbol L(index). */ GTSAM_EXPORT void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate, const std::string& filename); -/// Parse edges in 3D TORO graph file into a set of BetweenFactors. -using BetweenFactorPose3s = std::vector::shared_ptr>; -GTSAM_EXPORT BetweenFactorPose3s parse3DFactors(const std::string& filename); - -/// Parse vertices in 3D TORO graph file into a map of Pose3s. 
-GTSAM_EXPORT std::map parse3DPoses(const std::string& filename); - /// Load TORO 3D Graph GTSAM_EXPORT GraphAndValues load3D(const std::string& filename); /// A measurement with its camera index typedef std::pair SfmMeasurement; -/// SfmTrack +/// Sift index for SfmTrack typedef std::pair SiftIndex; /// Define the structure for the 3D points struct SfmTrack { - SfmTrack(): p(0,0,0) {} + SfmTrack(float r = 0, float g = 0, float b = 0): p(0,0,0), r(r), g(g), b(b) {} + SfmTrack(const gtsam::Point3& pt, float r = 0, float g = 0, float b = 0) : p(pt), r(r), g(g), b(b) {} + Point3 p; ///< 3D position of the point float r, g, b; ///< RGB color of the 3D point std::vector measurements; ///< The 2D image projections (id,(u,v)) std::vector siftIndices; + + /// Get RGB values describing 3d point + const Point3 rgb() const { return Point3(r, g, b); } + /// Total number of measurements in this track size_t number_measurements() const { return measurements.size(); @@ -192,8 +241,84 @@ struct SfmTrack { SiftIndex siftIndex(size_t idx) const { return siftIndices[idx]; } + /// Get 3D point + const Point3& point3() const { + return p; + } + /// Add measurement (camera_idx, Point2) to track + void add_measurement(size_t idx, const gtsam::Point2& m) { + measurements.emplace_back(idx, m); + } + + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(ARCHIVE & ar, const unsigned int /*version*/) { + ar & BOOST_SERIALIZATION_NVP(p); + ar & BOOST_SERIALIZATION_NVP(r); + ar & BOOST_SERIALIZATION_NVP(g); + ar & BOOST_SERIALIZATION_NVP(b); + ar & BOOST_SERIALIZATION_NVP(measurements); + ar & BOOST_SERIALIZATION_NVP(siftIndices); + } + + /// assert equality up to a tolerance + bool equals(const SfmTrack &sfmTrack, double tol = 1e-9) const { + // check the 3D point + if (!p.isApprox(sfmTrack.p)) { + return false; + } + + // check the RGB values + if (r!=sfmTrack.r || g!=sfmTrack.g || b!=sfmTrack.b) { + return false; + } + + // compare size of vectors for measurements and siftIndices + if (number_measurements() != sfmTrack.number_measurements() || + siftIndices.size() != sfmTrack.siftIndices.size()) { + return false; + } + + // compare measurements (order sensitive) + for (size_t idx = 0; idx < number_measurements(); ++idx) { + SfmMeasurement measurement = measurements[idx]; + SfmMeasurement otherMeasurement = sfmTrack.measurements[idx]; + + if (measurement.first != otherMeasurement.first || + !measurement.second.isApprox(otherMeasurement.second)) { + return false; + } + } + + // compare sift indices (order sensitive) + for (size_t idx = 0; idx < siftIndices.size(); ++idx) { + SiftIndex index = siftIndices[idx]; + SiftIndex otherIndex = sfmTrack.siftIndices[idx]; + + if (index.first != otherIndex.first || + index.second != otherIndex.second) { + return false; + } + } + + return true; + } + + /// print + void print(const std::string& s = "") const { + std::cout << "Track with " << measurements.size(); + std::cout << " measurements of point " << p << std::endl; + } +}; + +/* ************************************************************************* */ +/// traits +template<> +struct traits : public Testable { }; + /// Define the structure for the camera poses typedef PinholeCamera SfmCamera; @@ -216,6 +341,63 @@ struct SfmData { SfmTrack track(size_t idx) const { return tracks[idx]; } + /// Add a track to SfmData + void add_track(const SfmTrack& t) { + tracks.push_back(t); + } + /// Add a camera to SfmData + void add_camera(const SfmCamera& cam) { + cameras.push_back(cam); 
+ } + + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(Archive & ar, const unsigned int /*version*/) { + ar & BOOST_SERIALIZATION_NVP(cameras); + ar & BOOST_SERIALIZATION_NVP(tracks); + } + + /// @} + /// @name Testable + /// @{ + + /// assert equality up to a tolerance + bool equals(const SfmData &sfmData, double tol = 1e-9) const { + // check number of cameras and tracks + if (number_cameras() != sfmData.number_cameras() || + number_tracks() != sfmData.number_tracks()) { + return false; + } + + // check each camera + for (size_t i = 0; i < number_cameras(); ++i) { + if (!camera(i).equals(sfmData.camera(i), tol)) { + return false; + } + } + + // check each track + for (size_t j = 0; j < number_tracks(); ++j) { + if (!track(j).equals(sfmData.track(j), tol)) { + return false; + } + } + + return true; + } + + /// print + void print(const std::string& s = "") const { + std::cout << "Number of cameras = " << number_cameras() << std::endl; + std::cout << "Number of tracks = " << number_tracks() << std::endl; + } +}; + +/* ************************************************************************* */ +/// traits +template<> +struct traits : public Testable { }; /** @@ -236,6 +418,14 @@ GTSAM_EXPORT bool readBundler(const std::string& filename, SfmData &data); */ GTSAM_EXPORT bool readBAL(const std::string& filename, SfmData &data); +/** + * @brief This function parses a "Bundle Adjustment in the Large" (BAL) file and returns the data + * as a SfmData structure. Mainly used by wrapped code. + * @param filename The name of the BAL file. + * @return SfM structure where the data is stored. + */ +GTSAM_EXPORT SfmData readBal(const std::string& filename); + /** * @brief This function writes a "Bundle Adjustment in the Large" (BAL) file from a * SfmData structure @@ -300,13 +490,31 @@ GTSAM_EXPORT Values initialCamerasEstimate(const SfmData& db); */ GTSAM_EXPORT Values initialCamerasAndPointsEstimate(const SfmData& db); -/// Aliases for backwards compatibility -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -typedef SfmMeasurement SfM_Measurement; -typedef SiftIndex SIFT_Index; -typedef SfmTrack SfM_Track; -typedef SfmCamera SfM_Camera; -typedef SfmData SfM_data; -#endif +// Wrapper-friendly versions of parseFactors and parseFactors +using BetweenFactorPose2s = std::vector::shared_ptr>; +GTSAM_EXPORT BetweenFactorPose2s +parse2DFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model = nullptr, + size_t maxIndex = 0); + +using BetweenFactorPose3s = std::vector::shared_ptr>; +GTSAM_EXPORT BetweenFactorPose3s +parse3DFactors(const std::string &filename, + const noiseModel::Diagonal::shared_ptr &model = nullptr, + size_t maxIndex = 0); + +using BinaryMeasurementsUnit3 = std::vector>; +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 +inline boost::optional parseVertex(std::istream &is, + const std::string &tag) { + return parseVertexPose(is, tag); +} -} // namespace gtsam +GTSAM_EXPORT std::map parse3DPoses(const std::string &filename, + size_t maxIndex = 0); + +GTSAM_EXPORT std::map +parse3DLandmarks(const std::string &filename, size_t maxIndex = 0); + +#endif +} // namespace gtsam diff --git a/gtsam/slam/expressions.h b/gtsam/slam/expressions.h index d60923d8e..680f2d175 100644 --- a/gtsam/slam/expressions.h +++ b/gtsam/slam/expressions.h @@ -75,30 +75,6 @@ inline Unit3_ unrotate(const Rot3_& x, const Unit3_& p) { return Unit3_(x, &Rot3::unrotate, p); } -#ifndef GTSAM_TYPEDEF_POINTS_TO_VECTORS -namespace internal { -// define a rotate and 
unrotate for Vector3 -inline Vector3 rotate(const Rot3& R, const Vector3& v, - OptionalJacobian<3, 3> H1 = boost::none, - OptionalJacobian<3, 3> H2 = boost::none) { - return R.rotate(v, H1, H2); -} -inline Vector3 unrotate(const Rot3& R, const Vector3& v, - OptionalJacobian<3, 3> H1 = boost::none, - OptionalJacobian<3, 3> H2 = boost::none) { - return R.unrotate(v, H1, H2); -} -} // namespace internal -inline Expression rotate(const Rot3_& R, - const Expression& v) { - return Expression(internal::rotate, R, v); -} -inline Expression unrotate(const Rot3_& R, - const Expression& v) { - return Expression(internal::unrotate, R, v); -} -#endif - // Projection typedef Expression Cal3_S2_; diff --git a/gtsam/slam/lago.cpp b/gtsam/slam/lago.cpp index 76edc8b9d..70caa424f 100644 --- a/gtsam/slam/lago.cpp +++ b/gtsam/slam/lago.cpp @@ -17,6 +17,8 @@ */ #include + +#include #include #include #include @@ -33,7 +35,6 @@ namespace lago { static const Matrix I = I_1x1; static const Matrix I3 = I_3x3; -static const Key keyAnchor = symbol('Z', 9999999); static const noiseModel::Diagonal::shared_ptr priorOrientationNoise = noiseModel::Diagonal::Sigmas((Vector(1) << 0).finished()); static const noiseModel::Diagonal::shared_ptr priorPose2Noise = @@ -79,7 +80,7 @@ key2doubleMap computeThetasToRoot(const key2doubleMap& deltaThetaMap, key2doubleMap thetaToRootMap; // Orientation of the roo - thetaToRootMap.insert(pair(keyAnchor, 0.0)); + thetaToRootMap.insert(pair(initialize::kAnchorKey, 0.0)); // for all nodes in the tree for(const key2doubleMap::value_type& it: deltaThetaMap) { @@ -189,41 +190,16 @@ GaussianFactorGraph buildLinearOrientationGraph( lagoGraph.add(key1, -I, key2, I, deltaThetaRegularized, model_deltaTheta); } // prior on the anchor orientation - lagoGraph.add(keyAnchor, I, (Vector(1) << 0.0).finished(), priorOrientationNoise); + lagoGraph.add(initialize::kAnchorKey, I, (Vector(1) << 0.0).finished(), priorOrientationNoise); return lagoGraph; } -/* ************************************************************************* */ -// Select the subgraph of betweenFactors and transforms priors into between wrt a fictitious node -static NonlinearFactorGraph buildPose2graph(const NonlinearFactorGraph& graph) { - gttic(lago_buildPose2graph); - NonlinearFactorGraph pose2Graph; - - for(const boost::shared_ptr& factor: graph) { - - // recast to a between on Pose2 - boost::shared_ptr > pose2Between = - boost::dynamic_pointer_cast >(factor); - if (pose2Between) - pose2Graph.add(pose2Between); - - // recast PriorFactor to BetweenFactor - boost::shared_ptr > pose2Prior = - boost::dynamic_pointer_cast >(factor); - if (pose2Prior) - pose2Graph.add( - BetweenFactor(keyAnchor, pose2Prior->keys()[0], - pose2Prior->prior(), pose2Prior->noiseModel())); - } - return pose2Graph; -} - /* ************************************************************************* */ static PredecessorMap findOdometricPath( const NonlinearFactorGraph& pose2Graph) { PredecessorMap tree; - Key minKey = keyAnchor; // this initialization does not matter + Key minKey = initialize::kAnchorKey; // this initialization does not matter bool minUnassigned = true; for(const boost::shared_ptr& factor: pose2Graph) { @@ -240,8 +216,8 @@ static PredecessorMap findOdometricPath( minKey = key1; } } - tree.insert(minKey, keyAnchor); - tree.insert(keyAnchor, keyAnchor); // root + tree.insert(minKey, initialize::kAnchorKey); + tree.insert(initialize::kAnchorKey, initialize::kAnchorKey); // root return tree; } @@ -284,7 +260,7 @@ VectorValues 
initializeOrientations(const NonlinearFactorGraph& graph, // We "extract" the Pose2 subgraph of the original graph: this // is done to properly model priors and avoiding operating on a larger graph - NonlinearFactorGraph pose2Graph = buildPose2graph(graph); + NonlinearFactorGraph pose2Graph = initialize::buildPoseGraph(graph); // Get orientations from relative orientation measurements return computeOrientations(pose2Graph, useOdometricPath); @@ -338,7 +314,7 @@ Values computePoses(const NonlinearFactorGraph& pose2graph, } } // add prior - linearPose2graph.add(keyAnchor, I3, Vector3(0.0, 0.0, 0.0), + linearPose2graph.add(initialize::kAnchorKey, I3, Vector3(0.0, 0.0, 0.0), priorPose2Noise); // optimize @@ -348,7 +324,7 @@ Values computePoses(const NonlinearFactorGraph& pose2graph, Values initialGuessLago; for(const VectorValues::value_type& it: posesLago) { Key key = it.first; - if (key != keyAnchor) { + if (key != initialize::kAnchorKey) { const Vector& poseVector = it.second; Pose2 poseLago = Pose2(poseVector(0), poseVector(1), orientationsLago.at(key)(0) + poseVector(2)); @@ -364,7 +340,7 @@ Values initialize(const NonlinearFactorGraph& graph, bool useOdometricPath) { // We "extract" the Pose2 subgraph of the original graph: this // is done to properly model priors and avoiding operating on a larger graph - NonlinearFactorGraph pose2Graph = buildPose2graph(graph); + NonlinearFactorGraph pose2Graph = initialize::buildPoseGraph(graph); // Get orientations from relative orientation measurements VectorValues orientationsLago = computeOrientations(pose2Graph, @@ -385,7 +361,7 @@ Values initialize(const NonlinearFactorGraph& graph, // for all nodes in the tree for(const VectorValues::value_type& it: orientations) { Key key = it.first; - if (key != keyAnchor) { + if (key != initialize::kAnchorKey) { const Pose2& pose = initialGuess.at(key); const Vector& orientation = it.second; Pose2 poseLago = Pose2(pose.x(), pose.y(), orientation(0)); diff --git a/gtsam/slam/tests/testDataset.cpp b/gtsam/slam/tests/testDataset.cpp index 9a3c797b2..aad9124c5 100644 --- a/gtsam/slam/tests/testDataset.cpp +++ b/gtsam/slam/tests/testDataset.cpp @@ -43,13 +43,13 @@ TEST(dataSet, findExampleDataFile) { } /* ************************************************************************* */ -TEST( dataSet, parseVertex) +TEST( dataSet, parseVertexPose) { const string str = "VERTEX2 1 2.000000 3.000000 4.000000"; istringstream is(str); string tag; EXPECT(is >> tag); - const auto actual = parseVertex(is, tag); + const auto actual = parseVertexPose(is, tag); EXPECT(actual); if (actual) { EXPECT_LONGS_EQUAL(1, actual->first); @@ -57,6 +57,21 @@ TEST( dataSet, parseVertex) } } +/* ************************************************************************* */ +TEST( dataSet, parseVertexLandmark) +{ + const string str = "VERTEX_XY 1 2.000000 3.000000"; + istringstream is(str); + string tag; + EXPECT(is >> tag); + const auto actual = parseVertexLandmark(is, tag); + EXPECT(actual); + if (actual) { + EXPECT_LONGS_EQUAL(1, actual->first); + EXPECT(assert_equal(Point2(2, 3), actual->second)); + } +} + /* ************************************************************************* */ TEST( dataSet, parseEdge) { @@ -67,38 +82,73 @@ TEST( dataSet, parseEdge) const auto actual = parseEdge(is, tag); EXPECT(actual); if (actual) { - pair expected(0, 1); + pair expected(0, 1); EXPECT(expected == actual->first); EXPECT(assert_equal(Pose2(2, 3, 4), actual->second)); } } /* ************************************************************************* */ 
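// A minimal standalone sketch of the stream-level parsing pattern exercised by
// the parseVertexPose/parseVertexLandmark/parseEdge tests above; it assumes only
// the dataset.h declarations that appear in this patch and is not itself part of
// the test file.
#include <gtsam/slam/dataset.h>

#include <iostream>
#include <sstream>
#include <string>

int main() {
  // The tag read from the stream decides which parser applies; each helper
  // returns an optional that is empty when the tag is not one it handles.
  std::istringstream is("VERTEX_XY 1 2.000000 3.000000");
  std::string tag;
  is >> tag;
  if (const auto landmark = gtsam::parseVertexLandmark(is, tag)) {
    std::cout << "landmark " << landmark->first << " at ("
              << landmark->second.x() << ", " << landmark->second.y() << ")\n";
  }
  return 0;
}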
-TEST( dataSet, load2D) -{ +TEST(dataSet, load2D) { ///< The structure where we will save the SfM data const string filename = findExampleDataFile("w100.graph"); NonlinearFactorGraph::shared_ptr graph; Values::shared_ptr initial; boost::tie(graph, initial) = load2D(filename); - EXPECT_LONGS_EQUAL(300,graph->size()); - EXPECT_LONGS_EQUAL(100,initial->size()); - noiseModel::Unit::shared_ptr model = noiseModel::Unit::Create(3); - BetweenFactor expected(1, 0, Pose2(-0.99879,0.0417574,-0.00818381), model); - BetweenFactor::shared_ptr actual = boost::dynamic_pointer_cast< - BetweenFactor >(graph->at(0)); + EXPECT_LONGS_EQUAL(300, graph->size()); + EXPECT_LONGS_EQUAL(100, initial->size()); + auto model = noiseModel::Unit::Create(3); + BetweenFactor expected(1, 0, Pose2(-0.99879, 0.0417574, -0.00818381), + model); + BetweenFactor::shared_ptr actual = + boost::dynamic_pointer_cast>(graph->at(0)); EXPECT(assert_equal(expected, *actual)); + + // Check binary measurements, Pose2 + size_t maxIndex = 5; + auto measurements = parseMeasurements(filename, nullptr, maxIndex); + EXPECT_LONGS_EQUAL(5, measurements.size()); + + // Check binary measurements, Rot2 + auto measurements2 = parseMeasurements(filename); + EXPECT_LONGS_EQUAL(300, measurements2.size()); + + // // Check factor parsing + const auto actualFactors = parseFactors(filename); + for (size_t i : {0, 1, 2, 3, 4, 5}) { + EXPECT(assert_equal( + *boost::dynamic_pointer_cast>(graph->at(i)), + *actualFactors[i], 1e-5)); + } + + // Check pose parsing + const auto actualPoses = parseVariables(filename); + for (size_t j : {0, 1, 2, 3, 4}) { + EXPECT(assert_equal(initial->at(j), actualPoses.at(j), 1e-5)); + } + + // Check landmark parsing + const auto actualLandmarks = parseVariables(filename); + EXPECT_LONGS_EQUAL(0, actualLandmarks.size()); } /* ************************************************************************* */ -TEST( dataSet, load2DVictoriaPark) -{ +TEST(dataSet, load2DVictoriaPark) { const string filename = findExampleDataFile("victoria_park.txt"); NonlinearFactorGraph::shared_ptr graph; Values::shared_ptr initial; + + // Load all boost::tie(graph, initial) = load2D(filename); - EXPECT_LONGS_EQUAL(10608,graph->size()); - EXPECT_LONGS_EQUAL(7120,initial->size()); + EXPECT_LONGS_EQUAL(10608, graph->size()); + EXPECT_LONGS_EQUAL(7120, initial->size()); + + // Restrict keys + size_t maxIndex = 5; + boost::tie(graph, initial) = load2D(filename, nullptr, maxIndex); + EXPECT_LONGS_EQUAL(5, graph->size()); + EXPECT_LONGS_EQUAL(6, initial->size()); // file has 0 as well + EXPECT_LONGS_EQUAL(L(5), graph->at(4)->keys()[1]); } /* ************************************************************************* */ @@ -122,45 +172,6 @@ TEST( dataSet, Balbianello) EXPECT(assert_equal(expected,actual,1)); } -/* ************************************************************************* */ -TEST( dataSet, readG2o) -{ - const string g2oFile = findExampleDataFile("pose2example"); - NonlinearFactorGraph::shared_ptr actualGraph; - Values::shared_ptr actualValues; - boost::tie(actualGraph, actualValues) = readG2o(g2oFile); - - Values expectedValues; - expectedValues.insert(0, Pose2(0.000000, 0.000000, 0.000000)); - expectedValues.insert(1, Pose2(1.030390, 0.011350, -0.081596)); - expectedValues.insert(2, Pose2(2.036137, -0.129733, -0.301887)); - expectedValues.insert(3, Pose2(3.015097, -0.442395, -0.345514)); - expectedValues.insert(4, Pose2(3.343949, 0.506678, 1.214715)); - expectedValues.insert(5, Pose2(3.684491, 1.464049, 1.183785)); - expectedValues.insert(6, 
Pose2(4.064626, 2.414783, 1.176333)); - expectedValues.insert(7, Pose2(4.429778, 3.300180, 1.259169)); - expectedValues.insert(8, Pose2(4.128877, 2.321481, -1.825391)); - expectedValues.insert(9, Pose2(3.884653, 1.327509, -1.953016)); - expectedValues.insert(10, Pose2(3.531067, 0.388263, -2.148934)); - EXPECT(assert_equal(expectedValues,*actualValues,1e-5)); - - noiseModel::Diagonal::shared_ptr model = noiseModel::Diagonal::Precisions(Vector3(44.721360, 44.721360, 30.901699)); - NonlinearFactorGraph expectedGraph; - expectedGraph.emplace_shared >(0, 1, Pose2(1.030390, 0.011350, -0.081596), model); - expectedGraph.emplace_shared >(1, 2, Pose2(1.013900, -0.058639, -0.220291), model); - expectedGraph.emplace_shared >(2, 3, Pose2(1.027650, -0.007456, -0.043627), model); - expectedGraph.emplace_shared >(3, 4, Pose2(-0.012016, 1.004360, 1.560229), model); - expectedGraph.emplace_shared >(4, 5, Pose2(1.016030, 0.014565, -0.030930), model); - expectedGraph.emplace_shared >(5, 6, Pose2(1.023890, 0.006808, -0.007452), model); - expectedGraph.emplace_shared >(6, 7, Pose2(0.957734, 0.003159, 0.082836), model); - expectedGraph.emplace_shared >(7, 8, Pose2(-1.023820, -0.013668, -3.084560), model); - expectedGraph.emplace_shared >(8, 9, Pose2(1.023440, 0.013984, -0.127624), model); - expectedGraph.emplace_shared >(9,10, Pose2(1.003350, 0.022250, -0.195918), model); - expectedGraph.emplace_shared >(5, 9, Pose2(0.033943, 0.032439, 3.073637), model); - expectedGraph.emplace_shared >(3,10, Pose2(0.044020, 0.988477, -1.553511), model); - EXPECT(assert_equal(expectedGraph,*actualGraph,1e-5)); -} - /* ************************************************************************* */ TEST(dataSet, readG2o3D) { const string g2oFile = findExampleDataFile("pose3example"); @@ -208,7 +219,7 @@ TEST(dataSet, readG2o3D) { } // Check factor parsing - const auto actualFactors = parse3DFactors(g2oFile); + const auto actualFactors = parseFactors(g2oFile); for (size_t i : {0, 1, 2, 3, 4, 5}) { EXPECT(assert_equal( *boost::dynamic_pointer_cast>(expectedGraph[i]), @@ -216,7 +227,13 @@ TEST(dataSet, readG2o3D) { } // Check pose parsing - const auto actualPoses = parse3DPoses(g2oFile); + const auto actualPoses = parseVariables(g2oFile); + for (size_t j : {0, 1, 2, 3, 4}) { + EXPECT(assert_equal(poses[j], actualPoses.at(j), 1e-5)); + } + + // Check landmark parsing + const auto actualLandmarks = parseVariables(g2oFile); for (size_t j : {0, 1, 2, 3, 4}) { EXPECT(assert_equal(poses[j], actualPoses.at(j), 1e-5)); } @@ -273,59 +290,119 @@ TEST( dataSet, readG2o3DNonDiagonalNoise) } /* ************************************************************************* */ -TEST( dataSet, readG2oHuber) -{ +TEST(dataSet, readG2oCheckDeterminants) { + const string g2oFile = findExampleDataFile("toyExample.g2o"); + + // Check determinants in factors + auto factors = parseFactors(g2oFile); + EXPECT_LONGS_EQUAL(6, factors.size()); + for (const auto& factor : factors) { + const Rot3 R = factor->measured().rotation(); + EXPECT_DOUBLES_EQUAL(1.0, R.matrix().determinant(), 1e-9); + } + + // Check determinants in initial values + const map poses = parseVariables(g2oFile); + EXPECT_LONGS_EQUAL(5, poses.size()); + for (const auto& key_value : poses) { + const Rot3 R = key_value.second.rotation(); + EXPECT_DOUBLES_EQUAL(1.0, R.matrix().determinant(), 1e-9); + } + const map landmarks = parseVariables(g2oFile); + EXPECT_LONGS_EQUAL(0, landmarks.size()); +} + +/* ************************************************************************* */ +TEST(dataSet, 
readG2oLandmarks) { + const string g2oFile = findExampleDataFile("example_with_vertices.g2o"); + + // Check number of poses and landmarks. Should be 8 each. + const map poses = parseVariables(g2oFile); + EXPECT_LONGS_EQUAL(8, poses.size()); + const map landmarks = parseVariables(g2oFile); + EXPECT_LONGS_EQUAL(8, landmarks.size()); + + auto graphAndValues = load3D(g2oFile); + EXPECT(graphAndValues.second->exists(L(0))); +} + +/* ************************************************************************* */ +static NonlinearFactorGraph expectedGraph(const SharedNoiseModel& model) { + NonlinearFactorGraph g; + using Factor = BetweenFactor; + g.emplace_shared(0, 1, Pose2(1.030390, 0.011350, -0.081596), model); + g.emplace_shared(1, 2, Pose2(1.013900, -0.058639, -0.220291), model); + g.emplace_shared(2, 3, Pose2(1.027650, -0.007456, -0.043627), model); + g.emplace_shared(3, 4, Pose2(-0.012016, 1.004360, 1.560229), model); + g.emplace_shared(4, 5, Pose2(1.016030, 0.014565, -0.030930), model); + g.emplace_shared(5, 6, Pose2(1.023890, 0.006808, -0.007452), model); + g.emplace_shared(6, 7, Pose2(0.957734, 0.003159, 0.082836), model); + g.emplace_shared(7, 8, Pose2(-1.023820, -0.013668, -3.084560), model); + g.emplace_shared(8, 9, Pose2(1.023440, 0.013984, -0.127624), model); + g.emplace_shared(9, 10, Pose2(1.003350, 0.022250, -0.195918), model); + g.emplace_shared(5, 9, Pose2(0.033943, 0.032439, 3.073637), model); + g.emplace_shared(3, 10, Pose2(0.044020, 0.988477, -1.553511), model); + return g; +} + +/* ************************************************************************* */ +TEST(dataSet, readG2o) { + const string g2oFile = findExampleDataFile("pose2example"); + NonlinearFactorGraph::shared_ptr actualGraph; + Values::shared_ptr actualValues; + boost::tie(actualGraph, actualValues) = readG2o(g2oFile); + + auto model = noiseModel::Diagonal::Precisions( + Vector3(44.721360, 44.721360, 30.901699)); + EXPECT(assert_equal(expectedGraph(model), *actualGraph, 1e-5)); + + Values expectedValues; + expectedValues.insert(0, Pose2(0.000000, 0.000000, 0.000000)); + expectedValues.insert(1, Pose2(1.030390, 0.011350, -0.081596)); + expectedValues.insert(2, Pose2(2.036137, -0.129733, -0.301887)); + expectedValues.insert(3, Pose2(3.015097, -0.442395, -0.345514)); + expectedValues.insert(4, Pose2(3.343949, 0.506678, 1.214715)); + expectedValues.insert(5, Pose2(3.684491, 1.464049, 1.183785)); + expectedValues.insert(6, Pose2(4.064626, 2.414783, 1.176333)); + expectedValues.insert(7, Pose2(4.429778, 3.300180, 1.259169)); + expectedValues.insert(8, Pose2(4.128877, 2.321481, -1.825391)); + expectedValues.insert(9, Pose2(3.884653, 1.327509, -1.953016)); + expectedValues.insert(10, Pose2(3.531067, 0.388263, -2.148934)); + EXPECT(assert_equal(expectedValues, *actualValues, 1e-5)); +} + +/* ************************************************************************* */ +TEST(dataSet, readG2oHuber) { const string g2oFile = findExampleDataFile("pose2example"); NonlinearFactorGraph::shared_ptr actualGraph; Values::shared_ptr actualValues; bool is3D = false; - boost::tie(actualGraph, actualValues) = readG2o(g2oFile, is3D, KernelFunctionTypeHUBER); + boost::tie(actualGraph, actualValues) = + readG2o(g2oFile, is3D, KernelFunctionTypeHUBER); - noiseModel::Diagonal::shared_ptr baseModel = noiseModel::Diagonal::Precisions(Vector3(44.721360, 44.721360, 30.901699)); - SharedNoiseModel model = noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(1.345), baseModel); + auto baseModel = noiseModel::Diagonal::Precisions( + 
Vector3(44.721360, 44.721360, 30.901699)); + auto model = noiseModel::Robust::Create( + noiseModel::mEstimator::Huber::Create(1.345), baseModel); - NonlinearFactorGraph expectedGraph; - expectedGraph.emplace_shared >(0, 1, Pose2(1.030390, 0.011350, -0.081596), model); - expectedGraph.emplace_shared >(1, 2, Pose2(1.013900, -0.058639, -0.220291), model); - expectedGraph.emplace_shared >(2, 3, Pose2(1.027650, -0.007456, -0.043627), model); - expectedGraph.emplace_shared >(3, 4, Pose2(-0.012016, 1.004360, 1.560229), model); - expectedGraph.emplace_shared >(4, 5, Pose2(1.016030, 0.014565, -0.030930), model); - expectedGraph.emplace_shared >(5, 6, Pose2(1.023890, 0.006808, -0.007452), model); - expectedGraph.emplace_shared >(6, 7, Pose2(0.957734, 0.003159, 0.082836), model); - expectedGraph.emplace_shared >(7, 8, Pose2(-1.023820, -0.013668, -3.084560), model); - expectedGraph.emplace_shared >(8, 9, Pose2(1.023440, 0.013984, -0.127624), model); - expectedGraph.emplace_shared >(9,10, Pose2(1.003350, 0.022250, -0.195918), model); - expectedGraph.emplace_shared >(5, 9, Pose2(0.033943, 0.032439, 3.073637), model); - expectedGraph.emplace_shared >(3,10, Pose2(0.044020, 0.988477, -1.553511), model); - EXPECT(assert_equal(expectedGraph,*actualGraph,1e-5)); + EXPECT(assert_equal(expectedGraph(model), *actualGraph, 1e-5)); } /* ************************************************************************* */ -TEST( dataSet, readG2oTukey) -{ +TEST(dataSet, readG2oTukey) { const string g2oFile = findExampleDataFile("pose2example"); NonlinearFactorGraph::shared_ptr actualGraph; Values::shared_ptr actualValues; bool is3D = false; - boost::tie(actualGraph, actualValues) = readG2o(g2oFile, is3D, KernelFunctionTypeTUKEY); + boost::tie(actualGraph, actualValues) = + readG2o(g2oFile, is3D, KernelFunctionTypeTUKEY); - noiseModel::Diagonal::shared_ptr baseModel = noiseModel::Diagonal::Precisions(Vector3(44.721360, 44.721360, 30.901699)); - SharedNoiseModel model = noiseModel::Robust::Create(noiseModel::mEstimator::Tukey::Create(4.6851), baseModel); + auto baseModel = noiseModel::Diagonal::Precisions( + Vector3(44.721360, 44.721360, 30.901699)); + auto model = noiseModel::Robust::Create( + noiseModel::mEstimator::Tukey::Create(4.6851), baseModel); - NonlinearFactorGraph expectedGraph; - expectedGraph.emplace_shared >(0, 1, Pose2(1.030390, 0.011350, -0.081596), model); - expectedGraph.emplace_shared >(1, 2, Pose2(1.013900, -0.058639, -0.220291), model); - expectedGraph.emplace_shared >(2, 3, Pose2(1.027650, -0.007456, -0.043627), model); - expectedGraph.emplace_shared >(3, 4, Pose2(-0.012016, 1.004360, 1.560229), model); - expectedGraph.emplace_shared >(4, 5, Pose2(1.016030, 0.014565, -0.030930), model); - expectedGraph.emplace_shared >(5, 6, Pose2(1.023890, 0.006808, -0.007452), model); - expectedGraph.emplace_shared >(6, 7, Pose2(0.957734, 0.003159, 0.082836), model); - expectedGraph.emplace_shared >(7, 8, Pose2(-1.023820, -0.013668, -3.084560), model); - expectedGraph.emplace_shared >(8, 9, Pose2(1.023440, 0.013984, -0.127624), model); - expectedGraph.emplace_shared >(9,10, Pose2(1.003350, 0.022250, -0.195918), model); - expectedGraph.emplace_shared >(5, 9, Pose2(0.033943, 0.032439, 3.073637), model); - expectedGraph.emplace_shared >(3,10, Pose2(0.044020, 0.988477, -1.553511), model); - EXPECT(assert_equal(expectedGraph,*actualGraph,1e-5)); + EXPECT(assert_equal(expectedGraph(model), *actualGraph, 1e-5)); } /* ************************************************************************* */ @@ -495,18 +572,16 @@ TEST( 
dataSet, writeBALfromValues_Dubrovnik){ SfmData readData; readBAL(filenameToRead, readData); - Pose3 poseChange = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), gtsam::Point3(0.3,0.1,0.3)); + Pose3 poseChange = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), Point3(0.3,0.1,0.3)); Values value; for(size_t i=0; i < readData.number_cameras(); i++){ // for each camera - Key poseKey = symbol('x',i); Pose3 pose = poseChange.compose(readData.cameras[i].pose()); - value.insert(poseKey, pose); + value.insert(X(i), pose); } for(size_t j=0; j < readData.number_tracks(); j++){ // for each point - Key pointKey = P(j); Point3 point = poseChange.transformFrom( readData.tracks[j].p ); - value.insert(pointKey, point); + value.insert(P(j), point); } // Write values and readData to a file @@ -531,13 +606,11 @@ TEST( dataSet, writeBALfromValues_Dubrovnik){ EXPECT(assert_equal(expected,actual,12)); Pose3 expectedPose = camera0.pose(); - Key poseKey = symbol('x',0); - Pose3 actualPose = value.at(poseKey); + Pose3 actualPose = value.at(X(0)); EXPECT(assert_equal(expectedPose,actualPose, 1e-7)); Point3 expectedPoint = track0.p; - Key pointKey = P(0); - Point3 actualPoint = value.at(pointKey); + Point3 actualPoint = value.at(P(0)); EXPECT(assert_equal(expectedPoint,actualPoint, 1e-6)); } diff --git a/gtsam/slam/tests/testFrobeniusFactor.cpp b/gtsam/slam/tests/testFrobeniusFactor.cpp new file mode 100644 index 000000000..321b54c86 --- /dev/null +++ b/gtsam/slam/tests/testFrobeniusFactor.cpp @@ -0,0 +1,196 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * testFrobeniusFactor.cpp + * + * @file testFrobeniusFactor.cpp + * @date March 2019 + * @author Frank Dellaert + * @brief Check evaluateError for various Frobenius norm + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +//****************************************************************************** +namespace so3 { +SO3 id; +Vector3 v1 = (Vector(3) << 0.1, 0, 0).finished(); +SO3 R1 = SO3::Expmap(v1); +Vector3 v2 = (Vector(3) << 0.01, 0.02, 0.03).finished(); +SO3 R2 = SO3::Expmap(v2); +SO3 R12 = R1.between(R2); +} // namespace so3 + +/* ************************************************************************* */ +TEST(FrobeniusPriorSO3, evaluateError) { + using namespace ::so3; + auto factor = FrobeniusPrior(1, R2.matrix()); + Vector actual = factor.evaluateError(R1); + Vector expected = R1.vec() - R2.vec(); + EXPECT(assert_equal(expected, actual, 1e-9)); + + Values values; + values.insert(1, R1); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); +} + +/* ************************************************************************* */ +TEST(FrobeniusPriorSO3, ClosestTo) { + // Example top-left of SO(4) matrix not quite on SO(3) manifold + Matrix3 M; + M << 0.79067393, 0.6051136, -0.0930814, // + 0.4155925, -0.64214347, -0.64324489, // + -0.44948549, 0.47046326, -0.75917576; + + SO3 expected = SO3::ClosestTo(M); + + // manifold optimization gets same result as SVD solution in ClosestTo + NonlinearFactorGraph graph; + graph.emplace_shared >(1, M); + + Values initial; + initial.insert(1, SO3(I_3x3)); 
+ auto result = GaussNewtonOptimizer(graph, initial).optimize(); + EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 1e-6); + EXPECT(assert_equal(expected, result.at(1), 1e-6)); +} + +/* ************************************************************************* */ +TEST(FrobeniusPriorSO3, ChordalL2mean) { + // See Hartley13ijcv: + // Cost function C(R) = \sum FrobeniusPrior(R,R_i) + // Closed form solution = ClosestTo(C_e), where + // C_e = \sum R_i !!!! + + // We will test by computing mean of R1=exp(v1) R1^T=exp(-v1): + using namespace ::so3; + SO3 expected; // identity + Matrix3 M = R1.matrix() + R1.matrix().transpose(); + EXPECT(assert_equal(expected, SO3::ClosestTo(M), 1e-6)); + EXPECT(assert_equal(expected, SO3::ChordalMean({R1, R1.inverse()}), 1e-6)); + + // manifold optimization gets same result as ChordalMean + NonlinearFactorGraph graph; + graph.emplace_shared >(1, R1.matrix()); + graph.emplace_shared >(1, R1.matrix().transpose()); + + Values initial; + initial.insert(1, R1.inverse()); + auto result = GaussNewtonOptimizer(graph, initial).optimize(); + EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 0.1); // Why so loose? + EXPECT(assert_equal(expected, result.at(1), 1e-5)); +} + +/* ************************************************************************* */ +TEST(FrobeniusFactorSO3, evaluateError) { + using namespace ::so3; + auto factor = FrobeniusFactor(1, 2); + Vector actual = factor.evaluateError(R1, R2); + Vector expected = R2.vec() - R1.vec(); + EXPECT(assert_equal(expected, actual, 1e-9)); + + Values values; + values.insert(1, R1); + values.insert(2, R2); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); +} + +/* ************************************************************************* */ +// Commented out as SO(n) not yet supported (and might never be) +// TEST(FrobeniusBetweenFactorSOn, evaluateError) { +// using namespace ::so3; +// auto factor = +// FrobeniusBetweenFactor(1, 2, SOn::FromMatrix(R12.matrix())); +// Vector actual = factor.evaluateError(SOn::FromMatrix(R1.matrix()), +// SOn::FromMatrix(R2.matrix())); +// Vector expected = Vector9::Zero(); +// EXPECT(assert_equal(expected, actual, 1e-9)); + +// Values values; +// values.insert(1, R1); +// values.insert(2, R2); +// EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); +// } + +/* ************************************************************************* */ +TEST(FrobeniusBetweenFactorSO3, evaluateError) { + using namespace ::so3; + auto factor = FrobeniusBetweenFactor(1, 2, R12); + Vector actual = factor.evaluateError(R1, R2); + Vector expected = Vector9::Zero(); + EXPECT(assert_equal(expected, actual, 1e-9)); + + Values values; + values.insert(1, R1); + values.insert(2, R2); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); +} + +//****************************************************************************** +namespace so4 { +SO4 id; +Vector6 v1 = (Vector(6) << 0.1, 0, 0, 0, 0, 0).finished(); +SO4 Q1 = SO4::Expmap(v1); +Vector6 v2 = (Vector(6) << 0.01, 0.02, 0.03, 0.04, 0.05, 0.06).finished(); +SO4 Q2 = SO4::Expmap(v2); +} // namespace so4 + +/* ************************************************************************* */ +TEST(FrobeniusFactorSO4, evaluateError) { + using namespace ::so4; + auto factor = FrobeniusFactor(1, 2, noiseModel::Unit::Create(6)); + Vector actual = factor.evaluateError(Q1, Q2); + Vector expected = Q2.vec() - Q1.vec(); + EXPECT(assert_equal(expected, actual, 1e-9)); + + Values values; + values.insert(1, Q1); + values.insert(2, Q2); + 
EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); +} + +/* ************************************************************************* */ +TEST(FrobeniusBetweenFactorSO4, evaluateError) { + using namespace ::so4; + Matrix4 M{I_4x4}; + M.topLeftCorner<3, 3>() = ::so3::R12.matrix(); + auto factor = FrobeniusBetweenFactor(1, 2, Q1.between(Q2)); + Matrix H1, H2; + Vector actual = factor.evaluateError(Q1, Q2, H1, H2); + Vector expected = SO4::VectorN2::Zero(); + EXPECT(assert_equal(expected, actual, 1e-9)); + + Values values; + values.insert(1, Q1); + values.insert(2, Q2); + EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/slam/tests/testInitializePose.cpp b/gtsam/slam/tests/testInitializePose.cpp new file mode 100644 index 000000000..1373cb576 --- /dev/null +++ b/gtsam/slam/tests/testInitializePose.cpp @@ -0,0 +1,82 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testInitializePose3.cpp + * @brief Unit tests for 3D SLAM initialization, using rotation relaxation + * + * @author Luca Carlone + * @author Frank Dellaert + * @date August, 2014 + */ + +#include + +#include +#include +#include +#include + +#include + +using namespace std; +using namespace gtsam; + +/* ************************************************************************* */ +TEST(InitializePose3, computePoses2D) { + const string g2oFile = findExampleDataFile("noisyToyGraph.txt"); + NonlinearFactorGraph::shared_ptr inputGraph; + Values::shared_ptr posesInFile; + bool is3D = false; + boost::tie(inputGraph, posesInFile) = readG2o(g2oFile, is3D); + + auto priorModel = noiseModel::Unit::Create(3); + inputGraph->addPrior(0, posesInFile->at(0), priorModel); + + auto poseGraph = initialize::buildPoseGraph(*inputGraph); + + auto I = genericValue(Rot3()); + Values orientations; + for (size_t i : {0, 1, 2, 3}) + orientations.insert(i, posesInFile->at(i).rotation()); + const Values poses = initialize::computePoses(orientations, &poseGraph); + + // posesInFile is seriously noisy, so we check error of recovered poses + EXPECT_DOUBLES_EQUAL(0.0810283, inputGraph->error(poses), 1e-6); +} + +/* ************************************************************************* */ +TEST(InitializePose3, computePoses3D) { + const string g2oFile = findExampleDataFile("Klaus3"); + NonlinearFactorGraph::shared_ptr inputGraph; + Values::shared_ptr posesInFile; + bool is3D = true; + boost::tie(inputGraph, posesInFile) = readG2o(g2oFile, is3D); + + auto priorModel = noiseModel::Unit::Create(6); + inputGraph->addPrior(0, posesInFile->at(0), priorModel); + + auto poseGraph = initialize::buildPoseGraph(*inputGraph); + + auto I = genericValue(Rot3()); + Values orientations; + for (size_t i : {0, 1, 2}) + orientations.insert(i, posesInFile->at(i).rotation()); + Values poses = initialize::computePoses(orientations, &poseGraph); + EXPECT(assert_equal(*posesInFile, poses, 0.1)); // TODO(frank): very loose !! 
+} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam/slam/tests/testInitializePose3.cpp b/gtsam/slam/tests/testInitializePose3.cpp index ba41cdc9b..995a109fa 100644 --- a/gtsam/slam/tests/testInitializePose3.cpp +++ b/gtsam/slam/tests/testInitializePose3.cpp @@ -75,8 +75,15 @@ NonlinearFactorGraph graph2() { g.add(BetweenFactor(x0, x1, pose0.between(pose1), noiseModel::Isotropic::Precision(6, 1.0))); g.add(BetweenFactor(x1, x2, pose1.between(pose2), noiseModel::Isotropic::Precision(6, 1.0))); g.add(BetweenFactor(x2, x3, pose2.between(pose3), noiseModel::Isotropic::Precision(6, 1.0))); - g.add(BetweenFactor(x2, x0, Pose3(Rot3::Ypr(0.1,0,0.1), Point3()), noiseModel::Isotropic::Precision(6, 0.0))); // random pose, but zero information - g.add(BetweenFactor(x0, x3, Pose3(Rot3::Ypr(0.5,-0.2,0.2), Point3(10,20,30)), noiseModel::Isotropic::Precision(6, 0.0))); // random pose, but zero informatoin + // random pose, but zero information + auto noise_zero_info = noiseModel::Isotropic::Precision(6, 0.0); + g.add(BetweenFactor( + x2, x0, Pose3(Rot3::Ypr(0.1, 0.0, 0.1), Point3(0.0, 0.0, 0.0)), + noise_zero_info)); + // random pose, but zero information + g.add(BetweenFactor( + x0, x3, Pose3(Rot3::Ypr(0.5, -0.2, 0.2), Point3(10, 20, 30)), + noise_zero_info)); g.addPrior(x0, pose0, model); return g; } @@ -121,7 +128,7 @@ TEST( InitializePose3, orientationsGradientSymbolicGraph ) { KeyVectorMap adjEdgesMap; KeyRotMap factorId2RotMap; - InitializePose3::createSymbolicGraph(adjEdgesMap, factorId2RotMap, pose3Graph); + InitializePose3::createSymbolicGraph(pose3Graph, &adjEdgesMap, &factorId2RotMap); EXPECT_DOUBLES_EQUAL(adjEdgesMap.at(x0)[0], 0, 1e-9); EXPECT_DOUBLES_EQUAL(adjEdgesMap.at(x0)[1], 3, 1e-9); @@ -258,21 +265,21 @@ TEST( InitializePose3, posesWithGivenGuess ) { } /* ************************************************************************* */ -TEST( InitializePose3, initializePoses ) -{ +TEST(InitializePose3, initializePoses) { const string g2oFile = findExampleDataFile("pose3example-grid"); NonlinearFactorGraph::shared_ptr inputGraph; - Values::shared_ptr expectedValues; + Values::shared_ptr posesInFile; bool is3D = true; - boost::tie(inputGraph, expectedValues) = readG2o(g2oFile, is3D); - noiseModel::Unit::shared_ptr priorModel = noiseModel::Unit::Create(6); + boost::tie(inputGraph, posesInFile) = readG2o(g2oFile, is3D); + + auto priorModel = noiseModel::Unit::Create(6); inputGraph->addPrior(0, Pose3(), priorModel); Values initial = InitializePose3::initialize(*inputGraph); - EXPECT(assert_equal(*expectedValues,initial,0.1)); // TODO(frank): very loose !! + EXPECT(assert_equal(*posesInFile, initial, + 0.1)); // TODO(frank): very loose !! 
} - /* ************************************************************************* */ int main() { TestResult tr; diff --git a/gtsam/slam/tests/testLago.cpp b/gtsam/slam/tests/testLago.cpp index 55449d86e..781317d7a 100644 --- a/gtsam/slam/tests/testLago.cpp +++ b/gtsam/slam/tests/testLago.cpp @@ -284,7 +284,7 @@ TEST( Lago, largeGraphNoisy_orientations ) { Values::shared_ptr expected; boost::tie(gmatlab, expected) = readG2o(matlabFile); - for(const Values::KeyValuePair& key_val: *expected){ + for(const auto key_val: *expected){ Key k = key_val.key; EXPECT(assert_equal(expected->at(k), actual.at(k), 1e-5)); } @@ -310,7 +310,7 @@ TEST( Lago, largeGraphNoisy ) { Values::shared_ptr expected; boost::tie(gmatlab, expected) = readG2o(matlabFile); - for(const Values::KeyValuePair& key_val: *expected){ + for(const auto key_val: *expected){ Key k = key_val.key; EXPECT(assert_equal(expected->at(k), actual.at(k), 1e-2)); } diff --git a/gtsam/slam/tests/testSerializationDataset.cpp b/gtsam/slam/tests/testSerializationDataset.cpp new file mode 100644 index 000000000..6ef82f07f --- /dev/null +++ b/gtsam/slam/tests/testSerializationDataset.cpp @@ -0,0 +1,58 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testSerializationDataset.cpp + * @brief serialization tests for dataset.cpp + * @author Ayush Baid + * @date Jan 1, 2021 + */ + +#include + +#include +#include + +using namespace std; +using namespace gtsam; +using namespace gtsam::serializationTestHelpers; + +/* ************************************************************************* */ +TEST(dataSet, sfmDataSerialization) { + // Test the serialization of SfmData + const string filename = findExampleDataFile("dubrovnik-3-7-pre"); + SfmData mydata; + CHECK(readBAL(filename, mydata)); + + // round-trip equality check on serialization and subsequent deserialization + EXPECT(equalsObj(mydata)); + EXPECT(equalsXML(mydata)); + EXPECT(equalsBinary(mydata)); +} + +/* ************************************************************************* */ +TEST(dataSet, sfmTrackSerialization) { + // Test the serialization of SfmTrack + const string filename = findExampleDataFile("dubrovnik-3-7-pre"); + SfmData mydata; + CHECK(readBAL(filename, mydata)); + + SfmTrack track = mydata.track(0); + + // round-trip equality check on serialization and subsequent deserialization + EXPECT(equalsObj(track)); + EXPECT(equalsXML(track)); + EXPECT(equalsBinary(track)); +} + +/* ************************************************************************* */ +int main() { TestResult tr; return TestRegistry::runAllTests(tr); } +/* ************************************************************************* */ diff --git a/gtsam/slam/tests/testSmartFactorBase.cpp b/gtsam/slam/tests/testSmartFactorBase.cpp index f69f4c113..951cbf8f4 100644 --- a/gtsam/slam/tests/testSmartFactorBase.cpp +++ b/gtsam/slam/tests/testSmartFactorBase.cpp @@ -37,13 +37,13 @@ class PinholeFactor: public SmartFactorBase > { public: typedef SmartFactorBase > Base; PinholeFactor() {} - PinholeFactor(const SharedNoiseModel& sharedNoiseModel): Base(sharedNoiseModel) { - } - virtual double error(const Values& values) const { - return 0.0; - } - virtual 
boost::shared_ptr linearize( - const Values& values) const { + PinholeFactor(const SharedNoiseModel& sharedNoiseModel, + boost::optional body_P_sensor = boost::none, + size_t expectedNumberCameras = 10) + : Base(sharedNoiseModel, body_P_sensor, expectedNumberCameras) {} + double error(const Values& values) const override { return 0.0; } + boost::shared_ptr linearize( + const Values& values) const override { return boost::shared_ptr(new JacobianFactor()); } }; @@ -60,6 +60,40 @@ TEST(SmartFactorBase, Pinhole) { EXPECT_LONGS_EQUAL(2 * 2, f.dim()); } +TEST(SmartFactorBase, PinholeWithSensor) { + Pose3 body_P_sensor(Rot3(), Point3(1, 0, 0)); + PinholeFactor f = PinholeFactor(unit2, body_P_sensor); + EXPECT(assert_equal(f.body_P_sensor(), body_P_sensor)); + + PinholeFactor::Cameras cameras; + // Assume body at origin. + Pose3 world_P_body = Pose3(); + // Camera coordinates in world frame. + Pose3 wTc = world_P_body * body_P_sensor; + cameras.push_back(PinholeCamera(wTc)); + + // Simple point to project slightly off image center + Point3 p(0, 0, 10); + Point2 measurement = cameras[0].project(p); + f.add(measurement, 1); + + PinholeFactor::Cameras::FBlocks Fs; + Matrix E; + Vector error = f.unwhitenedError(cameras, p, Fs, E); + + Vector expectedError = Vector::Zero(2); + Matrix29 expectedFs; + expectedFs << -0.001, -1.00001, 0, -0.1, 0, -0.01, 0, 0, 0, 1, 0, 0, 0, -0.1, 0, 0, 0, 0; + Matrix23 expectedE; + expectedE << 0.1, 0, 0.01, 0, 0.1, 0; + + EXPECT(assert_equal(error, expectedError)); + // We only have the jacobian for the 1 camera + // Use of a lower tolerance value due to compiler precision mismatch. + EXPECT(assert_equal(expectedFs, Fs[0], 1e-3)); + EXPECT(assert_equal(expectedE, E)); +} + /* ************************************************************************* */ #include @@ -71,11 +105,11 @@ class StereoFactor: public SmartFactorBase { StereoFactor() {} StereoFactor(const SharedNoiseModel& sharedNoiseModel): Base(sharedNoiseModel) { } - virtual double error(const Values& values) const { + double error(const Values& values) const override { return 0.0; } - virtual boost::shared_ptr linearize( - const Values& values) const { + boost::shared_ptr linearize( + const Values& values) const override { return boost::shared_ptr(new JacobianFactor()); } }; diff --git a/gtsam/smart/CMakeLists.txt b/gtsam/smart/CMakeLists.txt deleted file mode 100644 index 53c18fe96..000000000 --- a/gtsam/smart/CMakeLists.txt +++ /dev/null @@ -1,6 +0,0 @@ -# Install headers -file(GLOB smart_headers "*.h") -install(FILES ${smart_headers} DESTINATION include/gtsam/smart) - -# Build tests -add_subdirectory(tests) diff --git a/gtsam/smart/tests/CMakeLists.txt b/gtsam/smart/tests/CMakeLists.txt deleted file mode 100644 index caa3164fa..000000000 --- a/gtsam/smart/tests/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -gtsamAddTestsGlob(smart "test*.cpp" "" "gtsam") diff --git a/gtsam/symbolic/SymbolicBayesTree.h b/gtsam/symbolic/SymbolicBayesTree.h index e28f28764..3fdf1011b 100644 --- a/gtsam/symbolic/SymbolicBayesTree.h +++ b/gtsam/symbolic/SymbolicBayesTree.h @@ -39,6 +39,7 @@ namespace gtsam { typedef boost::shared_ptr shared_ptr; typedef boost::weak_ptr weak_ptr; SymbolicBayesTreeClique() {} + virtual ~SymbolicBayesTreeClique() {} SymbolicBayesTreeClique(const boost::shared_ptr& conditional) : Base(conditional) {} }; diff --git a/gtsam/symbolic/SymbolicConditional.h b/gtsam/symbolic/SymbolicConditional.h index 0530af556..9163cdba6 100644 --- a/gtsam/symbolic/SymbolicConditional.h +++ 
b/gtsam/symbolic/SymbolicConditional.h @@ -105,7 +105,7 @@ namespace gtsam { /// @name Testable /** Print with optional formatter */ - void print(const std::string& str = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; + virtual void print(const std::string& str = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; /** Check equality */ bool equals(const This& c, double tol = 1e-9) const; diff --git a/gtsam_extra.cmake.in b/gtsam_extra.cmake.in index 8a9a13648..44ba36bd6 100644 --- a/gtsam_extra.cmake.in +++ b/gtsam_extra.cmake.in @@ -7,7 +7,3 @@ set (GTSAM_VERSION_STRING "@GTSAM_VERSION_STRING@") set (GTSAM_USE_TBB @GTSAM_USE_TBB@) set (GTSAM_DEFAULT_ALLOCATOR @GTSAM_DEFAULT_ALLOCATOR@) - -if("@GTSAM_INSTALL_CYTHON_TOOLBOX@") - list(APPEND GTSAM_EIGENCY_INSTALL_PATH "@GTSAM_EIGENCY_INSTALL_PATH@") -endif() diff --git a/gtsam_unstable/CMakeLists.txt b/gtsam_unstable/CMakeLists.txt index 010b32710..ec161baa8 100644 --- a/gtsam_unstable/CMakeLists.txt +++ b/gtsam_unstable/CMakeLists.txt @@ -108,7 +108,7 @@ list(APPEND GTSAM_EXPORTED_TARGETS gtsam_unstable) set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE) # Wrap version for gtsam_unstable -if (GTSAM_INSTALL_MATLAB_TOOLBOX) +if (GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX) # Set up codegen include(GtsamMatlabWrap) @@ -119,8 +119,8 @@ if (GTSAM_INSTALL_MATLAB_TOOLBOX) endif() # Wrap - wrap_and_install_library(gtsam_unstable.h "gtsam" "" "${mexFlags}") -endif(GTSAM_INSTALL_MATLAB_TOOLBOX) + wrap_and_install_library(gtsam_unstable.i "gtsam" "" "${mexFlags}") +endif(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX) # Build examples diff --git a/gtsam_unstable/discrete/AllDiff.h b/gtsam_unstable/discrete/AllDiff.h index 3e7296593..80e700b29 100644 --- a/gtsam_unstable/discrete/AllDiff.h +++ b/gtsam_unstable/discrete/AllDiff.h @@ -34,11 +34,11 @@ namespace gtsam { AllDiff(const DiscreteKeys& dkeys); // print - virtual void print(const std::string& s = "", - const KeyFormatter& formatter = DefaultKeyFormatter) const; + void print(const std::string& s = "", + const KeyFormatter& formatter = DefaultKeyFormatter) const override; /// equals - bool equals(const DiscreteFactor& other, double tol) const { + bool equals(const DiscreteFactor& other, double tol) const override { if(!dynamic_cast(&other)) return false; else { @@ -50,13 +50,13 @@ namespace gtsam { } /// Calculate value = expensive ! - virtual double operator()(const Values& values) const; + double operator()(const Values& values) const override; /// Convert into a decisiontree, can be *very* expensive ! 
- virtual DecisionTreeFactor toDecisionTreeFactor() const; + DecisionTreeFactor toDecisionTreeFactor() const override; /// Multiply into a decisiontree - virtual DecisionTreeFactor operator*(const DecisionTreeFactor& f) const; + DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override; /* * Ensure Arc-consistency @@ -65,13 +65,13 @@ namespace gtsam { * @param j domain to be checked * @param domains all other domains */ - bool ensureArcConsistency(size_t j, std::vector& domains) const; + bool ensureArcConsistency(size_t j, std::vector& domains) const override; /// Partially apply known values - virtual Constraint::shared_ptr partiallyApply(const Values&) const; + Constraint::shared_ptr partiallyApply(const Values&) const override; /// Partially apply known values, domain version - virtual Constraint::shared_ptr partiallyApply(const std::vector&) const; + Constraint::shared_ptr partiallyApply(const std::vector&) const override; }; } // namespace gtsam diff --git a/gtsam_unstable/discrete/BinaryAllDiff.h b/gtsam_unstable/discrete/BinaryAllDiff.h index 1ae5a008a..bbb60e2f1 100644 --- a/gtsam_unstable/discrete/BinaryAllDiff.h +++ b/gtsam_unstable/discrete/BinaryAllDiff.h @@ -33,14 +33,14 @@ namespace gtsam { } // print - virtual void print(const std::string& s = "", - const KeyFormatter& formatter = DefaultKeyFormatter) const { + void print(const std::string& s = "", + const KeyFormatter& formatter = DefaultKeyFormatter) const override { std::cout << s << "BinaryAllDiff on " << formatter(keys_[0]) << " and " << formatter(keys_[1]) << std::endl; } /// equals - bool equals(const DiscreteFactor& other, double tol) const { + bool equals(const DiscreteFactor& other, double tol) const override { if(!dynamic_cast(&other)) return false; else { @@ -50,12 +50,12 @@ namespace gtsam { } /// Calculate value - virtual double operator()(const Values& values) const { + double operator()(const Values& values) const override { return (double) (values.at(keys_[0]) != values.at(keys_[1])); } /// Convert into a decisiontree - virtual DecisionTreeFactor toDecisionTreeFactor() const { + DecisionTreeFactor toDecisionTreeFactor() const override { DiscreteKeys keys; keys.push_back(DiscreteKey(keys_[0],cardinality0_)); keys.push_back(DiscreteKey(keys_[1],cardinality1_)); @@ -68,7 +68,7 @@ namespace gtsam { } /// Multiply into a decisiontree - virtual DecisionTreeFactor operator*(const DecisionTreeFactor& f) const { + DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override { // TODO: can we do this more efficiently? 
return toDecisionTreeFactor() * f; } @@ -79,20 +79,20 @@ namespace gtsam { * @param domains all other domains */ /// - bool ensureArcConsistency(size_t j, std::vector& domains) const { + bool ensureArcConsistency(size_t j, std::vector& domains) const override { // throw std::runtime_error( // "BinaryAllDiff::ensureArcConsistency not implemented"); return false; } /// Partially apply known values - virtual Constraint::shared_ptr partiallyApply(const Values&) const { + Constraint::shared_ptr partiallyApply(const Values&) const override { throw std::runtime_error("BinaryAllDiff::partiallyApply not implemented"); } /// Partially apply known values, domain version - virtual Constraint::shared_ptr partiallyApply( - const std::vector&) const { + Constraint::shared_ptr partiallyApply( + const std::vector&) const override { throw std::runtime_error("BinaryAllDiff::partiallyApply not implemented"); } }; diff --git a/gtsam_unstable/discrete/Domain.h b/gtsam_unstable/discrete/Domain.h index 5dd597e20..5acc5a08f 100644 --- a/gtsam_unstable/discrete/Domain.h +++ b/gtsam_unstable/discrete/Domain.h @@ -66,11 +66,11 @@ namespace gtsam { } // print - virtual void print(const std::string& s = "", - const KeyFormatter& formatter = DefaultKeyFormatter) const; + void print(const std::string& s = "", + const KeyFormatter& formatter = DefaultKeyFormatter) const override; /// equals - bool equals(const DiscreteFactor& other, double tol) const { + bool equals(const DiscreteFactor& other, double tol) const override { if(!dynamic_cast(&other)) return false; else { @@ -84,20 +84,20 @@ namespace gtsam { } /// Calculate value - virtual double operator()(const Values& values) const; + double operator()(const Values& values) const override; /// Convert into a decisiontree - virtual DecisionTreeFactor toDecisionTreeFactor() const; + DecisionTreeFactor toDecisionTreeFactor() const override; /// Multiply into a decisiontree - virtual DecisionTreeFactor operator*(const DecisionTreeFactor& f) const; + DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override; /* * Ensure Arc-consistency * @param j domain to be checked * @param domains all other domains */ - bool ensureArcConsistency(size_t j, std::vector& domains) const; + bool ensureArcConsistency(size_t j, std::vector& domains) const override; /** * Check for a value in domain that does not occur in any other connected domain. 
@@ -107,12 +107,11 @@ namespace gtsam { bool checkAllDiff(const KeyVector keys, std::vector& domains); /// Partially apply known values - virtual Constraint::shared_ptr partiallyApply( - const Values& values) const; + Constraint::shared_ptr partiallyApply(const Values& values) const override; /// Partially apply known values, domain version - virtual Constraint::shared_ptr partiallyApply( - const std::vector& domains) const; + Constraint::shared_ptr partiallyApply( + const std::vector& domains) const override; }; } // namespace gtsam diff --git a/gtsam_unstable/discrete/SingleValue.h b/gtsam_unstable/discrete/SingleValue.h index 43f06956d..c4d2addec 100644 --- a/gtsam_unstable/discrete/SingleValue.h +++ b/gtsam_unstable/discrete/SingleValue.h @@ -42,11 +42,11 @@ namespace gtsam { } // print - virtual void print(const std::string& s = "", - const KeyFormatter& formatter = DefaultKeyFormatter) const; + void print(const std::string& s = "", + const KeyFormatter& formatter = DefaultKeyFormatter) const override; /// equals - bool equals(const DiscreteFactor& other, double tol) const { + bool equals(const DiscreteFactor& other, double tol) const override { if(!dynamic_cast(&other)) return false; else { @@ -56,28 +56,27 @@ namespace gtsam { } /// Calculate value - virtual double operator()(const Values& values) const; + double operator()(const Values& values) const override; /// Convert into a decisiontree - virtual DecisionTreeFactor toDecisionTreeFactor() const; + DecisionTreeFactor toDecisionTreeFactor() const override; /// Multiply into a decisiontree - virtual DecisionTreeFactor operator*(const DecisionTreeFactor& f) const; + DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override; /* * Ensure Arc-consistency * @param j domain to be checked * @param domains all other domains */ - bool ensureArcConsistency(size_t j, std::vector& domains) const; + bool ensureArcConsistency(size_t j, std::vector& domains) const override; /// Partially apply known values - virtual Constraint::shared_ptr partiallyApply( - const Values& values) const; + Constraint::shared_ptr partiallyApply(const Values& values) const override; /// Partially apply known values, domain version - virtual Constraint::shared_ptr partiallyApply( - const std::vector& domains) const; + Constraint::shared_ptr partiallyApply( + const std::vector& domains) const override; }; } // namespace gtsam diff --git a/gtsam_unstable/discrete/tests/testPlanning.cpp b/gtsam_unstable/discrete/tests/testPlanning.cpp deleted file mode 100644 index 431d7a4ef..000000000 --- a/gtsam_unstable/discrete/tests/testPlanning.cpp +++ /dev/null @@ -1,37 +0,0 @@ -/* - * testPlanning.cpp - * @brief develop code for planning example - * @date Feb 13, 2012 - * @author Frank Dellaert - */ - -//#include -#include -#include -#include -#include - -using namespace std; -using namespace gtsam; - -/* ************************************************************************* */ -TEST_UNSAFE(Planner, allInOne) -{ - // A small planning problem - // First variable is location - DiscreteKey location(0,3), - haveRock(1,2), haveSoil(2,2), haveImage(3,2), - commRock(4,2), commSoil(5,2), commImage(6,2); - - // There are 3 actions: - // Drive, communicate, sample -} - - -/* ************************************************************************* */ -int main() { - TestResult tr; - return TestRegistry::runAllTests(tr); -} -/* ************************************************************************* */ - diff --git a/gtsam_unstable/discrete/tests/testScheduler.cpp 
b/gtsam_unstable/discrete/tests/testScheduler.cpp index 0be4e4b1f..3f6c6a1e0 100644 --- a/gtsam_unstable/discrete/tests/testScheduler.cpp +++ b/gtsam_unstable/discrete/tests/testScheduler.cpp @@ -75,7 +75,7 @@ DiscreteFactorGraph createExpected() { // Mutual exclusion for students expected.addAllDiff(A, J); - return expected; + return std::move(expected); } /* ************************************************************************* */ diff --git a/gtsam_unstable/dynamics/FullIMUFactor.h b/gtsam_unstable/dynamics/FullIMUFactor.h index 1c5ade5b6..c8c351d7a 100644 --- a/gtsam_unstable/dynamics/FullIMUFactor.h +++ b/gtsam_unstable/dynamics/FullIMUFactor.h @@ -51,12 +51,12 @@ class FullIMUFactor : public NoiseModelFactor2 { virtual ~FullIMUFactor() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /** Check if two factors are equal */ - virtual bool equals(const NonlinearFactor& e, double tol = 1e-9) const { + bool equals(const NonlinearFactor& e, double tol = 1e-9) const override { const This* const f = dynamic_cast(&e); return f && Base::equals(e) && equal_with_abs_tol(accel_, f->accel_, tol) && @@ -64,7 +64,7 @@ class FullIMUFactor : public NoiseModelFactor2 { std::abs(dt_ - f->dt_) < tol; } - void print(const std::string& s="", const gtsam::KeyFormatter& formatter = gtsam::DefaultKeyFormatter) const { + void print(const std::string& s="", const gtsam::KeyFormatter& formatter = gtsam::DefaultKeyFormatter) const override { std::string a = "FullIMUFactor: " + s; Base::print(a, formatter); gtsam::print((Vector)accel_, "accel"); @@ -81,9 +81,9 @@ class FullIMUFactor : public NoiseModelFactor2 { * Error evaluation with optional derivatives - calculates * z - h(x1,x2) */ - virtual Vector evaluateError(const PoseRTV& x1, const PoseRTV& x2, + Vector evaluateError(const PoseRTV& x1, const PoseRTV& x2, boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const { + boost::optional H2 = boost::none) const override { Vector9 z; z.head(3).operator=(accel_); // Strange syntax to work around ambiguous operator error with clang z.segment(3, 3).operator=(gyro_); // Strange syntax to work around ambiguous operator error with clang diff --git a/gtsam_unstable/dynamics/IMUFactor.h b/gtsam_unstable/dynamics/IMUFactor.h index bb0a354ee..fb864a78d 100644 --- a/gtsam_unstable/dynamics/IMUFactor.h +++ b/gtsam_unstable/dynamics/IMUFactor.h @@ -44,12 +44,12 @@ class IMUFactor : public NoiseModelFactor2 { virtual ~IMUFactor() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /** Check if two factors are equal */ - virtual bool equals(const NonlinearFactor& e, double tol = 1e-9) const { + bool equals(const NonlinearFactor& e, double tol = 1e-9) const override { const This* const f = dynamic_cast(&e); return f && Base::equals(e) && equal_with_abs_tol(accel_, f->accel_, tol) && @@ -57,7 +57,7 @@ class IMUFactor : public NoiseModelFactor2 { std::abs(dt_ - f->dt_) < tol; } - void print(const std::string& s="", const gtsam::KeyFormatter& formatter = gtsam::DefaultKeyFormatter) const { + void print(const std::string& s="", const gtsam::KeyFormatter& formatter = gtsam::DefaultKeyFormatter) const override { 
std::string a = "IMUFactor: " + s; Base::print(a, formatter); gtsam::print((Vector)accel_, "accel"); @@ -74,9 +74,9 @@ class IMUFactor : public NoiseModelFactor2 { * Error evaluation with optional derivatives - calculates * z - h(x1,x2) */ - virtual Vector evaluateError(const PoseRTV& x1, const PoseRTV& x2, + Vector evaluateError(const PoseRTV& x1, const PoseRTV& x2, boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const { + boost::optional H2 = boost::none) const override { const Vector6 meas = z(); if (H1) *H1 = numericalDerivative21( boost::bind(This::predict_proxy, _1, _2, dt_, meas), x1, x2, 1e-5); diff --git a/gtsam_unstable/dynamics/Pendulum.h b/gtsam_unstable/dynamics/Pendulum.h index 209456a62..470d7108b 100644 --- a/gtsam_unstable/dynamics/Pendulum.h +++ b/gtsam_unstable/dynamics/Pendulum.h @@ -40,7 +40,7 @@ class PendulumFactor1: public NoiseModelFactor3 { : Base(noiseModel::Constrained::All(1, std::abs(mu)), k1, k, velKey), h_(h) {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new PendulumFactor1(*this))); } @@ -48,7 +48,7 @@ class PendulumFactor1: public NoiseModelFactor3 { Vector evaluateError(const double& qk1, const double& qk, const double& v, boost::optional H1 = boost::none, boost::optional H2 = boost::none, - boost::optional H3 = boost::none) const { + boost::optional H3 = boost::none) const override { const size_t p = 1; if (H1) *H1 = -Matrix::Identity(p,p); if (H2) *H2 = Matrix::Identity(p,p); @@ -88,7 +88,7 @@ class PendulumFactor2: public NoiseModelFactor3 { : Base(noiseModel::Constrained::All(1, std::abs(mu)), vk1, vk, qkey), h_(h), g_(g), r_(r) {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new PendulumFactor2(*this))); } @@ -96,7 +96,7 @@ class PendulumFactor2: public NoiseModelFactor3 { Vector evaluateError(const double & vk1, const double & vk, const double & q, boost::optional H1 = boost::none, boost::optional H2 = boost::none, - boost::optional H3 = boost::none) const { + boost::optional H3 = boost::none) const override { const size_t p = 1; if (H1) *H1 = -Matrix::Identity(p,p); if (H2) *H2 = Matrix::Identity(p,p); @@ -139,7 +139,7 @@ class PendulumFactorPk: public NoiseModelFactor3 { h_(h), m_(m), r_(r), g_(g), alpha_(alpha) {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new PendulumFactorPk(*this))); } @@ -147,7 +147,7 @@ class PendulumFactorPk: public NoiseModelFactor3 { Vector evaluateError(const double & pk, const double & qk, const double & qk1, boost::optional H1 = boost::none, boost::optional H2 = boost::none, - boost::optional H3 = boost::none) const { + boost::optional H3 = boost::none) const override { const size_t p = 1; double qmid = (1-alpha_)*qk + alpha_*qk1; @@ -195,7 +195,7 @@ class PendulumFactorPk1: public NoiseModelFactor3 { h_(h), m_(m), r_(r), g_(g), alpha_(alpha) {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( 
gtsam::NonlinearFactor::shared_ptr(new PendulumFactorPk1(*this))); } @@ -203,7 +203,7 @@ class PendulumFactorPk1: public NoiseModelFactor3 { Vector evaluateError(const double & pk1, const double & qk, const double & qk1, boost::optional H1 = boost::none, boost::optional H2 = boost::none, - boost::optional H3 = boost::none) const { + boost::optional H3 = boost::none) const override { const size_t p = 1; double qmid = (1-alpha_)*qk + alpha_*qk1; diff --git a/gtsam_unstable/dynamics/SimpleHelicopter.h b/gtsam_unstable/dynamics/SimpleHelicopter.h index f38c256b1..42ef04f84 100644 --- a/gtsam_unstable/dynamics/SimpleHelicopter.h +++ b/gtsam_unstable/dynamics/SimpleHelicopter.h @@ -36,7 +36,7 @@ class Reconstruction : public NoiseModelFactor3 { virtual ~Reconstruction() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new Reconstruction(*this))); } @@ -44,7 +44,7 @@ class Reconstruction : public NoiseModelFactor3 { Vector evaluateError(const Pose3& gk1, const Pose3& gk, const Vector6& xik, boost::optional H1 = boost::none, boost::optional H2 = boost::none, - boost::optional H3 = boost::none) const { + boost::optional H3 = boost::none) const override { Matrix6 D_exphxi_xi; Pose3 exphxi = Pose3::Expmap(h_ * xik, H3 ? &D_exphxi_xi : 0); @@ -98,7 +98,7 @@ class DiscreteEulerPoincareHelicopter : public NoiseModelFactor3( gtsam::NonlinearFactor::shared_ptr(new DiscreteEulerPoincareHelicopter(*this))); } @@ -110,7 +110,7 @@ class DiscreteEulerPoincareHelicopter : public NoiseModelFactor3 H1 = boost::none, boost::optional H2 = boost::none, - boost::optional H3 = boost::none) const { + boost::optional H3 = boost::none) const override { Vector muk = Inertia_*xik; Vector muk_1 = Inertia_*xik_1; diff --git a/gtsam_unstable/dynamics/VelocityConstraint.h b/gtsam_unstable/dynamics/VelocityConstraint.h index ed3797015..986d8e271 100644 --- a/gtsam_unstable/dynamics/VelocityConstraint.h +++ b/gtsam_unstable/dynamics/VelocityConstraint.h @@ -73,16 +73,16 @@ class VelocityConstraint : public gtsam::NoiseModelFactor2 { virtual ~VelocityConstraint() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new VelocityConstraint(*this))); } /** * Calculates the error for trapezoidal model given */ - virtual gtsam::Vector evaluateError(const PoseRTV& x1, const PoseRTV& x2, + gtsam::Vector evaluateError(const PoseRTV& x1, const PoseRTV& x2, boost::optional H1=boost::none, - boost::optional H2=boost::none) const { + boost::optional H2=boost::none) const override { if (H1) *H1 = gtsam::numericalDerivative21( boost::bind(VelocityConstraint::evaluateError_, _1, _2, dt_, integration_mode_), x1, x2, 1e-5); if (H2) *H2 = gtsam::numericalDerivative22( @@ -90,7 +90,7 @@ class VelocityConstraint : public gtsam::NoiseModelFactor2 { return evaluateError_(x1, x2, dt_, integration_mode_); } - virtual void print(const std::string& s = "", const gtsam::KeyFormatter& formatter = gtsam::DefaultKeyFormatter) const { + void print(const std::string& s = "", const gtsam::KeyFormatter& formatter = gtsam::DefaultKeyFormatter) const override { std::string a = "VelocityConstraint: " + s; Base::print(a, formatter); switch(integration_mode_) { diff --git 
a/gtsam_unstable/dynamics/VelocityConstraint3.h b/gtsam_unstable/dynamics/VelocityConstraint3.h index 721d0265b..f6cd8ccc4 100644 --- a/gtsam_unstable/dynamics/VelocityConstraint3.h +++ b/gtsam_unstable/dynamics/VelocityConstraint3.h @@ -31,7 +31,7 @@ class VelocityConstraint3 : public NoiseModelFactor3 { virtual ~VelocityConstraint3() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new VelocityConstraint3(*this))); } @@ -39,7 +39,7 @@ class VelocityConstraint3 : public NoiseModelFactor3 { Vector evaluateError(const double& x1, const double& x2, const double& v, boost::optional H1 = boost::none, boost::optional H2 = boost::none, - boost::optional H3 = boost::none) const { + boost::optional H3 = boost::none) const override { const size_t p = 1; if (H1) *H1 = Matrix::Identity(p,p); if (H2) *H2 = -Matrix::Identity(p,p); diff --git a/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp b/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp index 939d3a5c8..1494d784b 100644 --- a/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp +++ b/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp @@ -308,11 +308,11 @@ int main(int argc, char** argv) { // And to demonstrate the fixed-lag aspect, print the keys contained in each smoother after 3.0 seconds cout << "After 15.0 seconds, each version contains to the following keys:" << endl; cout << " Concurrent Filter Keys: " << endl; - for(const auto& key_value: concurrentFilter.getLinearizationPoint()) { + for(const auto key_value: concurrentFilter.getLinearizationPoint()) { cout << setprecision(5) << " Key: " << key_value.key << endl; } cout << " Concurrent Smoother Keys: " << endl; - for(const auto& key_value: concurrentSmoother.getLinearizationPoint()) { + for(const auto key_value: concurrentSmoother.getLinearizationPoint()) { cout << setprecision(5) << " Key: " << key_value.key << endl; } cout << " Fixed-Lag Smoother Keys: " << endl; diff --git a/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp b/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp index 5fdc7a743..64afa8030 100644 --- a/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp +++ b/gtsam_unstable/examples/SmartRangeExample_plaza1.cpp @@ -188,7 +188,8 @@ int main(int argc, char** argv) { smartFactors[j]->addRange(i, range); printf("adding range %g for %d",range,(int)j); } catch (const invalid_argument& e) { - printf("warning: omitting duplicate range %g for %d",range,(int)j); + printf("warning: omitting duplicate range %g for %d: %s", range, + (int)j, e.what()); } cout << endl; } @@ -233,7 +234,7 @@ int main(int argc, char** argv) { } } countK = 0; - for(const Values::ConstFiltered::KeyValuePair& it: result.filter()) + for(const auto it: result.filter()) os2 << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t1" << endl; if (smart) { @@ -256,7 +257,7 @@ int main(int argc, char** argv) { // Write result to file Values result = isam.calculateEstimate(); ofstream os("rangeResult.txt"); - for(const Values::ConstFiltered::KeyValuePair& it: result.filter()) + for(const auto it: result.filter()) os << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t" << it.value.theta() << endl; exit(0); diff --git a/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp b/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp index 
90b2a30ff..95aff85a7 100644 --- a/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp +++ b/gtsam_unstable/examples/SmartRangeExample_plaza2.cpp @@ -202,11 +202,11 @@ int main(int argc, char** argv) { // Write result to file Values result = isam.calculateEstimate(); ofstream os2("rangeResultLM.txt"); - for(const Values::ConstFiltered::KeyValuePair& it: result.filter()) + for(const auto it: result.filter()) os2 << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t1" << endl; ofstream os("rangeResult.txt"); - for(const Values::ConstFiltered::KeyValuePair& it: result.filter()) + for(const auto it: result.filter()) os << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t" << it.value.theta() << endl; exit(0); diff --git a/gtsam_unstable/geometry/Similarity3.cpp b/gtsam_unstable/geometry/Similarity3.cpp index d40fb0b59..819c51fee 100644 --- a/gtsam_unstable/geometry/Similarity3.cpp +++ b/gtsam_unstable/geometry/Similarity3.cpp @@ -19,9 +19,68 @@ #include #include +#include namespace gtsam { +using std::vector; +using PointPairs = vector; + +namespace { +/// Subtract centroids from point pairs. +static PointPairs subtractCentroids(const PointPairs &abPointPairs, + const Point3Pair ¢roids) { + PointPairs d_abPointPairs; + for (const Point3Pair& abPair : abPointPairs) { + Point3 da = abPair.first - centroids.first; + Point3 db = abPair.second - centroids.second; + d_abPointPairs.emplace_back(da, db); + } + return d_abPointPairs; +} + +/// Form inner products x and y and calculate scale. +static const double calculateScale(const PointPairs &d_abPointPairs, + const Rot3 &aRb) { + double x = 0, y = 0; + Point3 da, db; + for (const Point3Pair& d_abPair : d_abPointPairs) { + std::tie(da, db) = d_abPair; + const Vector3 da_prime = aRb * db; + y += da.transpose() * da_prime; + x += da_prime.transpose() * da_prime; + } + const double s = y / x; + return s; +} + +/// Form outer product H. +static Matrix3 calculateH(const PointPairs &d_abPointPairs) { + Matrix3 H = Z_3x3; + for (const Point3Pair& d_abPair : d_abPointPairs) { + H += d_abPair.first * d_abPair.second.transpose(); + } + return H; +} + +/// This method estimates the similarity transform from differences point pairs, given a known or estimated rotation and point centroids. +static Similarity3 align(const PointPairs &d_abPointPairs, const Rot3 &aRb, + const Point3Pair ¢roids) { + const double s = calculateScale(d_abPointPairs, aRb); + const Point3 aTb = (centroids.first - s * (aRb * centroids.second)) / s; + return Similarity3(aRb, aTb, s); +} + +/// This method estimates the similarity transform from point pairs, given a known or estimated rotation. 
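[Editor's note] The anonymous-namespace helpers above implement a closed-form recovery; written out, with centroids \bar a, \bar b, centered pairs \hat a_i = a_i - \bar a and \hat b_i = b_i - \bar b, and the given rotation R = aRb, calculateScale() and align() compute

    s = \frac{\sum_i \hat a_i^\top (R\,\hat b_i)}{\sum_i \lVert R\,\hat b_i \rVert^2},
    \qquad
    t = \frac{\bar a - s\,R\,\bar b}{s},

i.e. exactly the s = y/x and aTb expressions in the code. The division by s is consistent with the point action a = s (R b + t) used by transformFrom further down in this file.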
+// Refer to: http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf Chapter 3 +static Similarity3 alignGivenR(const PointPairs &abPointPairs, + const Rot3 &aRb) { + auto centroids = means(abPointPairs); + auto d_abPointPairs = subtractCentroids(abPointPairs, centroids); + return align(d_abPointPairs, aRb, centroids); +} +} // namespace + Similarity3::Similarity3() : t_(0,0,0), s_(1) { } @@ -54,15 +113,15 @@ bool Similarity3::operator==(const Similarity3& other) const { void Similarity3::print(const std::string& s) const { std::cout << std::endl; std::cout << s; - rotation().print("R:\n"); - std::cout << "t: " << translation().transpose() << "s: " << scale() << std::endl; + rotation().print("\nR:\n"); + std::cout << "t: " << translation().transpose() << " s: " << scale() << std::endl; } Similarity3 Similarity3::identity() { return Similarity3(); } -Similarity3 Similarity3::operator*(const Similarity3& T) const { - return Similarity3(R_ * T.R_, ((1.0 / T.s_) * t_) + R_ * T.t_, s_ * T.s_); +Similarity3 Similarity3::operator*(const Similarity3& S) const { + return Similarity3(R_ * S.R_, ((1.0 / S.s_) * t_) + R_ * S.t_, s_ * S.s_); } Similarity3 Similarity3::inverse() const { @@ -85,11 +144,51 @@ Point3 Similarity3::transformFrom(const Point3& p, // return s_ * q; } +Pose3 Similarity3::transformFrom(const Pose3& T) const { + Rot3 R = R_.compose(T.rotation()); + Point3 t = Point3(s_ * (R_ * T.translation() + t_)); + return Pose3(R, t); +} + Point3 Similarity3::operator*(const Point3& p) const { return transformFrom(p); } -Matrix4 Similarity3::wedge(const Vector7& xi) { +Similarity3 Similarity3::Align(const PointPairs &abPointPairs) { + // Refer to Chapter 3 of + // http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2005/Zinsser05-PSR.pdf + if (abPointPairs.size() < 3) + throw std::runtime_error("input should have at least 3 pairs of points"); + auto centroids = means(abPointPairs); + auto d_abPointPairs = subtractCentroids(abPointPairs, centroids); + Matrix3 H = calculateH(d_abPointPairs); + // ClosestTo finds rotation matrix closest to H in Frobenius sense + Rot3 aRb = Rot3::ClosestTo(H); + return align(d_abPointPairs, aRb, centroids); +} + +Similarity3 Similarity3::Align(const vector &abPosePairs) { + const size_t n = abPosePairs.size(); + if (n < 2) + throw std::runtime_error("input should have at least 2 pairs of poses"); + + // calculate rotation + vector rotations; + PointPairs abPointPairs; + rotations.reserve(n); + abPointPairs.reserve(n); + Pose3 wTa, wTb; + for (const Pose3Pair &abPair : abPosePairs) { + std::tie(wTa, wTb) = abPair; + rotations.emplace_back(wTa.rotation().compose(wTb.rotation().inverse())); + abPointPairs.emplace_back(wTa.translation(), wTb.translation()); + } + const Rot3 aRb = FindKarcherMean(rotations); + + return alignGivenR(abPointPairs, aRb); +} + +Matrix4 Similarity3::wedge(const Vector7 &xi) { // http://www.ethaneade.org/latex2html/lie/node29.html const auto w = xi.head<3>(); const auto u = xi.segment<3>(3); @@ -128,12 +227,13 @@ Matrix3 Similarity3::GetV(Vector3 w, double lambda) { W = 1.0 / 24.0 - theta2 / 720.0; } const double lambda2 = lambda * lambda, lambda3 = lambda2 * lambda; + const double expMinLambda = exp(-lambda); double A, alpha = 0.0, beta, mu; if (lambda2 > 1e-9) { - A = (1.0 - exp(-lambda)) / lambda; + A = (1.0 - expMinLambda) / lambda; alpha = 1.0 / (1.0 + theta2 / lambda2); - beta = (exp(-lambda) - 1 + lambda) / lambda2; - mu = (1 - lambda + (0.5 * lambda2) - exp(-lambda)) / lambda3; + beta = 
(expMinLambda - 1 + lambda) / lambda2; + mu = (1 - lambda + (0.5 * lambda2) - expMinLambda) / lambda3; } else { A = 1.0 - lambda / 2.0 + lambda2 / 6.0; beta = 0.5 - lambda / 6.0 + lambda2 / 24.0 - lambda3 / 120.0; diff --git a/gtsam_unstable/geometry/Similarity3.h b/gtsam_unstable/geometry/Similarity3.h index bf4937ed4..b82862ddb 100644 --- a/gtsam_unstable/geometry/Similarity3.h +++ b/gtsam_unstable/geometry/Similarity3.h @@ -19,10 +19,12 @@ #include #include +#include #include #include #include + namespace gtsam { // Forward declarations @@ -87,7 +89,7 @@ class Similarity3: public LieGroup { GTSAM_UNSTABLE_EXPORT static Similarity3 identity(); /// Composition - GTSAM_UNSTABLE_EXPORT Similarity3 operator*(const Similarity3& T) const; + GTSAM_UNSTABLE_EXPORT Similarity3 operator*(const Similarity3& S) const; /// Return the inverse GTSAM_UNSTABLE_EXPORT Similarity3 inverse() const; @@ -101,9 +103,32 @@ class Similarity3: public LieGroup { OptionalJacobian<3, 7> H1 = boost::none, // OptionalJacobian<3, 3> H2 = boost::none) const; + /** + * Action on a pose T. + * |Rs ts| |R t| |Rs*R Rs*t+ts| + * |0 1/s| * |0 1| = | 0 1/s |, the result is still a Sim3 object. + * To retrieve a Pose3, we normalized the scale value into 1. + * |Rs*R Rs*t+ts| |Rs*R s(Rs*t+ts)| + * | 0 1/s | = | 0 1 | + * + * This group action satisfies the compatibility condition. + * For more details, refer to: https://en.wikipedia.org/wiki/Group_action + */ + GTSAM_UNSTABLE_EXPORT Pose3 transformFrom(const Pose3& T) const; + /** syntactic sugar for transformFrom */ GTSAM_UNSTABLE_EXPORT Point3 operator*(const Point3& p) const; + /** + * Create Similarity3 by aligning at least three point pairs + */ + GTSAM_UNSTABLE_EXPORT static Similarity3 Align(const std::vector& abPointPairs); + + /** + * Create Similarity3 by aligning at least two pose pairs + */ + GTSAM_UNSTABLE_EXPORT static Similarity3 Align(const std::vector& abPosePairs); + /// @} /// @name Lie Group /// @{ @@ -182,8 +207,8 @@ class Similarity3: public LieGroup { /// @name Helper functions /// @{ - /// Calculate expmap and logmap coefficients. private: + /// Calculate expmap and logmap coefficients. 
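[Editor's note] A minimal usage sketch of the point-pair Align overload declared above. The synthetic data, the ground-truth transform, and the function name exampleAlign are hypothetical; only the Similarity3::Align and transformFrom calls are the API added by this patch.

    #include <cmath>
    #include <vector>
    #include <gtsam_unstable/geometry/Similarity3.h>

    // Recover a known similarity from three synthetic correspondences (a_i, b_i).
    gtsam::Similarity3 exampleAlign() {
      using namespace gtsam;
      const Similarity3 aSb(Rot3::Rz(M_PI / 2), Point3(1, 2, 3), 2.0);  // ground truth
      std::vector<Point3Pair> pairs;  // each entry is (a_i, b_i)
      for (const Point3& b : {Point3(0, 0, 0), Point3(3, 0, 0), Point3(3, 0, 4)})
        pairs.emplace_back(aSb.transformFrom(b), b);
      return Similarity3::Align(pairs);  // throws if fewer than 3 pairs are supplied
    }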
static Matrix3 GetV(Vector3 w, double lambda); /// @} diff --git a/gtsam_unstable/geometry/tests/testSimilarity3.cpp b/gtsam_unstable/geometry/tests/testSimilarity3.cpp index b07b5acd6..937761f02 100644 --- a/gtsam_unstable/geometry/tests/testSimilarity3.cpp +++ b/gtsam_unstable/geometry/tests/testSimilarity3.cpp @@ -51,6 +51,8 @@ static const Similarity3 T4(R, P, s); static const Similarity3 T5(R, P, 10); static const Similarity3 T6(Rot3(), Point3(1, 1, 0), 2); // Simpler transform +const double degree = M_PI / 180; + //****************************************************************************** TEST(Similarity3, Concepts) { BOOST_CONCEPT_ASSERT((IsGroup)); @@ -244,7 +246,7 @@ TEST(Similarity3, GroupAction) { &Similarity3::transformFrom, _1, _2, boost::none, boost::none); Point3 q(1, 2, 3); - for (const auto T : { T1, T2, T3, T4, T5, T6 }) { + for (const auto& T : { T1, T2, T3, T4, T5, T6 }) { Point3 q(1, 0, 0); Matrix H1 = numericalDerivative21(f, T, q); Matrix H2 = numericalDerivative22(f, T, q); @@ -255,6 +257,118 @@ TEST(Similarity3, GroupAction) { } } +//****************************************************************************** +// Group action on Pose3 +TEST(Similarity3, GroupActionPose3) { + Similarity3 bSa(Rot3::Ry(180 * degree), Point3(2, 3, 5), 2.0); + + // Create source poses + Pose3 Ta1(Rot3(), Point3(0, 0, 0)); + Pose3 Ta2(Rot3(-1, 0, 0, 0, -1, 0, 0, 0, 1), Point3(4, 0, 0)); + + // Create destination poses + Pose3 expected_Tb1(Rot3(-1, 0, 0, 0, 1, 0, 0, 0, -1), Point3(4, 6, 10)); + Pose3 expected_Tb2(Rot3(1, 0, 0, 0, -1, 0, 0, 0, -1), Point3(-4, 6, 10)); + + EXPECT(assert_equal(expected_Tb1, bSa.transformFrom(Ta1))); + EXPECT(assert_equal(expected_Tb2, bSa.transformFrom(Ta2))); +} + +// Test left group action compatibility. +// cSa*Ta = cSb*bSa*Ta +TEST(Similarity3, GroupActionPose3_Compatibility) { + Similarity3 bSa(Rot3::Ry(180 * degree), Point3(2, 3, 5), 2.0); + Similarity3 cSb(Rot3::Ry(90 * degree), Point3(-10, -4, 0), 3.0); + Similarity3 cSa(Rot3::Ry(270 * degree), Point3(0, 1, -2), 6.0); + + // Create poses + Pose3 Ta1(Rot3(), Point3(0, 0, 0)); + Pose3 Ta2(Rot3(-1, 0, 0, 0, -1, 0, 0, 0, 1), Point3(4, 0, 0)); + Pose3 Tb1(Rot3(-1, 0, 0, 0, 1, 0, 0, 0, -1), Point3(4, 6, 10)); + Pose3 Tb2(Rot3(1, 0, 0, 0, -1, 0, 0, 0, -1), Point3(-4, 6, 10)); + Pose3 Tc1(Rot3(0, 0, -1, 0, 1, 0, 1, 0, 0), Point3(0, 6, -12)); + Pose3 Tc2(Rot3(0, 0, -1, 0, -1, 0, -1, 0, 0), Point3(0, 6, 12)); + + EXPECT(assert_equal(Tc1, cSb.transformFrom(Tb1))); + EXPECT(assert_equal(Tc2, cSb.transformFrom(Tb2))); + + EXPECT(assert_equal(cSa.transformFrom(Ta1), cSb.transformFrom(Tb1))); + EXPECT(assert_equal(cSa.transformFrom(Ta2), cSb.transformFrom(Tb2))); +} + +//****************************************************************************** +// Align with Point3 Pairs +TEST(Similarity3, AlignPoint3_1) { + Similarity3 expected_aSb(Rot3::Rz(-90 * degree), Point3(3, 4, 5), 2.0); + + Point3 b1(0, 0, 0), b2(3, 0, 0), b3(3, 0, 4); + + Point3Pair ab1(make_pair(expected_aSb.transformFrom(b1), b1)); + Point3Pair ab2(make_pair(expected_aSb.transformFrom(b2), b2)); + Point3Pair ab3(make_pair(expected_aSb.transformFrom(b3), b3)); + + vector correspondences{ab1, ab2, ab3}; + + Similarity3 actual_aSb = Similarity3::Align(correspondences); + EXPECT(assert_equal(expected_aSb, actual_aSb)); +} + +TEST(Similarity3, AlignPoint3_2) { + Similarity3 expected_aSb(Rot3(), Point3(10, 10, 0), 1.0); + + Point3 b1(0, 0, 0), b2(20, 10, 0), b3(10, 20, 0); + + Point3Pair ab1(make_pair(expected_aSb.transformFrom(b1), b1)); + 
Point3Pair ab2(make_pair(expected_aSb.transformFrom(b2), b2)); + Point3Pair ab3(make_pair(expected_aSb.transformFrom(b3), b3)); + + vector correspondences{ab1, ab2, ab3}; + + Similarity3 actual_aSb = Similarity3::Align(correspondences); + EXPECT(assert_equal(expected_aSb, actual_aSb)); +} + +TEST(Similarity3, AlignPoint3_3) { + Similarity3 expected_aSb(Rot3::RzRyRx(0.3, 0.2, 0.1), Point3(20, 10, 5), 1.0); + + Point3 b1(0, 0, 1), b2(10, 0, 2), b3(20, -10, 30); + + Point3Pair ab1(make_pair(expected_aSb.transformFrom(b1), b1)); + Point3Pair ab2(make_pair(expected_aSb.transformFrom(b2), b2)); + Point3Pair ab3(make_pair(expected_aSb.transformFrom(b3), b3)); + + vector correspondences{ab1, ab2, ab3}; + + Similarity3 actual_aSb = Similarity3::Align(correspondences); + EXPECT(assert_equal(expected_aSb, actual_aSb)); +} + +//****************************************************************************** +// Align with Pose3 Pairs +TEST(Similarity3, AlignPose3) { + Similarity3 expected_aSb(Rot3::Ry(180 * degree), Point3(2, 3, 5), 2.0); + + // Create source poses + Pose3 Ta1(Rot3(), Point3(0, 0, 0)); + Pose3 Ta2(Rot3(-1, 0, 0, 0, -1, 0, 0, 0, 1), Point3(4, 0, 0)); + + // Create destination poses + Pose3 Tb1(Rot3(-1, 0, 0, 0, 1, 0, 0, 0, -1), Point3(4, 6, 10)); + Pose3 Tb2(Rot3(1, 0, 0, 0, -1, 0, 0, 0, -1), Point3(-4, 6, 10)); + + Pose3Pair bTa1(make_pair(Tb1, Ta1)); + Pose3Pair bTa2(make_pair(Tb2, Ta2)); + + vector correspondences{bTa1, bTa2}; + + // Cayley transform cannot accommodate 180 degree rotations, + // hence we only test for Expmap +#ifdef GTSAM_ROT3_EXPMAP + Similarity3 actual_aSb = Similarity3::Align(correspondences); + EXPECT(assert_equal(expected_aSb, actual_aSb)); +#endif +} + //****************************************************************************** // Test very simple prior optimization example TEST(Similarity3, Optimization) { diff --git a/gtsam_unstable/gtsam_unstable.h b/gtsam_unstable/gtsam_unstable.i similarity index 97% rename from gtsam_unstable/gtsam_unstable.h rename to gtsam_unstable/gtsam_unstable.i index ef2d16bf0..e18d32b59 100644 --- a/gtsam_unstable/gtsam_unstable.h +++ b/gtsam_unstable/gtsam_unstable.i @@ -5,17 +5,19 @@ // specify the classes from gtsam we are using virtual class gtsam::Value; class gtsam::Vector6; -class gtsam::LieScalar; -class gtsam::LieVector; class gtsam::Point2; class gtsam::Point2Vector; class gtsam::Rot2; class gtsam::Pose2; class gtsam::Point3; +class gtsam::SO3; +class gtsam::SO4; +class gtsam::SOn; class gtsam::Rot3; class gtsam::Pose3; virtual class gtsam::noiseModel::Base; virtual class gtsam::noiseModel::Gaussian; +virtual class gtsam::noiseModel::Isotropic; virtual class gtsam::imuBias::ConstantBias; virtual class gtsam::NonlinearFactor; virtual class gtsam::NoiseModelFactor; @@ -39,6 +41,7 @@ class gtsam::KeyVector; class gtsam::LevenbergMarquardtParams; class gtsam::ISAM2Params; class gtsam::GaussianDensity; +class gtsam::LevenbergMarquardtOptimizer; namespace gtsam { @@ -282,7 +285,6 @@ virtual class PriorFactor : gtsam::NoiseModelFactor { void serializable() const; // enabling serialization functionality }; - #include template virtual class BetweenFactor : gtsam::NoiseModelFactor { @@ -472,14 +474,12 @@ virtual class DGroundConstraint : gtsam::NonlinearFactor { DGroundConstraint(size_t key, Vector constraint, const gtsam::noiseModel::Base* model); }; -#include - #include virtual class VelocityConstraint3 : gtsam::NonlinearFactor { /** Standard constructor */ VelocityConstraint3(size_t key1, size_t key2, size_t velKey, double 
dt); - Vector evaluateError(const gtsam::LieScalar& x1, const gtsam::LieScalar& x2, const gtsam::LieScalar& v) const; + Vector evaluateError(const double& x1, const double& x2, const double& v) const; }; #include @@ -487,7 +487,7 @@ virtual class PendulumFactor1 : gtsam::NonlinearFactor { /** Standard constructor */ PendulumFactor1(size_t k1, size_t k, size_t velKey, double dt); - Vector evaluateError(const gtsam::LieScalar& qk1, const gtsam::LieScalar& qk, const gtsam::LieScalar& v) const; + Vector evaluateError(const double& qk1, const double& qk, const double& v) const; }; #include @@ -495,21 +495,21 @@ virtual class PendulumFactor2 : gtsam::NonlinearFactor { /** Standard constructor */ PendulumFactor2(size_t vk1, size_t vk, size_t qKey, double dt, double L, double g); - Vector evaluateError(const gtsam::LieScalar& vk1, const gtsam::LieScalar& vk, const gtsam::LieScalar& q) const; + Vector evaluateError(const double& vk1, const double& vk, const double& q) const; }; virtual class PendulumFactorPk : gtsam::NonlinearFactor { /** Standard constructor */ PendulumFactorPk(size_t pk, size_t qk, size_t qk1, double h, double m, double r, double g, double alpha); - Vector evaluateError(const gtsam::LieScalar& pk, const gtsam::LieScalar& qk, const gtsam::LieScalar& qk1) const; + Vector evaluateError(const double& pk, const double& qk, const double& qk1) const; }; virtual class PendulumFactorPk1 : gtsam::NonlinearFactor { /** Standard constructor */ PendulumFactorPk1(size_t pk1, size_t qk, size_t qk1, double h, double m, double r, double g, double alpha); - Vector evaluateError(const gtsam::LieScalar& pk1, const gtsam::LieScalar& qk, const gtsam::LieScalar& qk1) const; + Vector evaluateError(const double& pk1, const double& qk, const double& qk1) const; }; #include diff --git a/gtsam_unstable/linear/ActiveSetSolver-inl.h b/gtsam_unstable/linear/ActiveSetSolver-inl.h index 602012090..12374ac76 100644 --- a/gtsam_unstable/linear/ActiveSetSolver-inl.h +++ b/gtsam_unstable/linear/ActiveSetSolver-inl.h @@ -149,7 +149,7 @@ Template JacobianFactor::shared_ptr This::createDualFactor( // to compute the least-square approximation of dual variables return boost::make_shared(Aterms, b); } else { - return boost::make_shared(); + return nullptr; } } @@ -165,14 +165,13 @@ Template JacobianFactor::shared_ptr This::createDualFactor( * if lambda = 0 you are on the constraint * if lambda > 0 you are violating the constraint. */ -Template GaussianFactorGraph::shared_ptr This::buildDualGraph( +Template GaussianFactorGraph This::buildDualGraph( const InequalityFactorGraph& workingSet, const VectorValues& delta) const { - GaussianFactorGraph::shared_ptr dualGraph(new GaussianFactorGraph()); + GaussianFactorGraph dualGraph; for (Key key : constrainedKeys_) { // Each constrained key becomes a factor in the dual graph - JacobianFactor::shared_ptr dualFactor = - createDualFactor(key, workingSet, delta); - if (!dualFactor->empty()) dualGraph->push_back(dualFactor); + auto dualFactor = createDualFactor(key, workingSet, delta); + if (dualFactor) dualGraph.push_back(dualFactor); } return dualGraph; } @@ -193,19 +192,16 @@ This::buildWorkingGraph(const InequalityFactorGraph& workingSet, Template typename This::State This::iterate( const typename This::State& state) const { // Algorithm 16.3 from Nocedal06book. 
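[Editor's note] For readers tracking the API change in this hunk: buildDualGraph now returns the dual GaussianFactorGraph by value, and createDualFactor signals "no dual factor for this key" with nullptr instead of an empty JacobianFactor. A sketch of the resulting call-site pattern, assuming an LPSolver and a working set are already in scope as in the solver code and tests below:

    #include <gtsam_unstable/linear/LPSolver.h>

    // Least-squares dual variables for the current working set (value-returning API).
    gtsam::VectorValues dualsAt(const gtsam::LPSolver& solver,
                                const gtsam::InequalityFactorGraph& workingSet,
                                const gtsam::VectorValues& delta) {
      gtsam::GaussianFactorGraph dualGraph = solver.buildDualGraph(workingSet, delta);
      return dualGraph.optimize();  // previously dualGraph->optimize() on a shared_ptr
    }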
- // Solve with the current working set eqn 16.39, but instead of solving for p - // solve for x - GaussianFactorGraph workingGraph = - buildWorkingGraph(state.workingSet, state.values); + // Solve with the current working set eqn 16.39, but solve for x not p + auto workingGraph = buildWorkingGraph(state.workingSet, state.values); VectorValues newValues = workingGraph.optimize(); // If we CAN'T move further // if p_k = 0 is the original condition, modified by Duy to say that the state // update is zero. if (newValues.equals(state.values, 1e-7)) { // Compute lambda from the dual graph - GaussianFactorGraph::shared_ptr dualGraph = buildDualGraph(state.workingSet, - newValues); - VectorValues duals = dualGraph->optimize(); + auto dualGraph = buildDualGraph(state.workingSet, newValues); + VectorValues duals = dualGraph.optimize(); int leavingFactor = identifyLeavingConstraint(state.workingSet, duals); // If all inequality constraints are satisfied: We have the solution!! if (leavingFactor < 0) { diff --git a/gtsam_unstable/linear/ActiveSetSolver.h b/gtsam_unstable/linear/ActiveSetSolver.h index 8c3c5a7e5..318912cf3 100644 --- a/gtsam_unstable/linear/ActiveSetSolver.h +++ b/gtsam_unstable/linear/ActiveSetSolver.h @@ -154,8 +154,8 @@ class ActiveSetSolver { public: /// Just for testing... /// Builds a dual graph from the current working set. - GaussianFactorGraph::shared_ptr buildDualGraph( - const InequalityFactorGraph& workingSet, const VectorValues& delta) const; + GaussianFactorGraph buildDualGraph(const InequalityFactorGraph &workingSet, + const VectorValues &delta) const; /** * Build a working graph of cost, equality and active inequality constraints diff --git a/gtsam_unstable/linear/EqualityFactorGraph.h b/gtsam_unstable/linear/EqualityFactorGraph.h index 43befdbe0..fb3f4c076 100644 --- a/gtsam_unstable/linear/EqualityFactorGraph.h +++ b/gtsam_unstable/linear/EqualityFactorGraph.h @@ -31,6 +31,11 @@ class EqualityFactorGraph: public FactorGraph { public: typedef boost::shared_ptr shared_ptr; + /// Add a linear inequality, forwards arguments to LinearInequality. + template void add(Args &&... args) { + emplace_shared(std::forward(args)...); + } + /// Compute error of a guess. double error(const VectorValues& x) const { double total_error = 0.; diff --git a/gtsam_unstable/linear/InequalityFactorGraph.h b/gtsam_unstable/linear/InequalityFactorGraph.h index c87645697..d042b0436 100644 --- a/gtsam_unstable/linear/InequalityFactorGraph.h +++ b/gtsam_unstable/linear/InequalityFactorGraph.h @@ -47,6 +47,11 @@ class InequalityFactorGraph: public FactorGraph { return Base::equals(other, tol); } + /// Add a linear inequality, forwards arguments to LinearInequality. + template void add(Args &&... args) { + emplace_shared(std::forward(args)...); + } + /** * Compute error of a guess. * Infinity error if it violates an inequality; zero otherwise. 
*/ diff --git a/gtsam_unstable/linear/InfeasibleInitialValues.h b/gtsam_unstable/linear/InfeasibleInitialValues.h index 60adb872e..6f589d0c3 100644 --- a/gtsam_unstable/linear/InfeasibleInitialValues.h +++ b/gtsam_unstable/linear/InfeasibleInitialValues.h @@ -29,10 +29,10 @@ class InfeasibleInitialValues: public ThreadsafeException< InfeasibleInitialValues() { } - virtual ~InfeasibleInitialValues() throw () { + virtual ~InfeasibleInitialValues() noexcept { } - virtual const char *what() const throw () { + const char *what() const noexcept override { if (description_.empty()) description_ = "An infeasible initial value was provided for the solver.\n"; diff --git a/gtsam_unstable/linear/InfeasibleOrUnboundedProblem.h b/gtsam_unstable/linear/InfeasibleOrUnboundedProblem.h index 2184e9f52..25742d61f 100644 --- a/gtsam_unstable/linear/InfeasibleOrUnboundedProblem.h +++ b/gtsam_unstable/linear/InfeasibleOrUnboundedProblem.h @@ -25,10 +25,10 @@ class InfeasibleOrUnboundedProblem: public ThreadsafeException< public: InfeasibleOrUnboundedProblem() { } - virtual ~InfeasibleOrUnboundedProblem() throw () { + virtual ~InfeasibleOrUnboundedProblem() noexcept { } - virtual const char* what() const throw () { + const char* what() const noexcept override { if (description_.empty()) description_ = "The problem is either infeasible or unbounded.\n"; return description_.c_str(); diff --git a/gtsam_unstable/linear/LPInitSolver.h b/gtsam_unstable/linear/LPInitSolver.h index 4eb672fbc..14e5fb000 100644 --- a/gtsam_unstable/linear/LPInitSolver.h +++ b/gtsam_unstable/linear/LPInitSolver.h @@ -21,7 +21,6 @@ #include #include -#include namespace gtsam { /** @@ -83,7 +82,7 @@ class LPInitSolver { const InequalityFactorGraph& inequalities) const; // friend class for unit-testing private methods - FRIEND_TEST(LPInitSolver, initialization); + friend class LPInitSolverInitializationTest; }; } diff --git a/gtsam_unstable/linear/LinearCost.h b/gtsam_unstable/linear/LinearCost.h index b489510af..c22c389be 100644 --- a/gtsam_unstable/linear/LinearCost.h +++ b/gtsam_unstable/linear/LinearCost.h @@ -88,18 +88,18 @@ class LinearCost: public JacobianFactor { } /** equals */ - virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const { + bool equals(const GaussianFactor& lf, double tol = 1e-9) const override { return Base::equals(lf, tol); } /** print */ - virtual void print(const std::string& s = "", const KeyFormatter& formatter = - DefaultKeyFormatter) const { + void print(const std::string& s = "", const KeyFormatter& formatter = + DefaultKeyFormatter) const override { Base::print(s + " LinearCost: ", formatter); } /** Clone this LinearCost */ - virtual GaussianFactor::shared_ptr clone() const { + GaussianFactor::shared_ptr clone() const override { return boost::static_pointer_cast < GaussianFactor > (boost::make_shared < LinearCost > (*this)); } @@ -110,7 +110,7 @@ class LinearCost: public JacobianFactor { } /** Special error for single-valued inequality constraints. 
*/ - virtual double error(const VectorValues& c) const { + double error(const VectorValues& c) const override { return error_vector(c)[0]; } }; diff --git a/gtsam_unstable/linear/LinearEquality.h b/gtsam_unstable/linear/LinearEquality.h index 2463ef31c..d960d8336 100644 --- a/gtsam_unstable/linear/LinearEquality.h +++ b/gtsam_unstable/linear/LinearEquality.h @@ -89,18 +89,18 @@ class LinearEquality: public JacobianFactor { } /** equals */ - virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const { + bool equals(const GaussianFactor& lf, double tol = 1e-9) const override { return Base::equals(lf, tol); } /** print */ - virtual void print(const std::string& s = "", const KeyFormatter& formatter = - DefaultKeyFormatter) const { + void print(const std::string& s = "", const KeyFormatter& formatter = + DefaultKeyFormatter) const override { Base::print(s, formatter); } /** Clone this LinearEquality */ - virtual GaussianFactor::shared_ptr clone() const { + GaussianFactor::shared_ptr clone() const override { return boost::static_pointer_cast < GaussianFactor > (boost::make_shared < LinearEquality > (*this)); } @@ -124,7 +124,7 @@ class LinearEquality: public JacobianFactor { * I think it should be zero, as this function is meant for objective cost. * But the name "error" can be misleading. * TODO: confirm with Frank!! */ - virtual double error(const VectorValues& c) const { + double error(const VectorValues& c) const override { return 0.0; } diff --git a/gtsam_unstable/linear/LinearInequality.h b/gtsam_unstable/linear/LinearInequality.h index 1a31bd4e4..d353afc46 100644 --- a/gtsam_unstable/linear/LinearInequality.h +++ b/gtsam_unstable/linear/LinearInequality.h @@ -100,13 +100,13 @@ class LinearInequality: public JacobianFactor { } /** equals */ - virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const { + bool equals(const GaussianFactor& lf, double tol = 1e-9) const override { return Base::equals(lf, tol); } /** print */ - virtual void print(const std::string& s = "", const KeyFormatter& formatter = - DefaultKeyFormatter) const { + void print(const std::string& s = "", const KeyFormatter& formatter = + DefaultKeyFormatter) const override { if (active()) Base::print(s + " Active", formatter); else @@ -114,7 +114,7 @@ class LinearInequality: public JacobianFactor { } /** Clone this LinearInequality */ - virtual GaussianFactor::shared_ptr clone() const { + GaussianFactor::shared_ptr clone() const override { return boost::static_pointer_cast < GaussianFactor > (boost::make_shared < LinearInequality > (*this)); } @@ -145,7 +145,7 @@ class LinearInequality: public JacobianFactor { } /** Special error for single-valued inequality constraints. 
*/ - virtual double error(const VectorValues& c) const { + double error(const VectorValues& c) const override { return error_vector(c)[0]; } diff --git a/gtsam_unstable/linear/QPSParser.cpp b/gtsam_unstable/linear/QPSParser.cpp index 3039185f2..df21c0132 100644 --- a/gtsam_unstable/linear/QPSParser.cpp +++ b/gtsam_unstable/linear/QPSParser.cpp @@ -81,7 +81,7 @@ class QPSVisitor { varname_to_key; // Variable QPS string name to key std::unordered_map> H; // H from hessian - double f; // Constant term of quadratic cost + double f = 0; // Constant term of quadratic cost std::string obj_name; // the objective function has a name in the QPS std::string name_; // the quadratic program has a name in the QPS std::unordered_map @@ -175,10 +175,11 @@ class QPSVisitor { string var_ = fromChars<1>(vars); string row_ = fromChars<3>(vars); double coefficient = at_c<5>(vars); - if (row_ == obj_name) + if (row_ == obj_name) { f = -coefficient; - else + } else { b[row_] = coefficient; + } if (debug) { cout << "Added RHS for Var: " << var_ << " Row: " << row_ @@ -194,15 +195,17 @@ class QPSVisitor { string row2_ = fromChars<7>(vars); double coefficient1 = at_c<5>(vars); double coefficient2 = at_c<9>(vars); - if (row1_ == obj_name) + if (row1_ == obj_name) { f = -coefficient1; - else + } else { b[row1_] = coefficient1; + } - if (row2_ == obj_name) + if (row2_ == obj_name) { f = -coefficient2; - else + } else { b[row2_] = coefficient2; + } if (debug) { cout << "Added RHS for Var: " << var_ << " Row: " << row1_ diff --git a/gtsam_unstable/linear/QPSParserException.h b/gtsam_unstable/linear/QPSParserException.h index ed4c79bdd..ef14ed430 100644 --- a/gtsam_unstable/linear/QPSParserException.h +++ b/gtsam_unstable/linear/QPSParserException.h @@ -25,10 +25,10 @@ class QPSParserException: public ThreadsafeException { QPSParserException() { } - virtual ~QPSParserException() throw () { + virtual ~QPSParserException() noexcept { } - virtual const char *what() const throw () { + const char *what() const noexcept override { if (description_.empty()) description_ = "There is a problem parsing the QPS file.\n"; return description_.c_str(); diff --git a/gtsam_unstable/linear/tests/testLPSolver.cpp b/gtsam_unstable/linear/tests/testLPSolver.cpp index a105a39f0..53c8c7618 100644 --- a/gtsam_unstable/linear/tests/testLPSolver.cpp +++ b/gtsam_unstable/linear/tests/testLPSolver.cpp @@ -16,21 +16,23 @@ * @author Duy-Nguyen Ta */ +#include +#include + #include -#include #include -#include +#include #include +#include #include #include #include + #include + #include #include -#include -#include - using namespace std; using namespace gtsam; using namespace gtsam::symbol_shorthand; @@ -47,37 +49,27 @@ static const Vector kOne = Vector::Ones(1), kZero = Vector::Zero(1); */ LP simpleLP1() { LP lp; - lp.cost = LinearCost(1, Vector2(-1., -1.)); // min -x1-x2 (max x1+x2) - lp.inequalities.push_back( - LinearInequality(1, Vector2(-1, 0), 0, 1)); // x1 >= 0 - lp.inequalities.push_back( - LinearInequality(1, Vector2(0, -1), 0, 2)); // x2 >= 0 - lp.inequalities.push_back( - LinearInequality(1, Vector2(1, 2), 4, 3)); // x1 + 2*x2 <= 4 - lp.inequalities.push_back( - LinearInequality(1, Vector2(4, 2), 12, 4)); // 4x1 + 2x2 <= 12 - lp.inequalities.push_back( - LinearInequality(1, Vector2(-1, 1), 1, 5)); // -x1 + x2 <= 1 + lp.cost = LinearCost(1, Vector2(-1., -1.)); // min -x1-x2 (max x1+x2) + lp.inequalities.add(1, Vector2(-1, 0), 0, 1); // x1 >= 0 + lp.inequalities.add(1, Vector2(0, -1), 0, 2); // x2 >= 0 + lp.inequalities.add(1, Vector2(1, 2), 
4, 3); // x1 + 2*x2 <= 4 + lp.inequalities.add(1, Vector2(4, 2), 12, 4); // 4x1 + 2x2 <= 12 + lp.inequalities.add(1, Vector2(-1, 1), 1, 5); // -x1 + x2 <= 1 return lp; } /* ************************************************************************* */ namespace gtsam { -TEST(LPInitSolver, infinite_loop_single_var) { - LP initchecker; - initchecker.cost = LinearCost(1, Vector3(0, 0, 1)); // min alpha - initchecker.inequalities.push_back( - LinearInequality(1, Vector3(-2, -1, -1), -2, 1)); //-2x-y-alpha <= -2 - initchecker.inequalities.push_back( - LinearInequality(1, Vector3(-1, 2, -1), 6, 2)); // -x+2y-alpha <= 6 - initchecker.inequalities.push_back( - LinearInequality(1, Vector3(-1, 0, -1), 0, 3)); // -x - alpha <= 0 - initchecker.inequalities.push_back( - LinearInequality(1, Vector3(1, 0, -1), 20, 4)); // x - alpha <= 20 - initchecker.inequalities.push_back( - LinearInequality(1, Vector3(0, -1, -1), 0, 5)); // -y - alpha <= 0 - LPSolver solver(initchecker); +TEST(LPInitSolver, InfiniteLoopSingleVar) { + LP lp; + lp.cost = LinearCost(1, Vector3(0, 0, 1)); // min alpha + lp.inequalities.add(1, Vector3(-2, -1, -1), -2, 1); //-2x-y-a <= -2 + lp.inequalities.add(1, Vector3(-1, 2, -1), 6, 2); // -x+2y-a <= 6 + lp.inequalities.add(1, Vector3(-1, 0, -1), 0, 3); // -x - a <= 0 + lp.inequalities.add(1, Vector3(1, 0, -1), 20, 4); // x - a <= 20 + lp.inequalities.add(1, Vector3(0, -1, -1), 0, 5); // -y - a <= 0 + LPSolver solver(lp); VectorValues starter; starter.insert(1, Vector3(0, 0, 2)); VectorValues results, duals; @@ -87,25 +79,23 @@ TEST(LPInitSolver, infinite_loop_single_var) { CHECK(assert_equal(results, expected, 1e-7)); } -TEST(LPInitSolver, infinite_loop_multi_var) { - LP initchecker; +TEST(LPInitSolver, InfiniteLoopMultiVar) { + LP lp; Key X = symbol('X', 1); Key Y = symbol('Y', 1); Key Z = symbol('Z', 1); - initchecker.cost = LinearCost(Z, kOne); // min alpha - initchecker.inequalities.push_back( - LinearInequality(X, -2.0 * kOne, Y, -1.0 * kOne, Z, -1.0 * kOne, -2, - 1)); //-2x-y-alpha <= -2 - initchecker.inequalities.push_back( - LinearInequality(X, -1.0 * kOne, Y, 2.0 * kOne, Z, -1.0 * kOne, 6, - 2)); // -x+2y-alpha <= 6 - initchecker.inequalities.push_back(LinearInequality( - X, -1.0 * kOne, Z, -1.0 * kOne, 0, 3)); // -x - alpha <= 0 - initchecker.inequalities.push_back(LinearInequality( - X, 1.0 * kOne, Z, -1.0 * kOne, 20, 4)); // x - alpha <= 20 - initchecker.inequalities.push_back(LinearInequality( - Y, -1.0 * kOne, Z, -1.0 * kOne, 0, 5)); // -y - alpha <= 0 - LPSolver solver(initchecker); + lp.cost = LinearCost(Z, kOne); // min alpha + lp.inequalities.add(X, -2.0 * kOne, Y, -1.0 * kOne, Z, -1.0 * kOne, -2, + 1); //-2x-y-alpha <= -2 + lp.inequalities.add(X, -1.0 * kOne, Y, 2.0 * kOne, Z, -1.0 * kOne, 6, + 2); // -x+2y-alpha <= 6 + lp.inequalities.add(X, -1.0 * kOne, Z, -1.0 * kOne, 0, + 3); // -x - alpha <= 0 + lp.inequalities.add(X, 1.0 * kOne, Z, -1.0 * kOne, 20, + 4); // x - alpha <= 20 + lp.inequalities.add(Y, -1.0 * kOne, Z, -1.0 * kOne, 0, + 5); // -y - alpha <= 0 + LPSolver solver(lp); VectorValues starter; starter.insert(X, kZero); starter.insert(Y, kZero); @@ -119,7 +109,7 @@ TEST(LPInitSolver, infinite_loop_multi_var) { CHECK(assert_equal(results, expected, 1e-7)); } -TEST(LPInitSolver, initialization) { +TEST(LPInitSolver, Initialization) { LP lp = simpleLP1(); LPInitSolver initSolver(lp); @@ -138,19 +128,19 @@ TEST(LPInitSolver, initialization) { LP::shared_ptr initLP = initSolver.buildInitialLP(yKey); LP expectedInitLP; expectedInitLP.cost = LinearCost(yKey, kOne); - 
expectedInitLP.inequalities.push_back(LinearInequality( - 1, Vector2(-1, 0), 2, Vector::Constant(1, -1), 0, 1)); // -x1 - y <= 0 - expectedInitLP.inequalities.push_back(LinearInequality( - 1, Vector2(0, -1), 2, Vector::Constant(1, -1), 0, 2)); // -x2 - y <= 0 - expectedInitLP.inequalities.push_back( - LinearInequality(1, Vector2(1, 2), 2, Vector::Constant(1, -1), 4, - 3)); // x1 + 2*x2 - y <= 4 - expectedInitLP.inequalities.push_back( - LinearInequality(1, Vector2(4, 2), 2, Vector::Constant(1, -1), 12, - 4)); // 4x1 + 2x2 - y <= 12 - expectedInitLP.inequalities.push_back( - LinearInequality(1, Vector2(-1, 1), 2, Vector::Constant(1, -1), 1, - 5)); // -x1 + x2 - y <= 1 + expectedInitLP.inequalities.add(1, Vector2(-1, 0), 2, Vector::Constant(1, -1), + 0, 1); // -x1 - y <= 0 + expectedInitLP.inequalities.add(1, Vector2(0, -1), 2, Vector::Constant(1, -1), + 0, 2); // -x2 - y <= 0 + expectedInitLP.inequalities.add(1, Vector2(1, 2), 2, Vector::Constant(1, -1), + 4, + 3); // x1 + 2*x2 - y <= 4 + expectedInitLP.inequalities.add(1, Vector2(4, 2), 2, Vector::Constant(1, -1), + 12, + 4); // 4x1 + 2x2 - y <= 12 + expectedInitLP.inequalities.add(1, Vector2(-1, 1), 2, Vector::Constant(1, -1), + 1, + 5); // -x1 + x2 - y <= 1 CHECK(assert_equal(expectedInitLP, *initLP, 1e-10)); LPSolver lpSolveInit(*initLP); VectorValues xy0(x0); @@ -164,7 +154,7 @@ TEST(LPInitSolver, initialization) { VectorValues x = initSolver.solve(); CHECK(lp.isFeasible(x)); } -} +} // namespace gtsam /* ************************************************************************* */ /** @@ -173,28 +163,24 @@ TEST(LPInitSolver, initialization) { * x - y = 5 * x + 2y = 6 */ -TEST(LPSolver, overConstrainedLinearSystem) { +TEST(LPSolver, OverConstrainedLinearSystem) { GaussianFactorGraph graph; Matrix A1 = Vector3(1, 1, 1); Matrix A2 = Vector3(1, -1, 2); Vector b = Vector3(1, 5, 6); - JacobianFactor factor(1, A1, 2, A2, b, noiseModel::Constrained::All(3)); - graph.push_back(factor); + graph.add(1, A1, 2, A2, b, noiseModel::Constrained::All(3)); VectorValues x = graph.optimize(); // This check confirms that gtsam linear constraint solver can't handle // over-constrained system - CHECK(factor.error(x) != 0.0); + CHECK(graph[0]->error(x) != 0.0); } TEST(LPSolver, overConstrainedLinearSystem2) { GaussianFactorGraph graph; - graph.emplace_shared(1, I_1x1, 2, I_1x1, kOne, - noiseModel::Constrained::All(1)); - graph.emplace_shared(1, I_1x1, 2, -I_1x1, 5 * kOne, - noiseModel::Constrained::All(1)); - graph.emplace_shared(1, I_1x1, 2, 2 * I_1x1, 6 * kOne, - noiseModel::Constrained::All(1)); + graph.add(1, I_1x1, 2, I_1x1, kOne, noiseModel::Constrained::All(1)); + graph.add(1, I_1x1, 2, -I_1x1, 5 * kOne, noiseModel::Constrained::All(1)); + graph.add(1, I_1x1, 2, 2 * I_1x1, 6 * kOne, noiseModel::Constrained::All(1)); VectorValues x = graph.optimize(); // This check confirms that gtsam linear constraint solver can't handle // over-constrained system @@ -202,7 +188,7 @@ TEST(LPSolver, overConstrainedLinearSystem2) { } /* ************************************************************************* */ -TEST(LPSolver, simpleTest1) { +TEST(LPSolver, SimpleTest1) { LP lp = simpleLP1(); LPSolver lpSolver(lp); VectorValues init; @@ -222,7 +208,7 @@ TEST(LPSolver, simpleTest1) { } /* ************************************************************************* */ -TEST(LPSolver, testWithoutInitialValues) { +TEST(LPSolver, TestWithoutInitialValues) { LP lp = simpleLP1(); LPSolver lpSolver(lp); VectorValues result, duals, expectedResult; diff --git 
a/gtsam_unstable/linear/tests/testQPSolver.cpp b/gtsam_unstable/linear/tests/testQPSolver.cpp index 2292c63d7..67a0c971e 100644 --- a/gtsam_unstable/linear/tests/testQPSolver.cpp +++ b/gtsam_unstable/linear/tests/testQPSolver.cpp @@ -17,10 +17,12 @@ * @author Ivan Dario Jimenez */ +#include +#include + #include #include -#include -#include + #include using namespace std; @@ -40,15 +42,15 @@ QP createTestCase() { // Hence, we have G11=2, G12 = -1, g1 = +3, G22 = 2, g2 = 0, f = 10 //TODO: THIS TEST MIGHT BE WRONG : the last parameter might be 5 instead of 10 because the form of the equation // Should be 0.5x'Gx + gx + f : Nocedal 449 - qp.cost.push_back( - HessianFactor(X(1), X(2), 2.0 * I_1x1, -I_1x1, 3.0 * I_1x1, 2.0 * I_1x1, - Z_1x1, 10.0)); + qp.cost.push_back(HessianFactor(X(1), X(2), 2.0 * I_1x1, -I_1x1, 3.0 * I_1x1, + 2.0 * I_1x1, Z_1x1, 10.0)); // Inequality constraints - qp.inequalities.push_back(LinearInequality(X(1), I_1x1, X(2), I_1x1, 2, 0)); // x1 + x2 <= 2 --> x1 + x2 -2 <= 0, --> b=2 - qp.inequalities.push_back(LinearInequality(X(1), -I_1x1, 0, 1)); // -x1 <= 0 - qp.inequalities.push_back(LinearInequality(X(2), -I_1x1, 0, 2)); // -x2 <= 0 - qp.inequalities.push_back(LinearInequality(X(1), I_1x1, 1.5, 3)); // x1 <= 3/2 + qp.inequalities.add(X(1), I_1x1, X(2), I_1x1, 2, + 0); // x1 + x2 <= 2 --> x1 + x2 -2 <= 0, --> b=2 + qp.inequalities.add(X(1), -I_1x1, 0, 1); // -x1 <= 0 + qp.inequalities.add(X(2), -I_1x1, 0, 2); // -x2 <= 0 + qp.inequalities.add(X(1), I_1x1, 1.5, 3); // x1 <= 3/2 return qp; } @@ -94,16 +96,15 @@ QP createEqualityConstrainedTest() { // Note the Hessian encodes: // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f // Hence, we have G11=2, G12 = 0, g1 = 0, G22 = 2, g2 = 0, f = 0 - qp.cost.push_back( - HessianFactor(X(1), X(2), 2.0 * I_1x1, Z_1x1, Z_1x1, 2.0 * I_1x1, Z_1x1, - 0.0)); + qp.cost.push_back(HessianFactor(X(1), X(2), 2.0 * I_1x1, Z_1x1, Z_1x1, + 2.0 * I_1x1, Z_1x1, 0.0)); // Equality constraints // x1 + x2 = 1 --> x1 + x2 -1 = 0, hence we negate the b vector Matrix A1 = I_1x1; Matrix A2 = I_1x1; Vector b = -kOne; - qp.equalities.push_back(LinearEquality(X(1), A1, X(2), A2, b, 0)); + qp.equalities.add(X(1), A1, X(2), A2, b, 0); return qp; } @@ -118,9 +119,8 @@ TEST(QPSolver, dual) { QPSolver solver(qp); - GaussianFactorGraph::shared_ptr dualGraph = solver.buildDualGraph( - qp.inequalities, initialValues); - VectorValues dual = dualGraph->optimize(); + auto dualGraph = solver.buildDualGraph(qp.inequalities, initialValues); + VectorValues dual = dualGraph.optimize(); VectorValues expectedDual; expectedDual.insert(0, (Vector(1) << 2.0).finished()); CHECK(assert_equal(expectedDual, dual, 1e-10)); @@ -135,19 +135,19 @@ TEST(QPSolver, indentifyActiveConstraints) { currentSolution.insert(X(1), Z_1x1); currentSolution.insert(X(2), Z_1x1); - InequalityFactorGraph workingSet = solver.identifyActiveConstraints( - qp.inequalities, currentSolution); + auto workingSet = + solver.identifyActiveConstraints(qp.inequalities, currentSolution); CHECK(!workingSet.at(0)->active()); // inactive - CHECK(workingSet.at(1)->active());// active - CHECK(workingSet.at(2)->active());// active - CHECK(!workingSet.at(3)->active());// inactive + CHECK(workingSet.at(1)->active()); // active + CHECK(workingSet.at(2)->active()); // active + CHECK(!workingSet.at(3)->active()); // inactive VectorValues solution = solver.buildWorkingGraph(workingSet).optimize(); - VectorValues expectedSolution; - expectedSolution.insert(X(1), kZero); - expectedSolution.insert(X(2), 
kZero); - CHECK(assert_equal(expectedSolution, solution, 1e-100)); + VectorValues expected; + expected.insert(X(1), kZero); + expected.insert(X(2), kZero); + CHECK(assert_equal(expected, solution, 1e-100)); } /* ************************************************************************* */ @@ -159,24 +159,24 @@ TEST(QPSolver, iterate) { currentSolution.insert(X(1), Z_1x1); currentSolution.insert(X(2), Z_1x1); - std::vector expectedSolutions(4), expectedDuals(4); - expectedSolutions[0].insert(X(1), kZero); - expectedSolutions[0].insert(X(2), kZero); + std::vector expected(4), expectedDuals(4); + expected[0].insert(X(1), kZero); + expected[0].insert(X(2), kZero); expectedDuals[0].insert(1, (Vector(1) << 3).finished()); expectedDuals[0].insert(2, kZero); - expectedSolutions[1].insert(X(1), (Vector(1) << 1.5).finished()); - expectedSolutions[1].insert(X(2), kZero); + expected[1].insert(X(1), (Vector(1) << 1.5).finished()); + expected[1].insert(X(2), kZero); expectedDuals[1].insert(3, (Vector(1) << 1.5).finished()); - expectedSolutions[2].insert(X(1), (Vector(1) << 1.5).finished()); - expectedSolutions[2].insert(X(2), (Vector(1) << 0.75).finished()); + expected[2].insert(X(1), (Vector(1) << 1.5).finished()); + expected[2].insert(X(2), (Vector(1) << 0.75).finished()); - expectedSolutions[3].insert(X(1), (Vector(1) << 1.5).finished()); - expectedSolutions[3].insert(X(2), (Vector(1) << 0.5).finished()); + expected[3].insert(X(1), (Vector(1) << 1.5).finished()); + expected[3].insert(X(2), (Vector(1) << 0.5).finished()); - InequalityFactorGraph workingSet = solver.identifyActiveConstraints( - qp.inequalities, currentSolution); + auto workingSet = + solver.identifyActiveConstraints(qp.inequalities, currentSolution); QPSolver::State state(currentSolution, VectorValues(), workingSet, false, 100); @@ -188,12 +188,12 @@ TEST(QPSolver, iterate) { // Forst10book do not follow exactly what we implemented from Nocedal06book. // Specifically, we do not re-identify active constraints and // do not recompute dual variables after every step!!! 
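[Editor's note] The inequalities.add(...) calls used throughout these tests come from the variadic helper added to InequalityFactorGraph earlier in this patch. A sketch of that helper with the forwarded factor type written out explicitly (the patch's own doc comment states it forwards to LinearInequality):

    /// Add a linear inequality, forwards arguments to LinearInequality.
    template <class... Args>
    void add(Args&&... args) {
      emplace_shared<LinearInequality>(std::forward<Args>(args)...);
    }

so a call such as

    qp.inequalities.add(X(1), I_1x1, X(2), I_1x1, 2, 0);  // x1 + x2 <= 2

constructs the LinearInequality in place, replacing the older
qp.inequalities.push_back(LinearInequality(X(1), I_1x1, X(2), I_1x1, 2, 0)).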
-// CHECK(assert_equal(expectedSolutions[it], state.values, 1e-10)); -// CHECK(assert_equal(expectedDuals[it], state.duals, 1e-10)); + // CHECK(assert_equal(expected[it], state.values, 1e-10)); + // CHECK(assert_equal(expectedDuals[it], state.duals, 1e-10)); it++; } - CHECK(assert_equal(expectedSolutions[3], state.values, 1e-10)); + CHECK(assert_equal(expected[3], state.values, 1e-10)); } /* ************************************************************************* */ @@ -204,182 +204,161 @@ TEST(QPSolver, optimizeForst10book_pg171Ex5) { VectorValues initialValues; initialValues.insert(X(1), Z_1x1); initialValues.insert(X(2), Z_1x1); - VectorValues solution; - boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues); - VectorValues expectedSolution; - expectedSolution.insert(X(1), (Vector(1) << 1.5).finished()); - expectedSolution.insert(X(2), (Vector(1) << 0.5).finished()); - CHECK(assert_equal(expectedSolution, solution, 1e-100)); + VectorValues solution = solver.optimize(initialValues).first; + VectorValues expected; + expected.insert(X(1), (Vector(1) << 1.5).finished()); + expected.insert(X(2), (Vector(1) << 0.5).finished()); + CHECK(assert_equal(expected, solution, 1e-100)); } pair testParser(QPSParser parser) { QP exampleqp = parser.Parse(); - QP expectedqp; + QP expected; Key X1(Symbol('X', 1)), X2(Symbol('X', 2)); // min f(x,y) = 4 + 1.5x -y + 0.58x^2 + 2xy + 2yx + 10y^2 - expectedqp.cost.push_back( - HessianFactor(X1, X2, 8.0 * I_1x1, 2.0 * I_1x1, -1.5 * kOne, 10.0 * I_1x1, - 2.0 * kOne, 8.0)); - // 2x + y >= 2 - // -x + 2y <= 6 - expectedqp.inequalities.push_back( - LinearInequality(X1, -2.0 * I_1x1, X2, -I_1x1, -2, 0)); - expectedqp.inequalities.push_back( - LinearInequality(X1, -I_1x1, X2, 2.0 * I_1x1, 6, 1)); - // x<= 20 - expectedqp.inequalities.push_back(LinearInequality(X1, I_1x1, 20, 4)); - //x >= 0 - expectedqp.inequalities.push_back(LinearInequality(X1, -I_1x1, 0, 2)); - // y > = 0 - expectedqp.inequalities.push_back(LinearInequality(X2, -I_1x1, 0, 3)); - return std::make_pair(expectedqp, exampleqp); -} -; + expected.cost.push_back(HessianFactor(X1, X2, 8.0 * I_1x1, 2.0 * I_1x1, + -1.5 * kOne, 10.0 * I_1x1, 2.0 * kOne, + 8.0)); + + expected.inequalities.add(X1, -2.0 * I_1x1, X2, -I_1x1, -2, 0); // 2x + y >= 2 + expected.inequalities.add(X1, -I_1x1, X2, 2.0 * I_1x1, 6, 1); // -x + 2y <= 6 + expected.inequalities.add(X1, I_1x1, 20, 4); // x<= 20 + expected.inequalities.add(X1, -I_1x1, 0, 2); // x >= 0 + expected.inequalities.add(X2, -I_1x1, 0, 3); // y > = 0 + return {expected, exampleqp}; +}; TEST(QPSolver, ParserSyntaticTest) { - auto expectedActual = testParser(QPSParser("QPExample.QPS")); - CHECK(assert_equal(expectedActual.first.cost, expectedActual.second.cost, + auto result = testParser(QPSParser("QPExample.QPS")); + CHECK(assert_equal(result.first.cost, result.second.cost, 1e-7)); + CHECK(assert_equal(result.first.inequalities, result.second.inequalities, 1e-7)); - CHECK(assert_equal(expectedActual.first.inequalities, - expectedActual.second.inequalities, 1e-7)); - CHECK(assert_equal(expectedActual.first.equalities, - expectedActual.second.equalities, 1e-7)); + CHECK(assert_equal(result.first.equalities, result.second.equalities, 1e-7)); } TEST(QPSolver, ParserSemanticTest) { - auto expected_actual = testParser(QPSParser("QPExample.QPS")); - VectorValues actualSolution, expectedSolution; - boost::tie(expectedSolution, boost::tuples::ignore) = - QPSolver(expected_actual.first).optimize(); - boost::tie(actualSolution, boost::tuples::ignore) = - 
QPSolver(expected_actual.second).optimize(); - CHECK(assert_equal(actualSolution, expectedSolution, 1e-7)); + auto result = testParser(QPSParser("QPExample.QPS")); + VectorValues expected = QPSolver(result.first).optimize().first; + VectorValues actual = QPSolver(result.second).optimize().first; + CHECK(assert_equal(actual, expected, 1e-7)); } -TEST(QPSolver, QPExampleTest){ +TEST(QPSolver, QPExampleTest) { QP problem = QPSParser("QPExample.QPS").Parse(); - VectorValues actualSolution; auto solver = QPSolver(problem); - boost::tie(actualSolution, boost::tuples::ignore) = solver.optimize(); - VectorValues expectedSolution; - expectedSolution.insert(Symbol('X',1),0.7625*I_1x1); - expectedSolution.insert(Symbol('X',2),0.4750*I_1x1); - double error_expected = problem.cost.error(expectedSolution); - double error_actual = problem.cost.error(actualSolution); - CHECK(assert_equal(expectedSolution, actualSolution, 1e-7)) + VectorValues actual = solver.optimize().first; + VectorValues expected; + expected.insert(Symbol('X', 1), 0.7625 * I_1x1); + expected.insert(Symbol('X', 2), 0.4750 * I_1x1); + double error_expected = problem.cost.error(expected); + double error_actual = problem.cost.error(actual); + CHECK(assert_equal(expected, actual, 1e-7)) CHECK(assert_equal(error_expected, error_actual)) } TEST(QPSolver, HS21) { QP problem = QPSParser("HS21.QPS").Parse(); - VectorValues actualSolution; - VectorValues expectedSolution; - expectedSolution.insert(Symbol('X',1), 2.0*I_1x1); - expectedSolution.insert(Symbol('X',2), 0.0*I_1x1); - boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); - double error_actual = problem.cost.error(actualSolution); + VectorValues expected; + expected.insert(Symbol('X', 1), 2.0 * I_1x1); + expected.insert(Symbol('X', 2), 0.0 * I_1x1); + VectorValues actual = QPSolver(problem).optimize().first; + double error_actual = problem.cost.error(actual); CHECK(assert_equal(-99.9599999, error_actual, 1e-7)) - CHECK(assert_equal(expectedSolution, actualSolution)) + CHECK(assert_equal(expected, actual)) } TEST(QPSolver, HS35) { QP problem = QPSParser("HS35.QPS").Parse(); - VectorValues actualSolution; - boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); - double error_actual = problem.cost.error(actualSolution); - CHECK(assert_equal(1.11111111e-01,error_actual, 1e-7)) + VectorValues actual = QPSolver(problem).optimize().first; + double error_actual = problem.cost.error(actual); + CHECK(assert_equal(1.11111111e-01, error_actual, 1e-7)) } TEST(QPSolver, HS35MOD) { QP problem = QPSParser("HS35MOD.QPS").Parse(); - VectorValues actualSolution; - boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); - double error_actual = problem.cost.error(actualSolution); - CHECK(assert_equal(2.50000001e-01,error_actual, 1e-7)) + VectorValues actual = QPSolver(problem).optimize().first; + double error_actual = problem.cost.error(actual); + CHECK(assert_equal(2.50000001e-01, error_actual, 1e-7)) } TEST(QPSolver, HS51) { QP problem = QPSParser("HS51.QPS").Parse(); - VectorValues actualSolution; - boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); - double error_actual = problem.cost.error(actualSolution); - CHECK(assert_equal(8.88178420e-16,error_actual, 1e-7)) + VectorValues actual = QPSolver(problem).optimize().first; + double error_actual = problem.cost.error(actual); + CHECK(assert_equal(8.88178420e-16, error_actual, 1e-7)) } TEST(QPSolver, HS52) { QP problem = 
QPSParser("HS52.QPS").Parse(); - VectorValues actualSolution; - boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); - double error_actual = problem.cost.error(actualSolution); - CHECK(assert_equal(5.32664756,error_actual, 1e-7)) + VectorValues actual = QPSolver(problem).optimize().first; + double error_actual = problem.cost.error(actual); + CHECK(assert_equal(5.32664756, error_actual, 1e-7)) } -TEST(QPSolver, HS268) { // This test needs an extra order of magnitude of tolerance than the rest +TEST(QPSolver, HS268) { // This test needs an extra order of magnitude of + // tolerance than the rest QP problem = QPSParser("HS268.QPS").Parse(); - VectorValues actualSolution; - boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); - double error_actual = problem.cost.error(actualSolution); - CHECK(assert_equal(5.73107049e-07,error_actual, 1e-6)) + VectorValues actual = QPSolver(problem).optimize().first; + double error_actual = problem.cost.error(actual); + CHECK(assert_equal(5.73107049e-07, error_actual, 1e-6)) } TEST(QPSolver, QPTEST) { // REQUIRES Jacobian Fix QP problem = QPSParser("QPTEST.QPS").Parse(); - VectorValues actualSolution; - boost::tie(actualSolution, boost::tuples::ignore) = QPSolver(problem).optimize(); - double error_actual = problem.cost.error(actualSolution); - CHECK(assert_equal(0.437187500e01,error_actual, 1e-7)) + VectorValues actual = QPSolver(problem).optimize().first; + double error_actual = problem.cost.error(actual); + CHECK(assert_equal(0.437187500e01, error_actual, 1e-7)) } /* ************************************************************************* */ -// Create Matlab's test graph as in http://www.mathworks.com/help/optim/ug/quadprog.html +// Create Matlab's test graph as in +// http://www.mathworks.com/help/optim/ug/quadprog.html QP createTestMatlabQPEx() { QP qp; // Objective functions 0.5*x1^2 + x2^2 - x1*x2 - 2*x1 -6*x2 // Note the Hessian encodes: - // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + 0.5*f + // 0.5*x1'*G11*x1 + x1'*G12*x2 + 0.5*x2'*G22*x2 - x1'*g1 - x2'*g2 + + // 0.5*f // Hence, we have G11=1, G12 = -1, g1 = +2, G22 = 2, g2 = +6, f = 0 - qp.cost.push_back( - HessianFactor(X(1), X(2), 1.0 * I_1x1, -I_1x1, 2.0 * I_1x1, 2.0 * I_1x1, - 6 * I_1x1, 1000.0)); + qp.cost.push_back(HessianFactor(X(1), X(2), 1.0 * I_1x1, -I_1x1, 2.0 * I_1x1, + 2.0 * I_1x1, 6 * I_1x1, 1000.0)); // Inequality constraints - qp.inequalities.push_back(LinearInequality(X(1), I_1x1, X(2), I_1x1, 2, 0)); // x1 + x2 <= 2 - qp.inequalities.push_back( - LinearInequality(X(1), -I_1x1, X(2), 2 * I_1x1, 2, 1)); //-x1 + 2*x2 <=2 - qp.inequalities.push_back( - LinearInequality(X(1), 2 * I_1x1, X(2), I_1x1, 3, 2)); // 2*x1 + x2 <=3 - qp.inequalities.push_back(LinearInequality(X(1), -I_1x1, 0, 3)); // -x1 <= 0 - qp.inequalities.push_back(LinearInequality(X(2), -I_1x1, 0, 4)); // -x2 <= 0 + qp.inequalities.add(X(1), I_1x1, X(2), I_1x1, 2, 0); // x1 + x2 <= 2 + qp.inequalities.add(X(1), -I_1x1, X(2), 2 * I_1x1, 2, 1); //-x1 + 2*x2 <=2 + qp.inequalities.add(X(1), 2 * I_1x1, X(2), I_1x1, 3, 2); // 2*x1 + x2 <=3 + qp.inequalities.add(X(1), -I_1x1, 0, 3); // -x1 <= 0 + qp.inequalities.add(X(2), -I_1x1, 0, 4); // -x2 <= 0 return qp; } -///* ************************************************************************* */ +///* ************************************************************************* +///*/ TEST(QPSolver, optimizeMatlabEx) { QP qp = createTestMatlabQPEx(); QPSolver solver(qp); VectorValues initialValues; 
initialValues.insert(X(1), Z_1x1); initialValues.insert(X(2), Z_1x1); - VectorValues solution; - boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues); - VectorValues expectedSolution; - expectedSolution.insert(X(1), (Vector(1) << 2.0 / 3.0).finished()); - expectedSolution.insert(X(2), (Vector(1) << 4.0 / 3.0).finished()); - CHECK(assert_equal(expectedSolution, solution, 1e-7)); + VectorValues solution = solver.optimize(initialValues).first; + VectorValues expected; + expected.insert(X(1), (Vector(1) << 2.0 / 3.0).finished()); + expected.insert(X(2), (Vector(1) << 4.0 / 3.0).finished()); + CHECK(assert_equal(expected, solution, 1e-7)); } -///* ************************************************************************* */ +///* ************************************************************************* +///*/ TEST(QPSolver, optimizeMatlabExNoinitials) { QP qp = createTestMatlabQPEx(); QPSolver solver(qp); - VectorValues solution; - boost::tie(solution, boost::tuples::ignore) = solver.optimize(); - VectorValues expectedSolution; - expectedSolution.insert(X(1), (Vector(1) << 2.0 / 3.0).finished()); - expectedSolution.insert(X(2), (Vector(1) << 4.0 / 3.0).finished()); - CHECK(assert_equal(expectedSolution, solution, 1e-7)); + VectorValues solution = solver.optimize().first; + VectorValues expected; + expected.insert(X(1), (Vector(1) << 2.0 / 3.0).finished()); + expected.insert(X(2), (Vector(1) << 4.0 / 3.0).finished()); + CHECK(assert_equal(expected, solution, 1e-7)); } /* ************************************************************************* */ @@ -387,18 +366,15 @@ TEST(QPSolver, optimizeMatlabExNoinitials) { QP createTestNocedal06bookEx16_4() { QP qp; - qp.cost.push_back(JacobianFactor(X(1), I_1x1, I_1x1)); - qp.cost.push_back(JacobianFactor(X(2), I_1x1, 2.5 * I_1x1)); + qp.cost.add(X(1), I_1x1, I_1x1); + qp.cost.add(X(2), I_1x1, 2.5 * I_1x1); // Inequality constraints - qp.inequalities.push_back( - LinearInequality(X(1), -I_1x1, X(2), 2 * I_1x1, 2, 0)); - qp.inequalities.push_back( - LinearInequality(X(1), I_1x1, X(2), 2 * I_1x1, 6, 1)); - qp.inequalities.push_back( - LinearInequality(X(1), I_1x1, X(2), -2 * I_1x1, 2, 2)); - qp.inequalities.push_back(LinearInequality(X(1), -I_1x1, 0.0, 3)); - qp.inequalities.push_back(LinearInequality(X(2), -I_1x1, 0.0, 4)); + qp.inequalities.add(X(1), -I_1x1, X(2), 2 * I_1x1, 2, 0); + qp.inequalities.add(X(1), I_1x1, X(2), 2 * I_1x1, 6, 1); + qp.inequalities.add(X(1), I_1x1, X(2), -2 * I_1x1, 2, 2); + qp.inequalities.add(X(1), -I_1x1, 0.0, 3); + qp.inequalities.add(X(2), -I_1x1, 0.0, 4); return qp; } @@ -410,21 +386,19 @@ TEST(QPSolver, optimizeNocedal06bookEx16_4) { initialValues.insert(X(1), (Vector(1) << 2.0).finished()); initialValues.insert(X(2), Z_1x1); - VectorValues solution; - boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues); - VectorValues expectedSolution; - expectedSolution.insert(X(1), (Vector(1) << 1.4).finished()); - expectedSolution.insert(X(2), (Vector(1) << 1.7).finished()); - CHECK(assert_equal(expectedSolution, solution, 1e-7)); + VectorValues solution = solver.optimize(initialValues).first; + VectorValues expected; + expected.insert(X(1), (Vector(1) << 1.4).finished()); + expected.insert(X(2), (Vector(1) << 1.7).finished()); + CHECK(assert_equal(expected, solution, 1e-7)); } /* ************************************************************************* */ TEST(QPSolver, failedSubproblem) { QP qp; - qp.cost.push_back(JacobianFactor(X(1), I_2x2, Z_2x1)); + qp.cost.add(X(1), I_2x2, Z_2x1); 
qp.cost.push_back(HessianFactor(X(1), Z_2x2, Z_2x1, 100.0)); - qp.inequalities.push_back( - LinearInequality(X(1), (Matrix(1, 2) << -1.0, 0.0).finished(), -1.0, 0)); + qp.inequalities.add(X(1), (Matrix(1, 2) << -1.0, 0.0).finished(), -1.0, 0); VectorValues expected; expected.insert(X(1), (Vector(2) << 1.0, 0.0).finished()); @@ -433,8 +407,7 @@ TEST(QPSolver, failedSubproblem) { initialValues.insert(X(1), (Vector(2) << 10.0, 100.0).finished()); QPSolver solver(qp); - VectorValues solution; - boost::tie(solution, boost::tuples::ignore) = solver.optimize(initialValues); + VectorValues solution = solver.optimize(initialValues).first; CHECK(assert_equal(expected, solution, 1e-7)); } @@ -442,10 +415,9 @@ TEST(QPSolver, failedSubproblem) { /* ************************************************************************* */ TEST(QPSolver, infeasibleInitial) { QP qp; - qp.cost.push_back(JacobianFactor(X(1), I_2x2, Vector::Zero(2))); + qp.cost.add(X(1), I_2x2, Vector::Zero(2)); qp.cost.push_back(HessianFactor(X(1), Z_2x2, Vector::Zero(2), 100.0)); - qp.inequalities.push_back( - LinearInequality(X(1), (Matrix(1, 2) << -1.0, 0.0).finished(), -1.0, 0)); + qp.inequalities.add(X(1), (Matrix(1, 2) << -1.0, 0.0).finished(), -1.0, 0); VectorValues expected; expected.insert(X(1), (Vector(2) << 1.0, 0.0).finished()); @@ -464,4 +436,3 @@ int main() { return TestRegistry::runAllTests(tr); } /* ************************************************************************* */ - diff --git a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp index 777e4b2d3..fd18e7c6d 100644 --- a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp +++ b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp @@ -59,7 +59,7 @@ FixedLagSmoother::Result BatchFixedLagSmoother::update( // Add the new variables to theta theta_.insert(newTheta); // Add new variables to the end of the ordering - for (const auto& key_value : newTheta) { + for (const auto key_value : newTheta) { ordering_.push_back(key_value.key); } // Augment Delta @@ -267,7 +267,7 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() { // Put the linearization points and deltas back for specific variables if (enforceConsistency_ && (linearKeys_.size() > 0)) { theta_.update(linearKeys_); - for(const auto& key_value: linearKeys_) { + for(const auto key_value: linearKeys_) { delta_.at(key_value.key) = newDelta.at(key_value.key); } } diff --git a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h index d71cdd409..b4645e4ed 100644 --- a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h +++ b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h @@ -41,10 +41,10 @@ class GTSAM_UNSTABLE_EXPORT BatchFixedLagSmoother : public FixedLagSmoother { virtual ~BatchFixedLagSmoother() { }; /** Print the factor for debugging and testing (implementing Testable) */ - virtual void print(const std::string& s = "BatchFixedLagSmoother:\n", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; + void print(const std::string& s = "BatchFixedLagSmoother:\n", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; /** Check if two IncrementalFixedLagSmoother Objects are equal */ - virtual bool equals(const FixedLagSmoother& rhs, double tol = 1e-9) const override; + bool equals(const FixedLagSmoother& rhs, double tol = 1e-9) const override; /** Add new factors, updating the solution and relinearizing as needed. 
*/ Result update(const NonlinearFactorGraph& newFactors = NonlinearFactorGraph(), @@ -117,15 +117,6 @@ class GTSAM_UNSTABLE_EXPORT BatchFixedLagSmoother : public FixedLagSmoother { const NonlinearFactorGraph& graph, const Values& theta, const KeyVector& keys, const GaussianFactorGraph::Eliminate& eliminateFunction = EliminatePreferCholesky); -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 - static NonlinearFactorGraph calculateMarginalFactors( - const NonlinearFactorGraph& graph, const Values& theta, const std::set& keys, - const GaussianFactorGraph::Eliminate& eliminateFunction = EliminatePreferCholesky) { - KeyVector keyVector(keys.begin(), keys.end()); - return CalculateMarginalFactors(graph, theta, keyVector, eliminateFunction); - } -#endif - protected: /** A typedef defining an Key-Factor mapping **/ diff --git a/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp b/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp index 758bcfe48..83d0ab719 100644 --- a/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp +++ b/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp @@ -139,7 +139,7 @@ ConcurrentBatchFilter::Result ConcurrentBatchFilter::update(const NonlinearFacto // Add the new variables to theta theta_.insert(newTheta); // Add new variables to the end of the ordering - for(const Values::ConstKeyValuePair& key_value: newTheta) { + for(const auto key_value: newTheta) { ordering_.push_back(key_value.key); } // Augment Delta @@ -222,7 +222,7 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& smootherSumm // Find the set of new separator keys KeySet newSeparatorKeys; - for(const Values::ConstKeyValuePair& key_value: separatorValues_) { + for(const auto key_value: separatorValues_) { newSeparatorKeys.insert(key_value.key); } @@ -236,7 +236,7 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& smootherSumm graph.push_back(smootherShortcut_); Values values; values.insert(smootherSummarizationValues); - for(const Values::ConstKeyValuePair& key_value: separatorValues_) { + for(const auto key_value: separatorValues_) { if(!values.exists(key_value.key)) { values.insert(key_value.key, key_value.value); } @@ -471,7 +471,7 @@ void ConcurrentBatchFilter::optimize(const NonlinearFactorGraph& factors, Values // Put the linearization points and deltas back for specific variables if(linearValues.size() > 0) { theta.update(linearValues); - for(const Values::ConstKeyValuePair& key_value: linearValues) { + for(const auto key_value: linearValues) { delta.at(key_value.key) = newDelta.at(key_value.key); } } @@ -574,7 +574,7 @@ void ConcurrentBatchFilter::moveSeparator(const FastList& keysToMove) { // Calculate the set of new separator keys: AffectedKeys + PreviousSeparatorKeys - KeysToMove KeySet newSeparatorKeys = removedFactors.keys(); - for(const Values::ConstKeyValuePair& key_value: separatorValues_) { + for(const auto key_value: separatorValues_) { newSeparatorKeys.insert(key_value.key); } for(Key key: keysToMove) { diff --git a/gtsam_unstable/nonlinear/ConcurrentBatchFilter.h b/gtsam_unstable/nonlinear/ConcurrentBatchFilter.h index d04f579eb..c99c67492 100644 --- a/gtsam_unstable/nonlinear/ConcurrentBatchFilter.h +++ b/gtsam_unstable/nonlinear/ConcurrentBatchFilter.h @@ -67,10 +67,10 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentBatchFilter : public ConcurrentFilter { virtual ~ConcurrentBatchFilter() {}; /** Implement a GTSAM standard 'print' function */ - virtual void print(const std::string& s = "Concurrent Batch Filter:\n", const KeyFormatter& keyFormatter = DefaultKeyFormatter) 
const; + void print(const std::string& s = "Concurrent Batch Filter:\n", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; /** Check if two Concurrent Filters are equal */ - virtual bool equals(const ConcurrentFilter& rhs, double tol = 1e-9) const; + bool equals(const ConcurrentFilter& rhs, double tol = 1e-9) const override; /** Access the current set of factors */ const NonlinearFactorGraph& getFactors() const { @@ -130,7 +130,7 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentBatchFilter : public ConcurrentFilter { * Perform any required operations before the synchronization process starts. * Called by 'synchronize' */ - virtual void presync(); + virtual void presync() override; /** * Populate the provided containers with factors that constitute the filter branch summarization @@ -139,7 +139,7 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentBatchFilter : public ConcurrentFilter { * @param summarizedFactors The summarized factors for the filter branch * @param rootValues The linearization points of the root clique variables */ - virtual void getSummarizedFactors(NonlinearFactorGraph& filterSummarization, Values& filterSummarizationValues); + void getSummarizedFactors(NonlinearFactorGraph& filterSummarization, Values& filterSummarizationValues) override; /** * Populate the provided containers with factors being sent to the smoother from the filter. These @@ -149,20 +149,20 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentBatchFilter : public ConcurrentFilter { * @param smootherFactors The new factors to be added to the smoother * @param smootherValues The linearization points of any new variables */ - virtual void getSmootherFactors(NonlinearFactorGraph& smootherFactors, Values& smootherValues); + void getSmootherFactors(NonlinearFactorGraph& smootherFactors, Values& smootherValues) override; /** * Apply the updated version of the smoother branch summarized factors. * * @param summarizedFactors An updated version of the smoother branch summarized factors */ - virtual void synchronize(const NonlinearFactorGraph& smootherSummarization, const Values& smootherSummarizationValues); + void synchronize(const NonlinearFactorGraph& smootherSummarization, const Values& smootherSummarizationValues) override; /** * Perform any required operations after the synchronization process finishes. 
* Called by 'synchronize' */ - virtual void postsync(); + virtual void postsync() override; protected: diff --git a/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp b/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp index 600baa9f0..75d491bde 100644 --- a/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp +++ b/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp @@ -61,7 +61,7 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::update(const NonlinearF theta_.insert(newTheta); // Add new variables to the end of the ordering - for(const Values::ConstKeyValuePair& key_value: newTheta) { + for(const auto key_value: newTheta) { ordering_.push_back(key_value.key); } @@ -135,7 +135,7 @@ void ConcurrentBatchSmoother::synchronize(const NonlinearFactorGraph& smootherFa removeFactors(filterSummarizationSlots_); // Insert new linpoints into the values, augment the ordering, and store new dims to augment delta - for(const Values::ConstKeyValuePair& key_value: smootherValues) { + for(const auto key_value: smootherValues) { std::pair iter_inserted = theta_.tryInsert(key_value.key, key_value.value); if(iter_inserted.second) { // If the insert succeeded @@ -146,7 +146,7 @@ void ConcurrentBatchSmoother::synchronize(const NonlinearFactorGraph& smootherFa iter_inserted.first->value = key_value.value; } } - for(const Values::ConstKeyValuePair& key_value: separatorValues) { + for(const auto key_value: separatorValues) { std::pair iter_inserted = theta_.tryInsert(key_value.key, key_value.value); if(iter_inserted.second) { // If the insert succeeded @@ -322,7 +322,7 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::optimize() { // Put the linearization points and deltas back for specific variables if(separatorValues_.size() > 0) { theta_.update(separatorValues_); - for(const Values::ConstKeyValuePair& key_value: separatorValues_) { + for(const auto key_value: separatorValues_) { delta_.at(key_value.key) = newDelta.at(key_value.key); } } @@ -372,7 +372,7 @@ void ConcurrentBatchSmoother::updateSmootherSummarization() { // Get the set of separator keys gtsam::KeySet separatorKeys; - for(const Values::ConstKeyValuePair& key_value: separatorValues_) { + for(const auto key_value: separatorValues_) { separatorKeys.insert(key_value.key); } diff --git a/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.h b/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.h index 67a1ef4f3..364d02caf 100644 --- a/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.h +++ b/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.h @@ -60,10 +60,10 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentBatchSmoother : public ConcurrentSmoother virtual ~ConcurrentBatchSmoother() {}; /** Implement a GTSAM standard 'print' function */ - virtual void print(const std::string& s = "Concurrent Batch Smoother:\n", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; + void print(const std::string& s = "Concurrent Batch Smoother:\n", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; /** Check if two Concurrent Smoothers are equal */ - virtual bool equals(const ConcurrentSmoother& rhs, double tol = 1e-9) const; + bool equals(const ConcurrentSmoother& rhs, double tol = 1e-9) const override; /** Access the current set of factors */ const NonlinearFactorGraph& getFactors() const { @@ -124,7 +124,7 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentBatchSmoother : public ConcurrentSmoother * Perform any required operations before the synchronization process starts. 
* Called by 'synchronize' */ - virtual void presync(); + virtual void presync() override; /** * Populate the provided containers with factors that constitute the smoother branch summarization @@ -132,7 +132,7 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentBatchSmoother : public ConcurrentSmoother * * @param summarizedFactors The summarized factors for the filter branch */ - virtual void getSummarizedFactors(NonlinearFactorGraph& summarizedFactors, Values& separatorValues); + void getSummarizedFactors(NonlinearFactorGraph& summarizedFactors, Values& separatorValues) override; /** * Apply the new smoother factors sent by the filter, and the updated version of the filter @@ -143,14 +143,14 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentBatchSmoother : public ConcurrentSmoother * @param summarizedFactors An updated version of the filter branch summarized factors * @param rootValues The linearization point of the root variables */ - virtual void synchronize(const NonlinearFactorGraph& smootherFactors, const Values& smootherValues, - const NonlinearFactorGraph& summarizedFactors, const Values& separatorValues); + void synchronize(const NonlinearFactorGraph& smootherFactors, const Values& smootherValues, + const NonlinearFactorGraph& summarizedFactors, const Values& separatorValues) override; /** * Perform any required operations after the synchronization process finishes. * Called by 'synchronize' */ - virtual void postsync(); + virtual void postsync() override; protected: diff --git a/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp b/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp index b70b9efc2..b9cf66a97 100644 --- a/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp +++ b/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp @@ -69,13 +69,13 @@ ConcurrentIncrementalFilter::Result ConcurrentIncrementalFilter::update(const No int group = 1; // Set all existing variables to Group1 if(isam2_.getLinearizationPoint().size() > 0) { - for(const Values::ConstKeyValuePair& key_value: isam2_.getLinearizationPoint()) { + for(const auto key_value: isam2_.getLinearizationPoint()) { orderingConstraints->operator[](key_value.key) = group; } ++group; } // Assign new variables to the root - for(const Values::ConstKeyValuePair& key_value: newTheta) { + for(const auto key_value: newTheta) { orderingConstraints->operator[](key_value.key) = group; } // Set marginalizable variables to Group0 @@ -201,7 +201,7 @@ void ConcurrentIncrementalFilter::synchronize(const NonlinearFactorGraph& smooth // Force iSAM2 not to relinearize anything during this iteration FastList noRelinKeys; - for(const Values::ConstKeyValuePair& key_value: isam2_.getLinearizationPoint()) { + for(const auto key_value: isam2_.getLinearizationPoint()) { noRelinKeys.push_back(key_value.key); } diff --git a/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.h b/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.h index e01919048..d0525a4da 100644 --- a/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.h +++ b/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.h @@ -67,10 +67,10 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentIncrementalFilter : public virtual Concurr virtual ~ConcurrentIncrementalFilter() {}; /** Implement a GTSAM standard 'print' function */ - virtual void print(const std::string& s = "Concurrent Incremental Filter:\n", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; + void print(const std::string& s = "Concurrent Incremental Filter:\n", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const 
override; /** Check if two Concurrent Filters are equal */ - virtual bool equals(const ConcurrentFilter& rhs, double tol = 1e-9) const; + bool equals(const ConcurrentFilter& rhs, double tol = 1e-9) const override; /** Access the current set of factors */ const NonlinearFactorGraph& getFactors() const { @@ -130,7 +130,7 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentIncrementalFilter : public virtual Concurr * Perform any required operations before the synchronization process starts. * Called by 'synchronize' */ - virtual void presync(); + void presync() override; /** * Populate the provided containers with factors that constitute the filter branch summarization @@ -139,7 +139,7 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentIncrementalFilter : public virtual Concurr * @param summarizedFactors The summarized factors for the filter branch * @param rootValues The linearization points of the root clique variables */ - virtual void getSummarizedFactors(NonlinearFactorGraph& filterSummarization, Values& filterSummarizationValues); + void getSummarizedFactors(NonlinearFactorGraph& filterSummarization, Values& filterSummarizationValues) override; /** * Populate the provided containers with factors being sent to the smoother from the filter. These @@ -149,20 +149,20 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentIncrementalFilter : public virtual Concurr * @param smootherFactors The new factors to be added to the smoother * @param smootherValues The linearization points of any new variables */ - virtual void getSmootherFactors(NonlinearFactorGraph& smootherFactors, Values& smootherValues); + void getSmootherFactors(NonlinearFactorGraph& smootherFactors, Values& smootherValues) override; /** * Apply the updated version of the smoother branch summarized factors. * * @param summarizedFactors An updated version of the smoother branch summarized factors */ - virtual void synchronize(const NonlinearFactorGraph& smootherSummarization, const Values& smootherSummarizationValues); + void synchronize(const NonlinearFactorGraph& smootherSummarization, const Values& smootherSummarizationValues) override; /** * Perform any required operations after the synchronization process finishes. 
* Called by 'synchronize' */ - virtual void postsync(); + void postsync() override; protected: diff --git a/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp b/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp index 714d7c8d0..3886d0e42 100644 --- a/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp +++ b/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp @@ -66,7 +66,7 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons // Also, mark the separator keys as fixed linearization points FastMap constrainedKeys; FastList noRelinKeys; - for(const Values::ConstKeyValuePair& key_value: separatorValues_) { + for(const auto key_value: separatorValues_) { constrainedKeys[key_value.key] = 1; noRelinKeys.push_back(key_value.key); } @@ -82,12 +82,12 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons Values values(newTheta); // Unfortunately, we must be careful here, as some of the smoother values // and/or separator values may have been added previously - for(const Values::ConstKeyValuePair& key_value: smootherValues_) { + for(const auto key_value: smootherValues_) { if(!isam2_.getLinearizationPoint().exists(key_value.key)) { values.insert(key_value.key, smootherValues_.at(key_value.key)); } } - for(const Values::ConstKeyValuePair& key_value: separatorValues_) { + for(const auto key_value: separatorValues_) { if(!isam2_.getLinearizationPoint().exists(key_value.key)) { values.insert(key_value.key, separatorValues_.at(key_value.key)); } @@ -246,7 +246,7 @@ void ConcurrentIncrementalSmoother::updateSmootherSummarization() { // Get the set of separator keys KeySet separatorKeys; - for(const Values::ConstKeyValuePair& key_value: separatorValues_) { + for(const auto key_value: separatorValues_) { separatorKeys.insert(key_value.key); } diff --git a/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.h b/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.h index ec95e1ec8..8848b6a43 100644 --- a/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.h +++ b/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.h @@ -57,10 +57,10 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentIncrementalSmoother : public virtual Concu virtual ~ConcurrentIncrementalSmoother() {}; /** Implement a GTSAM standard 'print' function */ - virtual void print(const std::string& s = "Concurrent Incremental Smoother:\n", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; + void print(const std::string& s = "Concurrent Incremental Smoother:\n", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; /** Check if two Concurrent Smoothers are equal */ - virtual bool equals(const ConcurrentSmoother& rhs, double tol = 1e-9) const; + bool equals(const ConcurrentSmoother& rhs, double tol = 1e-9) const override; /** Access the current set of factors */ const NonlinearFactorGraph& getFactors() const { @@ -115,7 +115,7 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentIncrementalSmoother : public virtual Concu * Perform any required operations before the synchronization process starts. 
* Called by 'synchronize' */ - virtual void presync(); + void presync() override; /** * Populate the provided containers with factors that constitute the smoother branch summarization @@ -123,7 +123,7 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentIncrementalSmoother : public virtual Concu * * @param summarizedFactors The summarized factors for the filter branch */ - virtual void getSummarizedFactors(NonlinearFactorGraph& summarizedFactors, Values& separatorValues); + void getSummarizedFactors(NonlinearFactorGraph& summarizedFactors, Values& separatorValues) override; /** * Apply the new smoother factors sent by the filter, and the updated version of the filter @@ -134,14 +134,14 @@ class GTSAM_UNSTABLE_EXPORT ConcurrentIncrementalSmoother : public virtual Concu * @param summarizedFactors An updated version of the filter branch summarized factors * @param rootValues The linearization point of the root variables */ - virtual void synchronize(const NonlinearFactorGraph& smootherFactors, const Values& smootherValues, - const NonlinearFactorGraph& summarizedFactors, const Values& separatorValues); + void synchronize(const NonlinearFactorGraph& smootherFactors, const Values& smootherValues, + const NonlinearFactorGraph& summarizedFactors, const Values& separatorValues) override; /** * Perform any required operations after the synchronization process finishes. * Called by 'synchronize' */ - virtual void postsync(); + void postsync() override; protected: diff --git a/gtsam_unstable/nonlinear/LinearizedFactor.h b/gtsam_unstable/nonlinear/LinearizedFactor.h index 128889b82..caf67de21 100644 --- a/gtsam_unstable/nonlinear/LinearizedFactor.h +++ b/gtsam_unstable/nonlinear/LinearizedFactor.h @@ -112,17 +112,17 @@ class GTSAM_UNSTABLE_EXPORT LinearizedJacobianFactor : public LinearizedGaussian virtual ~LinearizedJacobianFactor() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } // Testable /** print function */ - virtual void print(const std::string& s="", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; + void print(const std::string& s="", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; /** equals function with optional tolerance */ - virtual bool equals(const NonlinearFactor& expected, double tol = 1e-9) const; + bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; // access functions const constBVector b() const { return Ab_(size()).col(0); } @@ -130,17 +130,17 @@ class GTSAM_UNSTABLE_EXPORT LinearizedJacobianFactor : public LinearizedGaussian const constABlock A(Key key) const { return Ab_(std::find(begin(), end(), key) - begin()); } /** get the dimension of the factor (number of rows on linearization) */ - size_t dim() const { return Ab_.rows(); }; + size_t dim() const override { return Ab_.rows(); }; /** Calculate the error of the factor */ - double error(const Values& c) const; + double error(const Values& c) const override; /** * linearize to a GaussianFactor * Reimplemented from NoiseModelFactor to directly copy out the * matrices and only update the RHS b with an updated residual */ - boost::shared_ptr linearize(const Values& c) const; + boost::shared_ptr linearize(const Values& c) const override; /** (A*x-b) */ Vector error_vector(const Values& c) const; @@ -202,17 +202,17 @@ class GTSAM_UNSTABLE_EXPORT LinearizedHessianFactor : 
public LinearizedGaussianF virtual ~LinearizedHessianFactor() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } // Testable /** print function */ - virtual void print(const std::string& s="", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; + void print(const std::string& s="", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; /** equals function with optional tolerance */ - virtual bool equals(const NonlinearFactor& expected, double tol = 1e-9) const; + bool equals(const NonlinearFactor& expected, double tol = 1e-9) const override; /** Return the constant term \f$ f \f$ as described above * @return The constant term \f$ f \f$ @@ -261,17 +261,17 @@ class GTSAM_UNSTABLE_EXPORT LinearizedHessianFactor : public LinearizedGaussianF } /** get the dimension of the factor (number of rows on linearization) */ - size_t dim() const { return info_.rows() - 1; } + size_t dim() const override { return info_.rows() - 1; } /** Calculate the error of the factor */ - double error(const Values& c) const; + double error(const Values& c) const override; /** * linearize to a GaussianFactor * Reimplemented from NoiseModelFactor to directly copy out the * matrices and only update the RHS b with an updated residual */ - boost::shared_ptr linearize(const Values& c) const; + boost::shared_ptr linearize(const Values& c) const override; private: /** Serialization function */ diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp index 1f19c0e8a..61db05167 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp @@ -64,7 +64,7 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph, std::set KeysToKeep; - for(const Values::ConstKeyValuePair& key_value: linPoint) { // we cycle over all the keys of factorGraph + for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph KeysToKeep.insert(key_value.key); } // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize for(Key key: keysToMarginalize) { diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp index ae9a3f827..b5fb61428 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp @@ -560,7 +560,7 @@ TEST( ConcurrentBatchSmoother, synchronize_3 ) GaussianFactorGraph::shared_ptr linearFactors = allFactors.linearize(allValues); KeySet eliminateKeys = linearFactors->keys(); - for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) { + for(const auto key_value: filterSeparatorValues) { eliminateKeys.erase(key_value.key); } KeyVector variables(eliminateKeys.begin(), eliminateKeys.end()); diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp index c5dba1a69..08d71a420 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp @@ -80,7 +80,7 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& 
factorGraph, std::set KeysToKeep; - for(const Values::ConstKeyValuePair& key_value: linPoint) { // we cycle over all the keys of factorGraph + for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph KeysToKeep.insert(key_value.key); } // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize for(Key key: keysToMarginalize) { diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp index 5de115013..ccb5a104e 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp @@ -512,7 +512,7 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_2 ) // Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1); Values expectedLinearizationPoint = filterSeparatorValues; Values actualLinearizationPoint; - for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) { + for(const auto key_value: filterSeparatorValues) { actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key)); } CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6)); @@ -580,7 +580,7 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_3 ) // GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate(); KeySet allkeys = LinFactorGraph->keys(); - for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) { + for(const auto key_value: filterSeparatorValues) { allkeys.erase(key_value.key); } KeyVector variables(allkeys.begin(), allkeys.end()); diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp index ec78ee6e2..53c3d1610 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp @@ -513,7 +513,7 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_2 ) // Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1); Values expectedLinearizationPoint = filterSeparatorValues; Values actualLinearizationPoint; - for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) { + for(const auto key_value: filterSeparatorValues) { actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key)); } CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6)); @@ -582,7 +582,7 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_3 ) // GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate(); KeySet allkeys = LinFactorGraph->keys(); - for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) + for(const auto key_value: filterSeparatorValues) allkeys.erase(key_value.key); KeyVector variables(allkeys.begin(), allkeys.end()); std::pair result = LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky); diff --git a/gtsam_unstable/slam/BetweenFactorEM.h b/gtsam_unstable/slam/BetweenFactorEM.h index 104b3209d..43607ac41 100644 --- a/gtsam_unstable/slam/BetweenFactorEM.h +++ b/gtsam_unstable/slam/BetweenFactorEM.h @@ -84,8 +84,8 @@ class BetweenFactorEM: public NonlinearFactor { /** implement functions needed for Testable */ /** print */ - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const { + void print(const std::string& s, const 
KeyFormatter& keyFormatter = + DefaultKeyFormatter) const override { std::cout << s << "BetweenFactorEM(" << keyFormatter(key1_) << "," << keyFormatter(key2_) << ")\n"; measured_.print(" measured: "); @@ -97,7 +97,7 @@ class BetweenFactorEM: public NonlinearFactor { } /** equals */ - virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const { + bool equals(const NonlinearFactor& f, double tol = 1e-9) const override { const This *t = dynamic_cast(&f); if (t && Base::equals(f)) @@ -114,7 +114,7 @@ class BetweenFactorEM: public NonlinearFactor { /** implement functions needed to derive from Factor */ /* ************************************************************************* */ - virtual double error(const Values& x) const { + double error(const Values &x) const override { return whitenedError(x).squaredNorm(); } @@ -125,8 +125,7 @@ class BetweenFactorEM: public NonlinearFactor { * Hence \f$ b = z - h(x) = - \mathtt{error\_vector}(x) \f$ */ /* This version of linearize recalculates the noise model each time */ - virtual boost::shared_ptr linearize( - const Values& x) const { + boost::shared_ptr linearize(const Values &x) const override { // Only linearize if the factor is active if (!this->active(x)) return boost::shared_ptr(); @@ -403,12 +402,7 @@ class BetweenFactorEM: public NonlinearFactor { return measured_; } - /** number of variables attached to this factor */ - std::size_t size() const { - return 2; - } - - virtual size_t dim() const { + size_t dim() const override { return model_inlier_->R().rows() + model_inlier_->R().cols(); } diff --git a/gtsam_unstable/slam/BiasedGPSFactor.h b/gtsam_unstable/slam/BiasedGPSFactor.h index cf56afab2..c8bbdd78a 100644 --- a/gtsam_unstable/slam/BiasedGPSFactor.h +++ b/gtsam_unstable/slam/BiasedGPSFactor.h @@ -55,7 +55,7 @@ namespace gtsam { /** implement functions needed for Testable */ /** print */ - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "BiasedGPSFactor(" << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << ")\n" @@ -64,7 +64,7 @@ namespace gtsam { } /** equals */ - virtual bool equals(const NonlinearFactor& expected, double tol=1e-9) const { + bool equals(const NonlinearFactor& expected, double tol=1e-9) const override { const This *e = dynamic_cast (&expected); return e != nullptr && Base::equals(*e, tol) && traits::Equals(this->measured_, e->measured_, tol); } @@ -74,7 +74,7 @@ namespace gtsam { /** vector of errors */ Vector evaluateError(const Pose3& pose, const Point3& bias, boost::optional H1 = boost::none, boost::optional H2 = - boost::none) const { + boost::none) const override { if (H1 || H2){ H1->resize(3,6); // jacobian wrt pose diff --git a/gtsam_unstable/slam/DummyFactor.h b/gtsam_unstable/slam/DummyFactor.h index 574efabea..08b1c800a 100644 --- a/gtsam_unstable/slam/DummyFactor.h +++ b/gtsam_unstable/slam/DummyFactor.h @@ -34,10 +34,10 @@ class GTSAM_UNSTABLE_EXPORT DummyFactor : public NonlinearFactor { // testable /** print */ - virtual void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; + void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; /** Check if two factors are equal */ - virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const; + bool equals(const NonlinearFactor& f, double tol = 1e-9) 
const override; // access @@ -48,13 +48,13 @@ class GTSAM_UNSTABLE_EXPORT DummyFactor : public NonlinearFactor { /** * Calculate the error of the factor - zero for dummy factors */ - virtual double error(const Values& c) const { return 0.0; } + double error(const Values& c) const override { return 0.0; } /** get the dimension of the factor (number of rows on linearization) */ - virtual size_t dim() const { return rowDim_; } + size_t dim() const override { return rowDim_; } /** linearize to a GaussianFactor */ - virtual boost::shared_ptr linearize(const Values& c) const; + boost::shared_ptr linearize(const Values& c) const override; /** * Creates a shared_ptr clone of the factor - needs to be specialized to allow @@ -62,7 +62,7 @@ class GTSAM_UNSTABLE_EXPORT DummyFactor : public NonlinearFactor { * * By default, throws exception if subclass does not implement the function. */ - virtual NonlinearFactor::shared_ptr clone() const { + NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( NonlinearFactor::shared_ptr(new DummyFactor(*this))); } diff --git a/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel.h b/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel.h index f499a0f4b..745605325 100644 --- a/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel.h +++ b/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel.h @@ -133,7 +133,7 @@ class EquivInertialNavFactor_GlobalVel : public NoiseModelFactor5key1()) << "," << keyFormatter(this->key2()) << "," @@ -153,7 +153,7 @@ class EquivInertialNavFactor_GlobalVel : public NoiseModelFactor5 (&expected); return e != nullptr && Base::equals(*e, tol) && (delta_pos_in_t0_ - e->delta_pos_in_t0_).norm() < tol @@ -302,7 +302,7 @@ class EquivInertialNavFactor_GlobalVel : public NoiseModelFactor5 H2 = boost::none, boost::optional H3 = boost::none, boost::optional H4 = boost::none, - boost::optional H5 = boost::none) const { + boost::optional H5 = boost::none) const override { // TODO: Write analytical derivative calculations // Jacobian w.r.t. 
Pose1 diff --git a/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel_NoBias.h b/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel_NoBias.h index 86efd10c1..9f5d800db 100644 --- a/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel_NoBias.h +++ b/gtsam_unstable/slam/EquivInertialNavFactor_GlobalVel_NoBias.h @@ -151,7 +151,7 @@ class EquivInertialNavFactor_GlobalVel_NoBias : public NoiseModelFactor4 (&expected); return e != nullptr && Base::equals(*e, tol) && (delta_pos_in_t0_ - e->delta_pos_in_t0_).norm() < tol diff --git a/gtsam_unstable/slam/GaussMarkov1stOrderFactor.h b/gtsam_unstable/slam/GaussMarkov1stOrderFactor.h index 762199753..f6de48625 100644 --- a/gtsam_unstable/slam/GaussMarkov1stOrderFactor.h +++ b/gtsam_unstable/slam/GaussMarkov1stOrderFactor.h @@ -71,7 +71,7 @@ class GaussMarkov1stOrderFactor: public NoiseModelFactor2 { /** implement functions needed for Testable */ /** print */ - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "GaussMarkov1stOrderFactor(" << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << ")\n"; @@ -79,7 +79,7 @@ class GaussMarkov1stOrderFactor: public NoiseModelFactor2 { } /** equals */ - virtual bool equals(const NonlinearFactor& expected, double tol=1e-9) const { + bool equals(const NonlinearFactor& expected, double tol=1e-9) const override { const This *e = dynamic_cast (&expected); return e != nullptr && Base::equals(*e, tol); } @@ -89,7 +89,7 @@ class GaussMarkov1stOrderFactor: public NoiseModelFactor2 { /** vector of errors */ Vector evaluateError(const VALUE& p1, const VALUE& p2, boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const { + boost::optional H2 = boost::none) const override { Vector v1( traits::Logmap(p1) ); Vector v2( traits::Logmap(p2) ); diff --git a/gtsam_unstable/slam/InertialNavFactor_GlobalVelocity.h b/gtsam_unstable/slam/InertialNavFactor_GlobalVelocity.h index 4763c4263..e5a3e9118 100644 --- a/gtsam_unstable/slam/InertialNavFactor_GlobalVelocity.h +++ b/gtsam_unstable/slam/InertialNavFactor_GlobalVelocity.h @@ -114,7 +114,7 @@ class InertialNavFactor_GlobalVelocity : public NoiseModelFactor5key1()) << "," << keyFormatter(this->key2()) << "," @@ -133,7 +133,7 @@ class InertialNavFactor_GlobalVelocity : public NoiseModelFactor5 (&expected); return e != nullptr && Base::equals(*e, tol) && (measurement_acc_ - e->measurement_acc_).norm() < tol @@ -229,7 +229,7 @@ class InertialNavFactor_GlobalVelocity : public NoiseModelFactor5 H2 = boost::none, boost::optional H3 = boost::none, boost::optional H4 = boost::none, - boost::optional H5 = boost::none) const { + boost::optional H5 = boost::none) const override { // TODO: Write analytical derivative calculations // Jacobian w.r.t. 
Pose1 diff --git a/gtsam_unstable/slam/InvDepthFactor3.h b/gtsam_unstable/slam/InvDepthFactor3.h index 0d10b0123..50b0c5980 100644 --- a/gtsam_unstable/slam/InvDepthFactor3.h +++ b/gtsam_unstable/slam/InvDepthFactor3.h @@ -70,13 +70,13 @@ class InvDepthFactor3: public NoiseModelFactor3 { * @param keyFormatter optional formatter useful for printing Symbols */ void print(const std::string& s = "InvDepthFactor3", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s, keyFormatter); traits::Print(measured_, s + ".z"); } /// equals - virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const This *e = dynamic_cast(&p); return e && Base::equals(p, tol) && traits::Equals(this->measured_, e->measured_, tol) && this->K_->equals(*e->K_, tol); } @@ -85,7 +85,7 @@ class InvDepthFactor3: public NoiseModelFactor3 { Vector evaluateError(const POSE& pose, const Vector5& point, const INVDEPTH& invDepth, boost::optional H1=boost::none, boost::optional H2=boost::none, - boost::optional H3=boost::none) const { + boost::optional H3=boost::none) const override { try { InvDepthCamera3 camera(pose, K_); return camera.project(point, invDepth, H1, H2, H3) - measured_; diff --git a/gtsam_unstable/slam/InvDepthFactorVariant1.h b/gtsam_unstable/slam/InvDepthFactorVariant1.h index ad66eee5b..785556750 100644 --- a/gtsam_unstable/slam/InvDepthFactorVariant1.h +++ b/gtsam_unstable/slam/InvDepthFactorVariant1.h @@ -66,13 +66,13 @@ class InvDepthFactorVariant1: public NoiseModelFactor2 { * @param keyFormatter optional formatter useful for printing Symbols */ void print(const std::string& s = "InvDepthFactorVariant1", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s, keyFormatter); traits::Print(measured_, s + ".z"); } /// equals - virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const This *e = dynamic_cast(&p); return e && Base::equals(p, tol) @@ -102,7 +102,7 @@ class InvDepthFactorVariant1: public NoiseModelFactor2 { /// Evaluate error h(x)-z and optionally derivatives Vector evaluateError(const Pose3& pose, const Vector6& landmark, boost::optional H1=boost::none, - boost::optional H2=boost::none) const { + boost::optional H2=boost::none) const override { if (H1) { (*H1) = numericalDerivative11( diff --git a/gtsam_unstable/slam/InvDepthFactorVariant2.h b/gtsam_unstable/slam/InvDepthFactorVariant2.h index c6b5d5af8..97ceb8a8b 100644 --- a/gtsam_unstable/slam/InvDepthFactorVariant2.h +++ b/gtsam_unstable/slam/InvDepthFactorVariant2.h @@ -69,13 +69,13 @@ class InvDepthFactorVariant2: public NoiseModelFactor2 { * @param keyFormatter optional formatter useful for printing Symbols */ void print(const std::string& s = "InvDepthFactorVariant2", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s, keyFormatter); traits::Print(measured_, s + ".z"); } /// equals - virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const This *e = dynamic_cast(&p); return e && Base::equals(p, tol) @@ -105,7 +105,7 @@ class InvDepthFactorVariant2: public 
NoiseModelFactor2 { /// Evaluate error h(x)-z and optionally derivatives Vector evaluateError(const Pose3& pose, const Vector3& landmark, boost::optional H1=boost::none, - boost::optional H2=boost::none) const { + boost::optional H2=boost::none) const override { if (H1) { (*H1) = numericalDerivative11( diff --git a/gtsam_unstable/slam/InvDepthFactorVariant3.h b/gtsam_unstable/slam/InvDepthFactorVariant3.h index 83df9e13b..21c7aa6de 100644 --- a/gtsam_unstable/slam/InvDepthFactorVariant3.h +++ b/gtsam_unstable/slam/InvDepthFactorVariant3.h @@ -68,13 +68,13 @@ class InvDepthFactorVariant3a: public NoiseModelFactor2 { * @param keyFormatter optional formatter useful for printing Symbols */ void print(const std::string& s = "InvDepthFactorVariant3a", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s, keyFormatter); traits::Print(measured_, s + ".z"); } /// equals - virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const This *e = dynamic_cast(&p); return e && Base::equals(p, tol) @@ -105,7 +105,7 @@ class InvDepthFactorVariant3a: public NoiseModelFactor2 { /// Evaluate error h(x)-z and optionally derivatives Vector evaluateError(const Pose3& pose, const Vector3& landmark, boost::optional H1=boost::none, - boost::optional H2=boost::none) const { + boost::optional H2=boost::none) const override { if(H1) { (*H1) = numericalDerivative11(boost::bind(&InvDepthFactorVariant3a::inverseDepthError, this, _1, landmark), pose); @@ -188,13 +188,13 @@ class InvDepthFactorVariant3b: public NoiseModelFactor3 { * @param keyFormatter optional formatter useful for printing Symbols */ void print(const std::string& s = "InvDepthFactorVariant3", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s, keyFormatter); traits::Print(measured_, s + ".z"); } /// equals - virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const This *e = dynamic_cast(&p); return e && Base::equals(p, tol) @@ -226,7 +226,7 @@ class InvDepthFactorVariant3b: public NoiseModelFactor3 { Vector evaluateError(const Pose3& pose1, const Pose3& pose2, const Vector3& landmark, boost::optional H1=boost::none, boost::optional H2=boost::none, - boost::optional H3=boost::none) const { + boost::optional H3=boost::none) const override { if(H1) (*H1) = numericalDerivative11(boost::bind(&InvDepthFactorVariant3b::inverseDepthError, this, _1, pose2, landmark), pose1); diff --git a/gtsam_unstable/slam/MultiProjectionFactor.h b/gtsam_unstable/slam/MultiProjectionFactor.h index 5bef97bcf..b6bf2a9c7 100644 --- a/gtsam_unstable/slam/MultiProjectionFactor.h +++ b/gtsam_unstable/slam/MultiProjectionFactor.h @@ -101,7 +101,7 @@ namespace gtsam { virtual ~MultiProjectionFactor() {} /// @return a deep copy of this factor - virtual NonlinearFactor::shared_ptr clone() const { + NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( NonlinearFactor::shared_ptr(new This(*this))); } @@ -110,7 +110,7 @@ namespace gtsam { * @param s optional string naming the factor * @param keyFormatter optional formatter useful for printing Symbols */ - void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void 
print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "MultiProjectionFactor, z = "; std::cout << measured_ << "measurements, z = "; if(this->body_P_sensor_) @@ -119,7 +119,7 @@ namespace gtsam { } /// equals - virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const This *e = dynamic_cast(&p); return e && Base::equals(p, tol) @@ -129,7 +129,7 @@ namespace gtsam { } /// Evaluate error h(x)-z and optionally derivatives - Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const{ + Vector unwhitenedError(const Values& x, boost::optional&> H = boost::none) const override { Vector a; return a; diff --git a/gtsam_unstable/slam/PartialPriorFactor.h b/gtsam_unstable/slam/PartialPriorFactor.h index c6d892e93..d284366e8 100644 --- a/gtsam_unstable/slam/PartialPriorFactor.h +++ b/gtsam_unstable/slam/PartialPriorFactor.h @@ -85,20 +85,20 @@ namespace gtsam { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /** implement functions needed for Testable */ /** print */ - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { Base::print(s, keyFormatter); gtsam::print(prior_, "prior"); } /** equals */ - virtual bool equals(const NonlinearFactor& expected, double tol=1e-9) const { + bool equals(const NonlinearFactor& expected, double tol=1e-9) const override { const This *e = dynamic_cast (&expected); return e != nullptr && Base::equals(*e, tol) && gtsam::equal_with_abs_tol(this->prior_, e->prior_, tol) && @@ -108,7 +108,7 @@ namespace gtsam { /** implement functions needed to derive from Factor */ /** vector of errors */ - Vector evaluateError(const T& p, boost::optional H = boost::none) const { + Vector evaluateError(const T& p, boost::optional H = boost::none) const override { if (H) (*H) = H_; // FIXME: this was originally the generic retraction - may not produce same results Vector full_logmap = T::Logmap(p); diff --git a/gtsam_unstable/slam/PoseBetweenFactor.h b/gtsam_unstable/slam/PoseBetweenFactor.h index bb5835f80..e20792e25 100644 --- a/gtsam_unstable/slam/PoseBetweenFactor.h +++ b/gtsam_unstable/slam/PoseBetweenFactor.h @@ -59,14 +59,14 @@ namespace gtsam { virtual ~PoseBetweenFactor() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /** implement functions needed for Testable */ /** print */ - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "BetweenFactor(" << keyFormatter(this->key1()) << "," << keyFormatter(this->key2()) << ")\n"; @@ -77,7 +77,7 @@ namespace gtsam { } /** equals */ - virtual bool equals(const NonlinearFactor& expected, double tol=1e-9) const { + bool equals(const NonlinearFactor& expected, double tol=1e-9) const override { const This *e = 
dynamic_cast (&expected); return e != nullptr && Base::equals(*e, tol) @@ -90,7 +90,7 @@ namespace gtsam { /** vector of errors */ Vector evaluateError(const POSE& p1, const POSE& p2, boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const { + boost::optional H2 = boost::none) const override { if(body_P_sensor_) { POSE hx; if(H1 || H2) { diff --git a/gtsam_unstable/slam/PosePriorFactor.h b/gtsam_unstable/slam/PosePriorFactor.h index ce9861160..c9965f9bf 100644 --- a/gtsam_unstable/slam/PosePriorFactor.h +++ b/gtsam_unstable/slam/PosePriorFactor.h @@ -56,14 +56,14 @@ namespace gtsam { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /** implement functions needed for Testable */ /** print */ - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "PriorFactor on " << keyFormatter(this->key()) << "\n"; prior_.print(" prior mean: "); if(this->body_P_sensor_) @@ -72,7 +72,7 @@ namespace gtsam { } /** equals */ - virtual bool equals(const NonlinearFactor& expected, double tol=1e-9) const { + bool equals(const NonlinearFactor& expected, double tol=1e-9) const override { const This* e = dynamic_cast (&expected); return e != nullptr && Base::equals(*e, tol) @@ -83,7 +83,7 @@ namespace gtsam { /** implement functions needed to derive from Factor */ /** vector of errors */ - Vector evaluateError(const POSE& p, boost::optional H = boost::none) const { + Vector evaluateError(const POSE& p, boost::optional H = boost::none) const override { if(body_P_sensor_) { // manifold equivalent of h(x)-z -> log(z,h(x)) return prior_.localCoordinates(p.compose(*body_P_sensor_, H)); diff --git a/gtsam_unstable/slam/PoseToPointFactor.h b/gtsam_unstable/slam/PoseToPointFactor.h new file mode 100644 index 000000000..ec7da22ef --- /dev/null +++ b/gtsam_unstable/slam/PoseToPointFactor.h @@ -0,0 +1,90 @@ +/** + * @file PoseToPointFactor.hpp + * @brief This factor can be used to track a 3D landmark over time by + *providing local measurements of its location. + * @author David Wisth + **/ +#pragma once + +#include +#include +#include +#include + +namespace gtsam { + +/** + * A class for a measurement between a pose and a point. 
+ * @addtogroup SLAM + */ +class PoseToPointFactor : public NoiseModelFactor2 { + private: + typedef PoseToPointFactor This; + typedef NoiseModelFactor2 Base; + + Point3 measured_; /** the point measurement in local coordinates */ + + public: + // shorthand for a smart pointer to a factor + typedef boost::shared_ptr shared_ptr; + + /** default constructor - only use for serialization */ + PoseToPointFactor() {} + + /** Constructor */ + PoseToPointFactor(Key key1, Key key2, const Point3& measured, + const SharedNoiseModel& model) + : Base(model, key1, key2), measured_(measured) {} + + virtual ~PoseToPointFactor() {} + + /** implement functions needed for Testable */ + + /** print */ + virtual void print(const std::string& s, const KeyFormatter& keyFormatter = + DefaultKeyFormatter) const { + std::cout << s << "PoseToPointFactor(" << keyFormatter(this->key1()) << "," + << keyFormatter(this->key2()) << ")\n" + << " measured: " << measured_.transpose() << std::endl; + this->noiseModel_->print(" noise model: "); + } + + /** equals */ + virtual bool equals(const NonlinearFactor& expected, + double tol = 1e-9) const { + const This* e = dynamic_cast(&expected); + return e != nullptr && Base::equals(*e, tol) && + traits::Equals(this->measured_, e->measured_, tol); + } + + /** implement functions needed to derive from Factor */ + + /** vector of errors + * @brief Error = wTwi.inverse()*wPwp - measured_ + * @param wTwi The pose of the sensor in world coordinates + * @param wPwp The estimated point location in world coordinates + * + * Note: measured_ and the error are in local coordinates. + */ + Vector evaluateError(const Pose3& wTwi, const Point3& wPwp, + boost::optional H1 = boost::none, + boost::optional H2 = boost::none) const { + return wTwi.transformTo(wPwp, H1, H2) - measured_; + } + + /** return the measured */ + const Point3& measured() const { return measured_; } + + private: + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(ARCHIVE& ar, const unsigned int /*version*/) { + ar& boost::serialization::make_nvp( + "NoiseModelFactor2", boost::serialization::base_object(*this)); + ar& BOOST_SERIALIZATION_NVP(measured_); + } + +}; // \class PoseToPointFactor + +} // namespace gtsam diff --git a/gtsam_unstable/slam/ProjectionFactorPPP.h b/gtsam_unstable/slam/ProjectionFactorPPP.h index 19b8d56e6..caa388e2f 100644 --- a/gtsam_unstable/slam/ProjectionFactorPPP.h +++ b/gtsam_unstable/slam/ProjectionFactorPPP.h @@ -96,7 +96,7 @@ namespace gtsam { virtual ~ProjectionFactorPPP() {} /// @return a deep copy of this factor - virtual NonlinearFactor::shared_ptr clone() const { + NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( NonlinearFactor::shared_ptr(new This(*this))); } @@ -105,14 +105,14 @@ namespace gtsam { * @param s optional string naming the factor * @param keyFormatter optional formatter useful for printing Symbols */ - void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "ProjectionFactorPPP, z = "; traits::Print(measured_); Base::print("", keyFormatter); } /// equals - virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const This *e = dynamic_cast(&p); return e && Base::equals(p, tol) @@ -124,7 +124,7 @@ namespace gtsam { Vector
evaluateError(const Pose3& pose, const Pose3& transform, const Point3& point, boost::optional H1 = boost::none, boost::optional H2 = boost::none, - boost::optional H3 = boost::none) const { + boost::optional H3 = boost::none) const override { try { if(H1 || H2 || H3) { Matrix H0, H02; diff --git a/gtsam_unstable/slam/ProjectionFactorPPPC.h b/gtsam_unstable/slam/ProjectionFactorPPPC.h index 369075abf..af3294bce 100644 --- a/gtsam_unstable/slam/ProjectionFactorPPPC.h +++ b/gtsam_unstable/slam/ProjectionFactorPPPC.h @@ -91,7 +91,7 @@ namespace gtsam { virtual ~ProjectionFactorPPPC() {} /// @return a deep copy of this factor - virtual NonlinearFactor::shared_ptr clone() const { + NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( NonlinearFactor::shared_ptr(new This(*this))); } @@ -100,14 +100,14 @@ namespace gtsam { * @param s optional string naming the factor * @param keyFormatter optional formatter useful for printing Symbols */ - void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "ProjectionFactorPPPC, z = "; traits::Print(measured_); Base::print("", keyFormatter); } /// equals - virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const This *e = dynamic_cast(&p); return e && Base::equals(p, tol) @@ -119,7 +119,7 @@ namespace gtsam { boost::optional H1 = boost::none, boost::optional H2 = boost::none, boost::optional H3 = boost::none, - boost::optional H4 = boost::none) const { + boost::optional H4 = boost::none) const override { try { if(H1 || H2 || H3 || H4) { Matrix H0, H02; diff --git a/gtsam_unstable/slam/RelativeElevationFactor.h b/gtsam_unstable/slam/RelativeElevationFactor.h index 3507a4492..b713d45dc 100644 --- a/gtsam_unstable/slam/RelativeElevationFactor.h +++ b/gtsam_unstable/slam/RelativeElevationFactor.h @@ -43,22 +43,22 @@ class GTSAM_UNSTABLE_EXPORT RelativeElevationFactor: public NoiseModelFactor2( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } /** h(x)-z */ Vector evaluateError(const Pose3& pose, const Point3& point, - boost::optional H1 = boost::none, boost::optional H2 = boost::none) const; + boost::optional H1 = boost::none, boost::optional H2 = boost::none) const override; /** return the measured */ inline double measured() const { return measured_; } /** equals specialized to this factor */ - virtual bool equals(const NonlinearFactor& expected, double tol=1e-9) const; + bool equals(const NonlinearFactor& expected, double tol=1e-9) const override; /** print contents */ - void print(const std::string& s="", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; + void print(const std::string& s="", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override; private: diff --git a/gtsam_unstable/slam/SmartRangeFactor.h b/gtsam_unstable/slam/SmartRangeFactor.h index 5511a0209..6bab3f334 100644 --- a/gtsam_unstable/slam/SmartRangeFactor.h +++ b/gtsam_unstable/slam/SmartRangeFactor.h @@ -73,14 +73,14 @@ class SmartRangeFactor: public NoiseModelFactor { // Testable /** print */ - virtual void print(const std::string& s = "", - const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "", + const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "SmartRangeFactor 
with " << size() << " measurements\n"; NoiseModelFactor::print(s); } /** Check if two factors are equal */ - virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const { + bool equals(const NonlinearFactor& f, double tol = 1e-9) const override { return false; } @@ -143,8 +143,8 @@ class SmartRangeFactor: public NoiseModelFactor { /** * Error function *without* the NoiseModel, \f$ z-h(x) \f$. */ - virtual Vector unwhitenedError(const Values& x, - boost::optional&> H = boost::none) const { + Vector unwhitenedError(const Values& x, + boost::optional&> H = boost::none) const override { size_t n = size(); if (n < 3) { if (H) { @@ -175,7 +175,7 @@ class SmartRangeFactor: public NoiseModelFactor { } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } diff --git a/gtsam_unstable/slam/SmartStereoProjectionFactor.h b/gtsam_unstable/slam/SmartStereoProjectionFactor.h index db80956fa..ac063eff9 100644 --- a/gtsam_unstable/slam/SmartStereoProjectionFactor.h +++ b/gtsam_unstable/slam/SmartStereoProjectionFactor.h @@ -102,7 +102,7 @@ class SmartStereoProjectionFactor: public SmartFactorBase { * @param keyFormatter optional formatter useful for printing Symbols */ void print(const std::string& s = "", const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const { + DefaultKeyFormatter) const override { std::cout << s << "SmartStereoProjectionFactor\n"; std::cout << "linearizationMode:\n" << params_.linearizationMode << std::endl; std::cout << "triangulationParameters:\n" << params_.triangulation << std::endl; @@ -111,7 +111,7 @@ class SmartStereoProjectionFactor: public SmartFactorBase { } /// equals - virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const SmartStereoProjectionFactor *e = dynamic_cast(&p); return e && params_.linearizationMode == e->params_.linearizationMode @@ -327,8 +327,8 @@ class SmartStereoProjectionFactor: public SmartFactorBase { } /// linearize - virtual boost::shared_ptr linearize( - const Values& values) const { + boost::shared_ptr linearize( + const Values& values) const override { return linearizeDamped(values); } @@ -438,7 +438,7 @@ class SmartStereoProjectionFactor: public SmartFactorBase { } /// Calculate total reprojection error - virtual double error(const Values& values) const { + double error(const Values& values) const override { if (this->active(values)) { return totalReprojectionError(Base::cameras(values)); } else { // else of active flag @@ -449,9 +449,9 @@ class SmartStereoProjectionFactor: public SmartFactorBase { /** * This corrects the Jacobians and error vector for the case in which the right pixel in the monocular camera is missing (nan) */ - virtual void correctForMissingMeasurements(const Cameras& cameras, Vector& ue, + void correctForMissingMeasurements(const Cameras& cameras, Vector& ue, boost::optional Fs = boost::none, - boost::optional E = boost::none) const + boost::optional E = boost::none) const override { // when using stereo cameras, some of the measurements might be missing: for(size_t i=0; i < cameras.size(); i++){ diff --git a/gtsam_unstable/slam/SmartStereoProjectionPoseFactor.h b/gtsam_unstable/slam/SmartStereoProjectionPoseFactor.h index 7ea5a4c2f..124e45005 100644 --- a/gtsam_unstable/slam/SmartStereoProjectionPoseFactor.h +++ 
b/gtsam_unstable/slam/SmartStereoProjectionPoseFactor.h @@ -120,7 +120,7 @@ class SmartStereoProjectionPoseFactor: public SmartStereoProjectionFactor { * @param keyFormatter optional formatter useful for printing Symbols */ void print(const std::string& s = "", const KeyFormatter& keyFormatter = - DefaultKeyFormatter) const { + DefaultKeyFormatter) const override { std::cout << s << "SmartStereoProjectionPoseFactor, z = \n "; for(const boost::shared_ptr& K: K_all_) K->print("calibration = "); @@ -128,7 +128,7 @@ class SmartStereoProjectionPoseFactor: public SmartStereoProjectionFactor { } /// equals - virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const { + bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { const SmartStereoProjectionPoseFactor *e = dynamic_cast(&p); @@ -138,7 +138,7 @@ class SmartStereoProjectionPoseFactor: public SmartStereoProjectionFactor { /** * error calculates the error of the factor. */ - virtual double error(const Values& values) const { + double error(const Values& values) const override { if (this->active(values)) { return this->totalReprojectionError(cameras(values)); } else { // else of active flag @@ -157,7 +157,7 @@ class SmartStereoProjectionPoseFactor: public SmartStereoProjectionFactor { * to keys involved in this factor * @return vector of Values */ - Base::Cameras cameras(const Values& values) const { + Base::Cameras cameras(const Values& values) const override { Base::Cameras cameras; size_t i=0; for(const Key& k: this->keys_) { diff --git a/gtsam_unstable/slam/TSAMFactors.h b/gtsam_unstable/slam/TSAMFactors.h index 0a456e59c..6f98a2dbd 100644 --- a/gtsam_unstable/slam/TSAMFactors.h +++ b/gtsam_unstable/slam/TSAMFactors.h @@ -47,7 +47,7 @@ class DeltaFactor: public NoiseModelFactor2 { /// Evaluate measurement error h(x)-z Vector evaluateError(const Pose2& pose, const Point2& point, boost::optional H1 = boost::none, boost::optional H2 = - boost::none) const { + boost::none) const override { return pose.transformTo(point, H1, H2) - measured_; } }; @@ -79,7 +79,7 @@ class DeltaFactorBase: public NoiseModelFactor4 { boost::optional H1 = boost::none, // boost::optional H2 = boost::none, // boost::optional H3 = boost::none, // - boost::optional H4 = boost::none) const { + boost::optional H4 = boost::none) const override { if (H1 || H2 || H3 || H4) { // TODO use fixed-size matrices Matrix D_pose_g_base1, D_pose_g_pose; @@ -134,7 +134,7 @@ class OdometryFactorBase: public NoiseModelFactor4 { boost::optional H1 = boost::none, // boost::optional H2 = boost::none, // boost::optional H3 = boost::none, // - boost::optional H4 = boost::none) const { + boost::optional H4 = boost::none) const override { if (H1 || H2 || H3 || H4) { // TODO use fixed-size matrices Matrix D_pose1_g_base1, D_pose1_g_pose1; diff --git a/gtsam_unstable/slam/TransformBtwRobotsUnaryFactor.h b/gtsam_unstable/slam/TransformBtwRobotsUnaryFactor.h index c29e3e794..a17e07f9c 100644 --- a/gtsam_unstable/slam/TransformBtwRobotsUnaryFactor.h +++ b/gtsam_unstable/slam/TransformBtwRobotsUnaryFactor.h @@ -81,13 +81,13 @@ namespace gtsam { /** Clone */ - virtual gtsam::NonlinearFactor::shared_ptr clone() const { return boost::make_shared(*this); } + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::make_shared(*this); } /** implement functions needed for Testable */ /** print */ - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s, const KeyFormatter& 
keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "TransformBtwRobotsUnaryFactor(" << keyFormatter(key_) << ")\n"; std::cout << "MR between factor keys: " @@ -99,7 +99,7 @@ namespace gtsam { } /** equals */ - virtual bool equals(const NonlinearFactor& f, double tol=1e-9) const { + bool equals(const NonlinearFactor& f, double tol=1e-9) const override { const This *t = dynamic_cast (&f); if(t && Base::equals(f)) @@ -128,7 +128,7 @@ namespace gtsam { } /* ************************************************************************* */ - virtual double error(const gtsam::Values& x) const { + double error(const gtsam::Values& x) const override { return whitenedError(x).squaredNorm(); } @@ -139,7 +139,7 @@ namespace gtsam { * Hence \f$ b = z - h(x) = - \mathtt{error\_vector}(x) \f$ */ /* This version of linearize recalculates the noise model each time */ - virtual boost::shared_ptr linearize(const gtsam::Values& x) const { + boost::shared_ptr linearize(const gtsam::Values& x) const override { // Only linearize if the factor is active if (!this->active(x)) return boost::shared_ptr(); @@ -203,12 +203,7 @@ namespace gtsam { /* ************************************************************************* */ - /** number of variables attached to this factor */ - std::size_t size() const { - return 1; - } - - virtual size_t dim() const { + size_t dim() const override { return model_->R().rows() + model_->R().cols(); } diff --git a/gtsam_unstable/slam/TransformBtwRobotsUnaryFactorEM.h b/gtsam_unstable/slam/TransformBtwRobotsUnaryFactorEM.h index 48aaa8ed7..8a56a5d02 100644 --- a/gtsam_unstable/slam/TransformBtwRobotsUnaryFactorEM.h +++ b/gtsam_unstable/slam/TransformBtwRobotsUnaryFactorEM.h @@ -97,13 +97,13 @@ namespace gtsam { /** Clone */ - virtual NonlinearFactor::shared_ptr clone() const { return boost::make_shared(*this); } + NonlinearFactor::shared_ptr clone() const override { return boost::make_shared(*this); } /** implement functions needed for Testable */ /** print */ - virtual void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s, const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << "TransformBtwRobotsUnaryFactorEM(" << keyFormatter(key_) << ")\n"; std::cout << "MR between factor keys: " @@ -119,7 +119,7 @@ namespace gtsam { } /** equals */ - virtual bool equals(const NonlinearFactor& f, double tol=1e-9) const { + bool equals(const NonlinearFactor& f, double tol=1e-9) const override { const This *t = dynamic_cast (&f); if(t && Base::equals(f)) @@ -151,7 +151,7 @@ namespace gtsam { } /* ************************************************************************* */ - virtual double error(const Values& x) const { + double error(const Values& x) const override { return whitenedError(x).squaredNorm(); } @@ -162,7 +162,7 @@ namespace gtsam { * Hence \f$ b = z - h(x) = - \mathtt{error\_vector}(x) \f$ */ /* This version of linearize recalculates the noise model each time */ - virtual boost::shared_ptr linearize(const Values& x) const { + boost::shared_ptr linearize(const Values& x) const override { // Only linearize if the factor is active if (!this->active(x)) return boost::shared_ptr(); @@ -401,12 +401,7 @@ namespace gtsam { /* ************************************************************************* */ - /** number of variables attached to this factor */ - std::size_t size() const { - return 1; - } - - virtual size_t dim() const { + size_t dim() const override { return 
model_inlier_->R().rows() + model_inlier_->R().cols(); } diff --git a/gtsam_unstable/slam/serialization.cpp b/gtsam_unstable/slam/serialization.cpp index 8a661f2ef..88a94fd51 100644 --- a/gtsam_unstable/slam/serialization.cpp +++ b/gtsam_unstable/slam/serialization.cpp @@ -43,7 +43,6 @@ typedef PriorFactor PriorFactorPose3; typedef PriorFactor PriorFactorCal3_S2; typedef PriorFactor PriorFactorCal3DS2; typedef PriorFactor PriorFactorCalibratedCamera; -typedef PriorFactor PriorFactorSimpleCamera; typedef PriorFactor PriorFactorPinholeCameraCal3_S2; typedef PriorFactor PriorFactorStereoCamera; @@ -68,7 +67,6 @@ typedef NonlinearEquality NonlinearEqualityPose3; typedef NonlinearEquality NonlinearEqualityCal3_S2; typedef NonlinearEquality NonlinearEqualityCal3DS2; typedef NonlinearEquality NonlinearEqualityCalibratedCamera; -typedef NonlinearEquality NonlinearEqualitySimpleCamera; typedef NonlinearEquality NonlinearEqualityPinholeCameraCal3_S2; typedef NonlinearEquality NonlinearEqualityStereoCamera; @@ -77,10 +75,8 @@ typedef RangeFactor RangeFactor3D; typedef RangeFactor RangeFactorPose2; typedef RangeFactor RangeFactorPose3; typedef RangeFactor RangeFactorCalibratedCameraPoint; -typedef RangeFactor RangeFactorSimpleCameraPoint; typedef RangeFactor RangeFactorPinholeCameraCal3_S2Point; typedef RangeFactor RangeFactorCalibratedCamera; -typedef RangeFactor RangeFactorSimpleCamera; typedef RangeFactor RangeFactorPinholeCameraCal3_S2; typedef BearingRangeFactor BearingRangeFactor2D; @@ -90,7 +86,7 @@ typedef GenericProjectionFactor GenericProjectionFactorC typedef GenericProjectionFactor GenericProjectionFactorCal3DS2; typedef gtsam::GeneralSFMFactor GeneralSFMFactorCal3_S2; -//typedef gtsam::GeneralSFMFactor GeneralSFMFactorCal3DS2; +typedef gtsam::GeneralSFMFactor GeneralSFMFactorCal3DS2; typedef gtsam::GeneralSFMFactor2 GeneralSFMFactor2Cal3_S2; @@ -129,7 +125,6 @@ GTSAM_VALUE_EXPORT(gtsam::Cal3_S2); GTSAM_VALUE_EXPORT(gtsam::Cal3DS2); GTSAM_VALUE_EXPORT(gtsam::Cal3_S2Stereo); GTSAM_VALUE_EXPORT(gtsam::CalibratedCamera); -GTSAM_VALUE_EXPORT(gtsam::SimpleCamera); GTSAM_VALUE_EXPORT(gtsam::PinholeCameraCal3_S2); GTSAM_VALUE_EXPORT(gtsam::StereoCamera); @@ -150,7 +145,6 @@ BOOST_CLASS_EXPORT_GUID(PriorFactorPose3, "gtsam::PriorFactorPose3"); BOOST_CLASS_EXPORT_GUID(PriorFactorCal3_S2, "gtsam::PriorFactorCal3_S2"); BOOST_CLASS_EXPORT_GUID(PriorFactorCal3DS2, "gtsam::PriorFactorCal3DS2"); BOOST_CLASS_EXPORT_GUID(PriorFactorCalibratedCamera, "gtsam::PriorFactorCalibratedCamera"); -BOOST_CLASS_EXPORT_GUID(PriorFactorSimpleCamera, "gtsam::PriorFactorSimpleCamera"); BOOST_CLASS_EXPORT_GUID(PriorFactorStereoCamera, "gtsam::PriorFactorStereoCamera"); BOOST_CLASS_EXPORT_GUID(BetweenFactorLieVector, "gtsam::BetweenFactorLieVector"); @@ -174,7 +168,6 @@ BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPose3, "gtsam::NonlinearEqualityPose3") BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3_S2, "gtsam::NonlinearEqualityCal3_S2"); BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3DS2, "gtsam::NonlinearEqualityCal3DS2"); BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCalibratedCamera, "gtsam::NonlinearEqualityCalibratedCamera"); -BOOST_CLASS_EXPORT_GUID(NonlinearEqualitySimpleCamera, "gtsam::NonlinearEqualitySimpleCamera"); BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoCamera, "gtsam::NonlinearEqualityStereoCamera"); BOOST_CLASS_EXPORT_GUID(RangeFactor2D, "gtsam::RangeFactor2D"); @@ -182,9 +175,7 @@ BOOST_CLASS_EXPORT_GUID(RangeFactor3D, "gtsam::RangeFactor3D"); BOOST_CLASS_EXPORT_GUID(RangeFactorPose2, "gtsam::RangeFactorPose2"); 
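// Note: each concrete factor instantiation is registered for Boost serialization with a typedef plus BOOST_CLASS_EXPORT_GUID, and each value type stored in a Values container with GTSAM_VALUE_EXPORT; serializing an unregistered derived type through a base pointer throws boost::archive::archive_exception (unregistered_class). A hypothetical new instantiation would follow the same pattern, for example:
//   typedef RangeFactor<Pose3, MyCamera> RangeFactorMyCamera;  // MyCamera is hypothetical
//   BOOST_CLASS_EXPORT_GUID(RangeFactorMyCamera, "gtsam::RangeFactorMyCamera");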
BOOST_CLASS_EXPORT_GUID(RangeFactorPose3, "gtsam::RangeFactorPose3"); BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCameraPoint, "gtsam::RangeFactorCalibratedCameraPoint"); -BOOST_CLASS_EXPORT_GUID(RangeFactorSimpleCameraPoint, "gtsam::RangeFactorSimpleCameraPoint"); BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCamera, "gtsam::RangeFactorCalibratedCamera"); -BOOST_CLASS_EXPORT_GUID(RangeFactorSimpleCamera, "gtsam::RangeFactorSimpleCamera"); BOOST_CLASS_EXPORT_GUID(BearingRangeFactor2D, "gtsam::BearingRangeFactor2D"); @@ -192,12 +183,28 @@ BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3_S2, "gtsam::GenericProjectio BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3DS2, "gtsam::GenericProjectionFactorCal3DS2"); BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3_S2, "gtsam::GeneralSFMFactorCal3_S2"); -//BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3DS2, "gtsam::GeneralSFMFactorCal3DS2"); +BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3DS2, "gtsam::GeneralSFMFactorCal3DS2"); BOOST_CLASS_EXPORT_GUID(GeneralSFMFactor2Cal3_S2, "gtsam::GeneralSFMFactor2Cal3_S2"); BOOST_CLASS_EXPORT_GUID(GenericStereoFactor3D, "gtsam::GenericStereoFactor3D"); +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41 + +typedef PriorFactor PriorFactorSimpleCamera; +typedef NonlinearEquality NonlinearEqualitySimpleCamera; +typedef RangeFactor RangeFactorSimpleCameraPoint; +typedef RangeFactor RangeFactorSimpleCamera; + +GTSAM_VALUE_EXPORT(gtsam::SimpleCamera); +BOOST_CLASS_EXPORT_GUID(PriorFactorSimpleCamera, "gtsam::PriorFactorSimpleCamera"); +BOOST_CLASS_EXPORT_GUID(NonlinearEqualitySimpleCamera, "gtsam::NonlinearEqualitySimpleCamera"); +BOOST_CLASS_EXPORT_GUID(RangeFactorSimpleCameraPoint, "gtsam::RangeFactorSimpleCameraPoint"); +BOOST_CLASS_EXPORT_GUID(RangeFactorSimpleCamera, "gtsam::RangeFactorSimpleCamera"); + +#endif + + /* ************************************************************************* */ // Actual implementations of functions /* ************************************************************************* */ diff --git a/gtsam_unstable/slam/tests/testPoseToPointFactor.h b/gtsam_unstable/slam/tests/testPoseToPointFactor.h new file mode 100644 index 000000000..8f8563e9d --- /dev/null +++ b/gtsam_unstable/slam/tests/testPoseToPointFactor.h @@ -0,0 +1,86 @@ +/** + * @file testPoseToPointFactor.cpp + * @brief Unit tests for PoseToPointFactor + * @author David Wisth + * @date June 20, 2020 + */ + +#include +#include +#include + +using namespace gtsam; +using namespace gtsam::noiseModel; + +/// Verify zero error when there is no noise +TEST(PoseToPointFactor, errorNoiseless) { + Pose3 pose = Pose3::identity(); + Point3 point(1.0, 2.0, 3.0); + Point3 noise(0.0, 0.0, 0.0); + Point3 measured = point + noise; + + Key pose_key(1); + Key point_key(2); + PoseToPointFactor factor(pose_key, point_key, measured, + Isotropic::Sigma(3, 0.05)); + Vector expectedError = Vector3(0.0, 0.0, 0.0); + Vector actualError = factor.evaluateError(pose, point); + EXPECT(assert_equal(expectedError, actualError, 1E-5)); +} + +/// Verify expected error in test scenario +TEST(PoseToPointFactor, errorNoise) { + Pose3 pose = Pose3::identity(); + Point3 point(1.0, 2.0, 3.0); + Point3 noise(-1.0, 0.5, 0.3); + Point3 measured = point + noise; + + Key pose_key(1); + Key point_key(2); + PoseToPointFactor factor(pose_key, point_key, measured, + Isotropic::Sigma(3, 0.05)); + Vector expectedError = -noise; + Vector actualError = factor.evaluateError(pose, point); + EXPECT(assert_equal(expectedError, actualError, 1E-5)); +} + +/// Check Jacobians are correct +TEST(PoseToPointFactor, jacobian) { + // Measurement
+ gtsam::Point3 l_meas = gtsam::Point3(1, 2, 3); + + // Linearisation point + gtsam::Point3 p_t = gtsam::Point3(-5, 12, 2); + gtsam::Rot3 p_R = gtsam::Rot3::RzRyRx(1.5 * M_PI, -0.3 * M_PI, 0.4 * M_PI); + Pose3 p(p_R, p_t); + + gtsam::Point3 l = gtsam::Point3(3, 0, 5); + + // Factor + Key pose_key(1); + Key point_key(2); + SharedGaussian noise = noiseModel::Diagonal::Sigmas(Vector3(0.1, 0.1, 0.1)); + PoseToPointFactor factor(pose_key, point_key, l_meas, noise); + + // Calculate numerical derivatives + auto f = boost::bind(&PoseToPointFactor::evaluateError, factor, _1, _2, + boost::none, boost::none); + Matrix numerical_H1 = numericalDerivative21(f, p, l); + Matrix numerical_H2 = numericalDerivative22(f, p, l); + + // Use the factor to calculate the derivative + Matrix actual_H1; + Matrix actual_H2; + factor.evaluateError(p, l, actual_H1, actual_H2); + + // Verify we get the expected error + EXPECT_TRUE(assert_equal(numerical_H1, actual_H1, 1e-8)); + EXPECT_TRUE(assert_equal(numerical_H2, actual_H2, 1e-8)); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/gtsam_unstable/timing/process_shonan_timing_results.py b/gtsam_unstable/timing/process_shonan_timing_results.py new file mode 100644 index 000000000..9cf934dba --- /dev/null +++ b/gtsam_unstable/timing/process_shonan_timing_results.py @@ -0,0 +1,215 @@ +""" +Process timing results from timeShonanAveraging +""" + +import xlrd +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.ticker import FuncFormatter +import heapq +from collections import Counter + +def make_combined_plot(name, p_values, times, costs, min_cost_range=10): + """ Make a plot that combines timing and SO(3) cost. 
+ Arguments: + name: string of the plot title + p_values: list of p-values (int) + times: list of timings (seconds) + costs: list of costs (double) + Will calculate the range of the costs, default minimum range = 10.0 + """ + min_cost = min(costs) + cost_range = max(max(costs)-min_cost,min_cost_range) + fig = plt.figure() + ax1 = fig.add_subplot(111) + ax1.plot(p_values, times, label="time") + ax1.set_ylabel('Time used to optimize \ seconds') + ax1.set_xlabel('p_value') + ax2 = ax1.twinx() + ax2.plot(p_values, costs, 'r', label="cost") + ax2.set_ylabel('Cost at SO(3) form') + ax2.set_xlabel('p_value') + ax2.set_xticks(p_values) + ax2.set_ylim(min_cost, min_cost + cost_range) + plt.title(name, fontsize=12) + ax1.legend(loc="upper left") + ax2.legend(loc="upper right") + plt.interactive(False) + plt.show() + +def make_convergence_plot(name, p_values, times, costs, iter=10): + """ Make a bar that show the success rate for each p_value according to whether the SO(3) cost converges + Arguments: + name: string of the plot title + p_values: list of p-values (int) + times: list of timings (seconds) + costs: list of costs (double) + iter: int of iteration number for each p_value + """ + + max_cost = np.mean(np.array(heapq.nlargest(iter, costs))) + # calculate mean costs for each p value + p_values = list(dict(Counter(p_values)).keys()) + # make sure the iter number + iter = int(len(times)/len(p_values)) + p_mean_cost = [np.mean(np.array(costs[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_max = p_values[p_mean_cost.index(max(p_mean_cost))] + # print(p_mean_cost) + # print(p_max) + + #take mean and make the combined plot + mean_times = [] + mean_costs = [] + for p in p_values: + costs_tmp = costs[p_values.index(p)*iter:(p_values.index(p)+1)*iter] + mean_cost = sum(costs_tmp)/len(costs_tmp) + mean_costs.append(mean_cost) + times_tmp = times[p_values.index(p)*iter:(p_values.index(p)+1)*iter] + mean_time = sum(times_tmp)/len(times_tmp) + mean_times.append(mean_time) + make_combined_plot(name, p_values,mean_times, mean_costs) + + # calculate the convergence rate for each p_value + p_success_rates = [] + if p_mean_cost[0] >= 0.95*np.mean(np.array(costs)) and p_mean_cost[0] <= 1.05*np.mean(np.array(costs)): + p_success_rates = [ 1.0 for p in p_values] + else: + for p in p_values: + if p > p_max: + p_costs = costs[p_values.index(p)*iter:(p_values.index(p)+1)*iter] + # print(p_costs) + converged = [ int(p_cost < 0.3*max_cost) for p_cost in p_costs] + success_rate = sum(converged)/len(converged) + p_success_rates.append(success_rate) + else: + p_success_rates.append(0) + + plt.bar(p_values, p_success_rates, align='center', alpha=0.5) + plt.xticks(p_values) + plt.yticks(np.arange(0, 1.2, 0.2), ['0%', '20%', '40%', '60%', '80%', '100%']) + plt.xlabel("p_value") + plt.ylabel("success rate") + plt.title(name, fontsize=12) + plt.show() + +def make_eigen_and_bound_plot(name, p_values, times1, costPs, cost3s, times2, min_eigens, subounds): + """ Make a plot that combines time for optimizing, time for optimizing and compute min_eigen, + min_eigen, subound (subound = (f_R - f_SDP) / f_SDP). 
+ Arguments: + name: string of the plot title + p_values: list of p-values (int) + times1: list of timings (seconds) + costPs: f_SDP + cost3s: f_R + times2: list of timings (seconds) + min_eigens: list of min_eigen (double) + subounds: list of subound (double) + """ + + if dict(Counter(p_values))[5] != 1: + p_values = list(dict(Counter(p_values)).keys()) + iter = int(len(times1)/len(p_values)) + p_mean_times1 = [np.mean(np.array(times1[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_mean_times2 = [np.mean(np.array(times2[i*iter:(i+1)*iter])) for i in range(len(p_values))] + print("p_values \n", p_values) + print("p_mean_times_opti \n", p_mean_times1) + print("p_mean_times_eig \n", p_mean_times2) + + p_mean_costPs = [np.mean(np.array(costPs[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_mean_cost3s = [np.mean(np.array(cost3s[i*iter:(i+1)*iter])) for i in range(len(p_values))] + p_mean_lambdas = [np.mean(np.array(min_eigens[i*iter:(i+1)*iter])) for i in range(len(p_values))] + + print("p_mean_costPs \n", p_mean_costPs) + print("p_mean_cost3s \n", p_mean_cost3s) + print("p_mean_lambdas \n", p_mean_lambdas) + print("*******************************************************************************************************************") + + + else: + plt.figure(1) + plt.ylabel('Time used (seconds)') + plt.xlabel('p_value') + plt.plot(p_values, times1, 'r', label="time for optimizing") + plt.plot(p_values, times2, 'blue', label="time for optimizing and check") + plt.title(name, fontsize=12) + plt.legend(loc="best") + plt.interactive(False) + plt.show() + + plt.figure(2) + plt.ylabel('Min eigen_value') + plt.xlabel('p_value') + plt.plot(p_values, min_eigens, 'r', label="min_eigen values") + plt.title(name, fontsize=12) + plt.legend(loc="best") + plt.interactive(False) + plt.show() + + plt.figure(3) + plt.ylabel('sub_bounds') + plt.xlabel('p_value') + plt.plot(p_values, subounds, 'blue', label="sub_bounds") + plt.title(name, fontsize=12) + plt.legend(loc="best") + plt.show() + +# Process arguments and call plot function +import argparse +import csv +import os + +parser = argparse.ArgumentParser() +parser.add_argument("path") +args = parser.parse_args() + + +file_path = [] +domain = os.path.abspath(args.path) +for info in os.listdir(args.path): + file_path.append(os.path.join(domain, info)) +file_path.sort() +print(file_path) + + +# name of all the plots +names = {} +names[0] = 'tinyGrid3D vertex = 9, edge = 11' +names[1] = 'smallGrid3D vertex = 125, edge = 297' +names[2] = 'parking-garage vertex = 1661, edge = 6275' +names[3] = 'sphere2500 vertex = 2500, edge = 4949' +# names[4] = 'sphere_bignoise vertex = 2200, edge = 8647' +names[5] = 'torus3D vertex = 5000, edge = 9048' +names[6] = 'cubicle vertex = 5750, edge = 16869' +names[7] = 'rim vertex = 10195, edge = 29743' + +# Parse CSV file +for key, name in names.items(): + print(key, name) + + # find according file to process + name_file = None + for path in file_path: + if name[0:3] in path: + name_file = path + if name_file == None: + print("The file %s is not in the path" % name) + continue + + p_values, times1, costPs, cost3s, times2, min_eigens, subounds = [],[],[],[],[],[],[] + with open(name_file) as csvfile: + reader = csv.reader(csvfile, delimiter='\t') + for row in reader: + print(row) + p_values.append(int(row[0])) + times1.append(float(row[1])) + costPs.append(float(row[2])) + cost3s.append(float(row[3])) + if len(row) > 4: + times2.append(float(row[4])) + min_eigens.append(float(row[5])) + subounds.append(float(row[6])) + 
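+        # Each row is assumed to be tab-separated in the order written by
+        # saveData() in timeShonanAveraging.cpp:
+        #   p  time1  costP  cost3  [time2  min_eigenvalue  suBound]
+        # e.g. "5\t1.23\t417.2\t430.8\t1.51\t-1.2e-4\t0.032" (values illustrative only).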
+ #plot + # make_combined_plot(name, p_values, times1, cost3s) + # make_convergence_plot(name, p_values, times1, cost3s) + make_eigen_and_bound_plot(name, p_values, times1, costPs, cost3s, times2, min_eigens, subounds) diff --git a/gtsam_unstable/timing/timeShonanAveraging.cpp b/gtsam_unstable/timing/timeShonanAveraging.cpp new file mode 100644 index 000000000..d42635404 --- /dev/null +++ b/gtsam_unstable/timing/timeShonanAveraging.cpp @@ -0,0 +1,180 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testShonanAveraging.cpp + * @date September 2019 + * @author Jing Wu + * @author Frank Dellaert + * @brief Timing script for Shonan Averaging algorithm + */ + +#include "gtsam/base/Matrix.h" +#include "gtsam/base/Vector.h" +#include "gtsam/geometry/Point3.h" +#include "gtsam/geometry/Rot3.h" +#include +#include + +#include + +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; + +// save a single line of timing info to an output stream +void saveData(size_t p, double time1, double costP, double cost3, double time2, + double min_eigenvalue, double suBound, std::ostream* os) { + *os << static_cast(p) << "\t" << time1 << "\t" << costP << "\t" << cost3 + << "\t" << time2 << "\t" << min_eigenvalue << "\t" << suBound << endl; +} + +void checkR(const Matrix& R) { + Matrix R2 = R.transpose(); + Matrix actual_R = R2 * R; + assert_equal(Rot3(), Rot3(actual_R)); +} + +void saveResult(string name, const Values& values) { + ofstream myfile; + myfile.open("shonan_result_of_" + name + ".dat"); + size_t nrSO3 = values.count(); + myfile << "#Type SO3 Number " << nrSO3 << "\n"; + for (int i = 0; i < nrSO3; ++i) { + Matrix R = values.at(i).matrix(); + // Check if the result of R.Transpose*R satisfy orthogonal constraint + checkR(R); + myfile << i; + for (int m = 0; m < 3; ++m) { + for (int n = 0; n < 3; ++n) { + myfile << " " << R(m, n); + } + } + myfile << "\n"; + } + myfile.close(); + cout << "Saved shonan_result.dat file" << endl; +} + +void saveG2oResult(string name, const Values& values, std::map poses) { + ofstream myfile; + myfile.open("shonan_result_of_" + name + ".g2o"); + size_t nrSO3 = values.count(); + for (int i = 0; i < nrSO3; ++i) { + Matrix R = values.at(i).matrix(); + // Check if the result of R.Transpose*R satisfy orthogonal constraint + checkR(R); + myfile << "VERTEX_SE3:QUAT" << " "; + myfile << i << " "; + myfile << poses[i].x() << " " << poses[i].y() << " " << poses[i].z() << " "; + Vector quaternion = Rot3(R).quaternion(); + myfile << quaternion(3) << " " << quaternion(2) << " " << quaternion(1) + << " " << quaternion(0); + myfile << "\n"; + } + myfile.close(); + cout << "Saved shonan_result.g2o file" << endl; +} + +void saveResultQuat(const Values& values) { + ofstream myfile; + myfile.open("shonan_result.dat"); + size_t nrSOn = values.count(); + for (int i = 0; i < nrSOn; ++i) { + GTSAM_PRINT(values.at(i)); + Rot3 R = Rot3(values.at(i).matrix()); + float x = R.toQuaternion().x(); + float y = R.toQuaternion().y(); + float z = R.toQuaternion().z(); + float w = R.toQuaternion().w(); + myfile << "QuatSO3 " << i; + myfile << "QuatSO3 " << i << " " << w << " " << x << " " << y << " " << z 
<< "\n"; + myfile.close(); + } +} + +int main(int argc, char* argv[]) { + // primitive argument parsing: + if (argc > 3) { + throw runtime_error("Usage: timeShonanAveraging [g2oFile]"); + } + + string g2oFile; + try { + if (argc > 1) + g2oFile = argv[argc - 1]; + else + g2oFile = findExampleDataFile("sphere2500"); + + } catch (const exception& e) { + cerr << e.what() << '\n'; + exit(1); + } + + // Create a csv output file + size_t pos1 = g2oFile.find("data/"); + size_t pos2 = g2oFile.find(".g2o"); + string name = g2oFile.substr(pos1 + 5, pos2 - pos1 - 5); + cout << name << endl; + ofstream csvFile("shonan_timing_of_" + name + ".csv"); + + // Create Shonan averaging instance from the file. + // ShonanAveragingParameters parameters; + // double sigmaNoiseInRadians = 0 * M_PI / 180; + // parameters.setNoiseSigma(sigmaNoiseInRadians); + static const ShonanAveraging3 kShonan(g2oFile); + + // increase p value and try optimize using Shonan Algorithm. use chrono for + // timing + size_t pMin = 3; + Values Qstar; + Vector minEigenVector; + double CostP = 0, Cost3 = 0, lambdaMin = 0, suBound = 0; + cout << "(int)p" << "\t" << "time1" << "\t" << "costP" << "\t" << "cost3" << "\t" + << "time2" << "\t" << "MinEigenvalue" << "\t" << "SuBound" << endl; + + const Values randomRotations = kShonan.initializeRandomly(); + + for (size_t p = pMin; p <= 7; p++) { + // Randomly initialize at lowest level, initialize by line search after that + const Values initial = + (p > pMin) ? kShonan.initializeWithDescent(p, Qstar, minEigenVector, + lambdaMin) + : ShonanAveraging3::LiftTo(pMin, randomRotations); + chrono::steady_clock::time_point t1 = chrono::steady_clock::now(); + // optimizing + const Values result = kShonan.tryOptimizingAt(p, initial); + chrono::steady_clock::time_point t2 = chrono::steady_clock::now(); + chrono::duration timeUsed1 = + chrono::duration_cast>(t2 - t1); + lambdaMin = kShonan.computeMinEigenValue(result, &minEigenVector); + chrono::steady_clock::time_point t3 = chrono::steady_clock::now(); + chrono::duration timeUsed2 = + chrono::duration_cast>(t3 - t1); + Qstar = result; + CostP = kShonan.costAt(p, result); + const Values SO3Values = kShonan.roundSolution(result); + Cost3 = kShonan.cost(SO3Values); + suBound = (Cost3 - CostP) / CostP; + + saveData(p, timeUsed1.count(), CostP, Cost3, timeUsed2.count(), + lambdaMin, suBound, &cout); + saveData(p, timeUsed1.count(), CostP, Cost3, timeUsed2.count(), + lambdaMin, suBound, &csvFile); + } + saveResult(name, kShonan.roundSolution(Qstar)); + // saveG2oResult(name, kShonan.roundSolution(Qstar), kShonan.Poses()); + return 0; +} diff --git a/matlab/+gtsam/Contents.m b/matlab/+gtsam/Contents.m index 035e7e509..fb6d3081e 100644 --- a/matlab/+gtsam/Contents.m +++ b/matlab/+gtsam/Contents.m @@ -99,6 +99,7 @@ % %% SLAM and SFM % BearingFactor2D - class BearingFactor2D, see Doxygen page for details +% BearingFactor3D - class BearingFactor3D, see Doxygen page for details % BearingRangeFactor2D - class BearingRangeFactor2D, see Doxygen page for details % BetweenFactorLieMatrix - class BetweenFactorLieMatrix, see Doxygen page for details % BetweenFactorLieScalar - class BetweenFactorLieScalar, see Doxygen page for details diff --git a/matlab/+gtsam/Point2.m b/matlab/+gtsam/Point2.m new file mode 100644 index 000000000..3ea65c33e --- /dev/null +++ b/matlab/+gtsam/Point2.m @@ -0,0 +1,12 @@ +function pt = Point2(varargin) + % Point2 shim + if nargin == 2 && isa(varargin{1}, 'double') + pt = [varargin{1} varargin{2}]'; + elseif nargin == 1 + pt = varargin{1}; + 
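+    % Note: in every branch pt is a plain 2x1 double rather than a wrapped
+    % Point2 object, so callers index components as pt(1), pt(2) instead of
+    % using .x/.y, e.g. (illustrative): p = gtsam.Point2(3, 4); px = p(1);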
elseif nargin == 0 + pt = [0 0]'; + else + error('Arguments do not match any overload of Point2 shim'); + end +end \ No newline at end of file diff --git a/matlab/+gtsam/Point3.m b/matlab/+gtsam/Point3.m new file mode 100644 index 000000000..5f66b4517 --- /dev/null +++ b/matlab/+gtsam/Point3.m @@ -0,0 +1,12 @@ +function pt = Point3(varargin) + % Point3 shim + if nargin == 3 && isa(varargin{1}, 'double') + pt = [varargin{1} varargin{2} varargin{3}]'; + elseif nargin == 1 + pt = varargin{1}; + elseif nargin == 0 + pt = [0 0 0]'; + else + error('Arguments do not match any overload of Point3 shim'); + end +end \ No newline at end of file diff --git a/matlab/+gtsam/cylinderSampleProjection.m b/matlab/+gtsam/cylinderSampleProjection.m index 2b913b52d..697a57faa 100644 --- a/matlab/+gtsam/cylinderSampleProjection.m +++ b/matlab/+gtsam/cylinderSampleProjection.m @@ -50,9 +50,9 @@ visible = true; for k = 1:cylinderNum - rayCameraToPoint = pose.translation().between(sampledPoint3).vector(); - rayCameraToCylinder = pose.translation().between(cylinders{k}.centroid).vector(); - rayCylinderToPoint = cylinders{k}.centroid.between(sampledPoint3).vector(); + rayCameraToPoint = pose.translation().between(sampledPoint3); + rayCameraToCylinder = pose.translation().between(cylinders{k}.centroid); + rayCylinderToPoint = cylinders{k}.centroid.between(sampledPoint3); % Condition 1: all points in front of the cylinders' % surfaces are visible diff --git a/matlab/+gtsam/cylinderSampleProjectionStereo.m b/matlab/+gtsam/cylinderSampleProjectionStereo.m index 10409ac6d..58b4140fd 100644 --- a/matlab/+gtsam/cylinderSampleProjectionStereo.m +++ b/matlab/+gtsam/cylinderSampleProjectionStereo.m @@ -25,20 +25,20 @@ % For Cheirality Exception sampledPoint3 = cylinders{i}.Points{j}; sampledPoint3local = pose.transformTo(sampledPoint3); - if sampledPoint3local.z < 0 + if sampledPoint3local(3) < 0 continue; end % measurements - Z.du = K.fx() * K.baseline() / sampledPoint3local.z; - Z.uL = K.fx() * sampledPoint3local.x / sampledPoint3local.z + K.px(); + Z.du = K.fx() * K.baseline() / sampledPoint3local(3); + Z.uL = K.fx() * sampledPoint3local(1) / sampledPoint3local(3) + K.px(); Z.uR = Z.uL + Z.du; - Z.v = K.fy() / sampledPoint3local.z + K.py(); + Z.v = K.fy() / sampledPoint3local(3) + K.py(); % ignore points not visible in the scene - if Z.uL < 0 || Z.uL >= imageSize.x || ... - Z.uR < 0 || Z.uR >= imageSize.x || ... - Z.v < 0 || Z.v >= imageSize.y + if Z.uL < 0 || Z.uL >= imageSize(1) || ... + Z.uR < 0 || Z.uR >= imageSize(1) || ... 
+ Z.v < 0 || Z.v >= imageSize(2) continue; end @@ -54,9 +54,9 @@ visible = true; for k = 1:cylinderNum - rayCameraToPoint = pose.translation().between(sampledPoint3).vector(); - rayCameraToCylinder = pose.translation().between(cylinders{k}.centroid).vector(); - rayCylinderToPoint = cylinders{k}.centroid.between(sampledPoint3).vector(); + rayCameraToPoint = sampledPoint3 - pose.translation(); + rayCameraToCylinder = cylinders{k}.centroid - pose.translation(); + rayCylinderToPoint = sampledPoint3 - cylinders{k}.centroid; % Condition 1: all points in front of the cylinders' % surfaces are visible diff --git a/matlab/+gtsam/cylinderSampling.m b/matlab/+gtsam/cylinderSampling.m index dcaa9c721..dc76295fa 100644 --- a/matlab/+gtsam/cylinderSampling.m +++ b/matlab/+gtsam/cylinderSampling.m @@ -12,8 +12,8 @@ % sample the points for i = 1:pointsNum theta = 2 * pi * rand; - x = radius * cos(theta) + baseCentroid.x; - y = radius * sin(theta) + baseCentroid.y; + x = radius * cos(theta) + baseCentroid(1); + y = radius * sin(theta) + baseCentroid(2); z = height * rand; points3{i,1} = Point3([x,y,z]'); end @@ -22,5 +22,5 @@ cylinder.radius = radius; cylinder.height = height; cylinder.Points = points3; - cylinder.centroid = Point3(baseCentroid.x, baseCentroid.y, height/2); + cylinder.centroid = Point3(baseCentroid(1), baseCentroid(2), height/2); end \ No newline at end of file diff --git a/matlab/+gtsam/plotCamera.m b/matlab/+gtsam/plotCamera.m index d0d1f45b7..986cd9a68 100644 --- a/matlab/+gtsam/plotCamera.m +++ b/matlab/+gtsam/plotCamera.m @@ -1,7 +1,7 @@ function plotCamera(pose, axisLength) hold on - C = pose.translation().vector(); + C = pose.translation(); R = pose.rotation().matrix(); xAxis = C+R(:,1)*axisLength; diff --git a/matlab/+gtsam/plotFlyingResults.m b/matlab/+gtsam/plotFlyingResults.m index 5d4a287c4..202f2409b 100644 --- a/matlab/+gtsam/plotFlyingResults.m +++ b/matlab/+gtsam/plotFlyingResults.m @@ -13,7 +13,7 @@ function plotFlyingResults(pts3d, poses, posesCov, cylinders, options) %% plot all the cylinders and sampled points axis equal -axis([0, options.fieldSize.x, 0, options.fieldSize.y, 0, options.height + 30]); +axis([0, options.fieldSize(1), 0, options.fieldSize(2), 0, options.height + 30]); xlabel('X (m)'); ylabel('Y (m)'); zlabel('Height (m)'); @@ -50,8 +50,8 @@ function plotFlyingResults(pts3d, poses, posesCov, cylinders, options) [X,Y,Z] = cylinder(cylinders{i}.radius, sampleDensity * cylinders{i}.radius * cylinders{i}.height); - X = X + cylinders{i}.centroid.x; - Y = Y + cylinders{i}.centroid.y; + X = X + cylinders{i}.centroid(1); + Y = Y + cylinders{i}.centroid(2); Z = Z * cylinders{i}.height; h_cylinder{i} = surf(X,Y,Z); @@ -76,7 +76,7 @@ function plotFlyingResults(pts3d, poses, posesCov, cylinders, options) %plotCamera(poses{i}, 3); gRp = poses{i}.rotation().matrix(); % rotation from pose to global - C = poses{i}.translation().vector(); + C = poses{i}.translation(); axisLength = 3; xAxis = C+gRp(:,1)*axisLength; @@ -111,14 +111,14 @@ function plotFlyingResults(pts3d, poses, posesCov, cylinders, options) for j = 1:pointSize if ~isempty(pts3d{j}.cov{i}) hold on - h_point{j} = plot3(pts3d{j}.data.x, pts3d{j}.data.y, pts3d{j}.data.z); - h_point_cov{j} = gtsam.covarianceEllipse3D([pts3d{j}.data.x; pts3d{j}.data.y; pts3d{j}.data.z], ... + h_point{j} = plot3(pts3d{j}.data(1), pts3d{j}.data(2), pts3d{j}.data(3)); + h_point_cov{j} = gtsam.covarianceEllipse3D([pts3d{j}.data(1); pts3d{j}.data(2); pts3d{j}.data(3)], ... 
pts3d{j}.cov{i}, options.plot.covarianceScale); end end axis equal - axis([0, options.fieldSize.x, 0, options.fieldSize.y, 0, options.height + 30]); + axis([0, options.fieldSize(1), 0, options.fieldSize(2), 0, options.height + 30]); drawnow @@ -158,7 +158,7 @@ function plotFlyingResults(pts3d, poses, posesCov, cylinders, options) hold on campos([poses{i}.x, poses{i}.y, poses{i}.z]); - camtarget([options.fieldSize.x/2, options.fieldSize.y/2, 0]); + camtarget([options.fieldSize(1)/2, options.fieldSize(2)/2, 0]); camlight(hlight, 'headlight'); if options.writeVideo diff --git a/matlab/+gtsam/plotPoint2.m b/matlab/+gtsam/plotPoint2.m index cd066951d..8d10ecab6 100644 --- a/matlab/+gtsam/plotPoint2.m +++ b/matlab/+gtsam/plotPoint2.m @@ -1,10 +1,10 @@ function plotPoint2(p,color,P) % plotPoint2 shows a Point2, possibly with covariance matrix if size(color,2)==1 - plot(p.x,p.y,[color '*']); + plot(p(1),p(2),[color '*']); else - plot(p.x,p.y,color); + plot(p(1),p(2),color); end if exist('P', 'var') && (~isempty(P)) - gtsam.covarianceEllipse([p.x;p.y],P,color(1)); + gtsam.covarianceEllipse([p(1);p(2)],P,color(1)); end \ No newline at end of file diff --git a/matlab/+gtsam/plotPoint3.m b/matlab/+gtsam/plotPoint3.m index 390b44939..85c84a210 100644 --- a/matlab/+gtsam/plotPoint3.m +++ b/matlab/+gtsam/plotPoint3.m @@ -1,12 +1,12 @@ function plotPoint3(p, color, P) %PLOTPOINT3 Plot a Point3 with an optional covariance matrix if size(color,2)==1 - plot3(p.x,p.y,p.z,[color '*']); + plot3(p(1),p(2),p(3),[color '*']); else - plot3(p.x,p.y,p.z,color); + plot3(p(1),p(2),p(3),color); end if exist('P', 'var') - gtsam.covarianceEllipse3D([p.x;p.y;p.z],P); + gtsam.covarianceEllipse3D([p(1);p(2);p(3)],P); end end diff --git a/matlab/+gtsam/plotPose3.m b/matlab/+gtsam/plotPose3.m index 8c3c7dd76..258e3f776 100644 --- a/matlab/+gtsam/plotPose3.m +++ b/matlab/+gtsam/plotPose3.m @@ -4,7 +4,7 @@ function plotPose3(pose, P, axisLength) % get rotation and translation (center) gRp = pose.rotation().matrix(); % rotation from pose to global -C = pose.translation().vector(); +C = pose.translation(); if ~isempty(axisLength) % draw the camera axes diff --git a/matlab/+gtsam/points2DTrackStereo.m b/matlab/+gtsam/points2DTrackStereo.m index 60c9f1295..04cddb7f7 100644 --- a/matlab/+gtsam/points2DTrackStereo.m +++ b/matlab/+gtsam/points2DTrackStereo.m @@ -38,7 +38,7 @@ %% initialize graph and values initialEstimate = Values; for i = 1:pointsNum - point_j = points3d{i}.data.retract(0.05*randn(3,1)); + point_j = points3d{i}.data + (0.05*randn(3,1)); initialEstimate.insert(symbol('p', i), point_j); end diff --git a/matlab/CMakeLists.txt b/matlab/CMakeLists.txt index 9abd4e31d..52d56a2b5 100644 --- a/matlab/CMakeLists.txt +++ b/matlab/CMakeLists.txt @@ -2,6 +2,10 @@ include(GtsamMatlabWrap) +# Record the root dir for gtsam - needed during external builds, e.g., ROS +set(GTSAM_SOURCE_ROOT_DIR ${GTSAM_SOURCE_DIR}) +message(STATUS "GTSAM_SOURCE_ROOT_DIR: [${GTSAM_SOURCE_ROOT_DIR}]") + # Tests #message(STATUS "Installing Matlab Toolbox") install_matlab_scripts("${GTSAM_SOURCE_ROOT_DIR}/matlab/" "*.m;*.fig") @@ -21,7 +25,7 @@ install_matlab_scripts("${GTSAM_SOURCE_ROOT_DIR}/matlab/" "README-gtsam-toolbox. 
file(GLOB matlab_examples_data_graph "${GTSAM_SOURCE_ROOT_DIR}/examples/Data/*.graph") file(GLOB matlab_examples_data_mat "${GTSAM_SOURCE_ROOT_DIR}/examples/Data/*.mat") file(GLOB matlab_examples_data_txt "${GTSAM_SOURCE_ROOT_DIR}/examples/Data/*.txt") -set(matlab_examples_data ${matlab_examples_data_graph} ${matlab_examples_data_mat} ${matlab_examples_data_txt}) +set(matlab_examples_data ${matlab_examples_data_graph} ${matlab_examples_data_mat} ${matlab_examples_data_txt}) if(GTSAM_BUILD_TYPE_POSTFIXES) foreach(build_type ${CMAKE_CONFIGURATION_TYPES}) string(TOUPPER "${build_type}" build_type_upper) @@ -38,4 +42,3 @@ if(GTSAM_BUILD_TYPE_POSTFIXES) else() install(FILES ${matlab_examples_data} DESTINATION ${GTSAM_TOOLBOX_INSTALL_PATH}/gtsam_examples/Data) endif() - diff --git a/matlab/README.md b/matlab/README.md index 86e7d9fe0..39053364d 100644 --- a/matlab/README.md +++ b/matlab/README.md @@ -4,16 +4,16 @@ http://borg.cc.gatech.edu/projects/gtsam This is the GTSAM MATLAB toolbox, a MATLAB wrapper around the GTSAM C++ library. To build it, enable `GTSAM_INSTALL_MATLAB_TOOLBOX=ON` in CMake. -The interface to the toolbox is generated automatically by the wrap -tool which directly parses C++ header files. The tool generates matlab proxy objects together with all the native functions for wrapping and unwrapping scalar and non scalar types and objects. The interface generated by the wrap tool also redirects the standard output stream (cout) to matlab's console. +The interface to the toolbox is generated automatically by the wrap tool which directly parses C++ header files. +The tool generates matlab proxy objects together with all the native functions for wrapping and unwrapping scalar and non scalar types and objects. +The interface generated by the wrap tool also redirects the standard output stream (cout) to matlab's console. ## Ubuntu -If you have a newer Ubuntu system (later than 10.04), you must make a small modification to your MATLAB installation, due to MATLAB being distributed with an old version of the C++ standard library. Delete or rename all files starting with `libstdc++` in your MATLAB installation directory, in paths: - - /usr/local/MATLAB/[version]/sys/os/[system]/ - /usr/local/MATLAB/[version]/bin/[system]/ +If you have a newer Ubuntu system (later than 10.04), you must make a small modification to your MATLAB installation, due to MATLAB being distributed with an old version of the C++ standard library. Delete or rename all files starting with `libstdc++` in your MATLAB installation directory, in paths: + /usr/local/MATLAB/[version]/sys/os/[system]/ + /usr/local/MATLAB/[version]/bin/[system]/ ## Adding the toolbox to your MATLAB path @@ -37,25 +37,28 @@ export LD_LIBRARY_PATH=/gtsam:$LD_LIBRARY_PATH ### Linker issues -If you compile the MATLAB toolbox and everything compiles smoothly, but when you run any MATLAB script, you get following error messages in MATLAB +If you compile the MATLAB toolbox and everything compiles smoothly, but when you run any MATLAB script, you get any of the following error messages in MATLAB + ``` Invalid MEX-file '/usr/local/gtsam_toolbox/gtsam_wrapper.mexa64': Missing symbol 'mexAtExit' required by '/usr/local/gtsam_toolbox/gtsam_wrapper.mexa64' Missing symbol 'mexCallMATLABWithObject' required by '/usr/local/gtsam_toolbox/gtsam_wrapper.mexa64' ... ``` + run following shell line + ```sh export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libstdc++.so.6 ``` -before you run MATLAB from the same shell. 
-This mainly happens if you have GCC >= 5 and newer version MATLAB like R2017a. +before you run MATLAB from the same shell. +This mainly happens if you have GCC >= 5 and newer version MATLAB like R2017a. ## Trying out the examples -The examples are located in the 'gtsam_examples' subfolder. You may either run them individually at the MATLAB command line, or open the GTSAM example GUI by running 'gtsamExamples'. Example: +The examples are located in the 'gtsam_examples' subfolder. You may either run them individually at the MATLAB command line, or open the GTSAM example GUI by running 'gtsamExamples'. Example: ```matlab >> cd /Users/yourname/toolbox % Change to wherever you installed the toolbox @@ -65,7 +68,7 @@ The examples are located in the 'gtsam_examples' subfolder. You may either run ## Running the unit tests -The GTSAM MATLAB toolbox also has a small set of unit tests located in the gtsam_tests directory. Example: +The GTSAM MATLAB toolbox also has a small set of unit tests located in the gtsam_tests directory. Example: ```matlab >> cd /Users/yourname/toolbox % Change to wherever you installed the toolbox @@ -86,4 +89,4 @@ Tests complete! ## Writing your own code -Coding for the GTSAM MATLAB toolbox is straightforward and very fast once you understand a few basic concepts! Please see the manual to get started. +Coding for the GTSAM MATLAB toolbox is straightforward and very fast once you understand a few basic concepts! Please see the manual to get started. diff --git a/matlab/gtsam_examples/CameraFlyingExample.m b/matlab/gtsam_examples/CameraFlyingExample.m index add2bc75a..a0dfef22a 100644 --- a/matlab/gtsam_examples/CameraFlyingExample.m +++ b/matlab/gtsam_examples/CameraFlyingExample.m @@ -46,7 +46,7 @@ % camera baseline options.camera.baseline = 0.05; % camera focal length -options.camera.f = round(options.camera.resolution.x * options.camera.horizon / ... +options.camera.f = round(options.camera.resolution(1) * options.camera.horizon / ... options.camera.fov); % camera focal baseline options.camera.fB = options.camera.f * options.camera.baseline; @@ -54,15 +54,15 @@ options.camera.disparity = options.camera.fB / options.camera.horizon; % Monocular Camera Calibration options.camera.monoK = Cal3_S2(options.camera.f, options.camera.f, 0, ... - options.camera.resolution.x/2, options.camera.resolution.y/2); + options.camera.resolution(1)/2, options.camera.resolution(2)/2); % Stereo Camera Calibration options.camera.stereoK = Cal3_S2Stereo(options.camera.f, options.camera.f, 0, ... 
- options.camera.resolution.x/2, options.camera.resolution.y/2, options.camera.disparity); + options.camera.resolution(1)/2, options.camera.resolution(2)/2, options.camera.disparity); % write video output options.writeVideo = true; % the testing field size (unit: meter) -options.fieldSize = Point2([100, 100]'); +options.fieldSize = Point2(100, 100); % camera flying speed (unit: meter / second) options.speed = 20; % camera flying height @@ -113,14 +113,14 @@ i = 1; while i <= cylinderNum theta = theta + 2*pi/10; - x = 40 * rand * cos(theta) + options.fieldSize.x/2; - y = 20 * rand * sin(theta) + options.fieldSize.y/2; - baseCentroid{i} = Point2([x, y]'); + x = 40 * rand * cos(theta) + options.fieldSize(1)/2; + y = 20 * rand * sin(theta) + options.fieldSize(2)/2; + baseCentroid{i} = Point2(x, y); % prevent two cylinders interact with each other regenerate = false; for j = 1:i-1 - if i > 1 && baseCentroid{i}.dist(baseCentroid{j}) < options.cylinder.radius * 2 + if i > 1 && norm(baseCentroid{i} - baseCentroid{j}) < options.cylinder.radius * 2 regenerate = true; break; end @@ -146,17 +146,17 @@ angle = 0.1*pi*(rand-0.5); inc_t = Point3(distance * cos(angle), ... distance * sin(angle), 0); - t = t.compose(inc_t); + t = t + inc_t; - if t.x > options.fieldSize.x - 10 || t.y > options.fieldSize.y - 10; + if t(1) > options.fieldSize(1) - 10 || t(2) > options.fieldSize(2) - 10; break; end %t = Point3([(i-1)*(options.fieldSize.x - 10)/options.poseNum + 10, ... % 15, 10]'); camera = PinholeCameraCal3_S2.Lookat(t, ... - Point3(options.fieldSize.x/2, options.fieldSize.y/2, 0), ... - Point3([0,0,1]'), options.camera.monoK); + Point3(options.fieldSize(1)/2, options.fieldSize(2)/2, 0), ... + Point3(0,0,1), options.camera.monoK); cameraPoses{end+1} = camera.pose; end diff --git a/matlab/gtsam_examples/MonocularVOExample.m b/matlab/gtsam_examples/MonocularVOExample.m index 11c4253de..0d09a1487 100644 --- a/matlab/gtsam_examples/MonocularVOExample.m +++ b/matlab/gtsam_examples/MonocularVOExample.m @@ -15,14 +15,14 @@ %% Create two cameras and corresponding essential matrix E aRb = Rot3.Yaw(pi/2); -aTb = Point3 (0.1, 0, 0); +aTb = [.1, 0, 0]'; identity=Pose3; aPb = Pose3(aRb, aTb); cameraA = CalibratedCamera(identity); cameraB = CalibratedCamera(aPb); %% Create 5 points -P = { Point3(0, 0, 1), Point3(-0.1, 0, 1), Point3(0.1, 0, 1), Point3(0, 0.5, 0.5), Point3(0, -0.5, 0.5) }; +P = { [0, 0, 1]', [-0.1, 0, 1]', [0.1, 0, 1]', [0, 0.5, 0.5]', [0, -0.5, 0.5]' }; %% Project points in both cameras for i=1:5 diff --git a/matlab/gtsam_examples/PlanarSLAMExample.m b/matlab/gtsam_examples/PlanarSLAMExample.m index aec933d74..3febc23e6 100644 --- a/matlab/gtsam_examples/PlanarSLAMExample.m +++ b/matlab/gtsam_examples/PlanarSLAMExample.m @@ -71,9 +71,12 @@ plot2DTrajectory(result, [], marginals); plot2DPoints(result, 'b', marginals); -plot([result.atPose2(i1).x; result.atPoint2(j1).x],[result.atPose2(i1).y; result.atPoint2(j1).y], 'c-'); -plot([result.atPose2(i2).x; result.atPoint2(j1).x],[result.atPose2(i2).y; result.atPoint2(j1).y], 'c-'); -plot([result.atPose2(i3).x; result.atPoint2(j2).x],[result.atPose2(i3).y; result.atPoint2(j2).y], 'c-'); +p_j1 = result.atPoint2(j1); +p_j2 = result.atPoint2(j2); + +plot([result.atPose2(i1).x; p_j1(1)],[result.atPose2(i1).y; p_j1(2)], 'c-'); +plot([result.atPose2(i2).x; p_j1(1)],[result.atPose2(i2).y; p_j1(2)], 'c-'); +plot([result.atPose2(i3).x; p_j2(1)],[result.atPose2(i3).y; p_j2(2)], 'c-'); axis([-0.6 4.8 -1 1]) axis equal view(2) diff --git 
a/matlab/gtsam_examples/PlanarSLAMExample_sampling.m b/matlab/gtsam_examples/PlanarSLAMExample_sampling.m index 375ed645c..93979a68a 100644 --- a/matlab/gtsam_examples/PlanarSLAMExample_sampling.m +++ b/matlab/gtsam_examples/PlanarSLAMExample_sampling.m @@ -60,15 +60,18 @@ S{j}=chol(Q{j}); % for sampling end -plot([sample.atPose2(i1).x; sample.atPoint2(j1).x],[sample.atPose2(i1).y; sample.atPoint2(j1).y], 'c-'); -plot([sample.atPose2(i2).x; sample.atPoint2(j1).x],[sample.atPose2(i2).y; sample.atPoint2(j1).y], 'c-'); -plot([sample.atPose2(i3).x; sample.atPoint2(j2).x],[sample.atPose2(i3).y; sample.atPoint2(j2).y], 'c-'); +p_j1 = sample.atPoint2(j1); +p_j2 = sample.atPoint2(j2); + +plot([sample.atPose2(i1).x; p_j1(1)],[sample.atPose2(i1).y; p_j1(2)], 'c-'); +plot([sample.atPose2(i2).x; p_j1(1)],[sample.atPose2(i2).y; p_j1(2)], 'c-'); +plot([sample.atPose2(i3).x; p_j2(1)],[sample.atPose2(i3).y; p_j2(2)], 'c-'); view(2); axis auto; axis equal %% Do Sampling on point 2 N=1000; for s=1:N delta = S{2}*randn(2,1); - proposedPoint = Point2(point{2}.x+delta(1),point{2}.y+delta(2)); + proposedPoint = Point2(point{2} + delta); plotPoint2(proposedPoint,'k.') end \ No newline at end of file diff --git a/matlab/gtsam_examples/Pose2SLAMwSPCG.m b/matlab/gtsam_examples/Pose2SLAMwSPCG.m index 67f22fe1d..2df7efe2f 100644 --- a/matlab/gtsam_examples/Pose2SLAMwSPCG.m +++ b/matlab/gtsam_examples/Pose2SLAMwSPCG.m @@ -54,7 +54,7 @@ %% Optimize using Levenberg-Marquardt optimization with SubgraphSolver params = gtsam.LevenbergMarquardtParams; subgraphParams = gtsam.SubgraphSolverParameters; -params.setLinearSolverType('CONJUGATE_GRADIENT'); +params.setLinearSolverType('ITERATIVE'); params.setIterativeParams(subgraphParams); optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate); result = optimizer.optimize(); diff --git a/matlab/gtsam_examples/SBAExample.m b/matlab/gtsam_examples/SBAExample.m index 584ace09a..f94a2ee4e 100644 --- a/matlab/gtsam_examples/SBAExample.m +++ b/matlab/gtsam_examples/SBAExample.m @@ -47,7 +47,7 @@ %% Add Gaussian priors for a pose and a landmark to constrain the system cameraPriorNoise = noiseModel.Diagonal.Sigmas(cameraNoiseSigmas); firstCamera = PinholeCameraCal3_S2(truth.cameras{1}.pose, truth.K); -graph.add(PriorFactorSimpleCamera(symbol('c',1), firstCamera, cameraPriorNoise)); +graph.add(PriorFactorPinholeCameraCal3_S2(symbol('c',1), firstCamera, cameraPriorNoise)); pointPriorNoise = noiseModel.Isotropic.Sigma(3,pointNoiseSigma); graph.add(PriorFactorPoint3(symbol('p',1), truth.points{1}, pointPriorNoise)); @@ -64,7 +64,7 @@ initialEstimate.insert(symbol('c',i), camera_i); end for j=1:size(truth.points,2) - point_j = Point3(truth.points{j}.vector() + 0.1*randn(3,1)); + point_j = Point3(truth.points{j} + 0.1*randn(3,1)); initialEstimate.insert(symbol('p',j), point_j); end initialEstimate.print(sprintf('\nInitial estimate:\n ')); diff --git a/matlab/gtsam_examples/SFMExample.m b/matlab/gtsam_examples/SFMExample.m index 6700e90d2..a57da929c 100644 --- a/matlab/gtsam_examples/SFMExample.m +++ b/matlab/gtsam_examples/SFMExample.m @@ -58,7 +58,7 @@ initialEstimate.insert(symbol('x',i), pose_i); end for j=1:size(truth.points,2) - point_j = Point3(truth.points{j}.vector() + 0.1*randn(3,1)); + point_j = Point3(truth.points{j} + 0.1*randn(3,1)); initialEstimate.insert(symbol('p',j), point_j); end initialEstimate.print(sprintf('\nInitial estimate:\n ')); diff --git a/matlab/gtsam_examples/StereoVOExample.m b/matlab/gtsam_examples/StereoVOExample.m index b437ca80c..e7345fcf2 
100644 --- a/matlab/gtsam_examples/StereoVOExample.m +++ b/matlab/gtsam_examples/StereoVOExample.m @@ -51,10 +51,10 @@ initialEstimate = Values; initialEstimate.insert(x1, first_pose); % noisy estimate for pose 2 -initialEstimate.insert(x2, Pose3(Rot3(), Point3(0.1,-.1,1.1))); -initialEstimate.insert(l1, Point3( 1, 1, 5)); -initialEstimate.insert(l2, Point3(-1, 1, 5)); -initialEstimate.insert(l3, Point3( 0,-.5, 5)); +initialEstimate.insert(x2, Pose3(Rot3(), [0.1, -.1, 1.1]')); +initialEstimate.insert(l1, [ 1, 1, 5]'); +initialEstimate.insert(l2, [-1, 1, 5]'); +initialEstimate.insert(l3, [ 0,-.5, 5]'); %% optimize fprintf(1,'Optimizing\n'); tic diff --git a/matlab/gtsam_examples/VisualizeMEstimators.m b/matlab/gtsam_examples/VisualizeMEstimators.m index 8a0485334..ce505df5d 100644 --- a/matlab/gtsam_examples/VisualizeMEstimators.m +++ b/matlab/gtsam_examples/VisualizeMEstimators.m @@ -48,7 +48,7 @@ function plot_m_estimator(x, model, plot_title, fig_id, filename) rho = zeros(size(x)); for i = 1:size(x, 2) w(i) = model.weight(x(i)); - rho(i) = model.residual(x(i)); + rho(i) = model.loss(x(i)); end psi = w .* x; diff --git a/matlab/gtsam_tests/testPlanarSLAMExample.m b/matlab/gtsam_tests/testPlanarSLAMExample.m index d3cab5d1f..e0b4dbfc8 100644 --- a/matlab/gtsam_tests/testPlanarSLAMExample.m +++ b/matlab/gtsam_tests/testPlanarSLAMExample.m @@ -66,4 +66,4 @@ point_1 = result.atPoint2(symbol('l',1)); marginals.marginalCovariance(symbol('l',1)); -CHECK('point_1.equals(Point2(2,2),1e-4)',point_1.equals(Point2(2,2),1e-4)); +CHECK('point_1.equals(Point2(2,2),1e-4)',norm(point_1 - Point2(2,2)) < 1e-4); diff --git a/matlab/gtsam_tests/testSFMExample.m b/matlab/gtsam_tests/testSFMExample.m index 985cbdb2c..a1f63c3a7 100644 --- a/matlab/gtsam_tests/testSFMExample.m +++ b/matlab/gtsam_tests/testSFMExample.m @@ -69,5 +69,5 @@ for j=1:size(truth.points,2) point_j = result.atPoint3(symbol('p',j)); - CHECK('point_j.equals(truth.points{j},1e-5)',point_j.equals(truth.points{j},1e-5)) + CHECK('point_j.equals(truth.points{j},1e-5)',norm(point_j - truth.points{j}) < 1e-5) end diff --git a/matlab/gtsam_tests/testSerialization.m b/matlab/gtsam_tests/testSerialization.m index f8b21b7ad..e55822c1c 100644 --- a/matlab/gtsam_tests/testSerialization.m +++ b/matlab/gtsam_tests/testSerialization.m @@ -28,10 +28,6 @@ pose1ds = Pose2.string_deserialize(serialized_pose1); CHECK('pose1ds.equals(pose1, 1e-9)', pose1ds.equals(pose1, 1e-9)); -serialized_landmark1 = landmark1.string_serialize(); -landmark1ds = Point2.string_deserialize(serialized_landmark1); -CHECK('landmark1ds.equals(landmark1, 1e-9)', landmark1ds.equals(landmark1, 1e-9)); - %% Create and serialize Values values = Values; values.insert(i1, pose1); diff --git a/matlab/gtsam_tests/testStereoVOExample.m b/matlab/gtsam_tests/testStereoVOExample.m index 218d0ace1..c721244ba 100644 --- a/matlab/gtsam_tests/testStereoVOExample.m +++ b/matlab/gtsam_tests/testStereoVOExample.m @@ -65,4 +65,4 @@ CHECK('pose_x1.equals(first_pose,1e-4)',pose_x1.equals(first_pose,1e-4)); point_l1 = result.atPoint3(l1); -CHECK('point_1.equals(expected_l1,1e-4)',point_l1.equals(expected_l1,1e-4)); \ No newline at end of file +CHECK('point_1.equals(expected_l1,1e-4)',norm(point_l1 - expected_l1) < 1e-4); \ No newline at end of file diff --git a/matlab/gtsam_tests/testValues.m b/matlab/gtsam_tests/testValues.m index fe2cd30fe..48bc83f2c 100644 --- a/matlab/gtsam_tests/testValues.m +++ b/matlab/gtsam_tests/testValues.m @@ -5,8 +5,8 @@ E = EssentialMatrix(Rot3,Unit3); tol = 1e-9; 
-values.insert(0, Point2); -values.insert(1, Point3); +values.insert(0, Point2(0, 0)); +values.insert(1, Point3(0, 0, 0)); values.insert(2, Rot2); values.insert(3, Pose2); values.insert(4, Rot3); @@ -21,8 +21,8 @@ values.insert(11, [1;2;3]); values.insert(12, [1 2;3 4]); -EXPECT('at',values.atPoint2(0).equals(Point2,tol)); -EXPECT('at',values.atPoint3(1).equals(Point3,tol)); +EXPECT('at',values.atPoint2(0) == Point2(0, 0)); +EXPECT('at',values.atPoint3(1) == Point3(0, 0, 0)); EXPECT('at',values.atRot2(2).equals(Rot2,tol)); EXPECT('at',values.atPose2(3).equals(Pose2,tol)); EXPECT('at',values.atRot3(4).equals(Rot3,tol)); diff --git a/matlab/gtsam_tests/testVisualISAMExample.m b/matlab/gtsam_tests/testVisualISAMExample.m index 223e823a6..f75942ea7 100644 --- a/matlab/gtsam_tests/testVisualISAMExample.m +++ b/matlab/gtsam_tests/testVisualISAMExample.m @@ -51,5 +51,5 @@ for j=1:size(truth.points,2) point_j = result.atPoint3(symbol('l',j)); - CHECK('point_j.equals(truth.points{j},1e-5)',point_j.equals(truth.points{j},1e-5)) + CHECK('point_j.equals(truth.points{j},1e-5)',norm(point_j - truth.points{j}) < 1e-5) end diff --git a/matlab/unstable_examples/+imuSimulator/IMUComparison.m b/matlab/unstable_examples/+imuSimulator/IMUComparison.m index 68e20bb25..871f023ef 100644 --- a/matlab/unstable_examples/+imuSimulator/IMUComparison.m +++ b/matlab/unstable_examples/+imuSimulator/IMUComparison.m @@ -28,19 +28,19 @@ %% Prepare data structures for actual trajectory and estimates % Actual trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = currentPoseGlobal.rotation.matrix; % Trajectory estimate (integrated in the navigation frame) positionsIMUnav = zeros(3, length(times)+1); -positionsIMUnav(:,1) = currentPoseGlobalIMUbody.translation.vector; +positionsIMUnav(:,1) = currentPoseGlobalIMUbody.translation; posesIMUnav(1).p = positionsIMUnav(:,1); posesIMUnav(1).R = poses(1).R; % Trajectory estimate (integrated in the body frame) positionsIMUbody = zeros(3, length(times)+1); -positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation.vector; +positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation; posesIMUbody(1).p = positionsIMUbody(:,1); posesIMUbody(1).R = poses(1).R; @@ -120,9 +120,9 @@ currentPoseGlobalIMUnav, currentVelocityGlobalIMUnav, acc_omega, deltaT); %% Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; - positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation.vector; - positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation.vector; + positions(:,i) = currentPoseGlobal.translation; + positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation; + positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation; % - poses(i).p = positions(:,i); posesIMUbody(i).p = positionsIMUbody(:,i); diff --git a/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m b/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m index c589bea32..450697de0 100644 --- a/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m +++ b/matlab/unstable_examples/+imuSimulator/IMUComparison_with_cov.m @@ -28,7 +28,7 @@ %% Prepare data structures for actual trajectory and estimates % Actual trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = 
currentPoseGlobal.rotation.matrix; @@ -112,7 +112,7 @@ end %% Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; + positions(:,i) = currentPoseGlobal.translation; i = i + 1; end diff --git a/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m b/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m index c86e40a21..0d8abad2c 100644 --- a/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m +++ b/matlab/unstable_examples/+imuSimulator/calculateIMUMeas_coriolis.m @@ -7,7 +7,7 @@ % Acceleration measurement (in this simple toy example no other forces % act on the body and the only acceleration is the centripetal Coriolis acceleration) -measuredAcc = Point3(cross(omega1Body, velocity1Body)).vector; +measuredAcc = Point3(cross(omega1Body, velocity1Body)); acc_omega = [ measuredAcc; measuredOmega ]; end diff --git a/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m b/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m index 534b9365e..5ed1fc516 100644 --- a/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m +++ b/matlab/unstable_examples/+imuSimulator/calculateIMUMeasurement.m @@ -6,16 +6,16 @@ % Calculate gyro measured rotation rate by transforming body rotation rate % into the IMU frame. -measuredOmega = imuFrame.rotation.unrotate(Point3(omega1Body)).vector; +measuredOmega = imuFrame.rotation.unrotate(Point3(omega1Body)); % Transform both velocities into IMU frame, accounting for the velocity % induced by rigid body rotation on a lever arm (Coriolis effect). velocity1inertial = imuFrame.rotation.unrotate( ... - Point3(velocity1Body + cross(omega1Body, imuFrame.translation.vector))).vector; + Point3(velocity1Body + cross(omega1Body, imuFrame.translation))); imu2in1 = Rot3.Expmap(measuredOmega * deltaT); velocity2inertial = imu2in1.rotate(imuFrame.rotation.unrotate( ... 
- Point3(velocity2Body + cross(omega2Body, imuFrame.translation.vector)))).vector; + Point3(velocity2Body + cross(omega2Body, imuFrame.translation)))); % Acceleration in IMU frame measuredAcc = (velocity2inertial - velocity1inertial) / deltaT; diff --git a/matlab/unstable_examples/+imuSimulator/coriolisExample.m b/matlab/unstable_examples/+imuSimulator/coriolisExample.m index 35d27aa73..ee4deb433 100644 --- a/matlab/unstable_examples/+imuSimulator/coriolisExample.m +++ b/matlab/unstable_examples/+imuSimulator/coriolisExample.m @@ -190,13 +190,13 @@ newFactors.add(PriorFactorConstantBias(currentBiasKey, zeroBias, sigma_init_b)); % Store data - positionsInFixedGT(:,1) = currentPoseFixedGT.translation.vector; + positionsInFixedGT(:,1) = currentPoseFixedGT.translation; velocityInFixedGT(:,1) = currentVelocityFixedGT; - positionsInRotatingGT(:,1) = currentPoseRotatingGT.translation.vector; - %velocityInRotatingGT(:,1) = currentPoseRotatingGT.velocity.vector; - positionsEstimates(:,i) = currentPoseEstimate.translation.vector; - velocitiesEstimates(:,i) = currentVelocityEstimate.vector; - currentRotatingFrameForPlot(1).p = currentRotatingFrame.translation.vector; + positionsInRotatingGT(:,1) = currentPoseRotatingGT.translation; + %velocityInRotatingGT(:,1) = currentPoseRotatingGT.velocity; + positionsEstimates(:,i) = currentPoseEstimate.translation; + velocitiesEstimates(:,i) = currentVelocityEstimate; + currentRotatingFrameForPlot(1).p = currentRotatingFrame.translation; currentRotatingFrameForPlot(1).R = currentRotatingFrame.rotation.matrix; else @@ -204,18 +204,18 @@ % Update the position and velocity % x_t = x_0 + v_0*dt + 1/2*a_0*dt^2 % v_t = v_0 + a_0*dt - currentPositionFixedGT = Point3(currentPoseFixedGT.translation.vector ... + currentPositionFixedGT = Point3(currentPoseFixedGT.translation ... + currentVelocityFixedGT * deltaT + 0.5 * accelFixed * deltaT * deltaT); currentVelocityFixedGT = currentVelocityFixedGT + accelFixed * deltaT; currentPoseFixedGT = Pose3(Rot3, currentPositionFixedGT); % constant orientation % Rotate pose in fixed frame to get pose in rotating frame - previousPositionRotatingGT = currentPoseRotatingGT.translation.vector; + previousPositionRotatingGT = currentPoseRotatingGT.translation; currentRotatingFrame = currentRotatingFrame.compose(changePoseRotatingFrame); inverseCurrentRotatingFrame = (currentRotatingFrame.inverse); currentPoseRotatingGT = inverseCurrentRotatingFrame.compose(currentPoseFixedGT); - currentPositionRotatingGT = currentPoseRotatingGT.translation.vector; + currentPositionRotatingGT = currentPoseRotatingGT.translation; % Get velocity in rotating frame by treating it like a position and using compose % Maybe Luca knows a better way to do this within GTSAM. 
@@ -230,11 +230,11 @@ % - 0.5 * accelFixed * deltaT * deltaT) / deltaT + accelFixed * deltaT; % Store GT (ground truth) poses - positionsInFixedGT(:,i) = currentPoseFixedGT.translation.vector; + positionsInFixedGT(:,i) = currentPoseFixedGT.translation; velocityInFixedGT(:,i) = currentVelocityFixedGT; - positionsInRotatingGT(:,i) = currentPoseRotatingGT.translation.vector; + positionsInRotatingGT(:,i) = currentPoseRotatingGT.translation; velocityInRotatingGT(:,i) = currentVelocityRotatingGT; - currentRotatingFrameForPlot(i).p = currentRotatingFrame.translation.vector; + currentRotatingFrameForPlot(i).p = currentRotatingFrame.translation; currentRotatingFrameForPlot(i).R = currentRotatingFrame.rotation.matrix; %% Estimate trajectory in rotating frame using GTSAM (ground truth measurements) @@ -303,9 +303,9 @@ currentVelocityEstimate = isam.calculateEstimate(currentVelKey); currentBias = isam.calculateEstimate(currentBiasKey); - positionsEstimates(:,i) = currentPoseEstimate.translation.vector; - velocitiesEstimates(:,i) = currentVelocityEstimate.vector; - biasEstimates(:,i) = currentBias.vector; + positionsEstimates(:,i) = currentPoseEstimate.translation; + velocitiesEstimates(:,i) = currentVelocityEstimate; + biasEstimates(:,i) = currentBias; % In matrix form: R_error = R_gt'*R_estimate % Perform Logmap on the rotation matrix to get a vector diff --git a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m index 2eddf75ee..64ba36d3b 100644 --- a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m +++ b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisBetween.m @@ -151,14 +151,14 @@ if options.includeCameraFactors b = [-1000 2000 -2000 2000 -30 30]; for i = 1:size(metadata.camera.gtLandmarkPoints,2) - p = metadata.camera.gtLandmarkPoints(i).vector; + p = metadata.camera.gtLandmarkPoints(i); if(p(1) > b(1) && p(1) < b(2) && p(2) > b(3) && p(2) < b(4) && p(3) > b(5) && p(3) < b(6)) plot3(p(1), p(2), p(3), 'k+'); end end pointsToPlot = metadata.camera.gtLandmarkPoints(find(projectionFactorSeenBy > 0)); for i = 1:length(pointsToPlot) - p = pointsToPlot(i).vector; + p = pointsToPlot(i); plot3(p(1), p(2), p(3), 'gs', 'MarkerSize', 10); end end @@ -233,9 +233,9 @@ for i=0:options.trajectoryLength % compute estimation errors currentPoseKey = symbol('x', i); - gtPosition = gtValues.at(currentPoseKey).translation.vector; - estPosition = estimate.at(currentPoseKey).translation.vector; - estR = estimate.at(currentPoseKey).rotation.matrix; + gtPosition = gtValues.atPose3(currentPoseKey).translation; + estPosition = estimate.atPose3(currentPoseKey).translation; + estR = estimate.atPose3(currentPoseKey).rotation.matrix; errPosition = estPosition - gtPosition; % compute covariances: diff --git a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m index 00ae4b9c2..07f146dcb 100644 --- a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m +++ b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateFactorGraph.m @@ -14,7 +14,7 @@ for i=0:length(measurements) % Get the current pose currentPoseKey = symbol('x', i); - currentPose = values.at(currentPoseKey); + currentPose = values.atPose3(currentPoseKey); if i==0 %% first time step, add priors @@ -26,11 +26,11 @@ % IMU velocity and bias priors if options.includeIMUFactors == 1 currentVelKey = symbol('v', 0); - currentVel = 
values.at(currentVelKey).vector; + currentVel = values.atPoint3(currentVelKey); graph.add(PriorFactorLieVector(currentVelKey, LieVector(currentVel), noiseModels.noiseVel)); currentBiasKey = symbol('b', 0); - currentBias = values.at(currentBiasKey); + currentBias = values.atPoint3(currentBiasKey); graph.add(PriorFactorConstantBias(currentBiasKey, currentBias, noiseModels.noisePriorBias)); end @@ -155,7 +155,7 @@ if options.includeCameraFactors == 1 for j = 1:length(measurements(i).landmarks) cameraMeasurmentNoise = measurementNoise.cameraNoiseVector .* randn(2,1); - cameraPixelMeasurement = measurements(i).landmarks(j).vector; + cameraPixelMeasurement = measurements(i).landmarks(j); % Only add the measurement if it is in the image frame (based on calibration) if(cameraPixelMeasurement(1) > 0 && cameraPixelMeasurement(2) > 0 ... && cameraPixelMeasurement(1) < 2*metadata.camera.calibration.px ... diff --git a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m index 195b7ff69..3d8a9b5d2 100644 --- a/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m +++ b/matlab/unstable_examples/+imuSimulator/covarianceAnalysisCreateTrajectory.m @@ -40,7 +40,7 @@ %% gt Between measurements if options.includeBetweenFactors == 1 && i > 0 - prevPose = values.at(currentPoseKey - 1); + prevPose = values.atPose3(currentPoseKey - 1); deltaPose = prevPose.between(currentPose); measurements(i).deltaVector = Pose3.Logmap(deltaPose); end @@ -65,7 +65,7 @@ IMUdeltaPose = IMUPose1.between(IMUPose2); IMUdeltaPoseVector = Pose3.Logmap(IMUdeltaPose); IMUdeltaRotVector = IMUdeltaPoseVector(1:3); - IMUdeltaPositionVector = IMUPose2.translation.vector - IMUPose1.translation.vector; % translation in absolute frame + IMUdeltaPositionVector = IMUPose2.translation - IMUPose1.translation; % translation in absolute frame measurements(i).imu(j).deltaT = deltaT; @@ -88,7 +88,7 @@ %% gt GPS measurements if options.includeGPSFactors == 1 && i > 0 - gpsPositionVector = imuSimulator.getPoseFromGtScenario(gtScenario,scenarioInd).translation.vector; + gpsPositionVector = imuSimulator.getPoseFromGtScenario(gtScenario,scenarioInd).translation; measurements(i).gpsPositionVector = gpsPositionVector; end diff --git a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m index 3f72f1215..2de1e1103 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m +++ b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory.m @@ -5,9 +5,9 @@ import gtsam.*; imu2in1 = Rot3.Expmap(acc_omegaIMU(4:6) * deltaT); -accelGlobal = initialPoseGlobal.rotation().rotate(Point3(acc_omegaIMU(1:3))).vector; +accelGlobal = initialPoseGlobal.rotation().rotate(Point3(acc_omegaIMU(1:3))); -finalPosition = Point3(initialPoseGlobal.translation.vector ... +finalPosition = Point3(initialPoseGlobal.translation ... 
+ initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); finalVelocityGlobal = initialVelocityGlobal + accelGlobal * deltaT; finalRotation = initialPoseGlobal.rotation.compose(imu2in1); diff --git a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m index 50b223060..bec2d760d 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m +++ b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_bodyFrame.m @@ -3,7 +3,7 @@ % Before integrating in the body frame we need to compensate for the Coriolis % effect -acc_body = acc_omegaIMU(1:3) - Point3(cross(acc_omegaIMU(4:6), velocity1Body)).vector; +acc_body = acc_omegaIMU(1:3) - Point3(cross(acc_omegaIMU(4:6), velocity1Body)); % after compensating for coriolis this will be essentially zero % since we are moving at constant body velocity @@ -16,8 +16,8 @@ finalVelocityBody = velocity1Body + acc_body * deltaT; %% Express the integrated quantities in the global frame -finalVelocityGlobal = initialVelocityGlobal + (initialPoseGlobal.rotation().rotate(Point3(finalVelocityBody)).vector() ); -finalPosition = initialPoseGlobal.translation().vector() + initialPoseGlobal.rotation().rotate( Point3(finalPositionBody)).vector() ; +finalVelocityGlobal = initialVelocityGlobal + (initialPoseGlobal.rotation().rotate(Point3(finalVelocityBody)) ); +finalPosition = initialPoseGlobal.translation() + initialPoseGlobal.rotation().rotate( Point3(finalPositionBody)) ; finalRotation = initialPoseGlobal.rotation.compose(imu2in1); % Include position and rotation in a pose finalPose = Pose3(finalRotation, Point3(finalPosition) ); diff --git a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m index b919520ac..ea851315f 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m +++ b/matlab/unstable_examples/+imuSimulator/integrateIMUTrajectory_navFrame.m @@ -9,8 +9,8 @@ intermediateRotation = initialPoseGlobal.rotation.compose( Rot3.Expmap(acc_omegaIMU(4:6) * deltaT/2 )); % Integrate positions (equation (1) in Lupton) -accelGlobal = intermediateRotation.rotate(Point3(acc_omegaIMU(1:3))).vector; -finalPosition = Point3(initialPoseGlobal.translation.vector ... +accelGlobal = intermediateRotation.rotate(Point3(acc_omegaIMU(1:3))); +finalPosition = Point3(initialPoseGlobal.translation ... 
+ initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); finalVelocityGlobal = initialVelocityGlobal + accelGlobal * deltaT; diff --git a/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m b/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m index e51b622b0..a704342ae 100644 --- a/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m +++ b/matlab/unstable_examples/+imuSimulator/integrateTrajectory.m @@ -6,16 +6,16 @@ % Rotation: R^1_2 body2in1 = Rot3.Expmap(omega1Body * deltaT); % Velocity 2 in frame 1: v^1_2 = R^1_2 v^2_2 -velocity2inertial = body2in1.rotate(Point3(velocity2Body)).vector; +velocity2inertial = body2in1.rotate(Point3(velocity2Body)); % Acceleration: a^1 = (v^1_2 - v^1_1)/dt accelBody1 = (velocity2inertial - velocity1Body) / deltaT; % Velocity 1 in frame W: v^W_1 = R^W_1 v^1_1 -initialVelocityGlobal = initialPose.rotation().rotate(Point3(velocity1Body)).vector; +initialVelocityGlobal = initialPose.rotation().rotate(Point3(velocity1Body)); % Acceleration in frame W: a^W = R^W_1 a^1 -accelGlobal = initialPose.rotation().rotate(Point3(accelBody1)).vector; +accelGlobal = initialPose.rotation().rotate(Point3(accelBody1)); -finalPosition = Point3(initialPose.translation.vector + initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); +finalPosition = Point3(initialPose.translation + initialVelocityGlobal * deltaT + 0.5 * accelGlobal * deltaT * deltaT); finalVelocityGlobal = initialVelocityGlobal + accelGlobal * deltaT; finalRotation = initialPose.rotation.compose(body2in1); finalPose = Pose3(finalRotation, finalPosition); diff --git a/matlab/unstable_examples/+imuSimulator/test1onestep.m b/matlab/unstable_examples/+imuSimulator/test1onestep.m index 883569849..cb66d23d6 100644 --- a/matlab/unstable_examples/+imuSimulator/test1onestep.m +++ b/matlab/unstable_examples/+imuSimulator/test1onestep.m @@ -10,7 +10,7 @@ velocity = [1;0;0]; R = Rot3.Expmap(omega * deltaT); -velocity2body = R.unrotate(Point3(velocity)).vector; +velocity2body = R.unrotate(Point3(velocity)); acc_omegaExpected = [-0.01; 0; 0; 0; 0; 0.1]; acc_omegaActual = imuSimulator.calculateIMUMeasurement(omega, omega, velocity, velocity2body, deltaT, Pose3(Rot3, Point3([1;0;0]))); disp('IMU measurement discrepancy:'); @@ -40,7 +40,7 @@ initialPose = Pose3; initialVelocity = velocity; finalPoseExpected = Pose3.Expmap([ omega; velocity ] * deltaT); -finalVelocityExpected = finalPoseExpected.rotation.rotate(Point3(velocity)).vector; +finalVelocityExpected = finalPoseExpected.rotation.rotate(Point3(velocity)); [ finalPoseActual, finalVelocityActual ] = imuSimulator.integrateTrajectory(initialPose, omega, velocity, velocity, deltaT); disp('Final pose discrepancy:'); disp(finalPoseExpected.between(finalPoseActual).matrix); diff --git a/matlab/unstable_examples/+imuSimulator/test2constglobal.m b/matlab/unstable_examples/+imuSimulator/test2constglobal.m index 19956d08a..6ab35d50b 100644 --- a/matlab/unstable_examples/+imuSimulator/test2constglobal.m +++ b/matlab/unstable_examples/+imuSimulator/test2constglobal.m @@ -21,12 +21,12 @@ i = 2; for t = times - velocity1body = currentPoseGlobal.rotation.unrotate(Point3(currentVelocityGlobal)).vector; + velocity1body = currentPoseGlobal.rotation.unrotate(Point3(currentVelocityGlobal)); R = Rot3.Expmap(omega * deltaT); - velocity2body = currentPoseGlobal.rotation.compose(R).unrotate(Point3(currentVelocityGlobal)).vector; + velocity2body = currentPoseGlobal.rotation.compose(R).unrotate(Point3(currentVelocityGlobal)); [ 
currentPoseGlobal, currentVelocityGlobal ] = imuSimulator.integrateTrajectory(currentPoseGlobal, omega, velocity1body, velocity2body, deltaT); - positions(:,i) = currentPoseGlobal.translation.vector; + positions(:,i) = currentPoseGlobal.translation; i = i + 1; end diff --git a/matlab/unstable_examples/+imuSimulator/test3constbody.m b/matlab/unstable_examples/+imuSimulator/test3constbody.m index b3ee2edfc..8ee14ab78 100644 --- a/matlab/unstable_examples/+imuSimulator/test3constbody.m +++ b/matlab/unstable_examples/+imuSimulator/test3constbody.m @@ -26,27 +26,27 @@ currentVelocityGlobal = velocity; % Initial state (IMU) currentPoseGlobalIMU = Pose3; %currentPoseGlobal.compose(IMUinBody); -%currentVelocityGlobalIMU = IMUinBody.rotation.unrotate(Point3(velocity)).vector; % no Coriolis here? +%currentVelocityGlobalIMU = IMUinBody.rotation.unrotate(Point3(velocity)); % no Coriolis here? currentVelocityGlobalIMU = IMUinBody.rotation.unrotate( ... - Point3(velocity + cross(omega, IMUinBody.translation.vector))).vector; + Point3(velocity + cross(omega, IMUinBody.translation))); % Positions % body trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = currentPoseGlobal.rotation.matrix; % Expected IMU trajectory (from body trajectory and lever arm) positionsIMUe = zeros(3, length(times)+1); -positionsIMUe(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation.vector; +positionsIMUe(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation; posesIMUe(1).p = positionsIMUe(:,1); posesIMUe(1).R = poses(1).R * IMUinBody.rotation.matrix; % Integrated IMU trajectory (from IMU measurements) positionsIMU = zeros(3, length(times)+1); -positionsIMU(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation.vector; +positionsIMU(:,1) = IMUinBody.compose(currentPoseGlobalIMU).translation; posesIMU(1).p = positionsIMU(:,1); posesIMU(1).R = IMUinBody.compose(currentPoseGlobalIMU).rotation.matrix; @@ -62,9 +62,9 @@ currentPoseGlobalIMU, currentVelocityGlobalIMU, acc_omega, deltaT); % Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; - positionsIMUe(:,i) = currentPoseGlobal.translation.vector + currentPoseGlobal.rotation.matrix * IMUinBody.translation.vector; - positionsIMU(:,i) = IMUinBody.compose(currentPoseGlobalIMU).translation.vector; + positions(:,i) = currentPoseGlobal.translation; + positionsIMUe(:,i) = currentPoseGlobal.translation + currentPoseGlobal.rotation.matrix * IMUinBody.translation; + positionsIMU(:,i) = IMUinBody.compose(currentPoseGlobalIMU).translation; poses(i).p = positions(:,i); posesIMUe(i).p = positionsIMUe(:,i); diff --git a/matlab/unstable_examples/+imuSimulator/test4circle.m b/matlab/unstable_examples/+imuSimulator/test4circle.m index 22ee175dd..ab2c546db 100644 --- a/matlab/unstable_examples/+imuSimulator/test4circle.m +++ b/matlab/unstable_examples/+imuSimulator/test4circle.m @@ -34,19 +34,19 @@ %% Prepare data structures for actual trajectory and estimates % Actual trajectory positions = zeros(3, length(times)+1); -positions(:,1) = currentPoseGlobal.translation.vector; +positions(:,1) = currentPoseGlobal.translation; poses(1).p = positions(:,1); poses(1).R = currentPoseGlobal.rotation.matrix; % Trajectory estimate (integrated in the navigation frame) positionsIMUnav = zeros(3, length(times)+1); -positionsIMUnav(:,1) = currentPoseGlobalIMUbody.translation.vector; 
+positionsIMUnav(:,1) = currentPoseGlobalIMUbody.translation; posesIMUnav(1).p = positionsIMUnav(:,1); posesIMUnav(1).R = poses(1).R; % Trajectory estimate (integrated in the body frame) positionsIMUbody = zeros(3, length(times)+1); -positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation.vector; +positionsIMUbody(:,1) = currentPoseGlobalIMUbody.translation; posesIMUbody(1).p = positionsIMUbody(:,1); posesIMUbody(1).R = poses(1).R; @@ -72,9 +72,9 @@ currentPoseGlobalIMUnav, currentVelocityGlobalIMUnav, acc_omega, deltaT); %% Store data in some structure for statistics and plots - positions(:,i) = currentPoseGlobal.translation.vector; - positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation.vector; - positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation.vector; + positions(:,i) = currentPoseGlobal.translation; + positionsIMUbody(:,i) = currentPoseGlobalIMUbody.translation; + positionsIMUnav(:,i) = currentPoseGlobalIMUnav.translation; % - poses(i).p = positions(:,i); posesIMUbody(i).p = positionsIMUbody(:,i); diff --git a/matlab/unstable_examples/FlightCameraTransformIMU.m b/matlab/unstable_examples/FlightCameraTransformIMU.m index 9a8a27344..d2f2bc34d 100644 --- a/matlab/unstable_examples/FlightCameraTransformIMU.m +++ b/matlab/unstable_examples/FlightCameraTransformIMU.m @@ -120,7 +120,7 @@ end % current ground-truth position indicator - h_cursor = plot3(a1, pose_t.x,pose_t.y,pose_t.z,'*'); + h_cursor = plot3(a1, pose_t(1),pose_t(2),pose_t(3),'*'); camera_pose = pose.compose(camera_transform); @@ -198,7 +198,7 @@ if ~result.exists(lKey) p = landmarks.atPoint3(lKey); n = normrnd(0,landmark_noise,3,1); - noisy_landmark = Point3(p.x()+n(1),p.y()+n(2),p.z()+n(3)); + noisy_landmark = p + n; initial.insert(lKey, noisy_landmark); % and add a prior since its position is known @@ -245,32 +245,33 @@ initial = Values; fg = NonlinearFactorGraph; - currentVelocityGlobal = result.at(currentVelKey); - currentBias = result.at(currentBiasKey); + currentVelocityGlobal = result.atPoint3(currentVelKey); + currentBias = result.atConstantBias(currentBiasKey); %% plot current pose result - isam_pose = result.at(xKey); + isam_pose = result.atPose3(xKey); pose_t = isam_pose.translation(); if exist('h_result','var') delete(h_result); end - h_result = plot3(a1, pose_t.x,pose_t.y,pose_t.z,'^b', 'MarkerSize', 10); + h_result = plot3(a1, pose_t(1),pose_t(2),pose_t(3),'^b', 'MarkerSize', 10); title(a1, sprintf('Step %d', i)); if exist('h_text1(1)', 'var') delete(h_text1(1)); % delete(h_text2(1)); end - ty = result.at(transformKey).translation().y(); - K_estimate = result.at(calibrationKey); + t = result.atPose3(transformKey).translation(); + ty = t(2); + K_estimate = result.atCal3_S2(calibrationKey); K_errors = K.localCoordinates(K_estimate); - camera_transform_estimate = result.at(transformKey); + camera_transform_estimate = result.atPose3(transformKey); - fx = result.at(calibrationKey).fx(); - fy = result.at(calibrationKey).fy(); + fx = result.atCal3_S2(calibrationKey).fx(); + fy = result.atCal3_S2(calibrationKey).fy(); % h_text1 = text(-600,0,0,sprintf('Y-Transform(0.0): %0.2f',ty)); text(0,1300,0,sprintf('Calibration and IMU-cam transform errors:')); @@ -304,7 +305,7 @@ end %% print out final camera transform and write video -result.at(transformKey); +result.atPose3(transformKey); if(write_video) close(videoObj); end \ No newline at end of file diff --git a/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m b/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m 
index 4557d711f..9796a9737 100644 --- a/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m +++ b/matlab/unstable_examples/TransformCalProjectionFactorIMUExampleISAM.m @@ -53,7 +53,7 @@ % insert shifted points for i=1:nrPoints - initial.insert(100+i,landmarks{i}.compose(y_shift)); + initial.insert(100+i,landmarks{i} + y_shift); end figure(1); @@ -134,7 +134,7 @@ end if i == 2 fg.add(PriorFactorPose3(2, Pose3(Rot3(),Point3(1,0,0)),pose_cov)); - fg.add(PriorFactorLieVector(currentVelKey, currentVelocityGlobal, sigma_init_v)); + fg.add(PriorFactorVector(currentVelKey, currentVelocityGlobal, sigma_init_v)); fg.add(PriorFactorConstantBias(currentBiasKey, currentBias, sigma_init_b)); end if i > 1 @@ -144,7 +144,7 @@ step = move_circle; end - initial.insert(i,result.at(i-1).compose(step)); + initial.insert(i,result.atPose3(i-1).compose(step)); fg.add(BetweenFactorPose3(i-1,i, step, covariance)); deltaT = 1; @@ -158,10 +158,13 @@ [ currentIMUPoseGlobal, currentVelocityGlobal ] = imuSimulator.integrateTrajectory( ... currentIMUPoseGlobal, omega, velocity, velocity, deltaT); - - currentSummarizedMeasurement = gtsam.ImuFactorPreintegratedMeasurements( ... - currentBias, IMU_metadata.AccelerometerSigma.^2 * eye(3), ... - IMU_metadata.GyroscopeSigma.^2 * eye(3), IMU_metadata.IntegrationSigma.^2 * eye(3)); + + params = gtsam.PreintegrationParams.MakeSharedD(9.81); + params.setAccelerometerCovariance(IMU_metadata.AccelerometerSigma.^2 * eye(3)); + params.setGyroscopeCovariance(IMU_metadata.GyroscopeSigma.^2 * eye(3)); + params.setIntegrationCovariance(IMU_metadata.IntegrationSigma.^2 * eye(3)); + currentSummarizedMeasurement = gtsam.PreintegratedImuMeasurements( ... + params, currentBias); accMeas = acc_omega(1:3)-g; omegaMeas = acc_omega(4:6); @@ -171,7 +174,7 @@ fg.add(ImuFactor( ... i-1, currentVelKey-1, ... i, currentVelKey, ... - currentBiasKey, currentSummarizedMeasurement, g, w_coriolis)); + currentBiasKey, currentSummarizedMeasurement)); % Bias evolution as given in the IMU metadata fg.add(BetweenFactorConstantBias(currentBiasKey-1, currentBiasKey, imuBias.ConstantBias(zeros(3,1), zeros(3,1)), ... 
@@ -204,8 +207,8 @@ initial = Values; fg = NonlinearFactorGraph; - currentVelocityGlobal = isam.calculateEstimate(currentVelKey); - currentBias = isam.calculateEstimate(currentBiasKey); + currentVelocityGlobal = isam.calculateEstimate().atVector(currentVelKey); + currentBias = isam.calculateEstimate().atConstantBias(currentBiasKey); %% Compute some marginals marginal = isam.marginalCovariance(calibrationKey); @@ -249,10 +252,10 @@ gtsam.plotPose3(currentIMUPoseGlobal, [], 2); %% plot results - result_camera_transform = result.at(transformKey); + result_camera_transform = result.atPose3(transformKey); for j=1:i - gtsam.plotPose3(result.at(j),[],0.5); - gtsam.plotPose3(result.at(j).compose(result_camera_transform),[],0.5); + gtsam.plotPose3(result.atPose3(j),[],0.5); + gtsam.plotPose3(result.atPose3(j).compose(result_camera_transform),[],0.5); end xlabel('x (m)'); @@ -265,14 +268,15 @@ % axis equal for l=101:100+nrPoints - plotPoint3(result.at(l),'g'); + plotPoint3(result.atPoint3(l),'g'); end - ty = result.at(transformKey).translation().y(); - fx = result.at(calibrationKey).fx(); - fy = result.at(calibrationKey).fy(); - px = result.at(calibrationKey).px(); - py = result.at(calibrationKey).py(); + t = result.atPose3(transformKey).translation(); + ty = t(2); + fx = result.atCal3_S2(calibrationKey).fx(); + fy = result.atCal3_S2(calibrationKey).fy(); + px = result.atCal3_S2(calibrationKey).px(); + py = result.atCal3_S2(calibrationKey).py(); text(1,5,5,sprintf('Y-Transform(0.0): %0.2f',ty)); text(1,5,3,sprintf('fx(900): %.0f',fx)); text(1,5,1,sprintf('fy(900): %.0f',fy)); @@ -342,10 +346,10 @@ fprintf('Cheirality Exception count: %d\n', cheirality_exception_count); disp('Transform after optimization'); -result.at(transformKey) +result.atPose3(transformKey) disp('Calibration after optimization'); -result.at(calibrationKey) +result.atCal3_S2(calibrationKey) disp('Bias after optimization'); currentBias diff --git a/matlab/unstable_examples/TransformProjectionFactorExample.m b/matlab/unstable_examples/TransformProjectionFactorExample.m index 79a96209d..f6a896cff 100644 --- a/matlab/unstable_examples/TransformProjectionFactorExample.m +++ b/matlab/unstable_examples/TransformProjectionFactorExample.m @@ -33,7 +33,7 @@ % insert shifted points for i=1:nrPoints - initial.insert(100+i,landmarks{i}.compose(y_shift)); + initial.insert(100+i,landmarks{i} + y_shift); end figure(1); diff --git a/matlab/unstable_examples/TransformProjectionFactorExampleISAM.m b/matlab/unstable_examples/TransformProjectionFactorExampleISAM.m index ca5b70c62..88f6cc63c 100644 --- a/matlab/unstable_examples/TransformProjectionFactorExampleISAM.m +++ b/matlab/unstable_examples/TransformProjectionFactorExampleISAM.m @@ -47,7 +47,7 @@ % insert shifted points for i=1:nrPoints - initial.insert(100+i,landmarks{i}.compose(y_shift)); + initial.insert(100+i,landmarks{i} + y_shift); end figure(1); @@ -146,7 +146,8 @@ plotPoint3(result.atPoint3(l),'g'); end - ty = result.atPose3(1000).translation().y(); + t = result.atPose3(1000).translation(); + ty = t(2); text(5,5,5,sprintf('Y-Transform: %0.2g',ty)); if(write_video) diff --git a/matlab/unstable_examples/plot_projected_landmarks.m b/matlab/unstable_examples/plot_projected_landmarks.m index 6b8101844..30e222016 100644 --- a/matlab/unstable_examples/plot_projected_landmarks.m +++ b/matlab/unstable_examples/plot_projected_landmarks.m @@ -25,9 +25,9 @@ key_index = gtsam.symbolIndex(key); p = landmarks.atPoint3(gtsam.symbol('l',key_index)); - x(i+1) = p.x; - y(i+1) = p.y; - z(i+1) = p.z; + 
x(i+1) = p(1); + y(i+1) = p(2); + z(i+1) = p(3); end diff --git a/matlab/unstable_examples/project_landmarks.m b/matlab/unstable_examples/project_landmarks.m index aaccc9248..3bccef94b 100644 --- a/matlab/unstable_examples/project_landmarks.m +++ b/matlab/unstable_examples/project_landmarks.m @@ -11,9 +11,9 @@ z = camera.project(landmarks.atPoint3(symbol('l',i))); % check bounding box - if z.x < 0 || z.x > 1280 + if z(1) < 0 || z(1) > 1280 continue - elseif z.y < 0 || z.y > 960 + elseif z(2) < 0 || z(2) > 960 continue end diff --git a/matlab/unstable_examples/testTSAMFactors.m b/matlab/unstable_examples/testTSAMFactors.m index 5cfd0aa80..2d1de5855 100644 --- a/matlab/unstable_examples/testTSAMFactors.m +++ b/matlab/unstable_examples/testTSAMFactors.m @@ -50,8 +50,8 @@ % Check result CHECK('error',result.atPose2(100).equals(b1,1e-5)) CHECK('error',result.atPose2(10).equals(origin,1e-5)) -CHECK('error',result.atPoint2(1).equals(Point2(0,1),1e-5)) -CHECK('error',result.atPoint2(2).equals(Point2(0,1),1e-5)) +CHECK('error',result.atPoint2(1) - Point2(0,1) < 1e-5) +CHECK('error',result.atPoint2(2) - Point2(0,1) < 1e-5) CHECK('error',result.atPose2(20).equals(origin,1e-5)) CHECK('error',result.atPose2(200).equals(b2,1e-5)) diff --git a/package.xml b/package.xml index f8554729e..341c78ba3 100644 --- a/package.xml +++ b/package.xml @@ -1,7 +1,7 @@ gtsam - 4.0.2 + 4.1.0 gtsam Frank Dellaert diff --git a/package_scripts/README.md b/package_scripts/README.md deleted file mode 100644 index e27747717..000000000 --- a/package_scripts/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# How to build Debian and Ubuntu Packages - -## Preparations - -Packages must be signed with a GPG key. First have a look of the keys -you have available: - - gpg --list-secret-keys - -If you don't have one, create one, then list again. - -Pick a secret key you like from the listed keys, for instance -"Your Name ". Then unlock that key by -signing a dummy file. The following line should pop up a window to -enter the passphrase: - - echo | gpg --local-user "Your Name " -s >/dev/null - -Now you can run the below scripts. Without this step they will fail -with "No secret key" or similar messages. - -## How to generate a Debian package - -Run the package script, providing a name/email that matches your PGP key. - - cd [GTSAM_SOURCE_ROOT] - bash package_scripts/prepare_debian.sh -e "Your Name " - - -## How to generate Ubuntu packages for a PPA - -Run the packaging script, passing the name of the gpg key -(see above) with the "-e" option: - - cd [GTSAM_SOURCE_ROOT] - bash package_scripts/prepare_ubuntu_pkgs_for_ppa.sh -e "Your Name " - -Check that you have uploaded this key to the ubuntu key server, and -have added the key to your account. - -Upload the package to your ppa: - - cd ~/gtsam_ubuntu - bash [GTSAM_SOURCE_ROOT]/package_scripts/upload_all_gtsam_ppa.sh -p "ppa:your-name/ppa-name" diff --git a/package_scripts/compile_static_boost.sh b/package_scripts/compile_static_boost.sh deleted file mode 100755 index ca3b99e09..000000000 --- a/package_scripts/compile_static_boost.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -# Compile boost statically, with -fPIC to allow linking it into the mex -# module (which is a dynamic library). --disable-icu prevents depending -# on libicu, which is unneeded and would require then linking the mex -# module with it as well. We just stage instead of install, then the -# toolbox_package_unix.sh script uses the staged boost. 
-./b2 link=static threading=multi cxxflags=-fPIC cflags=-fPIC --disable-icu -a -j4 stage diff --git a/package_scripts/prepare_debian.sh b/package_scripts/prepare_debian.sh deleted file mode 100755 index 5dd191fc6..000000000 --- a/package_scripts/prepare_debian.sh +++ /dev/null @@ -1,187 +0,0 @@ -#!/bin/bash -# Prepare to build a Debian package. -# Jose Luis Blanco Claraco, 2019 (for GTSAM) -# Jose Luis Blanco Claraco, 2008-2018 (for MRPT) - -set -e # end on error -#set -x # for debugging - -APPEND_SNAPSHOT_NUM=0 -IS_FOR_UBUNTU=0 -APPEND_LINUX_DISTRO="" -VALUE_EXTRA_CMAKE_PARAMS="" -while getopts "sud:c:e:" OPTION -do - case $OPTION in - s) - APPEND_SNAPSHOT_NUM=1 - ;; - u) - IS_FOR_UBUNTU=1 - ;; - d) - APPEND_LINUX_DISTRO=$OPTARG - ;; - c) - VALUE_EXTRA_CMAKE_PARAMS=$OPTARG - ;; - e) - PACKAGER_EMAIL=$OPTARG - ;; - ?) - echo "Unknown command line argument!" - exit 1 - ;; - esac -done - -if [ -z ${PACKAGER_EMAIL+x} ]; then - echo "must specify packager email via -e option!" - exit -1 -fi - -if [ -f CMakeLists.txt ]; -then - source package_scripts/prepare_debian_gen_snapshot_version.sh -else - echo "Error: cannot find CMakeList.txt. This script is intended to be run from the root of the source tree." - exit 1 -fi - -# Append snapshot? -if [ $APPEND_SNAPSHOT_NUM == "1" ]; -then - CUR_SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - source $CUR_SCRIPT_DIR/prepare_debian_gen_snapshot_version.sh # populate GTSAM_SNAPSHOT_VERSION - - GTSAM_VERSION_STR="${GTSAM_VERSION_STR}~snapshot${GTSAM_SNAPSHOT_VERSION}${APPEND_LINUX_DISTRO}" -else - GTSAM_VERSION_STR="${GTSAM_VERSION_STR}${APPEND_LINUX_DISTRO}" -fi - -# Call prepare_release -GTSAMSRC=`pwd` - -if [ -f $HOME/gtsam_release/gtsam*.tar.gz ]; -then - echo "## release file already exists. Reusing it." -else - source package_scripts/prepare_release.sh - echo - echo "## Done prepare_release.sh" -fi - -echo "=========== Generating GTSAM ${GTSAM_VER_MMP} Debian package ==============" -cd $GTSAMSRC - -set -x -if [ -z "$GTSAM_DEB_DIR" ]; then - GTSAM_DEB_DIR="$HOME/gtsam_debian" -fi -GTSAM_EXTERN_DEBIAN_DIR="$GTSAMSRC/debian/" -GTSAM_EXTERN_UBUNTU_PPA_DIR="$GTSAMSRC/debian/" - -if [ -f ${GTSAM_EXTERN_DEBIAN_DIR}/control ]; -then - echo "Using debian dir: ${GTSAM_EXTERN_DEBIAN_DIR}" -else - echo "ERROR: Cannot find ${GTSAM_EXTERN_DEBIAN_DIR}" - exit 1 -fi - -GTSAM_DEBSRC_DIR=$GTSAM_DEB_DIR/gtsam-${GTSAM_VERSION_STR} - -echo "GTSAM_VERSION_STR: ${GTSAM_VERSION_STR}" -echo "GTSAM_DEBSRC_DIR: ${GTSAM_DEBSRC_DIR}" - -# Prepare a directory for building the debian package: -# -rm -fR $GTSAM_DEB_DIR || true -mkdir -p $GTSAM_DEB_DIR || true - -# Orig tarball: -echo "Copying orig tarball: gtsam_${GTSAM_VERSION_STR}.orig.tar.gz" -cp $HOME/gtsam_release/gtsam*.tar.gz $GTSAM_DEB_DIR/gtsam_${GTSAM_VERSION_STR}.orig.tar.gz -cd ${GTSAM_DEB_DIR} -tar -xf gtsam_${GTSAM_VERSION_STR}.orig.tar.gz - -if [ ! -d "${GTSAM_DEBSRC_DIR}" ]; -then - mv gtsam-* ${GTSAM_DEBSRC_DIR} # fix different dir names for Ubuntu PPA packages -fi - -if [ ! -f "${GTSAM_DEBSRC_DIR}/CMakeLists.txt" ]; -then - echo "*ERROR*: Seems there was a problem copying sources to ${GTSAM_DEBSRC_DIR}... aborting script." 
- exit 1 -fi - -cd ${GTSAM_DEBSRC_DIR} - -# Copy debian directory: -#mkdir debian -cp -r ${GTSAM_EXTERN_DEBIAN_DIR}/* debian - -# Use modified control & rules files for Ubuntu PPA packages: -#if [ $IS_FOR_UBUNTU == "1" ]; -#then - # already done: cp ${GTSAM_EXTERN_UBUNTU_PPA_DIR}/control.in debian/ - # Ubuntu: force use of gcc-7: - #sed -i '9i\export CXX=/usr/bin/g++-7\' debian/rules - #sed -i '9i\export CC=/usr/bin/gcc-7\' debian/rules7 -#fi - -# Export signing pub key: -mkdir debian/upstream/ -gpg --export --export-options export-minimal --armor > debian/upstream/signing-key.asc - -# Parse debian/ control.in --> control -#mv debian/control.in debian/control -#sed -i "s/@GTSAM_VER_MM@/${GTSAM_VER_MM}/g" debian/control - -# Replace the text "REPLACE_HERE_EXTRA_CMAKE_PARAMS" in the "debian/rules" file -# with: ${${VALUE_EXTRA_CMAKE_PARAMS}} -RULES_FILE=debian/rules -sed -i -e "s/REPLACE_HERE_EXTRA_CMAKE_PARAMS/${VALUE_EXTRA_CMAKE_PARAMS}/g" $RULES_FILE -echo "Using these extra parameters for CMake: '${VALUE_EXTRA_CMAKE_PARAMS}'" - -# Strip my custom files... -rm debian/*.new || true - - -# Figure out the next Debian version number: -echo "Detecting next Debian version number..." - -CHANGELOG_UPSTREAM_VER=$( dpkg-parsechangelog | sed -n 's/Version:.*\([0-9]\.[0-9]*\.[0-9]*.*snapshot.*\)-.*/\1/p' ) -CHANGELOG_LAST_DEBIAN_VER=$( dpkg-parsechangelog | sed -n 's/Version:.*\([0-9]\.[0-9]*\.[0-9]*\).*-\([0-9]*\).*/\2/p' ) - -echo " -> PREVIOUS UPSTREAM: $CHANGELOG_UPSTREAM_VER -> New: ${GTSAM_VERSION_STR}" -echo " -> PREVIOUS DEBIAN VERSION: $CHANGELOG_LAST_DEBIAN_VER" - -# If we have the same upstream versions, increase the Debian version, otherwise create a new entry: -if [ "$CHANGELOG_UPSTREAM_VER" = "$GTSAM_VERSION_STR" ]; -then - NEW_DEBIAN_VER=$[$CHANGELOG_LAST_DEBIAN_VER + 1] - echo "Changing to a new Debian version: ${GTSAM_VERSION_STR}-${NEW_DEBIAN_VER}" - DEBCHANGE_CMD="--newversion ${GTSAM_VERSION_STR}-${NEW_DEBIAN_VER}" -else - DEBCHANGE_CMD="--newversion ${GTSAM_VERSION_STR}-1" -fi - -echo "Adding a new entry to debian/changelog..." - -DEBEMAIL=${PACKAGER_EMAIL} debchange $DEBCHANGE_CMD -b --distribution unstable --force-distribution New version of upstream sources. - -echo "Copying back the new changelog to a temporary file in: ${GTSAM_EXTERN_DEBIAN_DIR}changelog.new" -cp debian/changelog ${GTSAM_EXTERN_DEBIAN_DIR}changelog.new - -set +x - -echo "==============================================================" -echo "Now, you can build the source Deb package with 'debuild -S -sa'" -echo "==============================================================" - -cd .. 
-ls -lh - -exit 0 diff --git a/package_scripts/prepare_debian_gen_snapshot_version.sh b/package_scripts/prepare_debian_gen_snapshot_version.sh deleted file mode 100755 index 589d422fe..000000000 --- a/package_scripts/prepare_debian_gen_snapshot_version.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# See https://reproducible-builds.org/specs/source-date-epoch/ -# get SOURCE_DATE_EPOCH with UNIX time_t -if [ -d ".git" ]; -then - SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) -else - echo "Error: intended for use from within a git repository" - exit 1 -fi -GTSAM_SNAPSHOT_VERSION=$(date -d @$SOURCE_DATE_EPOCH +%Y%m%d-%H%M) - -GTSAM_SNAPSHOT_VERSION+="-git-" -GTSAM_SNAPSHOT_VERSION+=`git rev-parse --short=8 HEAD` -GTSAM_SNAPSHOT_VERSION+="-" - -# x.y.z version components: -GTSAM_VERSION_MAJOR=$(grep "(GTSAM_VERSION_MAJOR" CMakeLists.txt | sed -r 's/^.*GTSAM_VERSION_MAJOR\s*([0-9])*.*$/\1/g') -GTSAM_VERSION_MINOR=$(grep "(GTSAM_VERSION_MINOR" CMakeLists.txt | sed -r 's/^.*GTSAM_VERSION_MINOR\s*([0-9])*.*$/\1/g') -GTSAM_VERSION_PATCH=$(grep "(GTSAM_VERSION_PATCH" CMakeLists.txt | sed -r 's/^.*GTSAM_VERSION_PATCH\s*([0-9])*.*$/\1/g') - -GTSAM_VER_MM="${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}" -GTSAM_VER_MMP="${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}" -GTSAM_VERSION_STR=$GTSAM_VER_MMP diff --git a/package_scripts/prepare_release.sh b/package_scripts/prepare_release.sh deleted file mode 100755 index 750fc27b3..000000000 --- a/package_scripts/prepare_release.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash -# Export sources from a git tree and prepare it for a public release. -# Jose Luis Blanco Claraco, 2019 (for GTSAM) -# Jose Luis Blanco Claraco, 2008-2018 (for MRPT) - -set -e # exit on error -#set -x # for debugging - -# Checks -# -------------------------------- -if [ -f version_prefix.txt ]; -then - if [ -z ${GTSAM_VERSION_STR+x} ]; - then - source package_scripts/prepare_debian_gen_snapshot_version.sh - fi - echo "ERROR: Run this script from the GTSAM source tree root directory." - exit 1 -fi - -GTSAM_SRC=`pwd` -OUT_RELEASES_DIR="$HOME/gtsam_release" - -OUT_DIR=$OUT_RELEASES_DIR/gtsam-${GTSAM_VERSION_STR} - -echo "=========== Generating GTSAM release ${GTSAM_VER_MMP} ==================" -echo "GTSAM_VERSION_STR : ${GTSAM_VERSION_STR}" -echo "OUT_DIR : ${OUT_DIR}" -echo "============================================================" -echo - -# Prepare output directory: -rm -fR $OUT_RELEASES_DIR || true -mkdir -p ${OUT_DIR} - -# Export / copy sources to target dir: -if [ -d "$GTSAM_SRC/.git" ]; -then - echo "# Exporting git source tree to ${OUT_DIR}" - git archive --format=tar HEAD | tar -x -C ${OUT_DIR} - - # Remove VCS control files: - find ${OUT_DIR} -name '.gitignore' | xargs rm - - # Generate ./SOURCE_DATE_EPOCH with UNIX time_t - SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) -else - echo "# Copying sources to ${OUT_DIR}" - cp -R . ${OUT_DIR} - - # Generate ./SOURCE_DATE_EPOCH with UNIX time_t - SOURCE_DATE_EPOCH=$(date +%s) -fi - -# See https://reproducible-builds.org/specs/source-date-epoch/ -echo $SOURCE_DATE_EPOCH > ${OUT_DIR}/SOURCE_DATE_EPOCH - -cd ${OUT_DIR} - -# Dont include Debian files in releases: -rm -fR package_scripts - -# Orig tarball: -cd .. 
-echo "# Creating orig tarball: gtsam-${GTSAM_VERSION_STR}.tar.gz" -tar czf gtsam-${GTSAM_VERSION_STR}.tar.gz gtsam-${GTSAM_VERSION_STR} - -rm -fr gtsam-${GTSAM_VERSION_STR} - -# GPG signature: -gpg --armor --detach-sign gtsam-${GTSAM_VERSION_STR}.tar.gz diff --git a/package_scripts/prepare_ubuntu_pkgs_for_ppa.sh b/package_scripts/prepare_ubuntu_pkgs_for_ppa.sh deleted file mode 100755 index 33c016b94..000000000 --- a/package_scripts/prepare_ubuntu_pkgs_for_ppa.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Creates a set of packages for each different Ubuntu distribution, with the -# intention of uploading them to a PPA on launchpad -# -# JLBC, 2010 -# [Addition 2012:] -# -# You can declare a variable (in the caller shell) with extra flags for the -# CMake in the final ./configure like: -# -# GTSAM_PKG_CUSTOM_CMAKE_PARAMS="\"-DDISABLE_SSE3=ON\"" -# - - -function show_help { - echo "USAGE:" - echo "" - echo "- to display this help: " - echo "prepare_ubuntu_packages_for_ppa.sh -h or -?" - echo "" - echo "- to package to your PPA: " - echo "prepare_ubuntu_packages_for_ppa.sh -e email_of_your_gpg_key" - echo "" - echo "to pass custom config for GTSAM, set the following" - echo "environment variable beforehand: " - echo "" - echo "GTSAM_PKG_CUSTOM_CMAKE_PARAMS=\"\"-DDISABLE_SSE3=ON\"\"" - echo "" -} - -while getopts "h?e:" opt; do - case "$opt" in - h|\?) - show_help - exit 0 - ;; - e) PACKAGER_EMAIL=$OPTARG - ;; - esac -done - -if [ -z ${PACKAGER_EMAIL+x} ]; then - show_help - exit -1 -fi - - -set -e - -# List of distributions to create PPA packages for: -LST_DISTROS=(xenial bionic eoan focal) - -# Checks -# -------------------------------- -if [ -f CMakeLists.txt ]; -then - source package_scripts/prepare_debian_gen_snapshot_version.sh - echo "GTSAM version: ${GTSAM_VER_MMP}" -else - echo "ERROR: Run this script from the GTSAM root directory." 
- exit 1 -fi - -if [ -z "${gtsam_ubuntu_OUT_DIR}" ]; then - export gtsam_ubuntu_OUT_DIR="$HOME/gtsam_ubuntu" -fi -GTSAMSRC=`pwd` -if [ -z "${GTSAM_DEB_DIR}" ]; then - export GTSAM_DEB_DIR="$HOME/gtsam_debian" -fi -GTSAM_EXTERN_DEBIAN_DIR="$GTSAMSRC/debian/" - -# Clean out dirs: -rm -fr $gtsam_ubuntu_OUT_DIR/ - -# ------------------------------------------------------------------- -# And now create the custom packages for each Ubuntu distribution: -# ------------------------------------------------------------------- -count=${#LST_DISTROS[@]} -IDXS=$(seq 0 $(expr $count - 1)) - -cp ${GTSAM_EXTERN_DEBIAN_DIR}/changelog /tmp/my_changelog - -for IDX in ${IDXS}; -do - DEBIAN_DIST=${LST_DISTROS[$IDX]} - - # ------------------------------------------------------------------- - # Call the standard "prepare_debian.sh" script: - # ------------------------------------------------------------------- - cd ${GTSAMSRC} - bash package_scripts/prepare_debian.sh -e "$PACKAGER_EMAIL" -s -u -d ${DEBIAN_DIST} -c "${GTSAM_PKG_CUSTOM_CMAKE_PARAMS}" - - CUR_SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - source $CUR_SCRIPT_DIR/prepare_debian_gen_snapshot_version.sh # populate GTSAM_SNAPSHOT_VERSION - - echo "===== Distribution: ${DEBIAN_DIST} =========" - cd ${GTSAM_DEB_DIR}/gtsam-${GTSAM_VER_MMP}~snapshot${GTSAM_SNAPSHOT_VERSION}${DEBIAN_DIST}/debian - #cp ${GTSAM_EXTERN_DEBIAN_DIR}/changelog changelog - cp /tmp/my_changelog changelog - DEBCHANGE_CMD="--newversion ${GTSAM_VERSION_STR}~snapshot${GTSAM_SNAPSHOT_VERSION}${DEBIAN_DIST}-1" - echo "Changing to a new Debian version: ${DEBCHANGE_CMD}" - echo "Adding a new entry to debian/changelog for distribution ${DEBIAN_DIST}" - DEBEMAIL="${PACKAGER_EMAIL}" debchange $DEBCHANGE_CMD -b --distribution ${DEBIAN_DIST} --force-distribution New version of upstream sources. - - cp changelog /tmp/my_changelog - - echo "Now, let's build the source Deb package with 'debuild -S -sa':" - cd .. - # -S: source package - # -sa: force inclusion of sources - # -d: don't check dependencies in this system - debuild -S -sa -d - - # Make a copy of all these packages: - cd .. - mkdir -p $gtsam_ubuntu_OUT_DIR/$DEBIAN_DIST - cp gtsam_* $gtsam_ubuntu_OUT_DIR/${DEBIAN_DIST}/ - echo ">>>>>> Saving packages to: $gtsam_ubuntu_OUT_DIR/$DEBIAN_DIST/" -done - - -exit 0 diff --git a/package_scripts/toolbox_package_unix.sh b/package_scripts/toolbox_package_unix.sh deleted file mode 100755 index 28de2572a..000000000 --- a/package_scripts/toolbox_package_unix.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/sh - -# Script to build a tarball with the matlab toolbox - -# Detect platform -os=`uname -s` -arch=`uname -m` -if [ "$os" = "Linux" -a "$arch" = "x86_64" ]; then - platform=lin64 -elif [ "$os" = "Linux" -a "$arch" = "i686" ]; then - platform=lin32 -elif [ "$os" = "Darwin" -a "$arch" = "x86_64" ]; then - platform=mac64 -else - echo "Unrecognized platform" - exit 1 -fi - -echo "Platform is ${platform}" - -# Check for empty diectory -if [ ! -z "`ls`" ]; then - echo "Please run this script from an empty build directory" - exit 1 -fi - -# Check for boost -if [ -z "$1" ]; then - echo "Usage: $0 BOOSTTREE" - echo "BOOSTTREE should be a boost source tree compiled with toolbox_build_boost." 
- exit 1 -fi - -# Run cmake -cmake -DCMAKE_BUILD_TYPE=Release \ --DGTSAM_INSTALL_MATLAB_TOOLBOX:BOOL=ON \ --DCMAKE_INSTALL_PREFIX="$PWD/stage" \ --DBoost_NO_SYSTEM_PATHS:BOOL=ON \ --DBoost_USE_STATIC_LIBS:BOOL=ON \ --DBOOST_ROOT="$1" \ --DGTSAM_BUILD_TESTS:BOOL=OFF \ --DGTSAM_BUILD_TIMING:BOOL=OFF \ --DGTSAM_BUILD_EXAMPLES_ALWAYS:BOOL=OFF \ --DGTSAM_WITH_TBB:BOOL=OFF \ --DGTSAM_SUPPORT_NESTED_DISSECTION:BOOL=OFF \ --DGTSAM_INSTALL_GEOGRAPHICLIB:BOOL=OFF \ --DGTSAM_BUILD_UNSTABLE:BOOL=OFF \ --DGTSAM_MEX_BUILD_STATIC_MODULE:BOOL=ON .. - -if [ $? -ne 0 ]; then - echo "CMake failed" - exit 1 -fi - -# Compile -make -j8 install - -if [ $? -ne 0 ]; then - echo "Compile failed" - exit 1 -fi - -# Create package -tar czf gtsam-toolbox-3.2.0-$platform.tgz -C stage/gtsam_toolbox toolbox diff --git a/package_scripts/upload_all_gtsam_ppa.sh b/package_scripts/upload_all_gtsam_ppa.sh deleted file mode 100755 index f06d005fb..000000000 --- a/package_scripts/upload_all_gtsam_ppa.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -function show_help { - echo "USAGE:" - echo "" - echo "- to display this help: " - echo "upload_all_gtsam_ppa.sh -h or -?" - echo "" - echo "- to upload to your PPA: " - echo "upload_all_gtsam_ppa.sh -p ppa:your_name/name_of_ppa" - echo "" -} - -while getopts "h?p:" opt; do - case "$opt" in - h|\?) - show_help - exit 0 - ;; - p) ppa_name=$OPTARG - ;; - esac -done - -if [ -z ${ppa_name+x} ]; then - show_help - exit -1 -fi - - -find . -name '*.changes' | xargs -I FIL dput ${ppa_name} FIL diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt new file mode 100644 index 000000000..b50701464 --- /dev/null +++ b/python/CMakeLists.txt @@ -0,0 +1,140 @@ +set(GTSAM_PYTHON_BUILD_DIRECTORY ${PROJECT_BINARY_DIR}/python) + +if (NOT GTSAM_BUILD_PYTHON) + return() +endif() + +# Generate setup.py. 
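+# (README.md is read below so that configure_file can substitute its contents
+# into setup.py.in; presumably it ends up as the package's long description.)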
+file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS) +configure_file(${PROJECT_SOURCE_DIR}/python/setup.py.in + ${GTSAM_PYTHON_BUILD_DIRECTORY}/setup.py) + +# Supply MANIFEST.in for older versions of Python +file(COPY ${PROJECT_SOURCE_DIR}/python/MANIFEST.in + DESTINATION ${GTSAM_PYTHON_BUILD_DIRECTORY}) + +include(PybindWrap) + +############################################################ +## Load the necessary files to compile the wrapper + +# Load the pybind11 code +add_subdirectory(${PROJECT_SOURCE_DIR}/wrap/pybind11 pybind11) +# Set the wrapping script variable +set(PYBIND_WRAP_SCRIPT "${PROJECT_SOURCE_DIR}/wrap/scripts/pybind_wrap.py") +############################################################ + +add_custom_target(gtsam_header DEPENDS "${PROJECT_SOURCE_DIR}/gtsam/gtsam.i") +add_custom_target(gtsam_unstable_header DEPENDS "${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i") + +# ignoring the non-concrete types (type aliases) +set(ignore + gtsam::Point2 + gtsam::Point3 + gtsam::LieVector + gtsam::LieMatrix + gtsam::ISAM2ThresholdMapValue + gtsam::FactorIndices + gtsam::FactorIndexSet + gtsam::IndexPairSetMap + gtsam::IndexPairVector + gtsam::BetweenFactorPose2s + gtsam::BetweenFactorPose3s + gtsam::Point2Vector + gtsam::Pose3Vector + gtsam::KeyVector + gtsam::BinaryMeasurementsUnit3 + gtsam::KeyPairDoubleMap) + +pybind_wrap(gtsam_py # target + ${PROJECT_SOURCE_DIR}/gtsam/gtsam.i # interface_header + "gtsam.cpp" # generated_cpp + "gtsam" # module_name + "gtsam" # top_namespace + "${ignore}" # ignore_classes + ${PROJECT_SOURCE_DIR}/python/gtsam/gtsam.tpl + gtsam # libs + "gtsam;gtsam_header" # dependencies + ON # use_boost + ) + +set_target_properties(gtsam_py PROPERTIES + INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib" + INSTALL_RPATH_USE_LINK_PATH TRUE + OUTPUT_NAME "gtsam" + LIBRARY_OUTPUT_DIRECTORY "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam" + DEBUG_POSTFIX "" # Otherwise you will have a wrong name + RELWITHDEBINFO_POSTFIX "" # Otherwise you will have a wrong name + ) + +set(GTSAM_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam) + +# Symlink all tests .py files to build folder. +create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam" + "${GTSAM_MODULE_PATH}") + +# Common directory for data/datasets stored with the package. +# This will store the data in the Python site package directly. 
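+# (This copies examples/Data into python/gtsam/Data, which is what
+# MANIFEST.in's "recursive-include gtsam/Data *" packages via setup.py.)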
+file(COPY "${GTSAM_SOURCE_DIR}/examples/Data" DESTINATION "${GTSAM_MODULE_PATH}") + +# Add gtsam as a dependency to the install target +set(GTSAM_PYTHON_DEPENDENCIES gtsam_py) + + +if(GTSAM_UNSTABLE_BUILD_PYTHON) + set(ignore + gtsam::Point2 + gtsam::Point3 + gtsam::LieVector + gtsam::LieMatrix + gtsam::ISAM2ThresholdMapValue + gtsam::FactorIndices + gtsam::FactorIndexSet + gtsam::BetweenFactorPose3s + gtsam::Point2Vector + gtsam::Pose3Vector + gtsam::KeyVector + gtsam::FixedLagSmootherKeyTimestampMapValue + gtsam::BinaryMeasurementsUnit3 + gtsam::CameraSetCal3_S2 + gtsam::CameraSetCal3Bundler + gtsam::KeyPairDoubleMap) + + pybind_wrap(gtsam_unstable_py # target + ${PROJECT_SOURCE_DIR}/gtsam_unstable/gtsam_unstable.i # interface_header + "gtsam_unstable.cpp" # generated_cpp + "gtsam_unstable" # module_name + "gtsam" # top_namespace + "${ignore}" # ignore_classes + ${PROJECT_SOURCE_DIR}/python/gtsam_unstable/gtsam_unstable.tpl + gtsam_unstable # libs + "gtsam_unstable;gtsam_unstable_header" # dependencies + ON # use_boost + ) + + set_target_properties(gtsam_unstable_py PROPERTIES + INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib" + INSTALL_RPATH_USE_LINK_PATH TRUE + OUTPUT_NAME "gtsam_unstable" + LIBRARY_OUTPUT_DIRECTORY "${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable" + DEBUG_POSTFIX "" # Otherwise you will have a wrong name + RELWITHDEBINFO_POSTFIX "" # Otherwise you will have a wrong name + ) + + set(GTSAM_UNSTABLE_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable) + + # Symlink all tests .py files to build folder. + create_symlinks("${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable" + "${GTSAM_UNSTABLE_MODULE_PATH}") + + # Add gtsam_unstable to the install target + list(APPEND GTSAM_PYTHON_DEPENDENCIES gtsam_unstable_py) + +endif() + +# Add custom target so we can install with `make python-install` +set(GTSAM_PYTHON_INSTALL_TARGET python-install) +add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET} + COMMAND ${PYTHON_EXECUTABLE} ${GTSAM_PYTHON_BUILD_DIRECTORY}/setup.py install + DEPENDS ${GTSAM_PYTHON_DEPENDENCIES} + WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY}) diff --git a/python/MANIFEST.in b/python/MANIFEST.in new file mode 100644 index 000000000..41d7f0c59 --- /dev/null +++ b/python/MANIFEST.in @@ -0,0 +1 @@ +recursive-include gtsam/Data * diff --git a/python/README.md b/python/README.md new file mode 100644 index 000000000..6e2a30d2e --- /dev/null +++ b/python/README.md @@ -0,0 +1,67 @@ +# README + +# Python Wrapper + +This is the Python wrapper around the GTSAM C++ library. We use our custom [wrap library](https://github.com/borglab/wrap) to generate the bindings to the underlying C++ code. + +## Requirements + +- If you want to build the GTSAM python library for a specific python version (eg 3.6), + use the `-DGTSAM_PYTHON_VERSION=3.6` option when running `cmake` otherwise the default interpreter will be used. +- If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment), + then the environment should be active while building GTSAM. +- This wrapper needs `pyparsing(>=2.4.2)`, and `numpy(>=1.11.0)`. These can be installed as follows: + + ```bash + pip install -r /python/requirements.txt + ``` + +## Install + +- Run cmake with the `GTSAM_BUILD_PYTHON` cmake flag enabled to configure building the wrapper. The wrapped module will be built and copied to the directory `/python`. For example, if your local Python version is 3.6.10, then you should run: + ```bash + cmake .. 
-DGTSAM_BUILD_PYTHON=1 -DGTSAM_PYTHON_VERSION=3.6.10 + ``` +- Build GTSAM and the wrapper with `make` (or `ninja` if you use `-GNinja`). + +- To install, simply run `make python-install` (`ninja python-install`). + - The same command can be used to install into a virtual environment if it is active. + - **NOTE**: if you don't want GTSAM to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install GTSAM to a subdirectory of the build directory. + +- You can also directly run `make python-install` without running `make`, and it will compile all the dependencies accordingly. + +## Unit Tests + +The Python toolbox also has a small set of unit tests located in the +test directory. To run them: + + ```bash + cd /python/gtsam/tests + python -m unittest discover + ``` + +## Utils + +TODO + +## Examples + +TODO + +## Writing Your Own Scripts + +See the tests for examples. + +### Some Important Notes: + +- Vector/Matrix: + + - GTSAM expects double-precision floating point vectors and matrices. + Hence, you should pass numpy matrices with `dtype=float`, or `float64`, to avoid any conversion needed. + - Also, GTSAM expects _column-major_ matrices, unlike the default storage + scheme in numpy. But this is only performance-related as `pybind11` should translate them when needed. However, this will result a copy if your matrix is not in the expected type + and storage order. + +## Wrapping Custom GTSAM-based Project + +Please refer to the template project and the corresponding tutorial available [here](https://github.com/borglab/GTSAM-project-python). diff --git a/python/gtsam/__init__.py b/python/gtsam/__init__.py new file mode 100644 index 000000000..70a00c3dc --- /dev/null +++ b/python/gtsam/__init__.py @@ -0,0 +1,35 @@ +from . 
import utils +from .gtsam import * +from .utils import findExampleDataFile + + +def _init(): + """This function is to add shims for the long-gone Point2 and Point3 types""" + + import numpy as np + + global Point2 # export function + + def Point2(x=np.nan, y=np.nan): + """Shim for the deleted Point2 type.""" + if isinstance(x, np.ndarray): + assert x.shape == (2,), "Point2 takes 2-vector" + return x # "copy constructor" + return np.array([x, y], dtype=float) + + global Point3 # export function + + def Point3(x=np.nan, y=np.nan, z=np.nan): + """Shim for the deleted Point3 type.""" + if isinstance(x, np.ndarray): + assert x.shape == (3,), "Point3 takes 3-vector" + return x # "copy constructor" + return np.array([x, y, z], dtype=float) + + # for interactive debugging + if __name__ == "__main__": + # we want all definitions accessible + globals().update(locals()) + + +_init() diff --git a/cython/gtsam/examples/DogLegOptimizerExample.py b/python/gtsam/examples/DogLegOptimizerExample.py similarity index 95% rename from cython/gtsam/examples/DogLegOptimizerExample.py rename to python/gtsam/examples/DogLegOptimizerExample.py index 776ceedc4..26f4fef84 100644 --- a/cython/gtsam/examples/DogLegOptimizerExample.py +++ b/python/gtsam/examples/DogLegOptimizerExample.py @@ -35,17 +35,17 @@ def run(args): graph = gtsam.NonlinearFactorGraph() # Priors - prior = gtsam.noiseModel_Isotropic.Sigma(3, 1) + prior = gtsam.noiseModel.Isotropic.Sigma(3, 1) graph.add(gtsam.PriorFactorPose2(11, T11, prior)) graph.add(gtsam.PriorFactorPose2(21, T21, prior)) # Odometry - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.01, 0.01, 0.3])) + model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.01, 0.01, 0.3])) graph.add(gtsam.BetweenFactorPose2(11, 12, T11.between(T12), model)) graph.add(gtsam.BetweenFactorPose2(21, 22, T21.between(T22), model)) # Range - model_rho = gtsam.noiseModel_Isotropic.Sigma(1, 0.01) + model_rho = gtsam.noiseModel.Isotropic.Sigma(1, 0.01) graph.add(gtsam.RangeFactorPose2(12, 22, 1.0, model_rho)) params = gtsam.DoglegParams() diff --git a/cython/gtsam/examples/GPSFactorExample.py b/python/gtsam/examples/GPSFactorExample.py similarity index 93% rename from cython/gtsam/examples/GPSFactorExample.py rename to python/gtsam/examples/GPSFactorExample.py index 493a07725..0bc0d1bf3 100644 --- a/cython/gtsam/examples/GPSFactorExample.py +++ b/python/gtsam/examples/GPSFactorExample.py @@ -26,8 +26,8 @@ h0 = 274 # Create noise models -GPS_NOISE = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) -PRIOR_NOISE = gtsam.noiseModel_Isotropic.Sigma(6, 0.25) +GPS_NOISE = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) +PRIOR_NOISE = gtsam.noiseModel.Isotropic.Sigma(6, 0.25) # Create an empty nonlinear factor graph graph = gtsam.NonlinearFactorGraph() diff --git a/python/gtsam/examples/ImuFactorExample.py b/python/gtsam/examples/ImuFactorExample.py new file mode 100644 index 000000000..bb707a8f5 --- /dev/null +++ b/python/gtsam/examples/ImuFactorExample.py @@ -0,0 +1,180 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +A script validating and demonstrating the ImuFactor inference. 
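+
+The script simulates IMU data from a chosen twist scenario; every second an
+ImuFactor built from the preintegrated measurements connects X(i), V(i) to
+X(i+1), V(i+1) through the shared bias key B(0), and the resulting graph is
+optimized with Levenberg-Marquardt.
+
+Typical invocation (flags are defined in the argparse block at the bottom):
+    python ImuFactorExample.py --twist_scenario loop_twist -T 12 --verbose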
+ +Author: Frank Dellaert, Varun Agrawal +""" + +from __future__ import print_function + +import argparse +import math + +import gtsam +import matplotlib.pyplot as plt +import numpy as np +from gtsam.symbol_shorthand import B, V, X +from gtsam.utils.plot import plot_pose3 +from mpl_toolkits.mplot3d import Axes3D + +from PreintegrationExample import POSES_FIG, PreintegrationExample + +BIAS_KEY = B(0) + + +np.set_printoptions(precision=3, suppress=True) + + +class ImuFactorExample(PreintegrationExample): + + def __init__(self, twist_scenario="sick_twist"): + self.velocity = np.array([2, 0, 0]) + self.priorNoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) + self.velNoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) + + # Choose one of these twists to change scenario: + twist_scenarios = dict( + zero_twist=(np.zeros(3), np.zeros(3)), + forward_twist=(np.zeros(3), self.velocity), + loop_twist=(np.array([0, -math.radians(30), 0]), self.velocity), + sick_twist=(np.array([math.radians(30), -math.radians(30), 0]), + self.velocity) + ) + + accBias = np.array([-0.3, 0.1, 0.2]) + gyroBias = np.array([0.1, 0.3, -0.1]) + bias = gtsam.imuBias.ConstantBias(accBias, gyroBias) + + dt = 1e-2 + super(ImuFactorExample, self).__init__(twist_scenarios[twist_scenario], + bias, dt) + + def addPrior(self, i, graph): + state = self.scenario.navState(i) + graph.push_back(gtsam.PriorFactorPose3( + X(i), state.pose(), self.priorNoise)) + graph.push_back(gtsam.PriorFactorVector( + V(i), state.velocity(), self.velNoise)) + + def run(self, T=12, compute_covariances=False, verbose=True): + graph = gtsam.NonlinearFactorGraph() + + # initialize data structure for pre-integrated IMU measurements + pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias) + + T = 12 + num_poses = T # assumes 1 factor per second + initial = gtsam.Values() + initial.insert(BIAS_KEY, self.actualBias) + + # simulate the loop + i = 0 # state index + initial_state_i = self.scenario.navState(0) + initial.insert(X(i), initial_state_i.pose()) + initial.insert(V(i), initial_state_i.velocity()) + + # add prior on beginning + self.addPrior(0, graph) + + for k, t in enumerate(np.arange(0, T, self.dt)): + # get measurements and add them to PIM + measuredOmega = self.runner.measuredAngularVelocity(t) + measuredAcc = self.runner.measuredSpecificForce(t) + pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt) + + # Plot IMU many times + if k % 10 == 0: + self.plotImu(t, measuredOmega, measuredAcc) + + if (k+1) % int(1 / self.dt) == 0: + # Plot every second + self.plotGroundTruthPose(t, scale=1) + plt.title("Ground Truth Trajectory") + + # create IMU factor every second + factor = gtsam.ImuFactor(X(i), V(i), + X(i + 1), V(i + 1), + BIAS_KEY, pim) + graph.push_back(factor) + + if verbose: + print(factor) + print(pim.predict(initial_state_i, self.actualBias)) + + pim.resetIntegration() + + rotationNoise = gtsam.Rot3.Expmap(np.random.randn(3)*0.1) + translationNoise = gtsam.Point3(*np.random.randn(3)*1) + poseNoise = gtsam.Pose3(rotationNoise, translationNoise) + + actual_state_i = self.scenario.navState(t + self.dt) + print("Actual state at {0}:\n{1}".format( + t+self.dt, actual_state_i)) + + noisy_state_i = gtsam.NavState( + actual_state_i.pose().compose(poseNoise), + actual_state_i.velocity() + np.random.randn(3)*0.1) + + initial.insert(X(i+1), noisy_state_i.pose()) + initial.insert(V(i+1), noisy_state_i.velocity()) + i += 1 + + # add priors on end + self.addPrior(num_poses - 1, graph) + + initial.print_("Initial values:") + + # optimize using 
Levenberg-Marquardt optimization + params = gtsam.LevenbergMarquardtParams() + params.setVerbosityLM("SUMMARY") + optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params) + result = optimizer.optimize() + + result.print_("Optimized values:") + + if compute_covariances: + # Calculate and print marginal covariances + marginals = gtsam.Marginals(graph, result) + print("Covariance on bias:\n", + marginals.marginalCovariance(BIAS_KEY)) + for i in range(num_poses): + print("Covariance on pose {}:\n{}\n".format( + i, marginals.marginalCovariance(X(i)))) + print("Covariance on vel {}:\n{}\n".format( + i, marginals.marginalCovariance(V(i)))) + + # Plot resulting poses + i = 0 + while result.exists(X(i)): + pose_i = result.atPose3(X(i)) + plot_pose3(POSES_FIG+1, pose_i, 1) + i += 1 + plt.title("Estimated Trajectory") + + gtsam.utils.plot.set_axes_equal(POSES_FIG+1) + + print("Bias Values", result.atConstantBias(BIAS_KEY)) + + plt.ioff() + plt.show() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser("ImuFactorExample.py") + parser.add_argument("--twist_scenario", + default="sick_twist", + choices=("zero_twist", "forward_twist", "loop_twist", "sick_twist")) + parser.add_argument("--time", "-T", default=12, + type=int, help="Total time in seconds") + parser.add_argument("--compute_covariances", + default=False, action='store_true') + parser.add_argument("--verbose", default=False, action='store_true') + args = parser.parse_args() + + ImuFactorExample(args.twist_scenario).run( + args.time, args.compute_covariances, args.verbose) diff --git a/cython/gtsam/examples/ImuFactorExample2.py b/python/gtsam/examples/ImuFactorISAM2Example.py similarity index 60% rename from cython/gtsam/examples/ImuFactorExample2.py rename to python/gtsam/examples/ImuFactorISAM2Example.py index 0d98f08fe..bb90b95bf 100644 --- a/cython/gtsam/examples/ImuFactorExample2.py +++ b/python/gtsam/examples/ImuFactorISAM2Example.py @@ -1,6 +1,6 @@ """ -iSAM2 example with ImuFactor. -Author: Robert Truax (C++), Frank Dellaert (Python) +ImuFactor example with iSAM2. 
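+
+At each time step the simulated IMU measurements are preintegrated, an ImuFactor
+(plus, every few steps, a BetweenFactorConstantBias modelling bias drift) is
+added to a small graph, and isam.update() incrementally refines the estimate.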
+Authors: Robert Truax (C++), Frank Dellaert, Varun Agrawal (Python) """ # pylint: disable=invalid-name, E1101 @@ -8,19 +8,18 @@ import math -import gtsam -import gtsam.utils.plot as gtsam_plot import matplotlib.pyplot as plt import numpy as np +from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 + +import gtsam from gtsam import (ISAM2, BetweenFactorConstantBias, Cal3_S2, ConstantTwistScenario, ImuFactor, NonlinearFactorGraph, PinholeCameraCal3_S2, Point3, Pose3, PriorFactorConstantBias, PriorFactorPose3, PriorFactorVector, Rot3, Values) -from gtsam import symbol_shorthand_B as B -from gtsam import symbol_shorthand_V as V -from gtsam import symbol_shorthand_X as X -from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 +from gtsam.symbol_shorthand import B, V, X +from gtsam.utils import plot def vector3(x, y, z): @@ -28,70 +27,62 @@ def vector3(x, y, z): return np.array([x, y, z], dtype=np.float) -def ISAM2_plot(values, fignum=0): - """Plot poses.""" - fig = plt.figure(fignum) - axes = fig.gca(projection='3d') - plt.cla() - - i = 0 - min_bounds = 0, 0, 0 - max_bounds = 0, 0, 0 - while values.exists(X(i)): - pose_i = values.atPose3(X(i)) - gtsam_plot.plot_pose3(fignum, pose_i, 10) - position = pose_i.translation().vector() - min_bounds = [min(v1, v2) for (v1, v2) in zip(position, min_bounds)] - max_bounds = [max(v1, v2) for (v1, v2) in zip(position, max_bounds)] - # max_bounds = min(pose_i.x(), max_bounds[0]), 0, 0 - i += 1 - - # draw - axes.set_xlim3d(min_bounds[0]-1, max_bounds[0]+1) - axes.set_ylim3d(min_bounds[1]-1, max_bounds[1]+1) - axes.set_zlim3d(min_bounds[2]-1, max_bounds[2]+1) - plt.pause(1) - - -# IMU preintegration parameters -# Default Params for a Z-up navigation frame, such as ENU: gravity points along negative Z-axis g = 9.81 n_gravity = vector3(0, 0, -g) -PARAMS = gtsam.PreintegrationParams.MakeSharedU(g) -I = np.eye(3) -PARAMS.setAccelerometerCovariance(I * 0.1) -PARAMS.setGyroscopeCovariance(I * 0.1) -PARAMS.setIntegrationCovariance(I * 0.1) -PARAMS.setUse2ndOrderCoriolis(False) -PARAMS.setOmegaCoriolis(vector3(0, 0, 0)) -BIAS_COVARIANCE = gtsam.noiseModel_Isotropic.Variance(6, 0.1) -DELTA = Pose3(Rot3.Rodrigues(0, 0, 0), - Point3(0.05, -0.10, 0.20)) +def preintegration_parameters(): + # IMU preintegration parameters + # Default Params for a Z-up navigation frame, such as ENU: gravity points along negative Z-axis + PARAMS = gtsam.PreintegrationParams.MakeSharedU(g) + I = np.eye(3) + PARAMS.setAccelerometerCovariance(I * 0.1) + PARAMS.setGyroscopeCovariance(I * 0.1) + PARAMS.setIntegrationCovariance(I * 0.1) + PARAMS.setUse2ndOrderCoriolis(False) + PARAMS.setOmegaCoriolis(vector3(0, 0, 0)) -def IMU_example(): - """Run iSAM 2 example with IMU factor.""" + BIAS_COVARIANCE = gtsam.noiseModel.Isotropic.Variance(6, 0.1) + DELTA = Pose3(Rot3.Rodrigues(0, 0, 0), + Point3(0.05, -0.10, 0.20)) - # Start with a camera on x-axis looking at origin - radius = 30 + return PARAMS, BIAS_COVARIANCE, DELTA + + +def get_camera(radius): up = Point3(0, 0, 1) target = Point3(0, 0, 0) position = Point3(radius, 0, 0) camera = PinholeCameraCal3_S2.Lookat(position, target, up, Cal3_S2()) - pose_0 = camera.pose() + return camera - # Create the set of ground-truth landmarks and poses - angular_velocity = math.radians(180) # rad/sec - delta_t = 1.0/18 # makes for 10 degrees per step +def get_scenario(radius, pose_0, angular_velocity, delta_t): + """Create the set of ground-truth landmarks and poses""" angular_velocity_vector = vector3(0, -angular_velocity, 0) 
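+    # (linear speed = radius * angular_velocity keeps the camera on a circle
+    #  of the given radius)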
linear_velocity_vector = vector3(radius * angular_velocity, 0, 0) scenario = ConstantTwistScenario( angular_velocity_vector, linear_velocity_vector, pose_0) + return scenario + + +def IMU_example(): + """Run iSAM 2 example with IMU factor.""" + + # Start with a camera on x-axis looking at origin + radius = 30 + camera = get_camera(radius) + pose_0 = camera.pose() + + delta_t = 1.0/18 # makes for 10 degrees per step + angular_velocity = math.radians(180) # rad/sec + scenario = get_scenario(radius, pose_0, angular_velocity, delta_t) + + PARAMS, BIAS_COVARIANCE, DELTA = preintegration_parameters() + # Create a factor graph - newgraph = NonlinearFactorGraph() + graph = NonlinearFactorGraph() # Create (incremental) ISAM2 solver isam = ISAM2() @@ -102,23 +93,23 @@ def IMU_example(): # Add a prior on pose x0. This indirectly specifies where the origin is. # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw - noise = gtsam.noiseModel_Diagonal.Sigmas( + noise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.1, 0.1, 0.1, 0.3, 0.3, 0.3])) - newgraph.push_back(PriorFactorPose3(X(0), pose_0, noise)) + graph.push_back(PriorFactorPose3(X(0), pose_0, noise)) # Add imu priors biasKey = B(0) - biasnoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) - biasprior = PriorFactorConstantBias(biasKey, gtsam.imuBias_ConstantBias(), + biasnoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) + biasprior = PriorFactorConstantBias(biasKey, gtsam.imuBias.ConstantBias(), biasnoise) - newgraph.push_back(biasprior) - initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias()) - velnoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + graph.push_back(biasprior) + initialEstimate.insert(biasKey, gtsam.imuBias.ConstantBias()) + velnoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) # Calculate with correct initial velocity n_velocity = vector3(0, angular_velocity * radius, 0) velprior = PriorFactorVector(V(0), n_velocity, velnoise) - newgraph.push_back(velprior) + graph.push_back(velprior) initialEstimate.insert(V(0), n_velocity) accum = gtsam.PreintegratedImuMeasurements(PARAMS) @@ -139,9 +130,9 @@ def IMU_example(): if i % 5 == 0: biasKey += 1 factor = BetweenFactorConstantBias( - biasKey - 1, biasKey, gtsam.imuBias_ConstantBias(), BIAS_COVARIANCE) - newgraph.add(factor) - initialEstimate.insert(biasKey, gtsam.imuBias_ConstantBias()) + biasKey - 1, biasKey, gtsam.imuBias.ConstantBias(), BIAS_COVARIANCE) + graph.add(factor) + initialEstimate.insert(biasKey, gtsam.imuBias.ConstantBias()) # Predict acceleration and gyro measurements in (actual) body frame nRb = scenario.rotation(t).matrix() @@ -152,21 +143,24 @@ def IMU_example(): # Add Imu Factor imufac = ImuFactor(X(i - 1), V(i - 1), X(i), V(i), biasKey, accum) - newgraph.add(imufac) + graph.add(imufac) # insert new velocity, which is wrong initialEstimate.insert(V(i), n_velocity) accum.resetIntegration() # Incremental solution - isam.update(newgraph, initialEstimate) + isam.update(graph, initialEstimate) result = isam.calculateEstimate() - ISAM2_plot(result) + plot.plot_incremental_trajectory(0, result, + start=i, scale=3, time_interval=0.01) # reset - newgraph = NonlinearFactorGraph() + graph = NonlinearFactorGraph() initialEstimate.clear() + plt.show() + if __name__ == '__main__': IMU_example() diff --git a/cython/gtsam/examples/OdometryExample.py b/python/gtsam/examples/OdometryExample.py similarity index 94% rename from cython/gtsam/examples/OdometryExample.py rename to python/gtsam/examples/OdometryExample.py index e778e3f85..8b519ce9a 100644 --- 
a/cython/gtsam/examples/OdometryExample.py +++ b/python/gtsam/examples/OdometryExample.py @@ -21,8 +21,8 @@ import gtsam.utils.plot as gtsam_plot # Create noise models -ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) -PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) +ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) +PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) # Create an empty nonlinear factor graph graph = gtsam.NonlinearFactorGraph() diff --git a/cython/gtsam/examples/PlanarManipulatorExample.py b/python/gtsam/examples/PlanarManipulatorExample.py similarity index 98% rename from cython/gtsam/examples/PlanarManipulatorExample.py rename to python/gtsam/examples/PlanarManipulatorExample.py index e42ae09d7..9af4f7fcc 100644 --- a/cython/gtsam/examples/PlanarManipulatorExample.py +++ b/python/gtsam/examples/PlanarManipulatorExample.py @@ -167,13 +167,11 @@ def plot(self, fignum, q): axes = fig.gca() sXl1 = Pose2(0, 0, math.radians(90)) - t = sXl1.translation() - p1 = np.array([t.x(), t.y()]) + p1 = sXl1.translation() gtsam_plot.plot_pose2_on_axes(axes, sXl1) def plot_line(p, g, color): - t = g.translation() - q = np.array([t.x(), t.y()]) + q = g.translation() line = np.append(p[np.newaxis], q[np.newaxis], axis=0) axes.plot(line[:, 0], line[:, 1], color) return q diff --git a/cython/gtsam/examples/PlanarSLAMExample.py b/python/gtsam/examples/PlanarSLAMExample.py similarity index 91% rename from cython/gtsam/examples/PlanarSLAMExample.py rename to python/gtsam/examples/PlanarSLAMExample.py index c84f0f834..5ffdf048d 100644 --- a/cython/gtsam/examples/PlanarSLAMExample.py +++ b/python/gtsam/examples/PlanarSLAMExample.py @@ -13,15 +13,15 @@ from __future__ import print_function -import gtsam import numpy as np -from gtsam import symbol_shorthand_L as L -from gtsam import symbol_shorthand_X as X + +import gtsam +from gtsam.symbol_shorthand import X, L # Create noise models -PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) -ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) -MEASUREMENT_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.2])) +PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) +ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) +MEASUREMENT_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.2])) # Create an empty nonlinear factor graph graph = gtsam.NonlinearFactorGraph() diff --git a/cython/gtsam/examples/Pose2SLAMExample.py b/python/gtsam/examples/Pose2SLAMExample.py similarity index 96% rename from cython/gtsam/examples/Pose2SLAMExample.py rename to python/gtsam/examples/Pose2SLAMExample.py index 680f2209f..2569f0953 100644 --- a/cython/gtsam/examples/Pose2SLAMExample.py +++ b/python/gtsam/examples/Pose2SLAMExample.py @@ -27,10 +27,9 @@ def vector3(x, y, z): """Create 3d double numpy array.""" return np.array([x, y, z], dtype=np.float) - # Create noise models -PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.3, 0.3, 0.1)) -ODOMETRY_NOISE = gtsam.noiseModel_Diagonal.Sigmas(vector3(0.2, 0.2, 0.1)) +PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.3, 0.3, 0.1)) +ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(vector3(0.2, 0.2, 0.1)) # 1. 
Create a factor graph container and add factors to it graph = gtsam.NonlinearFactorGraph() diff --git a/cython/gtsam/examples/Pose2SLAMExample_g2o.py b/python/gtsam/examples/Pose2SLAMExample_g2o.py similarity index 96% rename from cython/gtsam/examples/Pose2SLAMExample_g2o.py rename to python/gtsam/examples/Pose2SLAMExample_g2o.py index 09114370d..b2ba9c5bc 100644 --- a/cython/gtsam/examples/Pose2SLAMExample_g2o.py +++ b/python/gtsam/examples/Pose2SLAMExample_g2o.py @@ -53,7 +53,7 @@ def vector3(x, y, z): assert args.kernel == "none", "Supplied kernel type is not yet implemented" # Add prior on the pose having index (key) = 0 -priorModel = gtsam.noiseModel_Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8)) +priorModel = gtsam.noiseModel.Diagonal.Variances(vector3(1e-6, 1e-6, 1e-8)) graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel)) params = gtsam.GaussNewtonParams() @@ -82,7 +82,7 @@ def vector3(x, y, z): print ("Done!") if args.plot: - resultPoses = gtsam.utilities_extractPose2(result) + resultPoses = gtsam.utilities.extractPose2(result) for i in range(resultPoses.shape[0]): plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :])) plt.show() diff --git a/cython/gtsam/examples/Pose3SLAMExample_g2o.py b/python/gtsam/examples/Pose3SLAMExample_g2o.py similarity index 93% rename from cython/gtsam/examples/Pose3SLAMExample_g2o.py rename to python/gtsam/examples/Pose3SLAMExample_g2o.py index 3c1a54f7b..82b3bda98 100644 --- a/cython/gtsam/examples/Pose3SLAMExample_g2o.py +++ b/python/gtsam/examples/Pose3SLAMExample_g2o.py @@ -39,11 +39,11 @@ def vector6(x, y, z, a, b, c): graph, initial = gtsam.readG2o(g2oFile, is3D) # Add Prior on the first key -priorModel = gtsam.noiseModel_Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6, +priorModel = gtsam.noiseModel.Diagonal.Variances(vector6(1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4)) print("Adding prior to g2o file ") -firstKey = initial.keys().at(0) +firstKey = initial.keys()[0] graph.add(gtsam.PriorFactorPose3(firstKey, gtsam.Pose3(), priorModel)) params = gtsam.GaussNewtonParams() @@ -65,7 +65,7 @@ def vector6(x, y, z, a, b, c): print ("Done!") if args.plot: - resultPoses = gtsam.utilities_allPose3s(result) + resultPoses = gtsam.utilities.allPose3s(result) for i in range(resultPoses.size()): plot.plot_pose3(1, resultPoses.atPose3(i)) plt.show() diff --git a/cython/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py b/python/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py similarity index 91% rename from cython/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py rename to python/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py index 02c696905..2b2c5f991 100644 --- a/cython/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py +++ b/python/gtsam/examples/Pose3SLAMExample_initializePose3Chordal.py @@ -24,9 +24,9 @@ graph, initial = gtsam.readG2o(g2oFile, is3D) # Add prior on the first key. 
TODO: assumes first key ios z -priorModel = gtsam.noiseModel_Diagonal.Variances( +priorModel = gtsam.noiseModel.Diagonal.Variances( np.array([1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4])) -firstKey = initial.keys().at(0) +firstKey = initial.keys()[0] graph.add(gtsam.PriorFactorPose3(0, gtsam.Pose3(), priorModel)) # Initializing Pose3 - chordal relaxation" diff --git a/cython/gtsam/examples/PreintegrationExample.py b/python/gtsam/examples/PreintegrationExample.py similarity index 77% rename from cython/gtsam/examples/PreintegrationExample.py rename to python/gtsam/examples/PreintegrationExample.py index 76520b355..b54919bec 100644 --- a/cython/gtsam/examples/PreintegrationExample.py +++ b/python/gtsam/examples/PreintegrationExample.py @@ -10,12 +10,11 @@ import math +import gtsam import matplotlib.pyplot as plt import numpy as np -from mpl_toolkits.mplot3d import Axes3D - -import gtsam from gtsam.utils.plot import plot_pose3 +from mpl_toolkits.mplot3d import Axes3D IMU_FIG = 1 POSES_FIG = 2 @@ -68,60 +67,62 @@ def __init__(self, twist=None, bias=None, dt=1e-2): else: accBias = np.array([0, 0.1, 0]) gyroBias = np.array([0, 0, 0]) - self.actualBias = gtsam.imuBias_ConstantBias(accBias, gyroBias) + self.actualBias = gtsam.imuBias.ConstantBias(accBias, gyroBias) self.runner = gtsam.ScenarioRunner( self.scenario, self.params, self.dt, self.actualBias) + fig, self.axes = plt.subplots(4, 3) + fig.set_tight_layout(True) + def plotImu(self, t, measuredOmega, measuredAcc): plt.figure(IMU_FIG) # plot angular velocity omega_b = self.scenario.omega_b(t) for i, (label, color) in enumerate(zip(self.labels, self.colors)): - plt.subplot(4, 3, i + 1) - plt.scatter(t, omega_b[i], color='k', marker='.') - plt.scatter(t, measuredOmega[i], color=color, marker='.') - plt.xlabel('angular velocity ' + label) + ax = self.axes[0][i] + ax.scatter(t, omega_b[i], color='k', marker='.') + ax.scatter(t, measuredOmega[i], color=color, marker='.') + ax.set_xlabel('angular velocity ' + label) # plot acceleration in nav acceleration_n = self.scenario.acceleration_n(t) for i, (label, color) in enumerate(zip(self.labels, self.colors)): - plt.subplot(4, 3, i + 4) - plt.scatter(t, acceleration_n[i], color=color, marker='.') - plt.xlabel('acceleration in nav ' + label) + ax = self.axes[1][i] + ax.scatter(t, acceleration_n[i], color=color, marker='.') + ax.set_xlabel('acceleration in nav ' + label) # plot acceleration in body acceleration_b = self.scenario.acceleration_b(t) for i, (label, color) in enumerate(zip(self.labels, self.colors)): - plt.subplot(4, 3, i + 7) - plt.scatter(t, acceleration_b[i], color=color, marker='.') - plt.xlabel('acceleration in body ' + label) + ax = self.axes[2][i] + ax.scatter(t, acceleration_b[i], color=color, marker='.') + ax.set_xlabel('acceleration in body ' + label) # plot actual specific force, as well as corrupted actual = self.runner.actualSpecificForce(t) for i, (label, color) in enumerate(zip(self.labels, self.colors)): - plt.subplot(4, 3, i + 10) - plt.scatter(t, actual[i], color='k', marker='.') - plt.scatter(t, measuredAcc[i], color=color, marker='.') - plt.xlabel('specific force ' + label) + ax = self.axes[3][i] + ax.scatter(t, actual[i], color='k', marker='.') + ax.scatter(t, measuredAcc[i], color=color, marker='.') + ax.set_xlabel('specific force ' + label) - def plotGroundTruthPose(self, t): + def plotGroundTruthPose(self, t, scale=0.3, time_interval=0.01): # plot ground truth pose, as well as prediction from integrated IMU measurements actualPose = self.scenario.pose(t) - 
plot_pose3(POSES_FIG, actualPose, 0.3) + plot_pose3(POSES_FIG, actualPose, scale) t = actualPose.translation() - self.maxDim = max([abs(t.x()), abs(t.y()), abs(t.z()), self.maxDim]) + self.maxDim = max([max(np.abs(t)), self.maxDim]) ax = plt.gca() ax.set_xlim3d(-self.maxDim, self.maxDim) ax.set_ylim3d(-self.maxDim, self.maxDim) ax.set_zlim3d(-self.maxDim, self.maxDim) - plt.pause(0.01) + plt.pause(time_interval) - def run(self): + def run(self, T=12): # simulate the loop - T = 12 for i, t in enumerate(np.arange(0, T, self.dt)): measuredOmega = self.runner.measuredAngularVelocity(t) measuredAcc = self.runner.measuredSpecificForce(t) diff --git a/cython/gtsam/examples/README.md b/python/gtsam/examples/README.md similarity index 55% rename from cython/gtsam/examples/README.md rename to python/gtsam/examples/README.md index 99bce00e2..2a2c9f2ef 100644 --- a/cython/gtsam/examples/README.md +++ b/python/gtsam/examples/README.md @@ -1,57 +1,64 @@ -These examples are almost identical to the old handwritten python wrapper -examples. However, there are just some slight name changes, for example -`noiseModel.Diagonal` becomes `noiseModel_Diagonal` etc... -Also, instead of `gtsam.Symbol('b', 0)` we can simply say `gtsam.symbol_shorthand_B(0)` or `B(0)` if we use python aliasing. - # Porting Progress | C++ Example Name | Ported | |-------------------------------------------------------|--------| | CameraResectioning | | +| CombinedImuFactorsExample | | | CreateSFMExampleData | | -| DiscreteBayesNet_FG | none of the required discrete functionality is exposed through cython | -| easyPoint2KalmanFilter | ExtendedKalmanFilter not exposed through cython | -| elaboratePoint2KalmanFilter | GaussianSequentialSolver not exposed through cython | -| ImuFactorExample2 | X | +| DiscreteBayesNetExample | | +| DiscreteBayesNet_FG | none of the required discrete functionality is exposed through Python | +| easyPoint2KalmanFilter | ExtendedKalmanFilter not yet exposed through Python | +| elaboratePoint2KalmanFilter | GaussianSequentialSolver not yet exposed through Python | +| FisheyeExample | | +| HMMExample | | +| ImuFactorsExample2 | :heavy_check_mark: | | ImuFactorsExample | | +| IMUKittiExampleGPS | | +| InverseKinematicsExampleExpressions.cpp | | | ISAM2Example_SmartFactor | | | ISAM2_SmartFactorStereo_IMU | | | LocalizationExample | impossible? 
| | METISOrderingExample | | -| OdometryExample | X | -| PlanarSLAMExample | X | -| Pose2SLAMExample | X | +| OdometryExample | :heavy_check_mark: | +| PlanarSLAMExample | :heavy_check_mark: | +| Pose2SLAMExample | :heavy_check_mark: | | Pose2SLAMExampleExpressions | | -| Pose2SLAMExample_g2o | X | +| Pose2SLAMExample_g2o | :heavy_check_mark: | | Pose2SLAMExample_graph | | | Pose2SLAMExample_graphviz | | -| Pose2SLAMExample_lago | lago not exposed through cython | +| Pose2SLAMExample_lago | lago not yet exposed through Python | | Pose2SLAMStressTest | | | Pose2SLAMwSPCG | | +| Pose3Localization | | | Pose3SLAMExample_changeKeys | | | Pose3SLAMExampleExpressions_BearingRangeWithTransform | | -| Pose3SLAMExample_g2o | X | -| Pose3SLAMExample_initializePose3Chordal | | +| Pose3SLAMExample_g2o | :heavy_check_mark: | +| Pose3SLAMExample_initializePose3Chordal | :heavy_check_mark: | | Pose3SLAMExample_initializePose3Gradient | | | RangeISAMExample_plaza2 | | | SelfCalibrationExample | | +| SFMdata | | | SFMExample_bal_COLAMD_METIS | | -| SFMExample_bal | | -| SFMExample | X | +| SFMExample_bal | :heavy_check_mark: | +| SFMExample | :heavy_check_mark: | | SFMExampleExpressions_bal | | | SFMExampleExpressions | | | SFMExample_SmartFactor | | | SFMExample_SmartFactorPCG | | -| SimpleRotation | X | +| ShonanAveragingCLI | :heavy_check_mark: | +| SimpleRotation | :heavy_check_mark: | | SolverComparer | | | StereoVOExample | | | StereoVOExample_large | | | TimeTBB | | -| UGM_chain | discrete functionality not exposed | -| UGM_small | discrete functionality not exposed | -| VisualISAM2Example | X | -| VisualISAMExample | X | +| UGM_chain | discrete functionality not yet exposed | +| UGM_small | discrete functionality not yet exposed | +| VisualISAM2Example | :heavy_check_mark: | +| VisualISAMExample | :heavy_check_mark: | Extra Examples (with no C++ equivalent) +- DogLegOptimizerExample +- GPSFactorExample - PlanarManipulatorExample +- PreintegrationExample - SFMData diff --git a/cython/gtsam/examples/SFMExample.py b/python/gtsam/examples/SFMExample.py similarity index 91% rename from cython/gtsam/examples/SFMExample.py rename to python/gtsam/examples/SFMExample.py index e02def2f9..f0c4c82ba 100644 --- a/cython/gtsam/examples/SFMExample.py +++ b/python/gtsam/examples/SFMExample.py @@ -13,14 +13,15 @@ import gtsam import matplotlib.pyplot as plt import numpy as np -from gtsam import symbol_shorthand_L as L -from gtsam import symbol_shorthand_X as X +from gtsam import symbol_shorthand +L = symbol_shorthand.L +X = symbol_shorthand.X + from gtsam.examples import SFMdata -from gtsam.gtsam import (Cal3_S2, DoglegOptimizer, +from gtsam import (Cal3_S2, DoglegOptimizer, GenericProjectionFactorCal3_S2, Marginals, NonlinearFactorGraph, PinholeCameraCal3_S2, Point3, - Pose3, PriorFactorPoint3, PriorFactorPose3, Rot3, - SimpleCamera, Values) + Pose3, PriorFactorPoint3, PriorFactorPose3, Rot3, Values) from gtsam.utils import plot @@ -56,7 +57,7 @@ def main(): K = Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) # Define the camera observation noise model - measurement_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0) # one pixel in u and v + measurement_noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0) # one pixel in u and v # Create the set of ground-truth landmarks points = SFMdata.createPoints() @@ -69,7 +70,7 @@ def main(): # Add a prior on pose x1. This indirectly specifies where the origin is. 
# 0.3 rad std on roll,pitch,yaw and 0.1m on x,y,z - pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) + pose_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) factor = PriorFactorPose3(X(0), poses[0], pose_noise) graph.push_back(factor) @@ -85,7 +86,7 @@ def main(): # Because the structure-from-motion problem has a scale ambiguity, the problem is still under-constrained # Here we add a prior on the position of the first landmark. This fixes the scale by indicating the distance # between the first camera and the first landmark. All other landmark positions are interpreted using this scale. - point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) factor = PriorFactorPoint3(L(0), points[0], point_noise) graph.push_back(factor) graph.print_('Factor Graph:\n') @@ -97,7 +98,7 @@ def main(): transformed_pose = pose.retract(0.1*np.random.randn(6,1)) initial_estimate.insert(X(i), transformed_pose) for j, point in enumerate(points): - transformed_point = Point3(point.vector() + 0.1*np.random.randn(3)) + transformed_point = point + 0.1*np.random.randn(3) initial_estimate.insert(L(j), transformed_point) initial_estimate.print_('Initial Estimates:\n') diff --git a/python/gtsam/examples/SFMExample_bal.py b/python/gtsam/examples/SFMExample_bal.py new file mode 100644 index 000000000..dfe8b523c --- /dev/null +++ b/python/gtsam/examples/SFMExample_bal.py @@ -0,0 +1,118 @@ +""" + GTSAM Copyright 2010, Georgia Tech Research Corporation, + Atlanta, Georgia 30332-0415 + All Rights Reserved + Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + See LICENSE for the license information + + Solve a structure-from-motion problem from a "Bundle Adjustment in the Large" file + Author: Frank Dellaert (Python: Akshay Krishnan, John Lambert) +""" + +import argparse +import logging +import sys + +import matplotlib.pyplot as plt +import numpy as np + +import gtsam +from gtsam import ( + GeneralSFMFactorCal3Bundler, + PinholeCameraCal3Bundler, + PriorFactorPinholeCameraCal3Bundler, + readBal, + symbol_shorthand +) + +C = symbol_shorthand.C +P = symbol_shorthand.P + + +logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) + +def run(args): + """ Run LM optimization with BAL input data and report resulting error """ + input_file = gtsam.findExampleDataFile(args.input_file) + + # Load the SfM data from file + scene_data = readBal(input_file) + logging.info(f"read {scene_data.number_tracks()} tracks on {scene_data.number_cameras()} cameras\n") + + # Create a factor graph + graph = gtsam.NonlinearFactorGraph() + + # We share *one* noiseModel between all projection factors + noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0) # one pixel in u and v + + # Add measurements to the factor graph + j = 0 + for t_idx in range(scene_data.number_tracks()): + track = scene_data.track(t_idx) # SfmTrack + # retrieve the SfmMeasurement objects + for m_idx in range(track.number_measurements()): + # i represents the camera index, and uv is the 2d measurement + i, uv = track.measurement(m_idx) + # note use of shorthand symbols C and P + graph.add(GeneralSFMFactorCal3Bundler(uv, noise, C(i), P(j))) + j += 1 + + # Add a prior on pose x1. This indirectly specifies where the origin is. 
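+    # (the prior is on the whole PinholeCameraCal3Bundler, i.e. pose plus
+    #  calibration, hence the 9-dimensional isotropic noise model below)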
+ graph.push_back( + gtsam.PriorFactorPinholeCameraCal3Bundler( + C(0), scene_data.camera(0), gtsam.noiseModel.Isotropic.Sigma(9, 0.1) + ) + ) + # Also add a prior on the position of the first landmark to fix the scale + graph.push_back( + gtsam.PriorFactorPoint3( + P(0), scene_data.track(0).point3(), gtsam.noiseModel.Isotropic.Sigma(3, 0.1) + ) + ) + + # Create initial estimate + initial = gtsam.Values() + + i = 0 + # add each PinholeCameraCal3Bundler + for cam_idx in range(scene_data.number_cameras()): + camera = scene_data.camera(cam_idx) + initial.insert(C(i), camera) + i += 1 + + j = 0 + # add each SfmTrack + for t_idx in range(scene_data.number_tracks()): + track = scene_data.track(t_idx) + initial.insert(P(j), track.point3()) + j += 1 + + # Optimize the graph and print results + try: + params = gtsam.LevenbergMarquardtParams() + params.setVerbosityLM("ERROR") + lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params) + result = lm.optimize() + except Exception as e: + logging.exception("LM Optimization failed") + return + # Error drops from ~2764.22 to ~0.046 + logging.info(f"final error: {graph.error(result)}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + '-i', + '--input_file', + type=str, + default="dubrovnik-3-7-pre", + help='Read SFM data from the specified BAL file' + 'The data format is described here: https://grail.cs.washington.edu/projects/bal/.' + 'BAL files contain (nrPoses, nrPoints, nrObservations), followed by (i,j,u,v) tuples, ' + 'then (wx,wy,wz,tx,ty,tz,f,k1,k1) as Bundler camera calibrations w/ Rodrigues vector' + 'and (x,y,z) 3d point initializations.' + ) + run(parser.parse_args()) + diff --git a/cython/gtsam/examples/SFMdata.py b/python/gtsam/examples/SFMdata.py similarity index 92% rename from cython/gtsam/examples/SFMdata.py rename to python/gtsam/examples/SFMdata.py index c586f7e52..6ac9c5726 100644 --- a/cython/gtsam/examples/SFMdata.py +++ b/python/gtsam/examples/SFMdata.py @@ -33,7 +33,8 @@ def createPoses(K): poses = [] for theta in angles: position = gtsam.Point3(radius*np.cos(theta), - radius*np.sin(theta), height) + radius*np.sin(theta), + height) camera = gtsam.PinholeCameraCal3_S2.Lookat(position, target, up, K) poses.append(camera.pose()) return poses diff --git a/python/gtsam/examples/ShonanAveragingCLI.py b/python/gtsam/examples/ShonanAveragingCLI.py new file mode 100644 index 000000000..7f8839cc0 --- /dev/null +++ b/python/gtsam/examples/ShonanAveragingCLI.py @@ -0,0 +1,125 @@ +""" +GTSAM Copyright 2010-2018, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. (see THANKS for the full author list) + +See LICENSE for the license information + +Shonan Rotation Averaging CLI reads a *pose* graph, extracts the +rotation constraints, and runs the Shonan algorithm. + +Author: Frank Dellaert +Date: August 2020 +""" +# pylint: disable=invalid-name, E1101 + +import argparse + +import matplotlib.pyplot as plt +import numpy as np + +import gtsam +from gtsam.utils import plot + +def estimate_poses_given_rot(factors: gtsam.BetweenFactorPose3s, + rotations: gtsam.Values, + d: int = 3): + """ Estimate Poses from measurements, given rotations. From SfmProblem in shonan. 
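+    Builds a linear (Gaussian) factor graph on the unknown translations:
+    t_0 is anchored at the origin and each BetweenFactorPose3 contributes a
+    constraint t_j - t_i = R_i * t_ij, which is then solved in closed form.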
+ + Arguments: + factors -- data structure with many BetweenFactorPose3 factors + rotations {Values} -- Estimated rotations + + Returns: + Values -- Estimated Poses + """ + + I_d = np.eye(d) + + def R(j): + return rotations.atRot3(j) if d == 3 else rotations.atRot2(j) + + def pose(R, t): + return gtsam.Pose3(R, t) if d == 3 else gtsam.Pose2(R, t) + + graph = gtsam.GaussianFactorGraph() + model = gtsam.noiseModel.Unit.Create(d) + + # Add a factor anchoring t_0 + graph.add(0, I_d, np.zeros((d,)), model) + + # Add a factor saying t_j - t_i = Ri*t_ij for all edges (i,j) + for factor in factors: + keys = factor.keys() + i, j, Tij = keys[0], keys[1], factor.measured() + measured = R(i).rotate(Tij.translation()) + graph.add(j, I_d, i, -I_d, measured, model) + + # Solve linear system + translations = graph.optimize() + + # Convert to Values. + result = gtsam.Values() + for j in range(rotations.size()): + tj = translations.at(j) + result.insert(j, pose(R(j), tj)) + + return result + + +def run(args): + """Run Shonan averaging and then recover translations linearly before saving result.""" + + # Get input file + if args.input_file: + input_file = args.input_file + else: + if args.named_dataset == "": + raise ValueError( + "You must either specify a named dataset or an input file") + input_file = gtsam.findExampleDataFile(args.named_dataset) + + if args.dimension == 2: + print("Running Shonan averaging for SO(2) on ", input_file) + shonan = gtsam.ShonanAveraging2(input_file) + if shonan.nrUnknowns() == 0: + raise ValueError("No 2D pose constraints found, try -d 3.") + initial = shonan.initializeRandomly() + rotations, _ = shonan.run(initial, 2, 10) + factors = gtsam.parse2DFactors(input_file) + elif args.dimension == 3: + print("Running Shonan averaging for SO(3) on ", input_file) + shonan = gtsam.ShonanAveraging3(input_file) + if shonan.nrUnknowns() == 0: + raise ValueError("No 3D pose constraints found, try -d 2.") + initial = shonan.initializeRandomly() + rotations, _ = shonan.run(initial, 3, 10) + factors = gtsam.parse3DFactors(input_file) + else: + raise ValueError("Can only run SO(2) or SO(3) averaging") + + print("Recovering translations") + poses = estimate_poses_given_rot(factors, rotations, args.dimension) + + print("Writing result to ", args.output_file) + gtsam.writeG2o(gtsam.NonlinearFactorGraph(), poses, args.output_file) + + plot.plot_trajectory(1, poses, scale=0.2) + plot.set_axes_equal(1) + plt.show() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('-n', '--named_dataset', type=str, default="pose3example-grid", + help='Find and read frome example dataset file') + parser.add_argument('-i', '--input_file', type=str, default="", + help='Read pose constraints graph from the specified file') + parser.add_argument('-o', '--output_file', type=str, default="shonan.g2o", + help='Write solution to the specified file') + parser.add_argument('-d', '--dimension', type=int, default=3, + help='Optimize over 2D or 3D rotations') + parser.add_argument("-p", "--plot", action="store_true", default=True, + help="Plot result") + run(parser.parse_args()) diff --git a/cython/gtsam/examples/SimpleRotation.py b/python/gtsam/examples/SimpleRotation.py similarity index 96% rename from cython/gtsam/examples/SimpleRotation.py rename to python/gtsam/examples/SimpleRotation.py index 4e82d3778..0fef261f8 100644 --- a/cython/gtsam/examples/SimpleRotation.py +++ b/python/gtsam/examples/SimpleRotation.py @@ -10,10 +10,9 @@ a single variable with a single factor. 
""" -import gtsam import numpy as np -from gtsam import symbol_shorthand_X as X - +import gtsam +from gtsam.symbol_shorthand import X def main(): """ @@ -33,7 +32,7 @@ def main(): """ prior = gtsam.Rot2.fromAngle(np.deg2rad(30)) prior.print_('goal angle') - model = gtsam.noiseModel_Isotropic.Sigma(dim=1, sigma=np.deg2rad(1)) + model = gtsam.noiseModel.Isotropic.Sigma(dim=1, sigma=np.deg2rad(1)) key = X(1) factor = gtsam.PriorFactorRot2(key, prior, model) diff --git a/python/gtsam/examples/TranslationAveragingExample.py b/python/gtsam/examples/TranslationAveragingExample.py new file mode 100644 index 000000000..054b61126 --- /dev/null +++ b/python/gtsam/examples/TranslationAveragingExample.py @@ -0,0 +1,144 @@ +""" +GTSAM Copyright 2010-2018, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved +Authors: Frank Dellaert, et al. (see THANKS for the full author list) + +See LICENSE for the license information + +This example shows how 1dsfm uses outlier rejection (MFAS) and optimization (translation recovery) +together for estimating global translations from relative translation directions and global rotations. +The purpose of this example is to illustrate the connection between these two classes using a small SfM dataset. + +Author: Akshay Krishnan +Date: September 2020 +""" + +from collections import defaultdict +from typing import List, Tuple + +import numpy as np + +import gtsam +from gtsam.examples import SFMdata + +# Hyperparameters for 1dsfm, values used from Kyle Wilson's code. +MAX_1DSFM_PROJECTION_DIRECTIONS = 48 +OUTLIER_WEIGHT_THRESHOLD = 0.1 + + +def get_data() -> Tuple[gtsam.Values, List[gtsam.BinaryMeasurementUnit3]]: + """"Returns global rotations and unit translation directions between 8 cameras + that lie on a circle and face the center. The poses of 8 cameras are obtained from SFMdata + and the unit translations directions between some camera pairs are computed from their + global translations. """ + fx, fy, s, u0, v0 = 50.0, 50.0, 0.0, 50.0, 50.0 + wTc_list = SFMdata.createPoses(gtsam.Cal3_S2(fx, fy, s, u0, v0)) + # Rotations of the cameras in the world frame. + wRc_values = gtsam.Values() + # Normalized translation directions from camera i to camera j + # in the coordinate frame of camera i. + i_iZj_list = [] + for i in range(0, len(wTc_list) - 2): + # Add the rotation. + wRi = wTc_list[i].rotation() + wRc_values.insert(i, wRi) + # Create unit translation measurements with next two poses. + for j in range(i + 1, i + 3): + # Compute the translation from pose i to pose j, in the world reference frame. + w_itj = wTc_list[j].translation() - wTc_list[i].translation() + # Obtain the translation in the camera i's reference frame. + i_itj = wRi.unrotate(w_itj) + # Compute the normalized unit translation direction. + i_iZj = gtsam.Unit3(i_itj) + i_iZj_list.append(gtsam.BinaryMeasurementUnit3( + i, j, i_iZj, gtsam.noiseModel.Isotropic.Sigma(3, 0.01))) + # Add the last two rotations. + wRc_values.insert(len(wTc_list) - 1, wTc_list[-1].rotation()) + wRc_values.insert(len(wTc_list) - 2, wTc_list[-2].rotation()) + return wRc_values, i_iZj_list + + +def filter_outliers(w_iZj_list: gtsam.BinaryMeasurementsUnit3) -> gtsam.BinaryMeasurementsUnit3: + """Removes outliers from a list of Unit3 measurements that are the + translation directions from camera i to camera j in the world frame.""" + + # Indices of measurements that are to be used as projection directions. + # These are randomly chosen. All sampled directions must be unique. 
+ num_directions_to_sample = min( + MAX_1DSFM_PROJECTION_DIRECTIONS, len(w_iZj_list)) + sampled_indices = np.random.choice( + len(w_iZj_list), num_directions_to_sample, replace=False) + + # Sample projection directions from the measurements. + projection_directions = [w_iZj_list[idx].measured() + for idx in sampled_indices] + + outlier_weights = [] + # Find the outlier weights for each direction using MFAS. + for direction in projection_directions: + algorithm = gtsam.MFAS(w_iZj_list, direction) + outlier_weights.append(algorithm.computeOutlierWeights()) + + # Compute average of outlier weights. Each outlier weight is a map from a pair of Keys + # (camera IDs) to a weight, where weights are proportional to the probability of the edge + # being an outlier. + avg_outlier_weights = defaultdict(float) + for outlier_weight_dict in outlier_weights: + for keypair, weight in outlier_weight_dict.items(): + avg_outlier_weights[keypair] += weight / len(outlier_weights) + + # Remove w_iZj that have weight greater than threshold, these are outliers. + w_iZj_inliers = gtsam.BinaryMeasurementsUnit3() + [w_iZj_inliers.append(w_iZj) for w_iZj in w_iZj_list if avg_outlier_weights[( + w_iZj.key1(), w_iZj.key2())] < OUTLIER_WEIGHT_THRESHOLD] + + return w_iZj_inliers + + +def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3, + wRc_values: gtsam.Values) -> gtsam.Values: + """Estimate poses given rotations and normalized translation directions between cameras. + + Args: + i_iZj_list: List of normalized translation direction measurements between camera pairs, + Z here refers to measurements. The measurements are of camera j with reference + to camera i (iZj), in camera i's coordinate frame (i_). iZj represents a unit + vector to j in i's frame and is not a transformation. + wRc_values: Rotations of the cameras in the world frame. + + Returns: + gtsam.Values: Estimated poses. + """ + + # Convert the translation direction measurements to world frame using the rotations. + w_iZj_list = gtsam.BinaryMeasurementsUnit3() + for i_iZj in i_iZj_list: + w_iZj = gtsam.Unit3(wRc_values.atRot3(i_iZj.key1()) + .rotate(i_iZj.measured().point3())) + w_iZj_list.append(gtsam.BinaryMeasurementUnit3( + i_iZj.key1(), i_iZj.key2(), w_iZj, i_iZj.noiseModel())) + + # Remove the outliers in the unit translation directions. + w_iZj_inliers = filter_outliers(w_iZj_list) + + # Run the optimizer to obtain translations for normalized directions. 
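+    # (TranslationRecovery returns a Values of Point3 positions in the world
+    #  frame, keyed by the same camera indices used for the rotations.)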
+ wtc_values = gtsam.TranslationRecovery(w_iZj_inliers).run() + + wTc_values = gtsam.Values() + for key in wRc_values.keys(): + wTc_values.insert(key, gtsam.Pose3( + wRc_values.atRot3(key), wtc_values.atPoint3(key))) + return wTc_values + + +def main(): + wRc_values, i_iZj_list = get_data() + wTc_values = estimate_poses(i_iZj_list, wRc_values) + print("**** Translation averaging output ****") + print(wTc_values) + print("**************************************") + + +if __name__ == '__main__': + main() diff --git a/cython/gtsam/examples/VisualISAM2Example.py b/python/gtsam/examples/VisualISAM2Example.py similarity index 94% rename from cython/gtsam/examples/VisualISAM2Example.py rename to python/gtsam/examples/VisualISAM2Example.py index 49e6ca95c..bacf510ec 100644 --- a/cython/gtsam/examples/VisualISAM2Example.py +++ b/python/gtsam/examples/VisualISAM2Example.py @@ -17,8 +17,7 @@ import gtsam.utils.plot as gtsam_plot import matplotlib.pyplot as plt import numpy as np -from gtsam import symbol_shorthand_L as L -from gtsam import symbol_shorthand_X as X +from gtsam.symbol_shorthand import L, X from gtsam.examples import SFMdata from mpl_toolkits.mplot3d import Axes3D # pylint: disable=W0611 @@ -64,7 +63,7 @@ def visual_ISAM2_example(): K = gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) # Define the camera observation noise model - measurement_noise = gtsam.noiseModel_Isotropic.Sigma( + measurement_noise = gtsam.noiseModel.Isotropic.Sigma( 2, 1.0) # one pixel in u and v # Create the set of ground-truth landmarks @@ -110,12 +109,12 @@ def visual_ISAM2_example(): # at least twice before adding it to iSAM. if i == 0: # Add a prior on pose x0 - pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array( + pose_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array( [0.1, 0.1, 0.1, 0.3, 0.3, 0.3])) # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw graph.push_back(gtsam.PriorFactorPose3(X(0), poses[0], pose_noise)) # Add a prior on landmark l0 - point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) graph.push_back(gtsam.PriorFactorPoint3( L(0), points[0], point_noise)) # add directly to graph @@ -123,7 +122,7 @@ def visual_ISAM2_example(): # Intentionally initialize the variables off from the ground truth for j, point in enumerate(points): initial_estimate.insert(L(j), gtsam.Point3( - point.x()-0.25, point.y()+0.20, point.z()+0.15)) + point[0]-0.25, point[1]+0.20, point[2]+0.15)) else: # Update iSAM with the new factors isam.update(graph, initial_estimate) diff --git a/cython/gtsam/examples/VisualISAMExample.py b/python/gtsam/examples/VisualISAMExample.py similarity index 77% rename from cython/gtsam/examples/VisualISAMExample.py rename to python/gtsam/examples/VisualISAMExample.py index 5cc37867b..f99d3f3e6 100644 --- a/cython/gtsam/examples/VisualISAMExample.py +++ b/python/gtsam/examples/VisualISAMExample.py @@ -15,13 +15,11 @@ import numpy as np import gtsam from gtsam.examples import SFMdata -from gtsam.gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2, - NonlinearFactorGraph, NonlinearISAM, Point3, Pose3, - PriorFactorPoint3, PriorFactorPose3, Rot3, - PinholeCameraCal3_S2, Values) -from gtsam import symbol_shorthand_L as L -from gtsam import symbol_shorthand_X as X - +from gtsam import (Cal3_S2, GenericProjectionFactorCal3_S2, + NonlinearFactorGraph, NonlinearISAM, Pose3, + PriorFactorPoint3, PriorFactorPose3, Rot3, + PinholeCameraCal3_S2, Values, Point3) +from gtsam.symbol_shorthand import X, L def main(): """ @@ -34,7 +32,8 @@ def main(): K = 
Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0) # Define the camera observation noise model - camera_noise = gtsam.noiseModel_Isotropic.Sigma(2, 1.0) # one pixel in u and v + camera_noise = gtsam.noiseModel.Isotropic.Sigma( + 2, 1.0) # one pixel in u and v # Create the set of ground-truth landmarks points = SFMdata.createPoints() @@ -55,11 +54,13 @@ def main(): # Add factors for each landmark observation for j, point in enumerate(points): measurement = camera.project(point) - factor = GenericProjectionFactorCal3_S2(measurement, camera_noise, X(i), L(j), K) + factor = GenericProjectionFactorCal3_S2( + measurement, camera_noise, X(i), L(j), K) graph.push_back(factor) # Intentionally initialize the variables off from the ground truth - noise = Pose3(r=Rot3.Rodrigues(-0.1, 0.2, 0.25), t=Point3(0.05, -0.10, 0.20)) + noise = Pose3(r=Rot3.Rodrigues(-0.1, 0.2, 0.25), + t=Point3(0.05, -0.10, 0.20)) initial_xi = pose.compose(noise) # Add an initial guess for the current pose @@ -71,12 +72,13 @@ def main(): # adding it to iSAM. if i == 0: # Add a prior on pose x0, with 0.3 rad std on roll,pitch,yaw and 0.1m x,y,z - pose_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) + pose_noise = gtsam.noiseModel.Diagonal.Sigmas( + np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) factor = PriorFactorPose3(X(0), poses[0], pose_noise) graph.push_back(factor) # Add a prior on landmark l0 - point_noise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) + point_noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) factor = PriorFactorPoint3(L(0), points[0], point_noise) graph.push_back(factor) @@ -84,8 +86,8 @@ def main(): noise = np.array([-0.25, 0.20, 0.15]) for j, point in enumerate(points): # Intentionally initialize the variables off from the ground truth - initial_lj = points[j].vector() + noise - initial_estimate.insert(L(j), Point3(initial_lj)) + initial_lj = points[j] + noise + initial_estimate.insert(L(j), initial_lj) else: # Update iSAM with the new factors isam.update(graph, initial_estimate) diff --git a/cython/gtsam/examples/__init__.py b/python/gtsam/examples/__init__.py similarity index 100% rename from cython/gtsam/examples/__init__.py rename to python/gtsam/examples/__init__.py diff --git a/python/gtsam/gtsam.tpl b/python/gtsam/gtsam.tpl new file mode 100644 index 000000000..634a81e90 --- /dev/null +++ b/python/gtsam/gtsam.tpl @@ -0,0 +1,48 @@ +/** + * @file gtsam.cpp + * @brief The auto-generated wrapper C++ source code. + * @author Duy-Nguyen Ta, Fan Jiang, Matthew Sklar + * @date Aug. 18, 2020 + * + * ** THIS FILE IS AUTO-GENERATED, DO NOT MODIFY! ** + */ + +// Include relevant boost libraries required by GTSAM +{include_boost} + +#include +#include +#include +#include "gtsam/config.h" +#include "gtsam/base/serialization.h" +#include "gtsam/nonlinear/utilities.h" // for RedirectCout. 
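One recurring change in these example ports is worth calling out once: with the pybind11 wrapper, Point2/Point3 values come back as plain numpy arrays, so the old .x()/.y()/.z() and .vector() accessors give way to indexing and ordinary numpy arithmetic. A minimal sketch:

    import numpy as np
    import gtsam

    p = gtsam.Point3(1.0, 2.0, 3.0)
    noisy = p + np.array([-0.25, 0.20, 0.15])  # was: p.vector() + noise
    x, y, z = p[0], p[1], p[2]                 # was: p.x(), p.y(), p.z()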
+ +// These are the included headers listed in `gtsam.i` +{includes} +#include + +// Export classes for serialization +{boost_class_export} + +// Holder type for pybind11 +{hoder_type} + +// Preamble for STL classes +// TODO(fan): make this automatic +#include "python/gtsam/preamble.h" + +using namespace std; + +namespace py = pybind11; + +PYBIND11_MODULE({module_name}, m_) {{ + m_.doc() = "pybind11 wrapper of {module_name}"; + +{wrapped_namespace} + +// Specializations for STL classes +// TODO(fan): make this automatic +#include "python/gtsam/specializations.h" + +}} + diff --git a/python/gtsam/imuBias.py b/python/gtsam/imuBias.py new file mode 100644 index 000000000..399cefb98 --- /dev/null +++ b/python/gtsam/imuBias.py @@ -0,0 +1,4 @@ +# This trick is to allow direct import of sub-modules +# without this, we can only do `from gtsam.gtsam.imuBias import X` +# with this trick, we can do `from gtsam.imuBias import X` +from .gtsam.imuBias import * diff --git a/python/gtsam/noiseModel.py b/python/gtsam/noiseModel.py new file mode 100644 index 000000000..6e1b43488 --- /dev/null +++ b/python/gtsam/noiseModel.py @@ -0,0 +1,4 @@ +# This trick is to allow direct import of sub-modules +# without this, we can only do `from gtsam.gtsam.noiseModel import X` +# with this trick, we can do `from gtsam.noiseModel import X` +from .gtsam.noiseModel import * \ No newline at end of file diff --git a/python/gtsam/preamble.h b/python/gtsam/preamble.h new file mode 100644 index 000000000..b56766c72 --- /dev/null +++ b/python/gtsam/preamble.h @@ -0,0 +1,14 @@ +// Please refer to: https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html +// These are required to save one copy operation on Python calls +#ifdef GTSAM_ALLOCATOR_TBB +PYBIND11_MAKE_OPAQUE(std::vector>); +#else +PYBIND11_MAKE_OPAQUE(std::vector); +#endif +PYBIND11_MAKE_OPAQUE(std::vector >); +PYBIND11_MAKE_OPAQUE(std::vector); +PYBIND11_MAKE_OPAQUE(std::vector > >); +PYBIND11_MAKE_OPAQUE(std::vector > >); +PYBIND11_MAKE_OPAQUE(std::vector); +PYBIND11_MAKE_OPAQUE(gtsam::CameraSet >); +PYBIND11_MAKE_OPAQUE(gtsam::CameraSet >); diff --git a/python/gtsam/specializations.h b/python/gtsam/specializations.h new file mode 100644 index 000000000..431697aac --- /dev/null +++ b/python/gtsam/specializations.h @@ -0,0 +1,17 @@ +// Please refer to: https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html +// These are required to save one copy operation on Python calls +#ifdef GTSAM_ALLOCATOR_TBB +py::bind_vector > >(m_, "KeyVector"); +#else +py::bind_vector >(m_, "KeyVector"); +#endif +py::bind_vector > >(m_, "Point2Vector"); +py::bind_vector >(m_, "Pose3Vector"); +py::bind_vector > > >(m_, "BetweenFactorPose3s"); +py::bind_vector > > >(m_, "BetweenFactorPose2s"); +py::bind_vector > >(m_, "BinaryMeasurementsUnit3"); +py::bind_map(m_, "IndexPairSetMap"); +py::bind_vector(m_, "IndexPairVector"); +py::bind_map(m_, "KeyPairDoubleMap"); +py::bind_vector > >(m_, "CameraSetCal3_S2"); +py::bind_vector > >(m_, "CameraSetCal3Bundler"); diff --git a/python/gtsam/symbol_shorthand.py b/python/gtsam/symbol_shorthand.py new file mode 100644 index 000000000..748d36558 --- /dev/null +++ b/python/gtsam/symbol_shorthand.py @@ -0,0 +1,4 @@ +# This trick is to allow direct import of sub-modules +# without this, we can only do `from gtsam.gtsam.symbol_shorthand import X` +# with this trick, we can do `from gtsam.symbol_shorthand import X` +from .gtsam.symbol_shorthand import * \ No newline at end of file diff --git a/cython/gtsam/tests/testScenarioRunner.py 
b/python/gtsam/tests/testScenarioRunner.py similarity index 96% rename from cython/gtsam/tests/testScenarioRunner.py rename to python/gtsam/tests/testScenarioRunner.py index 97a97b0ec..2af16a794 100644 --- a/cython/gtsam/tests/testScenarioRunner.py +++ b/python/gtsam/tests/testScenarioRunner.py @@ -32,7 +32,7 @@ def test_loop_runner(self): dt = 0.1 params = gtsam.PreintegrationParams.MakeSharedU(self.g) - bias = gtsam.imuBias_ConstantBias() + bias = gtsam.imuBias.ConstantBias() runner = gtsam.ScenarioRunner( scenario, params, dt, bias) diff --git a/cython/gtsam/tests/test_Cal3Unified.py b/python/gtsam/tests/test_Cal3Unified.py similarity index 100% rename from cython/gtsam/tests/test_Cal3Unified.py rename to python/gtsam/tests/test_Cal3Unified.py diff --git a/python/gtsam/tests/test_FrobeniusFactor.py b/python/gtsam/tests/test_FrobeniusFactor.py new file mode 100644 index 000000000..e808627f5 --- /dev/null +++ b/python/gtsam/tests/test_FrobeniusFactor.py @@ -0,0 +1,56 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +FrobeniusFactor unit tests. +Author: Frank Dellaert +""" +# pylint: disable=no-name-in-module, import-error, invalid-name +import unittest + +import numpy as np +from gtsam import (Rot3, SO3, SO4, FrobeniusBetweenFactorSO4, FrobeniusFactorSO4, + ShonanFactor3, SOn) + +id = SO4() +v1 = np.array([0, 0, 0, 0.1, 0, 0]) +Q1 = SO4.Expmap(v1) +v2 = np.array([0, 0, 0, 0.01, 0.02, 0.03]) +Q2 = SO4.Expmap(v2) + + +class TestFrobeniusFactorSO4(unittest.TestCase): + """Test FrobeniusFactor factors.""" + + def test_frobenius_factor(self): + """Test creation of a factor that calculates the Frobenius norm.""" + factor = FrobeniusFactorSO4(1, 2) + actual = factor.evaluateError(Q1, Q2) + expected = (Q2.matrix()-Q1.matrix()).transpose().reshape((16,)) + np.testing.assert_array_equal(actual, expected) + + def test_frobenius_between_factor(self): + """Test creation of a Frobenius BetweenFactor.""" + factor = FrobeniusBetweenFactorSO4(1, 2, Q1.between(Q2)) + actual = factor.evaluateError(Q1, Q2) + expected = np.zeros((16,)) + np.testing.assert_array_almost_equal(actual, expected) + + def test_frobenius_wormhole_factor(self): + """Test creation of a factor that calculates Shonan error.""" + R1 = SO3.Expmap(v1[3:]) + R2 = SO3.Expmap(v2[3:]) + factor = ShonanFactor3(1, 2, Rot3(R1.between(R2).matrix()), p=4) + I4 = SOn(4) + Q1 = I4.retract(v1) + Q2 = I4.retract(v2) + actual = factor.evaluateError(Q1, Q2) + expected = np.zeros((12,)) + np.testing.assert_array_almost_equal(actual, expected, decimal=4) + + +if __name__ == "__main__": + unittest.main() diff --git a/cython/gtsam/tests/test_GaussianFactorGraph.py b/python/gtsam/tests/test_GaussianFactorGraph.py similarity index 94% rename from cython/gtsam/tests/test_GaussianFactorGraph.py rename to python/gtsam/tests/test_GaussianFactorGraph.py index 983825d8b..a29b0f263 100644 --- a/cython/gtsam/tests/test_GaussianFactorGraph.py +++ b/python/gtsam/tests/test_GaussianFactorGraph.py @@ -16,7 +16,7 @@ import gtsam import numpy as np -from gtsam import symbol_shorthand_X as X +from gtsam.symbol_shorthand import X from gtsam.utils.test_case import GtsamTestCase @@ -28,8 +28,8 @@ def create_graph(): x1 = X(1) x2 = X(2) - BETWEEN_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.ones(1)) - PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.ones(1)) + BETWEEN_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.ones(1)) + PRIOR_NOISE = 
gtsam.noiseModel.Diagonal.Sigmas(np.ones(1)) graph.add(x1, np.eye(1), x0, -np.eye(1), np.ones(1), BETWEEN_NOISE) graph.add(x2, np.eye(1), x1, -np.eye(1), 2*np.ones(1), BETWEEN_NOISE) diff --git a/cython/gtsam/tests/test_JacobianFactor.py b/python/gtsam/tests/test_JacobianFactor.py similarity index 90% rename from cython/gtsam/tests/test_JacobianFactor.py rename to python/gtsam/tests/test_JacobianFactor.py index 04433492b..79f512f60 100644 --- a/cython/gtsam/tests/test_JacobianFactor.py +++ b/python/gtsam/tests/test_JacobianFactor.py @@ -19,7 +19,7 @@ class TestJacobianFactor(GtsamTestCase): def test_eliminate(self): - # Recommended way to specify a matrix (see cython/README) + # Recommended way to specify a matrix (see python/README) Ax2 = np.array( [[-5., 0.], [+0., -5.], @@ -48,7 +48,7 @@ def test_eliminate(self): # the RHS b2 = np.array([-1., 1.5, 2., -1.]) sigmas = np.array([1., 1., 1., 1.]) - model4 = gtsam.noiseModel_Diagonal.Sigmas(sigmas) + model4 = gtsam.noiseModel.Diagonal.Sigmas(sigmas) combined = gtsam.JacobianFactor(x2, Ax2, l1, Al1, x1, Ax1, b2, model4) # eliminate the first variable (x2) in the combined factor, destructive @@ -66,7 +66,7 @@ def test_eliminate(self): [+0.00, -8.94427]]) d = np.array([2.23607, -1.56525]) expectedCG = gtsam.GaussianConditional( - x2, d, R11, l1, S12, x1, S13, gtsam.noiseModel_Unit.Create(2)) + x2, d, R11, l1, S12, x1, S13, gtsam.noiseModel.Unit.Create(2)) # check if the result matches self.gtsamAssertEquals(actualCG, expectedCG, 1e-4) @@ -82,7 +82,7 @@ def test_eliminate(self): # the RHS b1 = np.array([0.0, 0.894427]) - model2 = gtsam.noiseModel_Diagonal.Sigmas(np.array([1., 1.])) + model2 = gtsam.noiseModel.Diagonal.Sigmas(np.array([1., 1.])) expectedLF = gtsam.JacobianFactor(l1, Bl1, x1, Bx1, b1, model2) # check if the result matches the combined (reduced) factor diff --git a/cython/gtsam/tests/test_KalmanFilter.py b/python/gtsam/tests/test_KalmanFilter.py similarity index 95% rename from cython/gtsam/tests/test_KalmanFilter.py rename to python/gtsam/tests/test_KalmanFilter.py index 94c41df72..48a91b96c 100644 --- a/cython/gtsam/tests/test_KalmanFilter.py +++ b/python/gtsam/tests/test_KalmanFilter.py @@ -22,13 +22,13 @@ def test_KalmanFilter(self): F = np.eye(2) B = np.eye(2) u = np.array([1.0, 0.0]) - modelQ = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1])) + modelQ = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1])) Q = 0.01 * np.eye(2) H = np.eye(2) z1 = np.array([1.0, 0.0]) z2 = np.array([2.0, 0.0]) z3 = np.array([3.0, 0.0]) - modelR = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1])) + modelR = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1])) R = 0.01 * np.eye(2) # Create the set of expected output TestValues diff --git a/cython/gtsam/tests/test_KarcherMeanFactor.py b/python/gtsam/tests/test_KarcherMeanFactor.py similarity index 96% rename from cython/gtsam/tests/test_KarcherMeanFactor.py rename to python/gtsam/tests/test_KarcherMeanFactor.py index 6976decc1..a315a506c 100644 --- a/cython/gtsam/tests/test_KarcherMeanFactor.py +++ b/python/gtsam/tests/test_KarcherMeanFactor.py @@ -18,7 +18,7 @@ from gtsam.utils.test_case import GtsamTestCase KEY = 0 -MODEL = gtsam.noiseModel_Unit.Create(3) +MODEL = gtsam.noiseModel.Unit.Create(3) def find_Karcher_mean_Rot3(rotations): @@ -59,8 +59,8 @@ def test_factor(self): R12 = R.compose(R.compose(R)) graph.add(gtsam.BetweenFactorRot3(1, 2, R12, MODEL)) keys = gtsam.KeyVector() - keys.push_back(1) - keys.push_back(2) + keys.append(1) + keys.append(2) 
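The KalmanFilter test above only shows the noise models; a rough predict/update cycle would look like the following, assuming the Python wrapper mirrors the C++ init/predict/update API (treat this as a sketch, not a verified call sequence):

    import numpy as np
    import gtsam

    kf = gtsam.KalmanFilter(2)
    state = kf.init(np.zeros(2), 0.01 * np.eye(2))          # initial mean and covariance
    F, B, H = np.eye(2), np.eye(2), np.eye(2)
    u = np.array([1.0, 0.0])
    modelQ = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1]))
    modelR = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1]))
    state = kf.predict(state, F, B, u, modelQ)               # time update
    state = kf.update(state, H, np.array([1.0, 0.0]), modelR)  # measurement update
    print(state.mean(), state.covariance())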
graph.add(gtsam.KarcherMeanFactorRot3(keys)) initial = gtsam.Values() diff --git a/cython/gtsam/tests/test_LocalizationExample.py b/python/gtsam/tests/test_LocalizationExample.py similarity index 94% rename from cython/gtsam/tests/test_LocalizationExample.py rename to python/gtsam/tests/test_LocalizationExample.py index 6ce65f087..8ae3583f0 100644 --- a/cython/gtsam/tests/test_LocalizationExample.py +++ b/python/gtsam/tests/test_LocalizationExample.py @@ -26,7 +26,7 @@ def test_LocalizationExample(self): # Add two odometry factors # create a measurement for both factors (the same in this case) odometry = gtsam.Pose2(2.0, 0.0, 0.0) - odometryNoise = gtsam.noiseModel_Diagonal.Sigmas( + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.2, 0.2, 0.1])) # 20cm std on x,y, 0.1 rad on theta graph.add(gtsam.BetweenFactorPose2(0, 1, odometry, odometryNoise)) graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise)) @@ -37,7 +37,7 @@ def test_LocalizationExample(self): groundTruth.insert(0, gtsam.Pose2(0.0, 0.0, 0.0)) groundTruth.insert(1, gtsam.Pose2(2.0, 0.0, 0.0)) groundTruth.insert(2, gtsam.Pose2(4.0, 0.0, 0.0)) - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.1, 0.1, 10.])) + model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 10.])) for i in range(3): graph.add(gtsam.PriorFactorPose2(i, groundTruth.atPose2(i), model)) diff --git a/cython/gtsam/tests/test_NonlinearOptimizer.py b/python/gtsam/tests/test_NonlinearOptimizer.py similarity index 75% rename from cython/gtsam/tests/test_NonlinearOptimizer.py rename to python/gtsam/tests/test_NonlinearOptimizer.py index efefb218a..e9234a43b 100644 --- a/cython/gtsam/tests/test_NonlinearOptimizer.py +++ b/python/gtsam/tests/test_NonlinearOptimizer.py @@ -15,10 +15,11 @@ import unittest import gtsam -from gtsam import (DoglegOptimizer, DoglegParams, GaussNewtonOptimizer, +from gtsam import (DoglegOptimizer, DoglegParams, + DummyPreconditionerParameters, GaussNewtonOptimizer, GaussNewtonParams, LevenbergMarquardtOptimizer, LevenbergMarquardtParams, NonlinearFactorGraph, Ordering, - Point2, PriorFactorPoint2, Values) + PCGSolverParameters, Point2, PriorFactorPoint2, Values) from gtsam.utils.test_case import GtsamTestCase KEY1 = 1 @@ -29,7 +30,7 @@ class TestScenario(GtsamTestCase): def test_optimize(self): """Do trivial test with three optimizer variants.""" fg = NonlinearFactorGraph() - model = gtsam.noiseModel_Unit.Create(2) + model = gtsam.noiseModel.Unit.Create(2) fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model)) # test error at minimum @@ -61,6 +62,16 @@ def test_optimize(self): fg, initial_values, lmParams).optimize() self.assertAlmostEqual(0, fg.error(actual2)) + # Levenberg-Marquardt + lmParams = LevenbergMarquardtParams.CeresDefaults() + lmParams.setLinearSolverType("ITERATIVE") + cgParams = PCGSolverParameters() + cgParams.setPreconditionerParams(DummyPreconditionerParameters()) + lmParams.setIterativeParams(cgParams) + actual2 = LevenbergMarquardtOptimizer( + fg, initial_values, lmParams).optimize() + self.assertAlmostEqual(0, fg.error(actual2)) + # Dogleg dlParams = DoglegParams() dlParams.setOrdering(ordering) diff --git a/cython/gtsam/tests/test_OdometryExample.py b/python/gtsam/tests/test_OdometryExample.py similarity index 94% rename from cython/gtsam/tests/test_OdometryExample.py rename to python/gtsam/tests/test_OdometryExample.py index c8ea95588..72e532f20 100644 --- a/cython/gtsam/tests/test_OdometryExample.py +++ b/python/gtsam/tests/test_OdometryExample.py @@ -25,7 +25,7 @@ def 
test_OdometryExample(self): # Add a Gaussian prior on pose x_1 priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior mean is at origin - priorNoise = gtsam.noiseModel_Diagonal.Sigmas( + priorNoise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.3, 0.3, 0.1])) # 30cm std on x,y, 0.1 rad on theta # add directly to graph graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise)) @@ -33,7 +33,7 @@ def test_OdometryExample(self): # Add two odometry factors # create a measurement for both factors (the same in this case) odometry = gtsam.Pose2(2.0, 0.0, 0.0) - odometryNoise = gtsam.noiseModel_Diagonal.Sigmas( + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.2, 0.2, 0.1])) # 20cm std on x,y, 0.1 rad on theta graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise)) graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, odometryNoise)) diff --git a/cython/gtsam/tests/test_PlanarSLAMExample.py b/python/gtsam/tests/test_PlanarSLAMExample.py similarity index 93% rename from cython/gtsam/tests/test_PlanarSLAMExample.py rename to python/gtsam/tests/test_PlanarSLAMExample.py index ae813d35c..8cb3ad2ac 100644 --- a/cython/gtsam/tests/test_PlanarSLAMExample.py +++ b/python/gtsam/tests/test_PlanarSLAMExample.py @@ -32,13 +32,13 @@ def test_PlanarSLAM(self): # Add prior # gaussian for prior priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin - priorNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) # add directly to graph graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise)) # Add odometry # general noisemodel for odometry - odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) graph.add(gtsam.BetweenFactorPose2( 1, 2, gtsam.Pose2(2.0, 0.0, 0.0), odometryNoise)) graph.add(gtsam.BetweenFactorPose2( @@ -49,7 +49,7 @@ def test_PlanarSLAM(self): 4, 5, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise)) # Add pose constraint - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) graph.add(gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2.0, 0.0, pi / 2), model)) # Initialize to noisy points diff --git a/python/gtsam/tests/test_Point2.py b/python/gtsam/tests/test_Point2.py new file mode 100644 index 000000000..52ac92970 --- /dev/null +++ b/python/gtsam/tests/test_Point2.py @@ -0,0 +1,29 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Point2 unit tests. +Author: Frank Dellaert & Fan Jiang +""" +import unittest + +import gtsam +import numpy as np +from gtsam.utils.test_case import GtsamTestCase + + +class TestPoint2(GtsamTestCase): + """Test selected Point2 methods.""" + + def test_constructors(self): + """Test constructors from doubles and vectors.""" + expected = gtsam.Point2(1, 2) + actual = gtsam.Point2(np.array([1, 2])) + np.testing.assert_array_equal(actual, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/gtsam/tests/test_Point3.py b/python/gtsam/tests/test_Point3.py new file mode 100644 index 000000000..f7a1c0f06 --- /dev/null +++ b/python/gtsam/tests/test_Point3.py @@ -0,0 +1,29 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Point3 unit tests. 
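Condensed, the odometry pattern exercised by these tests looks like this with the new noiseModel namespace (the initial values are arbitrary perturbations of the ground truth):

    import numpy as np
    import gtsam

    graph = gtsam.NonlinearFactorGraph()
    prior_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
    odom_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
    graph.add(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), prior_noise))
    graph.add(gtsam.BetweenFactorPose2(1, 2, gtsam.Pose2(2, 0, 0), odom_noise))
    graph.add(gtsam.BetweenFactorPose2(2, 3, gtsam.Pose2(2, 0, 0), odom_noise))

    initial = gtsam.Values()
    for i, x in zip([1, 2, 3], [0.5, 2.3, 4.1]):
        initial.insert(i, gtsam.Pose2(x, 0.1, 0.1))

    result = gtsam.LevenbergMarquardtOptimizer(graph, initial).optimize()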
+Author: Frank Dellaert & Fan Jiang +""" +import unittest + +import gtsam +import numpy as np +from gtsam.utils.test_case import GtsamTestCase + + +class TestPoint3(GtsamTestCase): + """Test selected Point3 methods.""" + + def test_constructors(self): + """Test constructors from doubles and vectors.""" + expected = gtsam.Point3(1, 2, 3) + actual = gtsam.Point3(np.array([1, 2, 3])) + np.testing.assert_array_equal(actual, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/cython/gtsam/tests/test_Pose2.py b/python/gtsam/tests/test_Pose2.py similarity index 100% rename from cython/gtsam/tests/test_Pose2.py rename to python/gtsam/tests/test_Pose2.py diff --git a/cython/gtsam/tests/test_Pose2SLAMExample.py b/python/gtsam/tests/test_Pose2SLAMExample.py similarity index 93% rename from cython/gtsam/tests/test_Pose2SLAMExample.py rename to python/gtsam/tests/test_Pose2SLAMExample.py index a79b6b18c..e47b9fbff 100644 --- a/cython/gtsam/tests/test_Pose2SLAMExample.py +++ b/python/gtsam/tests/test_Pose2SLAMExample.py @@ -32,13 +32,13 @@ def test_Pose2SLAMExample(self): # Add prior # gaussian for prior priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin - priorNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) # add directly to graph graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise)) # Add odometry # general noisemodel for odometry - odometryNoise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) graph.add(gtsam.BetweenFactorPose2( 1, 2, gtsam.Pose2(2.0, 0.0, 0.0), odometryNoise)) graph.add(gtsam.BetweenFactorPose2( @@ -49,7 +49,7 @@ def test_Pose2SLAMExample(self): 4, 5, gtsam.Pose2(2.0, 0.0, pi / 2), odometryNoise)) # Add pose constraint - model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) + model = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1])) graph.add(gtsam.BetweenFactorPose2(5, 2, gtsam.Pose2(2.0, 0.0, pi / 2), model)) # Initialize to noisy points diff --git a/cython/gtsam/tests/test_Pose3.py b/python/gtsam/tests/test_Pose3.py similarity index 86% rename from cython/gtsam/tests/test_Pose3.py rename to python/gtsam/tests/test_Pose3.py index 138f1ff13..e07b904a9 100644 --- a/cython/gtsam/tests/test_Pose3.py +++ b/python/gtsam/tests/test_Pose3.py @@ -65,6 +65,14 @@ def test_adjoint(self): actual = Pose3.adjoint_(xi, xi) np.testing.assert_array_equal(actual, expected) + def test_serialization(self): + """Test if serialization is working normally""" + expected = Pose3(Rot3.Ypr(0.0, 1.0, 0.0), Point3(1, 1, 0)) + actual = Pose3() + serialized = expected.serialize() + actual.deserialize(serialized) + self.gtsamAssertEquals(expected, actual, 1e-10) + if __name__ == "__main__": unittest.main() diff --git a/cython/gtsam/tests/test_Pose3SLAMExample.py b/python/gtsam/tests/test_Pose3SLAMExample.py similarity index 97% rename from cython/gtsam/tests/test_Pose3SLAMExample.py rename to python/gtsam/tests/test_Pose3SLAMExample.py index 1e9eaac67..fce171b55 100644 --- a/cython/gtsam/tests/test_Pose3SLAMExample.py +++ b/python/gtsam/tests/test_Pose3SLAMExample.py @@ -30,7 +30,7 @@ def test_Pose3SLAMExample(self): fg = gtsam.NonlinearFactorGraph() fg.add(gtsam.NonlinearEqualityPose3(0, p0)) delta = p0.between(p1) - covariance = gtsam.noiseModel_Diagonal.Sigmas( + covariance = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05, 5. * pi / 180, 5. * pi / 180, 5. 
* pi / 180])) fg.add(gtsam.BetweenFactorPose3(0, 1, delta, covariance)) fg.add(gtsam.BetweenFactorPose3(1, 2, delta, covariance)) diff --git a/cython/gtsam/tests/test_PriorFactor.py b/python/gtsam/tests/test_PriorFactor.py similarity index 88% rename from cython/gtsam/tests/test_PriorFactor.py rename to python/gtsam/tests/test_PriorFactor.py index 66207b800..0582cf5d7 100644 --- a/cython/gtsam/tests/test_PriorFactor.py +++ b/python/gtsam/tests/test_PriorFactor.py @@ -23,14 +23,14 @@ def test_PriorFactor(self): key = 5 priorPose3 = gtsam.Pose3() - model = gtsam.noiseModel_Unit.Create(6) + model = gtsam.noiseModel.Unit.Create(6) factor = gtsam.PriorFactorPose3(key, priorPose3, model) values.insert(key, priorPose3) self.assertEqual(factor.error(values), 0) key = 3 priorVector = np.array([0., 0., 0.]) - model = gtsam.noiseModel_Unit.Create(3) + model = gtsam.noiseModel.Unit.Create(3) factor = gtsam.PriorFactorVector(key, priorVector, model) values.insert(key, priorVector) self.assertEqual(factor.error(values), 0) @@ -45,14 +45,14 @@ def test_AddPrior(self): # define and add Pose3 prior key = 5 priorPose3 = gtsam.Pose3() - model = gtsam.noiseModel_Unit.Create(6) + model = gtsam.noiseModel.Unit.Create(6) graph.addPriorPose3(key, priorPose3, model) self.assertEqual(graph.size(), 1) # define and add Vector prior key = 3 priorVector = np.array([0., 0., 0.]) - model = gtsam.noiseModel_Unit.Create(3) + model = gtsam.noiseModel.Unit.Create(3) graph.addPriorVector(key, priorVector, model) self.assertEqual(graph.size(), 2) diff --git a/cython/gtsam/tests/test_SFMExample.py b/python/gtsam/tests/test_SFMExample.py similarity index 72% rename from cython/gtsam/tests/test_SFMExample.py rename to python/gtsam/tests/test_SFMExample.py index e8fa46186..47a3cbe3e 100644 --- a/cython/gtsam/tests/test_SFMExample.py +++ b/python/gtsam/tests/test_SFMExample.py @@ -15,8 +15,9 @@ import gtsam import gtsam.utils.visual_data_generator as generator from gtsam import symbol +from gtsam.noiseModel import Isotropic, Diagonal from gtsam.utils.test_case import GtsamTestCase - +from gtsam.symbol_shorthand import X, P class TestSFMExample(GtsamTestCase): @@ -34,29 +35,29 @@ def test_SFMExample(self): graph = gtsam.NonlinearFactorGraph() # Add factors for all measurements - measurementNoise = gtsam.noiseModel_Isotropic.Sigma(2, measurementNoiseSigma) + measurementNoise = Isotropic.Sigma(2, measurementNoiseSigma) for i in range(len(data.Z)): for k in range(len(data.Z[i])): j = data.J[i][k] graph.add(gtsam.GenericProjectionFactorCal3_S2( data.Z[i][k], measurementNoise, - symbol(ord('x'), i), symbol(ord('p'), j), data.K)) + X(i), P(j), data.K)) - posePriorNoise = gtsam.noiseModel_Diagonal.Sigmas(poseNoiseSigmas) - graph.add(gtsam.PriorFactorPose3(symbol(ord('x'), 0), + posePriorNoise = Diagonal.Sigmas(poseNoiseSigmas) + graph.add(gtsam.PriorFactorPose3(X(0), truth.cameras[0].pose(), posePriorNoise)) - pointPriorNoise = gtsam.noiseModel_Isotropic.Sigma(3, pointNoiseSigma) - graph.add(gtsam.PriorFactorPoint3(symbol(ord('p'), 0), + pointPriorNoise = Isotropic.Sigma(3, pointNoiseSigma) + graph.add(gtsam.PriorFactorPoint3(P(0), truth.points[0], pointPriorNoise)) # Initial estimate initialEstimate = gtsam.Values() for i in range(len(truth.cameras)): pose_i = truth.cameras[i].pose() - initialEstimate.insert(symbol(ord('x'), i), pose_i) + initialEstimate.insert(X(i), pose_i) for j in range(len(truth.points)): point_j = truth.points[j] - initialEstimate.insert(symbol(ord('p'), j), point_j) + initialEstimate.insert(P(j), point_j) # 
Optimization optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initialEstimate) @@ -66,16 +67,16 @@ def test_SFMExample(self): # Marginalization marginals = gtsam.Marginals(graph, result) - marginals.marginalCovariance(symbol(ord('p'), 0)) - marginals.marginalCovariance(symbol(ord('x'), 0)) + marginals.marginalCovariance(P(0)) + marginals.marginalCovariance(X(0)) # Check optimized results, should be equal to ground truth for i in range(len(truth.cameras)): - pose_i = result.atPose3(symbol(ord('x'), i)) + pose_i = result.atPose3(X(i)) self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5) for j in range(len(truth.points)): - point_j = result.atPoint3(symbol(ord('p'), j)) + point_j = result.atPoint3(P(j)) self.gtsamAssertEquals(point_j, truth.points[j], 1e-5) if __name__ == "__main__": diff --git a/cython/gtsam/tests/test_SO4.py b/python/gtsam/tests/test_SO4.py similarity index 100% rename from cython/gtsam/tests/test_SO4.py rename to python/gtsam/tests/test_SO4.py diff --git a/cython/gtsam/tests/test_SOn.py b/python/gtsam/tests/test_SOn.py similarity index 100% rename from cython/gtsam/tests/test_SOn.py rename to python/gtsam/tests/test_SOn.py diff --git a/cython/gtsam/tests/test_Scenario.py b/python/gtsam/tests/test_Scenario.py similarity index 90% rename from cython/gtsam/tests/test_Scenario.py rename to python/gtsam/tests/test_Scenario.py index 09601fba5..fc5965829 100644 --- a/cython/gtsam/tests/test_Scenario.py +++ b/python/gtsam/tests/test_Scenario.py @@ -43,8 +43,11 @@ def test_loop(self): # R = v/w, so test if loop crests at 2*R R = v / w T30 = scenario.pose(T) + xyz = T30.rotation().xyz() + if xyz[0] < 0: + xyz = -xyz np.testing.assert_almost_equal( - np.array([math.pi, 0, math.pi]), T30.rotation().xyz()) + np.array([math.pi, 0, math.pi]), xyz) self.gtsamAssertEquals(gtsam.Point3( 0, 0, 2.0 * R), T30.translation(), 1e-9) diff --git a/python/gtsam/tests/test_SfmData.py b/python/gtsam/tests/test_SfmData.py new file mode 100644 index 000000000..9c965ddc5 --- /dev/null +++ b/python/gtsam/tests/test_SfmData.py @@ -0,0 +1,79 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Unit tests for testing dataset access. 
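A short note on the sign flip added in test_Scenario above: Euler-angle extraction near plus or minus pi is only unique up to negating all three angles, so (pi, 0, pi) and (-pi, 0, -pi) describe the same rotation. A quick check, assuming the usual Rot3.RzRyRx binding:

    import numpy as np
    import gtsam

    R1 = gtsam.Rot3.RzRyRx(np.pi, 0.0, np.pi)
    R2 = gtsam.Rot3.RzRyRx(-np.pi, 0.0, -np.pi)
    assert R1.equals(R2, 1e-9)  # identical rotation matrices, different Euler triples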
+Author: Frank Dellaert (Python: Sushmita Warrier) +""" +# pylint: disable=invalid-name, no-name-in-module, no-member + +from __future__ import print_function + +import unittest + +import numpy as np + +import gtsam +from gtsam.utils.test_case import GtsamTestCase + + +class TestSfmData(GtsamTestCase): + """Tests for SfmData and SfmTrack modules.""" + + def setUp(self): + """Initialize SfmData and SfmTrack""" + self.data = gtsam.SfmData() + # initialize SfmTrack with 3D point + self.tracks = gtsam.SfmTrack() + + def test_tracks(self): + """Test functions in SfmTrack""" + # measurement is of format (camera_idx, imgPoint) + # create arbitrary camera indices for two cameras + i1, i2 = 4,5 + # create arbitrary image measurements for cameras i1 and i2 + uv_i1 = gtsam.Point2(12.6, 82) + # translating point uv_i1 along X-axis + uv_i2 = gtsam.Point2(24.88, 82) + # add measurements to the track + self.tracks.add_measurement(i1, uv_i1) + self.tracks.add_measurement(i2, uv_i2) + # Number of measurements in the track is 2 + self.assertEqual(self.tracks.number_measurements(), 2) + # camera_idx in the first measurement of the track corresponds to i1 + cam_idx, img_measurement = self.tracks.measurement(0) + self.assertEqual(cam_idx, i1) + np.testing.assert_array_almost_equal( + gtsam.Point3(0.,0.,0.), + self.tracks.point3() + ) + + + def test_data(self): + """Test functions in SfmData""" + # Create new track with 3 measurements + i1, i2, i3 = 3,5,6 + uv_i1 = gtsam.Point2(21.23, 45.64) + # translating along X-axis + uv_i2 = gtsam.Point2(45.7, 45.64) + uv_i3 = gtsam.Point2(68.35, 45.64) + # add measurements and arbitrary point to the track + measurements = [(i1, uv_i1), (i2, uv_i2), (i3, uv_i3)] + pt = gtsam.Point3(1.0, 6.0, 2.0) + track2 = gtsam.SfmTrack(pt) + track2.add_measurement(i1, uv_i1) + track2.add_measurement(i2, uv_i2) + track2.add_measurement(i3, uv_i3) + self.data.add_track(self.tracks) + self.data.add_track(track2) + # Number of tracks in SfmData is 2 + self.assertEqual(self.data.number_tracks(), 2) + # camera idx of first measurement of second track corresponds to i1 + cam_idx, img_measurement = self.data.track(1).measurement(0) + self.assertEqual(cam_idx, i1) + +if __name__ == '__main__': + unittest.main() diff --git a/python/gtsam/tests/test_ShonanAveraging.py b/python/gtsam/tests/test_ShonanAveraging.py new file mode 100644 index 000000000..4c423574d --- /dev/null +++ b/python/gtsam/tests/test_ShonanAveraging.py @@ -0,0 +1,139 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Unit tests for Shonan Rotation Averaging. 
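For orientation, the end-to-end call pattern the Shonan tests below exercise is quite short; a sketch using the toyExample.g2o dataset referenced in the tests:

    import gtsam

    params = gtsam.ShonanAveragingParameters3(gtsam.LevenbergMarquardtParams.CeresDefaults())
    shonan = gtsam.ShonanAveraging3(gtsam.findExampleDataFile("toyExample.g2o"), params)
    initial = shonan.initializeRandomly()
    rotations, min_eigenvalue = shonan.run(initial, 3, 10)  # relax from p=3 up to p=10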
+Author: Frank Dellaert +""" +# pylint: disable=invalid-name, no-name-in-module, no-member + +import unittest + +import gtsam +from gtsam import ShonanAveraging3, ShonanAveragingParameters3 +from gtsam.utils.test_case import GtsamTestCase + +DEFAULT_PARAMS = ShonanAveragingParameters3( + gtsam.LevenbergMarquardtParams.CeresDefaults()) + + +def fromExampleName(name: str, parameters=DEFAULT_PARAMS): + g2oFile = gtsam.findExampleDataFile(name) + return ShonanAveraging3(g2oFile, parameters) + + +class TestShonanAveraging(GtsamTestCase): + """Tests for Shonan Rotation Averaging.""" + + def setUp(self): + """Set up common variables.""" + self.shonan = fromExampleName("toyExample.g2o") + + def test_checkConstructor(self): + self.assertEqual(5, self.shonan.nrUnknowns()) + + D = self.shonan.denseD() + self.assertEqual((15, 15), D.shape) + + Q = self.shonan.denseQ() + self.assertEqual((15, 15), Q.shape) + + L = self.shonan.denseL() + self.assertEqual((15, 15), L.shape) + + def test_buildGraphAt(self): + graph = self.shonan.buildGraphAt(5) + self.assertEqual(7, graph.size()) + + def test_checkOptimality(self): + random = self.shonan.initializeRandomlyAt(4) + lambdaMin = self.shonan.computeMinEigenValue(random) + self.assertAlmostEqual(-414.87376657555996, + lambdaMin, places=3) # Regression test + self.assertFalse(self.shonan.checkOptimality(random)) + + def test_tryOptimizingAt3(self): + initial = self.shonan.initializeRandomlyAt(3) + self.assertFalse(self.shonan.checkOptimality(initial)) + result = self.shonan.tryOptimizingAt(3, initial) + self.assertTrue(self.shonan.checkOptimality(result)) + lambdaMin = self.shonan.computeMinEigenValue(result) + self.assertAlmostEqual(-5.427688831332745e-07, + lambdaMin, places=3) # Regression test + self.assertAlmostEqual(0, self.shonan.costAt(3, result), places=3) + SO3Values = self.shonan.roundSolution(result) + self.assertAlmostEqual(0, self.shonan.cost(SO3Values), places=3) + + def test_tryOptimizingAt4(self): + random = self.shonan.initializeRandomlyAt(4) + result = self.shonan.tryOptimizingAt(4, random) + self.assertTrue(self.shonan.checkOptimality(result)) + self.assertAlmostEqual(0, self.shonan.costAt(4, result), places=2) + lambdaMin = self.shonan.computeMinEigenValue(result) + self.assertAlmostEqual(-5.427688831332745e-07, + lambdaMin, places=3) # Regression test + SO3Values = self.shonan.roundSolution(result) + self.assertAlmostEqual(0, self.shonan.cost(SO3Values), places=3) + + def test_initializeWithDescent(self): + random = self.shonan.initializeRandomlyAt(3) + Qstar3 = self.shonan.tryOptimizingAt(3, random) + lambdaMin, minEigenVector = self.shonan.computeMinEigenVector(Qstar3) + initialQ4 = self.shonan.initializeWithDescent( + 4, Qstar3, minEigenVector, lambdaMin) + self.assertAlmostEqual(5, initialQ4.size()) + + def test_run(self): + initial = self.shonan.initializeRandomly() + result, lambdaMin = self.shonan.run(initial, 5, 10) + self.assertAlmostEqual(0, self.shonan.cost(result), places=2) + self.assertAlmostEqual(-5.427688831332745e-07, + lambdaMin, places=3) # Regression test + + def test_runKlausKarcher(self): + # Load 2D toy example + lmParams = gtsam.LevenbergMarquardtParams.CeresDefaults() + # lmParams.setVerbosityLM("SUMMARY") + g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt") + parameters = gtsam.ShonanAveragingParameters2(lmParams) + shonan = gtsam.ShonanAveraging2(g2oFile, parameters) + self.assertAlmostEqual(4, shonan.nrUnknowns()) + + # Check graph building + graph = shonan.buildGraphAt(2) + self.assertAlmostEqual(6, 
graph.size()) + initial = shonan.initializeRandomly() + result, lambdaMin = shonan.run(initial, 2, 10) + self.assertAlmostEqual(0.0008211, shonan.cost(result), places=5) + self.assertAlmostEqual(0, lambdaMin, places=9) # certificate! + + # Test alpha/beta/gamma prior weighting. + def test_PriorWeights(self): + lmParams = gtsam.LevenbergMarquardtParams.CeresDefaults() + params = ShonanAveragingParameters3(lmParams) + self.assertAlmostEqual(0, params.getAnchorWeight(), 1e-9) + self.assertAlmostEqual(1, params.getKarcherWeight(), 1e-9) + self.assertAlmostEqual(0, params.getGaugesWeight(), 1e-9) + alpha, beta, gamma = 100.0, 200.0, 300.0 + params.setAnchorWeight(alpha) + params.setKarcherWeight(beta) + params.setGaugesWeight(gamma) + self.assertAlmostEqual(alpha, params.getAnchorWeight(), 1e-9) + self.assertAlmostEqual(beta, params.getKarcherWeight(), 1e-9) + self.assertAlmostEqual(gamma, params.getGaugesWeight(), 1e-9) + params.setKarcherWeight(0) + shonan = fromExampleName("Klaus3.g2o", params) + + initial = gtsam.Values() + for i in range(3): + initial.insert(i, gtsam.Rot3()) + self.assertAlmostEqual(3.0756, shonan.cost(initial), places=3) + result, _lambdaMin = shonan.run(initial, 3, 3) + self.assertAlmostEqual(0.0015, shonan.cost(result), places=3) + + +if __name__ == '__main__': + unittest.main() diff --git a/cython/gtsam/tests/test_SimpleCamera.py b/python/gtsam/tests/test_SimpleCamera.py similarity index 71% rename from cython/gtsam/tests/test_SimpleCamera.py rename to python/gtsam/tests/test_SimpleCamera.py index a3654a5f1..358eb1f48 100644 --- a/cython/gtsam/tests/test_SimpleCamera.py +++ b/python/gtsam/tests/test_SimpleCamera.py @@ -5,7 +5,7 @@ See LICENSE for the license information -PinholeCameraCal3_S2 unit tests. +SimpleCamera unit tests. 
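The renamed test below keeps working because it simply imports PinholeCameraCal3_S2 under the old SimpleCamera name; the Level constructor it exercises can be sketched as:

    import math
    import gtsam

    K = gtsam.Cal3_S2(625, 625, 0, 0, 0)
    camera = gtsam.PinholeCameraCal3_S2.Level(K, gtsam.Pose2(0.4, 0.3, math.pi / 2), 0.1)
    print(camera.pose())  # level camera pose, 0.1 m above the ground plane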
Author: Frank Dellaert & Duy Nguyen Ta (Python) """ import math @@ -14,30 +14,31 @@ import numpy as np import gtsam -from gtsam import Cal3_S2, Point3, Pose2, Pose3, Rot3, PinholeCameraCal3_S2 +from gtsam import Cal3_S2, Point3, Pose2, Pose3, Rot3, PinholeCameraCal3_S2 as SimpleCamera from gtsam.utils.test_case import GtsamTestCase K = Cal3_S2(625, 625, 0, 0, 0) + class TestSimpleCamera(GtsamTestCase): def test_constructor(self): pose1 = Pose3(Rot3(np.diag([1, -1, -1])), Point3(0, 0, 0.5)) - camera = PinholeCameraCal3_S2(pose1, K) + camera = SimpleCamera(pose1, K) self.gtsamAssertEquals(camera.calibration(), K, 1e-9) self.gtsamAssertEquals(camera.pose(), pose1, 1e-9) def test_level2(self): # Create a level camera, looking in Y-direction - pose2 = Pose2(0.4,0.3,math.pi/2.0) - camera = PinholeCameraCal3_S2.Level(K, pose2, 0.1) + pose2 = Pose2(0.4, 0.3, math.pi/2.0) + camera = SimpleCamera.Level(K, pose2, 0.1) # expected - x = Point3(1,0,0) - y = Point3(0,0,-1) - z = Point3(0,1,0) - wRc = Rot3(x,y,z) - expected = Pose3(wRc,Point3(0.4,0.3,0.1)) + x = Point3(1, 0, 0) + y = Point3(0, 0, -1) + z = Point3(0, 1, 0) + wRc = Rot3(x, y, z) + expected = Pose3(wRc, Point3(0.4, 0.3, 0.1)) self.gtsamAssertEquals(camera.pose(), expected, 1e-9) diff --git a/cython/gtsam/tests/test_StereoVOExample.py b/python/gtsam/tests/test_StereoVOExample.py similarity index 92% rename from cython/gtsam/tests/test_StereoVOExample.py rename to python/gtsam/tests/test_StereoVOExample.py index 3f5f57522..cefc08aab 100644 --- a/cython/gtsam/tests/test_StereoVOExample.py +++ b/python/gtsam/tests/test_StereoVOExample.py @@ -28,11 +28,11 @@ def test_StereoVOExample(self): # - No noise on measurements ## Create keys for variables - x1 = symbol(ord('x'),1) - x2 = symbol(ord('x'),2) - l1 = symbol(ord('l'),1) - l2 = symbol(ord('l'),2) - l3 = symbol(ord('l'),3) + x1 = symbol('x',1) + x2 = symbol('x',2) + l1 = symbol('l',1) + l2 = symbol('l',2) + l3 = symbol('l',3) ## Create graph container and add factors to it graph = gtsam.NonlinearFactorGraph() @@ -44,7 +44,7 @@ def test_StereoVOExample(self): ## Create realistic calibration and measurement noise model # format: fx fy skew cx cy baseline K = gtsam.Cal3_S2Stereo(1000, 1000, 0, 320, 240, 0.2) - stereo_model = gtsam.noiseModel_Diagonal.Sigmas(np.array([1.0, 1.0, 1.0])) + stereo_model = gtsam.noiseModel.Diagonal.Sigmas(np.array([1.0, 1.0, 1.0])) ## Add measurements # pose 1 diff --git a/python/gtsam/tests/test_TranslationRecovery.py b/python/gtsam/tests/test_TranslationRecovery.py new file mode 100644 index 000000000..0fb0518b6 --- /dev/null +++ b/python/gtsam/tests/test_TranslationRecovery.py @@ -0,0 +1,63 @@ +from __future__ import print_function + +import numpy as np +import unittest + +import gtsam + +""" Returns example pose values of 3 points A, B and C in the world frame """ +def ExampleValues(): + T = [] + T.append(gtsam.Point3(np.array([3.14, 1.59, 2.65]))) + T.append(gtsam.Point3(np.array([-1.0590e+00, -3.6017e-02, -1.5720e+00]))) + T.append(gtsam.Point3(np.array([8.5034e+00, 6.7499e+00, -3.6383e+00]))) + + data = gtsam.Values() + for i in range(len(T)): + data.insert(i, gtsam.Pose3(gtsam.Rot3(), T[i])) + return data + +""" Returns binary measurements for the points in the given edges.""" +def SimulateMeasurements(gt_poses, graph_edges): + measurements = gtsam.BinaryMeasurementsUnit3() + for edge in graph_edges: + Ta = gt_poses.atPose3(edge[0]).translation() + Tb = gt_poses.atPose3(edge[1]).translation() + measurements.append(gtsam.BinaryMeasurementUnit3( \ + edge[0], 
edge[1], gtsam.Unit3(Tb - Ta), \ + gtsam.noiseModel.Isotropic.Sigma(3, 0.01))) + return measurements + +""" Tests for the translation recovery class """ +class TestTranslationRecovery(unittest.TestCase): + """Test selected Translation Recovery methods.""" + + def test_constructor(self): + """Construct from binary measurements.""" + algorithm = gtsam.TranslationRecovery(gtsam.BinaryMeasurementsUnit3()) + self.assertIsInstance(algorithm, gtsam.TranslationRecovery) + + def test_run(self): + gt_poses = ExampleValues() + measurements = SimulateMeasurements(gt_poses, [[0, 1], [0, 2], [1, 2]]) + + # Set verbosity to Silent for tests + lmParams = gtsam.LevenbergMarquardtParams() + lmParams.setVerbosityLM("silent") + + algorithm = gtsam.TranslationRecovery(measurements, lmParams) + scale = 2.0 + result = algorithm.run(scale) + + w_aTc = gt_poses.atPose3(2).translation() - gt_poses.atPose3(0).translation() + w_aTb = gt_poses.atPose3(1).translation() - gt_poses.atPose3(0).translation() + w_aTc_expected = w_aTc*scale/np.linalg.norm(w_aTb) + w_aTb_expected = w_aTb*scale/np.linalg.norm(w_aTb) + + np.testing.assert_array_almost_equal(result.atPoint3(0), np.array([0,0,0]), 6, "Origin result is incorrect.") + np.testing.assert_array_almost_equal(result.atPoint3(1), w_aTb_expected, 6, "Point B result is incorrect.") + np.testing.assert_array_almost_equal(result.atPoint3(2), w_aTc_expected, 6, "Point C result is incorrect.") + +if __name__ == "__main__": + unittest.main() + diff --git a/python/gtsam/tests/test_Triangulation.py b/python/gtsam/tests/test_Triangulation.py new file mode 100644 index 000000000..399cf019d --- /dev/null +++ b/python/gtsam/tests/test_Triangulation.py @@ -0,0 +1,134 @@ +""" +GTSAM Copyright 2010-2019, Georgia Tech Research Corporation, +Atlanta, Georgia 30332-0415 +All Rights Reserved + +See LICENSE for the license information + +Test Triangulation +Author: Frank Dellaert & Fan Jiang (Python) +""" +import unittest + +import numpy as np + +import gtsam +from gtsam import (Cal3_S2, Cal3Bundler, CameraSetCal3_S2, + CameraSetCal3Bundler, PinholeCameraCal3_S2, + PinholeCameraCal3Bundler, Point2Vector, Point3, Pose3, + Pose3Vector, Rot3) +from gtsam.utils.test_case import GtsamTestCase + + +class TestVisualISAMExample(GtsamTestCase): + """ Tests for triangulation with shared and individual calibrations """ + + def setUp(self): + """ Set up two camera poses """ + # Looking along X-axis, 1 meter above ground plane (x-y) + upright = Rot3.Ypr(-np.pi / 2, 0., -np.pi / 2) + pose1 = Pose3(upright, Point3(0, 0, 1)) + + # create second camera 1 meter to the right of first camera + pose2 = pose1.compose(Pose3(Rot3(), Point3(1, 0, 0))) + # twoPoses + self.poses = Pose3Vector() + self.poses.append(pose1) + self.poses.append(pose2) + + # landmark ~5 meters infront of camera + self.landmark = Point3(5, 0.5, 1.2) + + def generate_measurements(self, calibration, camera_model, cal_params, camera_set=None): + """ + Generate vector of measurements for given calibration and camera model. + + Args: + calibration: Camera calibration e.g. Cal3_S2 + camera_model: Camera model e.g. PinholeCameraCal3_S2 + cal_params: Iterable of camera parameters for `calibration` e.g. 
[K1, K2] + camera_set: Cameraset object (for individual calibrations) + Returns: + list of measurements and list/CameraSet object for cameras + """ + if camera_set is not None: + cameras = camera_set() + else: + cameras = [] + measurements = Point2Vector() + + for k, pose in zip(cal_params, self.poses): + K = calibration(*k) + camera = camera_model(pose, K) + cameras.append(camera) + z = camera.project(self.landmark) + measurements.append(z) + + return measurements, cameras + + def test_TriangulationExample(self): + """ Tests triangulation with shared Cal3_S2 calibration""" + # Some common constants + sharedCal = (1500, 1200, 0, 640, 480) + + measurements, _ = self.generate_measurements(Cal3_S2, + PinholeCameraCal3_S2, + (sharedCal, sharedCal)) + + triangulated_landmark = gtsam.triangulatePoint3(self.poses, + Cal3_S2(sharedCal), + measurements, + rank_tol=1e-9, + optimize=True) + self.gtsamAssertEquals(self.landmark, triangulated_landmark, 1e-9) + + # Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814) + measurements_noisy = Point2Vector() + measurements_noisy.append(measurements[0] - np.array([0.1, 0.5])) + measurements_noisy.append(measurements[1] - np.array([-0.2, 0.3])) + + triangulated_landmark = gtsam.triangulatePoint3(self.poses, + Cal3_S2(sharedCal), + measurements_noisy, + rank_tol=1e-9, + optimize=True) + + self.gtsamAssertEquals(self.landmark, triangulated_landmark, 1e-2) + + def test_distinct_Ks(self): + """ Tests triangulation with individual Cal3_S2 calibrations """ + # two camera parameters + K1 = (1500, 1200, 0, 640, 480) + K2 = (1600, 1300, 0, 650, 440) + + measurements, cameras = self.generate_measurements(Cal3_S2, + PinholeCameraCal3_S2, + (K1, K2), + camera_set=CameraSetCal3_S2) + + triangulated_landmark = gtsam.triangulatePoint3(cameras, + measurements, + rank_tol=1e-9, + optimize=True) + self.gtsamAssertEquals(self.landmark, triangulated_landmark, 1e-9) + + def test_distinct_Ks_Bundler(self): + """ Tests triangulation with individual Cal3Bundler calibrations""" + # two camera parameters + K1 = (1500, 0, 0, 640, 480) + K2 = (1600, 0, 0, 650, 440) + + measurements, cameras = self.generate_measurements(Cal3Bundler, + PinholeCameraCal3Bundler, + (K1, K2), + camera_set=CameraSetCal3Bundler) + + triangulated_landmark = gtsam.triangulatePoint3(cameras, + measurements, + rank_tol=1e-9, + optimize=True) + self.gtsamAssertEquals(self.landmark, triangulated_landmark, 1e-9) + + +if __name__ == "__main__": + unittest.main() diff --git a/cython/gtsam/tests/test_Values.py b/python/gtsam/tests/test_Values.py similarity index 94% rename from cython/gtsam/tests/test_Values.py rename to python/gtsam/tests/test_Values.py index 20634a21c..dddd11c40 100644 --- a/cython/gtsam/tests/test_Values.py +++ b/python/gtsam/tests/test_Values.py @@ -15,8 +15,7 @@ import gtsam from gtsam import (Cal3_S2, Cal3Bundler, Cal3DS2, EssentialMatrix, Point2, - Point3, Pose2, Pose3, Rot2, Rot3, Unit3, Values, - imuBias_ConstantBias) + Point3, Pose2, Pose3, Rot2, Rot3, Unit3, Values, imuBias) from gtsam.utils.test_case import GtsamTestCase @@ -37,7 +36,7 @@ def test_values(self): values.insert(7, Cal3DS2()) values.insert(8, Cal3Bundler()) values.insert(9, E) - values.insert(10, imuBias_ConstantBias()) + values.insert(10, imuBias.ConstantBias()) # Special cases for Vectors and Matrices # Note that gtsam's Eigen Vectors and Matrices requires double-precision @@ -70,8 +69,8 @@ def test_values(self): self.gtsamAssertEquals(values.atCal3DS2(7), Cal3DS2(), tol) 
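The triangulation tests above boil down to a short recipe; a sketch with a shared Cal3_S2 calibration and two hand-picked poses:

    import numpy as np
    import gtsam

    K = gtsam.Cal3_S2(1500, 1200, 0, 640, 480)
    pose1 = gtsam.Pose3(gtsam.Rot3.Ypr(-np.pi / 2, 0., -np.pi / 2), gtsam.Point3(0, 0, 1))
    pose2 = pose1.compose(gtsam.Pose3(gtsam.Rot3(), gtsam.Point3(1, 0, 0)))
    poses = gtsam.Pose3Vector()
    poses.append(pose1)
    poses.append(pose2)

    landmark = gtsam.Point3(5, 0.5, 1.2)
    measurements = gtsam.Point2Vector()
    for pose in [pose1, pose2]:
        measurements.append(gtsam.PinholeCameraCal3_S2(pose, K).project(landmark))

    point = gtsam.triangulatePoint3(poses, K, measurements, rank_tol=1e-9, optimize=True)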
self.gtsamAssertEquals(values.atCal3Bundler(8), Cal3Bundler(), tol) self.gtsamAssertEquals(values.atEssentialMatrix(9), E, tol) - self.gtsamAssertEquals(values.atimuBias_ConstantBias( - 10), imuBias_ConstantBias(), tol) + self.gtsamAssertEquals(values.atConstantBias( + 10), imuBias.ConstantBias(), tol) # special cases for Vector and Matrix: actualVector = values.atVector(11) diff --git a/cython/gtsam/tests/test_VisualISAMExample.py b/python/gtsam/tests/test_VisualISAMExample.py similarity index 93% rename from cython/gtsam/tests/test_VisualISAMExample.py rename to python/gtsam/tests/test_VisualISAMExample.py index 99d7e6160..6eb05eeee 100644 --- a/cython/gtsam/tests/test_VisualISAMExample.py +++ b/python/gtsam/tests/test_VisualISAMExample.py @@ -46,11 +46,11 @@ def test_VisualISAMExample(self): isam, result = visual_isam.step(data, isam, result, truth, currentPose) for i in range(len(truth.cameras)): - pose_i = result.atPose3(symbol(ord('x'), i)) + pose_i = result.atPose3(symbol('x', i)) self.gtsamAssertEquals(pose_i, truth.cameras[i].pose(), 1e-5) for j in range(len(truth.points)): - point_j = result.atPoint3(symbol(ord('l'), j)) + point_j = result.atPoint3(symbol('l', j)) self.gtsamAssertEquals(point_j, truth.points[j], 1e-5) if __name__ == "__main__": diff --git a/cython/gtsam/tests/test_dataset.py b/python/gtsam/tests/test_dataset.py similarity index 87% rename from cython/gtsam/tests/test_dataset.py rename to python/gtsam/tests/test_dataset.py index 60fb9450d..87fc2ad54 100644 --- a/cython/gtsam/tests/test_dataset.py +++ b/python/gtsam/tests/test_dataset.py @@ -15,7 +15,7 @@ import unittest import gtsam -from gtsam import BetweenFactorPose3, BetweenFactorPose3s +from gtsam import BetweenFactorPose3 from gtsam.utils.test_case import GtsamTestCase @@ -37,8 +37,8 @@ def test_readG2o3D(self): def test_parse3Dfactors(self): """Test parsing into data structure.""" factors = gtsam.parse3DFactors(self.pose3_example_g2o_file) - self.assertEqual(factors.size(), 6) - self.assertIsInstance(factors.at(0), BetweenFactorPose3) + self.assertEqual(len(factors), 6) + self.assertIsInstance(factors[0], BetweenFactorPose3) if __name__ == '__main__': diff --git a/cython/gtsam/tests/test_dsf_map.py b/python/gtsam/tests/test_dsf_map.py similarity index 70% rename from cython/gtsam/tests/test_dsf_map.py rename to python/gtsam/tests/test_dsf_map.py index 73ddcb050..e36657178 100644 --- a/cython/gtsam/tests/test_dsf_map.py +++ b/python/gtsam/tests/test_dsf_map.py @@ -35,6 +35,20 @@ def key(index_pair): dsf.merge(pair1, pair2) self.assertEqual(key(dsf.find(pair1)), key(dsf.find(pair2))) + def test_sets(self): + from gtsam import IndexPair + dsf = gtsam.DSFMapIndexPair() + dsf.merge(IndexPair(0, 1), IndexPair(1,2)) + dsf.merge(IndexPair(0, 1), IndexPair(3,4)) + dsf.merge(IndexPair(4,5), IndexPair(6,8)) + sets = dsf.sets() + + for i in sets: + s = sets[i] + for val in gtsam.IndexPairSetAsArray(s): + val.i() + val.j() + if __name__ == '__main__': unittest.main() diff --git a/cython/gtsam/tests/test_initialize_pose3.py b/python/gtsam/tests/test_initialize_pose3.py similarity index 96% rename from cython/gtsam/tests/test_initialize_pose3.py rename to python/gtsam/tests/test_initialize_pose3.py index 3aa7e3470..6d7f66653 100644 --- a/cython/gtsam/tests/test_initialize_pose3.py +++ b/python/gtsam/tests/test_initialize_pose3.py @@ -24,7 +24,7 @@ class TestValues(GtsamTestCase): def setUp(self): - model = gtsam.noiseModel_Isotropic.Sigma(6, 0.1) + model = gtsam.noiseModel.Isotropic.Sigma(6, 0.1) # We consider a small 
graph: # symbolic FG @@ -64,9 +64,8 @@ def test_buildPose3graph(self): def test_orientations(self): pose3Graph = gtsam.InitializePose3.buildPose3graph(self.graph) - initial = gtsam.InitializePose3.computeOrientationsChordal(pose3Graph) - + # comparison is up to M_PI, that's why we add some multiples of 2*M_PI self.gtsamAssertEquals(initial.atRot3(x0), self.R0, 1e-6) self.gtsamAssertEquals(initial.atRot3(x1), self.R1, 1e-6) @@ -77,7 +76,7 @@ def test_initializePoses(self): g2oFile = gtsam.findExampleDataFile("pose3example-grid") is3D = True inputGraph, expectedValues = gtsam.readG2o(g2oFile, is3D) - priorModel = gtsam.noiseModel_Unit.Create(6) + priorModel = gtsam.noiseModel.Unit.Create(6) inputGraph.add(gtsam.PriorFactorPose3(0, Pose3(), priorModel)) initial = gtsam.InitializePose3.initialize(inputGraph) diff --git a/cython/gtsam/tests/test_logging_optimizer.py b/python/gtsam/tests/test_logging_optimizer.py similarity index 72% rename from cython/gtsam/tests/test_logging_optimizer.py rename to python/gtsam/tests/test_logging_optimizer.py index c857a6f7d..47eb32e7b 100644 --- a/cython/gtsam/tests/test_logging_optimizer.py +++ b/python/gtsam/tests/test_logging_optimizer.py @@ -4,6 +4,12 @@ """ # pylint: disable=invalid-name +import sys +if sys.version_info.major >= 3: + from io import StringIO +else: + from cStringIO import StringIO + import unittest from datetime import datetime @@ -15,7 +21,7 @@ from gtsam.utils.logging_optimizer import gtsam_optimize KEY = 0 -MODEL = gtsam.noiseModel_Unit.Create(3) +MODEL = gtsam.noiseModel.Unit.Create(3) class TestOptimizeComet(GtsamTestCase): @@ -37,11 +43,24 @@ def setUp(self): self.optimizer = gtsam.GaussNewtonOptimizer( graph, initial, self.params) + self.lmparams = gtsam.LevenbergMarquardtParams() + self.lmoptimizer = gtsam.LevenbergMarquardtOptimizer( + graph, initial, self.lmparams + ) + + # setup output capture + self.capturedOutput = StringIO() + sys.stdout = self.capturedOutput + + def tearDown(self): + """Reset print capture.""" + sys.stdout = sys.__stdout__ + def test_simple_printing(self): """Test with a simple hook.""" # Provide a hook that just prints - def hook(_, error: float): + def hook(_, error): print(error) # Only thing we require from optimizer is an iterate method @@ -51,6 +70,16 @@ def hook(_, error: float): actual = self.optimizer.values() self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6) + def test_lm_simple_printing(self): + """Make sure we are properly terminating LM""" + def hook(_, error): + print(error) + + gtsam_optimize(self.lmoptimizer, self.lmparams, hook) + + actual = self.lmoptimizer.values() + self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6) + @unittest.skip("Not a test we want run every time, as needs comet.ml account") def test_comet(self): """Test with a comet hook.""" @@ -65,7 +94,7 @@ def test_comet(self): + str(time.hour)+":"+str(time.minute)+":"+str(time.second)) # I want to do some comet thing here - def hook(optimizer, error: float): + def hook(optimizer, error): comet.log_metric("Karcher error", error, optimizer.iterations()) @@ -76,4 +105,4 @@ def hook(optimizer, error: float): self.gtsamAssertEquals(actual.atRot3(KEY), self.expected) if __name__ == "__main__": - unittest.main() \ No newline at end of file + unittest.main() diff --git a/python/gtsam/utils/__init__.py b/python/gtsam/utils/__init__.py new file mode 100644 index 000000000..5ee733ba4 --- /dev/null +++ b/python/gtsam/utils/__init__.py @@ -0,0 +1,22 @@ +import glob +import os.path as osp + + +def 
findExampleDataFile(name): + """ + Find the example data file specified by `name`. + """ + # This is okay since gtsam will already be loaded + # before this function is accessed. Thus no effect. + import gtsam + + site_package_path = gtsam.__path__[0] + # add the * at the end to glob the entire directory + data_path = osp.join(site_package_path, "Data", "*") + + extensions = ("", ".graph", ".txt", ".out", ".xml", ".g2o") + + for data_file_path in glob.glob(data_path, recursive=True): + for ext in extensions: + if (name + ext) == osp.basename(data_file_path): + return data_file_path diff --git a/cython/gtsam/utils/circlePose3.py b/python/gtsam/utils/circlePose3.py similarity index 79% rename from cython/gtsam/utils/circlePose3.py rename to python/gtsam/utils/circlePose3.py index 7012548f4..e1def9427 100644 --- a/cython/gtsam/utils/circlePose3.py +++ b/python/gtsam/utils/circlePose3.py @@ -1,9 +1,10 @@ import gtsam +import math import numpy as np -from math import pi, cos, sin +from math import pi -def circlePose3(numPoses=8, radius=1.0, symbolChar=0): +def circlePose3(numPoses=8, radius=1.0, symbolChar='\0'): """ circlePose3 generates a set of poses in a circle. This function returns those poses inside a gtsam.Values object, with sequential @@ -18,10 +19,6 @@ def circlePose3(numPoses=8, radius=1.0, symbolChar=0): Vehicle at p0 is looking towards y axis (X-axis points towards world y) """ - # Force symbolChar to be a single character - if type(symbolChar) is str: - symbolChar = ord(symbolChar[0]) - values = gtsam.Values() theta = 0.0 dtheta = 2 * pi / numPoses @@ -29,7 +26,7 @@ def circlePose3(numPoses=8, radius=1.0, symbolChar=0): np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1.]], order='F')) for i in range(numPoses): key = gtsam.symbol(symbolChar, i) - gti = gtsam.Point3(radius * cos(theta), radius * sin(theta), 0) + gti = gtsam.Point3(radius * math.cos(theta), radius * math.sin(theta), 0) oRi = gtsam.Rot3.Yaw( -theta) # negative yaw goes counterclockwise, with Z down ! gTi = gtsam.Pose3(gRo.compose(oRi), gti) diff --git a/cython/gtsam/utils/logging_optimizer.py b/python/gtsam/utils/logging_optimizer.py similarity index 84% rename from cython/gtsam/utils/logging_optimizer.py rename to python/gtsam/utils/logging_optimizer.py index b201bb8aa..3d9175951 100644 --- a/cython/gtsam/utils/logging_optimizer.py +++ b/python/gtsam/utils/logging_optimizer.py @@ -4,15 +4,11 @@ """ # pylint: disable=invalid-name -from typing import TypeVar - from gtsam import NonlinearOptimizer, NonlinearOptimizerParams import gtsam -T = TypeVar('T') - -def optimize(optimizer: T, check_convergence, hook): +def optimize(optimizer, check_convergence, hook): """ Given an optimizer and a convergence check, iterate until convergence. After each iteration, hook(optimizer, error) is called. After the function, use values and errors to get the result. @@ -36,8 +32,8 @@ def optimize(optimizer: T, check_convergence, hook): current_error = new_error -def gtsam_optimize(optimizer: NonlinearOptimizer, - params: NonlinearOptimizerParams, +def gtsam_optimize(optimizer, + params, hook): """ Given an optimizer and params, iterate until convergence. After each iteration, hook(optimizer) is called. 
@@ -50,5 +46,7 @@ def gtsam_optimize(optimizer: NonlinearOptimizer, def check_convergence(optimizer, current_error, new_error): return (optimizer.iterations() >= params.getMaxIterations()) or ( gtsam.checkConvergence(params.getRelativeErrorTol(), params.getAbsoluteErrorTol(), params.getErrorTol(), - current_error, new_error)) + current_error, new_error)) or ( + isinstance(optimizer, gtsam.LevenbergMarquardtOptimizer) and optimizer.lambda_() > params.getlambdaUpperBound()) optimize(optimizer, check_convergence, hook) + return optimizer.values() diff --git a/cython/gtsam/utils/plot.py b/python/gtsam/utils/plot.py similarity index 65% rename from cython/gtsam/utils/plot.py rename to python/gtsam/utils/plot.py index b67444fc1..7f48d03a3 100644 --- a/cython/gtsam/utils/plot.py +++ b/python/gtsam/utils/plot.py @@ -1,9 +1,11 @@ """Various plotting utlities.""" -import numpy as np +# pylint: disable=no-member, invalid-name + import matplotlib.pyplot as plt +import numpy as np from matplotlib import patches -from mpl_toolkits.mplot3d import Axes3D +from mpl_toolkits.mplot3d import Axes3D # pylint: disable=unused-import import gtsam @@ -34,18 +36,15 @@ def set_axes_equal(fignum): ax.set_zlim3d([origin[2] - radius, origin[2] + radius]) -def ellipsoid(xc, yc, zc, rx, ry, rz, n): +def ellipsoid(rx, ry, rz, n): """ Numpy equivalent of Matlab's ellipsoid function. Args: - xc (double): Center of ellipsoid in X-axis. - yc (double): Center of ellipsoid in Y-axis. - zc (double): Center of ellipsoid in Z-axis. rx (double): Radius of ellipsoid in X-axis. ry (double): Radius of ellipsoid in Y-axis. rz (double): Radius of ellipsoid in Z-axis. - n (int): The granularity of the ellipsoid plotted. + n (int): The granularity of the ellipsoid plotted. Returns: tuple[numpy.ndarray]: The points in the x, y and z axes to use for the surface plot. @@ -70,7 +69,8 @@ def plot_covariance_ellipse_3d(axes, origin, P, scale=1, n=8, alpha=0.5): Args: axes (matplotlib.axes.Axes): Matplotlib axes. origin (gtsam.Point3): The origin in the world frame. - P (numpy.ndarray): The marginal covariance matrix of the 3D point which will be represented as an ellipse. + P (numpy.ndarray): The marginal covariance matrix of the 3D point + which will be represented as an ellipse. scale (float): Scaling factor of the radii of the covariance ellipse. n (int): Defines the granularity of the ellipse. Higher values indicate finer ellipses. alpha (float): Transparency value for the plotted surface in the range [0, 1]. @@ -83,7 +83,7 @@ def plot_covariance_ellipse_3d(axes, origin, P, scale=1, n=8, alpha=0.5): rx, ry, rz = radii # generate data for "unrotated" ellipsoid - xc, yc, zc = ellipsoid(0, 0, 0, rx, ry, rz, n) + xc, yc, zc = ellipsoid(rx, ry, rz, n) # rotate data with orientation matrix U and center c data = np.kron(U[:, 0:1], xc) + np.kron(U[:, 1:2], yc) + \ @@ -104,12 +104,13 @@ def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None): axes (matplotlib.axes.Axes): Matplotlib axes. pose (gtsam.Pose2): The pose to be plotted. axis_length (float): The length of the camera axes. - covariance (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + covariance (numpy.ndarray): Marginal covariance matrix to plot + the uncertainty of the estimation. 
""" # get rotation and translation (center) gRp = pose.rotation().matrix() # rotation from pose to global t = pose.translation() - origin = np.array([t.x(), t.y()]) + origin = t # draw the camera axes x_axis = origin + gRp[:, 0] * axis_length @@ -135,7 +136,8 @@ def plot_pose2_on_axes(axes, pose, axis_length=0.1, covariance=None): axes.add_patch(e1) -def plot_pose2(fignum, pose, axis_length=0.1, covariance=None): +def plot_pose2(fignum, pose, axis_length=0.1, covariance=None, + axis_labels=('X axis', 'Y axis', 'Z axis')): """ Plot a 2D pose on given figure with given `axis_length`. @@ -143,7 +145,9 @@ def plot_pose2(fignum, pose, axis_length=0.1, covariance=None): fignum (int): Integer representing the figure number to use for plotting. pose (gtsam.Pose2): The pose to be plotted. axis_length (float): The length of the camera axes. - covariance (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + covariance (numpy.ndarray): Marginal covariance matrix to plot + the uncertainty of the estimation. + axis_labels (iterable[string]): List of axis labels to set. """ # get figure object fig = plt.figure(fignum) @@ -151,6 +155,11 @@ def plot_pose2(fignum, pose, axis_length=0.1, covariance=None): plot_pose2_on_axes(axes, pose, axis_length=axis_length, covariance=covariance) + axes.set_xlabel(axis_labels[0]) + axes.set_ylabel(axis_labels[1]) + + return fig + def plot_point3_on_axes(axes, point, linespec, P=None): """ @@ -162,12 +171,13 @@ def plot_point3_on_axes(axes, point, linespec, P=None): linespec (string): String representing formatting options for Matplotlib. P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. """ - axes.plot([point.x()], [point.y()], [point.z()], linespec) + axes.plot([point[0]], [point[1]], [point[2]], linespec) if P is not None: - plot_covariance_ellipse_3d(axes, point.vector(), P) + plot_covariance_ellipse_3d(axes, point, P) -def plot_point3(fignum, point, linespec, P=None): +def plot_point3(fignum, point, linespec, P=None, + axis_labels=('X axis', 'Y axis', 'Z axis')): """ Plot a 3D point on given figure with given `linespec`. @@ -176,13 +186,25 @@ def plot_point3(fignum, point, linespec, P=None): point (gtsam.Point3): The point to be plotted. linespec (string): String representing formatting options for Matplotlib. P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + axis_labels (iterable[string]): List of axis labels to set. + + Returns: + fig: The matplotlib figure. + """ fig = plt.figure(fignum) axes = fig.gca(projection='3d') plot_point3_on_axes(axes, point, linespec, P) + axes.set_xlabel(axis_labels[0]) + axes.set_ylabel(axis_labels[1]) + axes.set_zlabel(axis_labels[2]) + + return fig -def plot_3d_points(fignum, values, linespec="g*", marginals=None): + +def plot_3d_points(fignum, values, linespec="g*", marginals=None, + title="3D Points", axis_labels=('X axis', 'Y axis', 'Z axis')): """ Plots the Point3s in `values`, with optional covariances. Finds all the Point3 objects in the given Values object and plots them. @@ -193,27 +215,34 @@ def plot_3d_points(fignum, values, linespec="g*", marginals=None): fignum (int): Integer representing the figure number to use for plotting. values (gtsam.Values): Values dictionary consisting of points to be plotted. linespec (string): String representing formatting options for Matplotlib. - covariance (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. 
+ marginals (numpy.ndarray): Marginal covariance matrix to plot the + uncertainty of the estimation. + title (string): The title of the plot. + axis_labels (iterable[string]): List of axis labels to set. """ keys = values.keys() # Plot points and covariance matrices - for i in range(keys.size()): + for key in keys: try: - key = keys.at(i) point = values.atPoint3(key) if marginals is not None: covariance = marginals.marginalCovariance(key) else: covariance = None - plot_point3(fignum, point, linespec, covariance) + fig = plot_point3(fignum, point, linespec, covariance, + axis_labels=axis_labels) except RuntimeError: continue # I guess it's not a Point3 + fig = plt.figure(fignum) + fig.suptitle(title) + fig.canvas.set_window_title(title.lower()) + def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1): """ @@ -227,7 +256,7 @@ def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1): """ # get rotation and translation (center) gRp = pose.rotation().matrix() # rotation from pose to global - origin = pose.translation().vector() + origin = pose.translation() # draw the camera axes x_axis = origin + gRp[:, 0] * axis_length @@ -251,7 +280,8 @@ def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1): plot_covariance_ellipse_3d(axes, origin, gPp) -def plot_pose3(fignum, pose, axis_length=0.1, P=None): +def plot_pose3(fignum, pose, axis_length=0.1, P=None, + axis_labels=('X axis', 'Y axis', 'Z axis')): """ Plot a 3D pose on given figure with given `axis_length`. @@ -260,6 +290,10 @@ def plot_pose3(fignum, pose, axis_length=0.1, P=None): pose (gtsam.Pose3): 3D pose to be plotted. linespec (string): String representing formatting options for Matplotlib. P (numpy.ndarray): Marginal covariance matrix to plot the uncertainty of the estimation. + axis_labels (iterable[string]): List of axis labels to set. + + Returns: + fig: The matplotlib figure. """ # get figure object fig = plt.figure(fignum) @@ -267,59 +301,94 @@ def plot_pose3(fignum, pose, axis_length=0.1, P=None): plot_pose3_on_axes(axes, pose, P=P, axis_length=axis_length) + axes.set_xlabel(axis_labels[0]) + axes.set_ylabel(axis_labels[1]) + axes.set_zlabel(axis_labels[2]) -def plot_trajectory(fignum, values, scale=1, marginals=None): + return fig + + +def plot_trajectory(fignum, values, scale=1, marginals=None, + title="Plot Trajectory", axis_labels=('X axis', 'Y axis', 'Z axis')): """ - Plot a complete 3D trajectory using poses in `values`. + Plot a complete 2D/3D trajectory using poses in `values`. Args: fignum (int): Integer representing the figure number to use for plotting. - values (gtsam.Values): Values dict containing the poses. + values (gtsam.Values): Values containing some Pose2 and/or Pose3 values. scale (float): Value to scale the poses by. marginals (gtsam.Marginals): Marginalized probability values of the estimation. Used to plot uncertainty bounds. + title (string): The title of the plot. + axis_labels (iterable[string]): List of axis labels to set. 
+ """ + fig = plt.figure(fignum) + axes = fig.gca(projection='3d') + + axes.set_xlabel(axis_labels[0]) + axes.set_ylabel(axis_labels[1]) + axes.set_zlabel(axis_labels[2]) + + # Plot 2D poses, if any + poses = gtsam.utilities.allPose2s(values) + for key in poses.keys(): + pose = poses.atPose2(key) + if marginals: + covariance = marginals.marginalCovariance(key) + else: + covariance = None + + plot_pose2_on_axes(axes, pose, covariance=covariance, + axis_length=scale) + + # Then 3D poses, if any + poses = gtsam.utilities.allPose3s(values) + for key in poses.keys(): + pose = poses.atPose3(key) + if marginals: + covariance = marginals.marginalCovariance(key) + else: + covariance = None + + plot_pose3_on_axes(axes, pose, P=covariance, + axis_length=scale) + + fig.suptitle(title) + fig.canvas.set_window_title(title.lower()) + + +def plot_incremental_trajectory(fignum, values, start=0, + scale=1, marginals=None, + time_interval=0.0): """ - pose3Values = gtsam.utilities_allPose3s(values) - keys = gtsam.KeyVector(pose3Values.keys()) - lastIndex = None + Incrementally plot a complete 3D trajectory using poses in `values`. - for i in range(keys.size()): - key = keys.at(i) - try: - pose = pose3Values.atPose3(key) - except: - print("Warning: no Pose3 at key: {0}".format(key)) - - if lastIndex is not None: - lastKey = keys.at(lastIndex) - try: - lastPose = pose3Values.atPose3(lastKey) - except: - print("Warning: no Pose3 at key: {0}".format(lastKey)) - pass - - if marginals: - covariance = marginals.marginalCovariance(lastKey) - else: - covariance = None + Args: + fignum (int): Integer representing the figure number to use for plotting. + values (gtsam.Values): Values dict containing the poses. + start (int): Starting index to start plotting from. + scale (float): Value to scale the poses by. + marginals (gtsam.Marginals): Marginalized probability values of the estimation. + Used to plot uncertainty bounds. + time_interval (float): Time in seconds to pause between each rendering. + Used to create animation effect. 
+ """ + fig = plt.figure(fignum) + axes = fig.gca(projection='3d') - plot_pose3(fignum, lastPose, P=covariance, - axis_length=scale) + poses = gtsam.utilities.allPose3s(values) + keys = gtsam.KeyVector(poses.keys()) - lastIndex = i + for key in keys[start:]: + if values.exists(key): + pose_i = values.atPose3(key) + plot_pose3(fignum, pose_i, scale) - # Draw final pose - if lastIndex is not None: - lastKey = keys.at(lastIndex) - try: - lastPose = pose3Values.atPose3(lastKey) - if marginals: - covariance = marginals.marginalCovariance(lastKey) - else: - covariance = None + # Update the plot space to encompass all plotted points + axes.autoscale() - plot_pose3(fignum, lastPose, P=covariance, - axis_length=scale) + # Set the 3 axes equal + set_axes_equal(fignum) - except: - pass + # Pause for a fixed amount of seconds + plt.pause(time_interval) diff --git a/cython/gtsam/utils/test_case.py b/python/gtsam/utils/test_case.py similarity index 80% rename from cython/gtsam/utils/test_case.py rename to python/gtsam/utils/test_case.py index 7df1e6ee9..3effd7f65 100644 --- a/cython/gtsam/utils/test_case.py +++ b/python/gtsam/utils/test_case.py @@ -21,7 +21,11 @@ def gtsamAssertEquals(self, actual, expected, tol=1e-9): Keyword Arguments: tol {float} -- tolerance passed to 'equals', default 1e-9 """ - equal = actual.equals(expected, tol) + import numpy + if isinstance(expected, numpy.ndarray): + equal = numpy.allclose(actual, expected, atol=tol) + else: + equal = actual.equals(expected, tol) if not equal: raise self.failureException( "Values are not equal:\n{}!={}".format(actual, expected)) diff --git a/cython/gtsam/utils/visual_data_generator.py b/python/gtsam/utils/visual_data_generator.py similarity index 79% rename from cython/gtsam/utils/visual_data_generator.py rename to python/gtsam/utils/visual_data_generator.py index f04588e70..32ccbc8fa 100644 --- a/cython/gtsam/utils/visual_data_generator.py +++ b/python/gtsam/utils/visual_data_generator.py @@ -1,9 +1,10 @@ from __future__ import print_function import numpy as np - +import math +from math import pi import gtsam -from gtsam import Cal3_S2, PinholeCameraCal3_S2, Point2, Point3, Pose3 +from gtsam import Point3, Pose3, PinholeCameraCal3_S2, Cal3_S2 class Options: @@ -31,7 +32,7 @@ class GroundTruth: def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4): self.K = K self.cameras = [Pose3()] * nrCameras - self.points = [Point3()] * nrPoints + self.points = [Point3(0, 0, 0)] * nrPoints def print_(self, s=""): print(s) @@ -55,20 +56,20 @@ class NoiseModels: def __init__(self, K=Cal3_S2(), nrCameras=3, nrPoints=4): self.K = K - self.Z = [x[:] for x in [[Point2()] * nrPoints] * nrCameras] + self.Z = [x[:] for x in [[gtsam.Point2()] * nrPoints] * nrCameras] self.J = [x[:] for x in [[0] * nrPoints] * nrCameras] self.odometry = [Pose3()] * nrCameras # Set Noise parameters self.noiseModels = Data.NoiseModels() - self.noiseModels.posePrior = gtsam.noiseModel_Diagonal.Sigmas( + self.noiseModels.posePrior = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.001, 0.001, 0.001, 0.1, 0.1, 0.1])) - # noiseModels.odometry = gtsam.noiseModel_Diagonal.Sigmas( + # noiseModels.odometry = gtsam.noiseModel.Diagonal.Sigmas( # np.array([0.001,0.001,0.001,0.1,0.1,0.1])) - self.noiseModels.odometry = gtsam.noiseModel_Diagonal.Sigmas( + self.noiseModels.odometry = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05, 0.2, 0.2, 0.2])) - self.noiseModels.pointPrior = gtsam.noiseModel_Isotropic.Sigma(3, 0.1) - self.noiseModels.measurement = 
gtsam.noiseModel_Isotropic.Sigma(2, 1.0) + self.noiseModels.pointPrior = gtsam.noiseModel.Isotropic.Sigma(3, 0.1) + self.noiseModels.measurement = gtsam.noiseModel.Isotropic.Sigma(2, 1.0) def generate_data(options): @@ -84,8 +85,8 @@ def generate_data(options): if options.triangle: # Create a triangle target, just 3 points on a plane r = 10 for j in range(len(truth.points)): - theta = j * 2 * np.pi / nrPoints - truth.points[j] = Point3(r * np.cos(theta), r * np.sin(theta), 0) + theta = j * 2 * pi / nrPoints + truth.points[j] = Point3(r * math.cos(theta), r * math.sin(theta), 0) else: # 3D landmarks as vertices of a cube truth.points = [ Point3(10, 10, 10), Point3(-10, 10, 10), @@ -98,10 +99,10 @@ def generate_data(options): height = 10 r = 40 for i in range(options.nrCameras): - theta = i * 2 * np.pi / options.nrCameras - t = Point3(r * np.cos(theta), r * np.sin(theta), height) + theta = i * 2 * pi / options.nrCameras + t = Point3(r * math.cos(theta), r * math.sin(theta), height) truth.cameras[i] = PinholeCameraCal3_S2.Lookat(t, - Point3(), + Point3(0, 0, 0), Point3(0, 0, 1), truth.K) # Create measurements diff --git a/cython/gtsam/utils/visual_isam.py b/python/gtsam/utils/visual_isam.py similarity index 88% rename from cython/gtsam/utils/visual_isam.py rename to python/gtsam/utils/visual_isam.py index b0ebe68c3..a8fed4b23 100644 --- a/cython/gtsam/utils/visual_isam.py +++ b/python/gtsam/utils/visual_isam.py @@ -25,7 +25,7 @@ def initialize(data, truth, options): newFactors = gtsam.NonlinearFactorGraph() initialEstimates = gtsam.Values() for i in range(2): - ii = symbol(ord('x'), i) + ii = symbol('x', i) if i == 0: if options.hardConstraint: # add hard constraint newFactors.add( @@ -41,10 +41,10 @@ def initialize(data, truth, options): # Add visual measurement factors from two first poses and initialize # observed landmarks for i in range(2): - ii = symbol(ord('x'), i) + ii = symbol('x', i) for k in range(len(data.Z[i])): j = data.J[i][k] - jj = symbol(ord('l'), j) + jj = symbol('l', j) newFactors.add( gtsam.GenericProjectionFactorCal3_S2(data.Z[i][ k], data.noiseModels.measurement, ii, jj, data.K)) @@ -59,8 +59,8 @@ def initialize(data, truth, options): # Add odometry between frames 0 and 1 newFactors.add( gtsam.BetweenFactorPose3( - symbol(ord('x'), 0), - symbol(ord('x'), 1), data.odometry[1], data.noiseModels.odometry)) + symbol('x', 0), + symbol('x', 1), data.odometry[1], data.noiseModels.odometry)) # Update ISAM if options.batchInitialization: # Do a full optimize for first two poses @@ -98,28 +98,28 @@ def step(data, isam, result, truth, currPoseIndex): odometry = data.odometry[prevPoseIndex] newFactors.add( gtsam.BetweenFactorPose3( - symbol(ord('x'), prevPoseIndex), - symbol(ord('x'), currPoseIndex), odometry, + symbol('x', prevPoseIndex), + symbol('x', currPoseIndex), odometry, data.noiseModels.odometry)) # Add visual measurement factors and initializations as necessary for k in range(len(data.Z[currPoseIndex])): zij = data.Z[currPoseIndex][k] j = data.J[currPoseIndex][k] - jj = symbol(ord('l'), j) + jj = symbol('l', j) newFactors.add( gtsam.GenericProjectionFactorCal3_S2( zij, data.noiseModels.measurement, - symbol(ord('x'), currPoseIndex), jj, data.K)) + symbol('x', currPoseIndex), jj, data.K)) # TODO: initialize with something other than truth if not result.exists(jj) and not initialEstimates.exists(jj): lmInit = truth.points[j] initialEstimates.insert(jj, lmInit) # Initial estimates for the new pose. 
- prevPose = result.atPose3(symbol(ord('x'), prevPoseIndex)) + prevPose = result.atPose3(symbol('x', prevPoseIndex)) initialEstimates.insert( - symbol(ord('x'), currPoseIndex), prevPose.compose(odometry)) + symbol('x', currPoseIndex), prevPose.compose(odometry)) # Update ISAM # figure(1)tic diff --git a/cython/gtsam_unstable/__init__.py b/python/gtsam_unstable/__init__.py similarity index 100% rename from cython/gtsam_unstable/__init__.py rename to python/gtsam_unstable/__init__.py diff --git a/cython/gtsam_unstable/examples/FixedLagSmootherExample.py b/python/gtsam_unstable/examples/FixedLagSmootherExample.py similarity index 84% rename from cython/gtsam_unstable/examples/FixedLagSmootherExample.py rename to python/gtsam_unstable/examples/FixedLagSmootherExample.py index 786701e0f..7d2cea8ae 100644 --- a/cython/gtsam_unstable/examples/FixedLagSmootherExample.py +++ b/python/gtsam_unstable/examples/FixedLagSmootherExample.py @@ -16,16 +16,6 @@ import gtsam import gtsam_unstable - -def _timestamp_key_value(key, value): - """ - - """ - return gtsam_unstable.FixedLagSmootherKeyTimestampMapValue( - key, value - ) - - def BatchFixedLagSmootherExample(): """ Runs a batch fixed smoother on an agent with two odometry @@ -45,21 +35,21 @@ def BatchFixedLagSmootherExample(): # Create a prior on the first pose, placing it at the origin prior_mean = gtsam.Pose2(0, 0, 0) - prior_noise = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) + prior_noise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1])) X1 = 0 new_factors.push_back(gtsam.PriorFactorPose2(X1, prior_mean, prior_noise)) new_values.insert(X1, prior_mean) - new_timestamps.insert(_timestamp_key_value(X1, 0.0)) + new_timestamps.insert((X1, 0.0)) delta_time = 0.25 time = 0.25 while time <= 3.0: - previous_key = 1000 * (time - delta_time) - current_key = 1000 * time + previous_key = int(1000 * (time - delta_time)) + current_key = int(1000 * time) # assign current key to the current timestamp - new_timestamps.insert(_timestamp_key_value(current_key, time)) + new_timestamps.insert((current_key, time)) # Add a guess for this pose to the new values # Assume that the robot moves at 2 m/s. 
Position is time[s] * 2[m/s] @@ -69,14 +59,14 @@ def BatchFixedLagSmootherExample(): # Add odometry factors from two different sources with different error # stats odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02) - odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_1 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.1, 0.1, 0.05])) new_factors.push_back(gtsam.BetweenFactorPose2( previous_key, current_key, odometry_measurement_1, odometry_noise_1 )) odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01) - odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_2 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05])) new_factors.push_back(gtsam.BetweenFactorPose2( previous_key, current_key, odometry_measurement_2, odometry_noise_2 diff --git a/cython/gtsam_unstable/examples/TimeOfArrivalExample.py b/python/gtsam_unstable/examples/TimeOfArrivalExample.py similarity index 98% rename from cython/gtsam_unstable/examples/TimeOfArrivalExample.py rename to python/gtsam_unstable/examples/TimeOfArrivalExample.py index 6ba06f0f2..59f008a05 100644 --- a/cython/gtsam_unstable/examples/TimeOfArrivalExample.py +++ b/python/gtsam_unstable/examples/TimeOfArrivalExample.py @@ -12,7 +12,7 @@ # pylint: disable=invalid-name, no-name-in-module from gtsam import (LevenbergMarquardtOptimizer, LevenbergMarquardtParams, - NonlinearFactorGraph, Point3, Values, noiseModel_Isotropic) + NonlinearFactorGraph, Point3, Values, noiseModel) from gtsam_unstable import Event, TimeOfArrival, TOAFactor # units @@ -64,7 +64,7 @@ def create_graph(microphones, simulatedTOA): graph = NonlinearFactorGraph() # Create a noise model for the TOA error - model = noiseModel_Isotropic.Sigma(1, 0.5 * MS) + model = noiseModel.Isotropic.Sigma(1, 0.5 * MS) K = len(microphones) key = 0 diff --git a/cython/gtsam/tests/__init__.py b/python/gtsam_unstable/examples/__init__.py similarity index 100% rename from cython/gtsam/tests/__init__.py rename to python/gtsam_unstable/examples/__init__.py diff --git a/python/gtsam_unstable/gtsam_unstable.tpl b/python/gtsam_unstable/gtsam_unstable.tpl new file mode 100644 index 000000000..1d9dfaa40 --- /dev/null +++ b/python/gtsam_unstable/gtsam_unstable.tpl @@ -0,0 +1,44 @@ +/** + * @file gtsam.cpp + * @brief The auto-generated wrapper C++ source code. + * @author Duy-Nguyen Ta, Fan Jiang, Matthew Sklar + * @date Aug. 18, 2020 + * + * ** THIS FILE IS AUTO-GENERATED, DO NOT MODIFY! ** + */ + +// Include relevant boost libraries required by GTSAM +{include_boost} + +#include +#include +#include +#include "gtsam/base/serialization.h" +#include "gtsam/nonlinear/utilities.h" // for RedirectCout. 
+ +// These are the included headers listed in `gtsam_unstable.i` +{includes} +#include + +{boost_class_export} + +{hoder_type} + +#include "python/gtsam_unstable/preamble.h" + +using namespace std; + +namespace py = pybind11; + +PYBIND11_MODULE({module_name}, m_) {{ + m_.doc() = "pybind11 wrapper of {module_name}"; + + // Note here we need to import the dependent library + py::module::import("gtsam"); + +{wrapped_namespace} + +#include "python/gtsam_unstable/specializations.h" + +}} + diff --git a/cython/gtsam/utils/__init__.py b/python/gtsam_unstable/preamble.h similarity index 100% rename from cython/gtsam/utils/__init__.py rename to python/gtsam_unstable/preamble.h diff --git a/cython/gtsam_unstable/examples/__init__.py b/python/gtsam_unstable/specializations.h similarity index 100% rename from cython/gtsam_unstable/examples/__init__.py rename to python/gtsam_unstable/specializations.h diff --git a/cython/gtsam_unstable/tests/__init__.py b/python/gtsam_unstable/tests/__init__.py similarity index 100% rename from cython/gtsam_unstable/tests/__init__.py rename to python/gtsam_unstable/tests/__init__.py diff --git a/cython/gtsam_unstable/tests/test_FixedLagSmootherExample.py b/python/gtsam_unstable/tests/test_FixedLagSmootherExample.py similarity index 88% rename from cython/gtsam_unstable/tests/test_FixedLagSmootherExample.py rename to python/gtsam_unstable/tests/test_FixedLagSmootherExample.py index 8d3af311f..c1ccd1ea1 100644 --- a/cython/gtsam_unstable/tests/test_FixedLagSmootherExample.py +++ b/python/gtsam_unstable/tests/test_FixedLagSmootherExample.py @@ -16,13 +16,6 @@ import gtsam_unstable from gtsam.utils.test_case import GtsamTestCase - -def _timestamp_key_value(key, value): - return gtsam_unstable.FixedLagSmootherKeyTimestampMapValue( - key, value - ) - - class TestFixedLagSmootherExample(GtsamTestCase): ''' Tests the fixed lag smoother wrapper @@ -47,14 +40,14 @@ def test_FixedLagSmootherExample(self): # Create a prior on the first pose, placing it at the origin prior_mean = gtsam.Pose2(0, 0, 0) - prior_noise = gtsam.noiseModel_Diagonal.Sigmas( + prior_noise = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.3, 0.3, 0.1])) X1 = 0 new_factors.push_back( gtsam.PriorFactorPose2( X1, prior_mean, prior_noise)) new_values.insert(X1, prior_mean) - new_timestamps.insert(_timestamp_key_value(X1, 0.0)) + new_timestamps.insert((X1, 0.0)) delta_time = 0.25 time = 0.25 @@ -80,11 +73,11 @@ def test_FixedLagSmootherExample(self): # and its two odometers measure the change. The smoothed # result is then compared to the ground truth while time <= 3.0: - previous_key = 1000 * (time - delta_time) - current_key = 1000 * time + previous_key = int(1000 * (time - delta_time)) + current_key = int(1000 * time) # assign current key to the current timestamp - new_timestamps.insert(_timestamp_key_value(current_key, time)) + new_timestamps.insert((current_key, time)) # Add a guess for this pose to the new values # Assume that the robot moves at 2 m/s. 
Position is time[s] * @@ -95,7 +88,7 @@ def test_FixedLagSmootherExample(self): # Add odometry factors from two different sources with different # error stats odometry_measurement_1 = gtsam.Pose2(0.61, -0.08, 0.02) - odometry_noise_1 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_1 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.1, 0.1, 0.05])) new_factors.push_back( gtsam.BetweenFactorPose2( @@ -105,7 +98,7 @@ def test_FixedLagSmootherExample(self): odometry_noise_1)) odometry_measurement_2 = gtsam.Pose2(0.47, 0.03, 0.01) - odometry_noise_2 = gtsam.noiseModel_Diagonal.Sigmas( + odometry_noise_2 = gtsam.noiseModel.Diagonal.Sigmas( np.array([0.05, 0.05, 0.05])) new_factors.push_back( gtsam.BetweenFactorPose2( diff --git a/python/requirements.txt b/python/requirements.txt new file mode 100644 index 000000000..481d27d8e --- /dev/null +++ b/python/requirements.txt @@ -0,0 +1,2 @@ +numpy>=1.11.0 +pyparsing>=2.4.2 diff --git a/cython/setup.py.in b/python/setup.py.in similarity index 63% rename from cython/setup.py.in rename to python/setup.py.in index df92b564c..9aa4b71f4 100644 --- a/cython/setup.py.in +++ b/python/setup.py.in @@ -1,11 +1,24 @@ -import os -import sys +"""Setup file to install the GTSAM package.""" + try: from setuptools import setup, find_packages except ImportError: from distutils.core import setup, find_packages -packages = find_packages() +packages = find_packages(where=".") +print("PACKAGES: ", packages) + +package_data = { + '': [ + "./*.so", + "./*.dll", + "Data/*" # Add the data files to the package + "Data/**/*" # Add the data files in subdirectories + ] +} + +# Cleaner to read in the contents rather than copy them over. +readme_contents = open("${GTSAM_SOURCE_DIR}/README.md").read() setup( name='gtsam', @@ -16,9 +29,8 @@ setup( author_email='frank.dellaert@gtsam.org', license='Simplified BSD license', keywords='slam sam robotics localization mapping optimization', - long_description='''${README_CONTENTS}''', long_description_content_type='text/markdown', - python_requires='>=2.7', + long_description=readme_contents, # https://pypi.org/pypi?%3Aaction=list_classifiers classifiers=[ 'Development Status :: 5 - Production/Stable', @@ -32,13 +44,10 @@ setup( 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', ], - packages=packages, - package_data={package: - [f for f in os.listdir(package.replace('.', os.path.sep)) if os.path.splitext(f)[1] in ('.so', '.pyd')] - for package in packages - }, - install_requires=[line.strip() for line in ''' -${CYTHON_INSTALL_REQUIREMENTS} -'''.splitlines() if len(line.strip()) > 0 and not line.strip().startswith('#')] + include_package_data=True, + package_data=package_data, + test_suite="gtsam.tests", + install_requires=open("${GTSAM_SOURCE_DIR}/python/requirements.txt").readlines(), + zip_safe=False, ) diff --git a/tests/simulated2D.h b/tests/simulated2D.h index c4930a55b..ed412bba8 100644 --- a/tests/simulated2D.h +++ b/tests/simulated2D.h @@ -139,14 +139,14 @@ namespace simulated2D { } /// Return error and optional derivative - Vector evaluateError(const Pose& x, boost::optional H = boost::none) const { + Vector evaluateError(const Pose& x, boost::optional H = boost::none) const override { return (prior(x, H) - measured_); } virtual ~GenericPrior() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } @@ 
-185,14 +185,14 @@ namespace simulated2D { /// Evaluate error and optionally return derivatives Vector evaluateError(const Pose& x1, const Pose& x2, boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const { + boost::optional H2 = boost::none) const override { return (odo(x1, x2, H1, H2) - measured_); } virtual ~GenericOdometry() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } @@ -232,14 +232,14 @@ namespace simulated2D { /// Evaluate error and optionally return derivatives Vector evaluateError(const Pose& x1, const Landmark& x2, boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const { + boost::optional H2 = boost::none) const override { return (mea(x1, x2, H1, H2) - measured_); } virtual ~GenericMeasurement() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } diff --git a/tests/simulated2DConstraints.h b/tests/simulated2DConstraints.h index deb9292f7..cb8c09fc8 100644 --- a/tests/simulated2DConstraints.h +++ b/tests/simulated2DConstraints.h @@ -60,7 +60,7 @@ namespace simulated2D { virtual ~ScalarCoordConstraint1() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } @@ -87,8 +87,8 @@ namespace simulated2D { * @param x is the estimate of the constrained variable being evaluated * @param H is an optional Jacobian, linearized at x */ - virtual double value(const Point& x, boost::optional H = - boost::none) const { + double value(const Point& x, boost::optional H = + boost::none) const override { if (H) { Matrix D = Matrix::Zero(1, traits::GetDimension(x)); D(0, IDX) = 1.0; @@ -128,7 +128,7 @@ namespace simulated2D { virtual ~MaxDistanceConstraint() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } @@ -150,9 +150,9 @@ namespace simulated2D { * @param H2 is an optional Jacobian in x2 * @return the distance between the variables */ - virtual double value(const Point& x1, const Point& x2, + double value(const Point& x1, const Point& x2, boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const { + boost::optional H2 = boost::none) const override { if (H1) *H1 = numericalDerivative21(range_trait, x1, x2, 1e-5); if (H1) *H2 = numericalDerivative22(range_trait, x1, x2, 1e-5); return range_trait(x1, x2); @@ -177,7 +177,7 @@ namespace simulated2D { virtual ~MinDistanceConstraint() {} /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } @@ -200,9 +200,9 @@ namespace simulated2D { * @param H2 is an optional Jacobian in x2 * @return the distance between the variables */ - virtual double value(const Pose& x1, const Point& x2, + double 
value(const Pose& x1, const Point& x2, boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const { + boost::optional H2 = boost::none) const override { if (H1) *H1 = numericalDerivative21(range_trait, x1, x2, 1e-5); if (H1) *H2 = numericalDerivative22(range_trait, x1, x2, 1e-5); return range_trait(x1, x2); diff --git a/tests/simulated2DOriented.h b/tests/simulated2DOriented.h index 948a87ce5..27932415b 100644 --- a/tests/simulated2DOriented.h +++ b/tests/simulated2DOriented.h @@ -119,12 +119,12 @@ namespace simulated2DOriented { /// Evaluate error and optionally derivative Vector evaluateError(const VALUE& x1, const VALUE& x2, boost::optional H1 = boost::none, - boost::optional H2 = boost::none) const { + boost::optional H2 = boost::none) const override { return measured_.localCoordinates(odo(x1, x2, H1, H2)); } /// @return a deep copy of this factor - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new This(*this))); } diff --git a/tests/simulated3D.h b/tests/simulated3D.h index 3c92e733e..3b4afb106 100644 --- a/tests/simulated3D.h +++ b/tests/simulated3D.h @@ -90,7 +90,7 @@ struct PointPrior3D: public NoiseModelFactor1 { * @return Vector error between prior value and x (Dimension: 3) */ Vector evaluateError(const Point3& x, boost::optional H = - boost::none) const { + boost::none) const override { return prior(x, H) - measured_; } }; @@ -121,7 +121,7 @@ struct Simulated3DMeasurement: public NoiseModelFactor2 { * @return vector error between measurement and prediction (Dimension: 3) */ Vector evaluateError(const Point3& x1, const Point3& x2, - boost::optional H1 = boost::none, boost::optional H2 = boost::none) const { + boost::optional H1 = boost::none, boost::optional H2 = boost::none) const override { return mea(x1, x2, H1, H2) - measured_; } }; diff --git a/tests/smallExample.h b/tests/smallExample.h index 2b29a6d10..944899e70 100644 --- a/tests/smallExample.h +++ b/tests/smallExample.h @@ -337,15 +337,30 @@ struct UnaryFactor: public gtsam::NoiseModelFactor1 { gtsam::NoiseModelFactor1(model, key), z_(z) { } - Vector evaluateError(const Point2& x, boost::optional A = boost::none) const { + Vector evaluateError(const Point2& x, boost::optional A = boost::none) const override { if (A) *A = H(x); return (h(x) - z_); } + gtsam::NonlinearFactor::shared_ptr clone() const override { + return boost::static_pointer_cast( + gtsam::NonlinearFactor::shared_ptr(new UnaryFactor(*this))); } }; } +/* ************************************************************************* */ +inline NonlinearFactorGraph nonlinearFactorGraphWithGivenSigma(const double sigma) { + using symbol_shorthand::X; + using symbol_shorthand::L; + boost::shared_ptr fg(new NonlinearFactorGraph); + Point2 z(1.0, 0.0); + boost::shared_ptr factor( + new smallOptimize::UnaryFactor(z, noiseModel::Isotropic::Sigma(2,sigma), X(1))); + fg->push_back(factor); + return *fg; +} + /* ************************************************************************* */ inline boost::shared_ptr sharedReallyNonlinearFactorGraph() { using symbol_shorthand::X; @@ -363,6 +378,54 @@ inline NonlinearFactorGraph createReallyNonlinearFactorGraph() { return *sharedReallyNonlinearFactorGraph(); } +/* ************************************************************************* */ +inline NonlinearFactorGraph sharedNonRobustFactorGraphWithOutliers() { + using symbol_shorthand::X; + boost::shared_ptr fg(new 
NonlinearFactorGraph); + Point2 z(0.0, 0.0); + double sigma = 0.1; + + boost::shared_ptr> factor( + new PriorFactor(X(1), z, noiseModel::Isotropic::Sigma(2,sigma))); + // 3 noiseless inliers + fg->push_back(factor); + fg->push_back(factor); + fg->push_back(factor); + + // 1 outlier + Point2 z_out(1.0, 0.0); + boost::shared_ptr> factor_out( + new PriorFactor(X(1), z_out, noiseModel::Isotropic::Sigma(2,sigma))); + fg->push_back(factor_out); + + return *fg; +} + +/* ************************************************************************* */ +inline NonlinearFactorGraph sharedRobustFactorGraphWithOutliers() { + using symbol_shorthand::X; + boost::shared_ptr fg(new NonlinearFactorGraph); + Point2 z(0.0, 0.0); + double sigma = 0.1; + auto gmNoise = noiseModel::Robust::Create( + noiseModel::mEstimator::GemanMcClure::Create(1.0), noiseModel::Isotropic::Sigma(2,sigma)); + boost::shared_ptr> factor( + new PriorFactor(X(1), z, gmNoise)); + // 3 noiseless inliers + fg->push_back(factor); + fg->push_back(factor); + fg->push_back(factor); + + // 1 outlier + Point2 z_out(1.0, 0.0); + boost::shared_ptr> factor_out( + new PriorFactor(X(1), z_out, gmNoise)); + fg->push_back(factor_out); + + return *fg; +} + + /* ************************************************************************* */ inline std::pair createNonlinearSmoother(int T) { using namespace impl; diff --git a/tests/testExpressionFactor.cpp b/tests/testExpressionFactor.cpp index d33c7ba1d..e3e37e7c7 100644 --- a/tests/testExpressionFactor.cpp +++ b/tests/testExpressionFactor.cpp @@ -630,6 +630,103 @@ TEST(ExpressionFactor, MultiplyWithInverseFunction) { EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-5, 1e-5); } + +/* ************************************************************************* */ +// Test N-ary variadic template +class TestNaryFactor + : public gtsam::ExpressionFactorN { +private: + using This = TestNaryFactor; + using Base = + gtsam::ExpressionFactorN; + +public: + /// default constructor + TestNaryFactor() = default; + ~TestNaryFactor() override = default; + + TestNaryFactor(gtsam::Key kR1, gtsam::Key kV1, gtsam::Key kR2, gtsam::Key kV2, + const gtsam::SharedNoiseModel &model, const gtsam::Point3& measured) + : Base({kR1, kV1, kR2, kV2}, model, measured) { + this->initialize(expression({kR1, kV1, kR2, kV2})); + } + + /// @return a deep copy of this factor + gtsam::NonlinearFactor::shared_ptr clone() const override { + return boost::static_pointer_cast( + gtsam::NonlinearFactor::shared_ptr(new This(*this))); + } + + // Return measurement expression + gtsam::Expression expression( + const std::array &keys) const override { + gtsam::Expression R1_(keys[0]); + gtsam::Expression V1_(keys[1]); + gtsam::Expression R2_(keys[2]); + gtsam::Expression V2_(keys[3]); + return {gtsam::rotate(R1_, V1_) - gtsam::rotate(R2_, V2_)}; + } + + /** print */ + void print(const std::string &s, + const gtsam::KeyFormatter &keyFormatter = + gtsam::DefaultKeyFormatter) const override { + std::cout << s << "TestNaryFactor(" + << keyFormatter(Factor::keys_[0]) << "," + << keyFormatter(Factor::keys_[1]) << "," + << keyFormatter(Factor::keys_[2]) << "," + << keyFormatter(Factor::keys_[3]) << ")\n"; + gtsam::traits::Print(measured_, " measured: "); + this->noiseModel_->print(" noise model: "); + } + + /** equals */ + bool equals(const gtsam::NonlinearFactor &expected, + double tol = 1e-9) const override { + const This *e = dynamic_cast(&expected); + return e != nullptr && Base::equals(*e, tol) && + gtsam::traits::Equals(measured_,e->measured_, tol); + } + 
+private: + /** Serialization function */ + friend class boost::serialization::access; + template + void serialize(ARCHIVE &ar, const unsigned int /*version*/) { + ar &boost::serialization::make_nvp( + "TestNaryFactor", + boost::serialization::base_object(*this)); + ar &BOOST_SERIALIZATION_NVP(measured_); + } +}; + +TEST(ExpressionFactor, variadicTemplate) { + using gtsam::symbol_shorthand::R; + using gtsam::symbol_shorthand::V; + + // Create factor + TestNaryFactor f(R(0),V(0), R(1), V(1), noiseModel::Unit::Create(3), Point3(0,0,0)); + + // Create some values + Values values; + values.insert(R(0), Rot3::Ypr(0.1, 0.2, 0.3)); + values.insert(V(0), Point3(1, 2, 3)); + values.insert(R(1), Rot3::Ypr(0.2, 0.5, 0.2)); + values.insert(V(1), Point3(5, 6, 7)); + + // Check unwhitenedError + std::vector H(4); + Vector actual = f.unwhitenedError(values, H); + EXPECT_LONGS_EQUAL(4, H.size()); + EXPECT(assert_equal(Eigen::Vector3d(-5.63578115, -4.85353243, -1.4801204), actual, 1e-5)); + + EXPECT_CORRECT_FACTOR_JACOBIANS(f, values, 1e-8, 1e-5); +} + + /* ************************************************************************* */ int main() { TestResult tr; diff --git a/tests/testExtendedKalmanFilter.cpp b/tests/testExtendedKalmanFilter.cpp index d759e83e1..0ea507a36 100644 --- a/tests/testExtendedKalmanFilter.cpp +++ b/tests/testExtendedKalmanFilter.cpp @@ -153,14 +153,14 @@ class NonlinearMotionModel : public NoiseModelFactor2 { } /* print */ - virtual void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << ": NonlinearMotionModel\n"; std::cout << " TestKey1: " << keyFormatter(key1()) << std::endl; std::cout << " TestKey2: " << keyFormatter(key2()) << std::endl; } /** Check if two factors are equal. Note type is IndexFactor and needs cast. 
*/ - virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const { + bool equals(const NonlinearFactor& f, double tol = 1e-9) const override { const This *e = dynamic_cast (&f); return (e != nullptr) && (key1() == e->key1()) && (key2() == e->key2()); } @@ -169,13 +169,13 @@ class NonlinearMotionModel : public NoiseModelFactor2 { * calculate the error of the factor * Override for systems with unusual noise models */ - virtual double error(const Values& c) const { + double error(const Values& c) const override { Vector w = whitenedError(c); return 0.5 * w.dot(w); } /** get the dimension of the factor (number of rows on linearization) */ - size_t dim() const { + size_t dim() const override { return 2; } @@ -189,7 +189,7 @@ class NonlinearMotionModel : public NoiseModelFactor2 { * Ax-b \approx h(x1+dx1,x2+dx2)-z = h(x1,x2) + A2*dx1 + A2*dx2 - z * Hence b = z - h(x1,x2) = - error_vector(x) */ - boost::shared_ptr linearize(const Values& c) const { + boost::shared_ptr linearize(const Values& c) const override { const X1& x1 = c.at(key1()); const X2& x2 = c.at(key2()); Matrix A1, A2; @@ -212,7 +212,7 @@ class NonlinearMotionModel : public NoiseModelFactor2 { /** vector of errors */ Vector evaluateError(const Point2& p1, const Point2& p2, boost::optional H1 = boost::none, boost::optional H2 = - boost::none) const { + boost::none) const override { // error = p2 - f(p1) // H1 = d error / d p1 = -d f/ d p1 = -F @@ -284,13 +284,13 @@ class NonlinearMeasurementModel : public NoiseModelFactor1 { } /* print */ - virtual void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const { + void print(const std::string& s = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { std::cout << s << ": NonlinearMeasurementModel\n"; std::cout << " TestKey: " << keyFormatter(key()) << std::endl; } /** Check if two factors are equal. Note type is IndexFactor and needs cast. 
*/ - virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const { + bool equals(const NonlinearFactor& f, double tol = 1e-9) const override { const This *e = dynamic_cast (&f); return (e != nullptr) && Base::equals(f); } @@ -299,13 +299,13 @@ class NonlinearMeasurementModel : public NoiseModelFactor1 { * calculate the error of the factor * Override for systems with unusual noise models */ - virtual double error(const Values& c) const { + double error(const Values& c) const override { Vector w = whitenedError(c); return 0.5 * w.dot(w); } /** get the dimension of the factor (number of rows on linearization) */ - size_t dim() const { + size_t dim() const override { return 1; } @@ -319,7 +319,7 @@ class NonlinearMeasurementModel : public NoiseModelFactor1 { * Ax-b \approx h(x1+dx1)-z = h(x1) + A1*dx1 - z * Hence b = z - h(x1) = - error_vector(x) */ - boost::shared_ptr linearize(const Values& c) const { + boost::shared_ptr linearize(const Values& c) const override { const X& x1 = c.at(key()); Matrix A1; Vector b = -evaluateError(x1, A1); @@ -336,7 +336,7 @@ class NonlinearMeasurementModel : public NoiseModelFactor1 { } /** vector of errors */ - Vector evaluateError(const Point2& p, boost::optional H1 = boost::none) const { + Vector evaluateError(const Point2& p, boost::optional H1 = boost::none) const override { // error = z - h(p) // H = d error / d p = -d h/ d p = -H Vector z_hat = h(p); diff --git a/tests/testGncOptimizer.cpp b/tests/testGncOptimizer.cpp new file mode 100644 index 000000000..738c77936 --- /dev/null +++ b/tests/testGncOptimizer.cpp @@ -0,0 +1,640 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testGncOptimizer.cpp + * @brief Unit tests for GncOptimizer class + * @author Jingnan Shi + * @author Luca Carlone + * @author Frank Dellaert + * + * Implementation of the paper: Yang, Antonante, Tzoumas, Carlone, "Graduated + * Non-Convexity for Robust Spatial Perception: From Non-Minimal Solvers to + * Global Outlier Rejection", ICRA/RAL, 2020. (arxiv version: + * https://arxiv.org/pdf/1909.08605.pdf) + * + * See also: + * Antonante, Tzoumas, Yang, Carlone, "Outlier-Robust Estimation: Hardness, + * Minimally-Tuned Algorithms, and Applications", arxiv: + * https://arxiv.org/pdf/2007.15109.pdf, 2020. 
+ */ + +#include +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; + +using symbol_shorthand::L; +using symbol_shorthand::X; +static double tol = 1e-7; + +/* ************************************************************************* */ +TEST(GncOptimizer, gncParamsConstructor) { + // check params are correctly parsed + LevenbergMarquardtParams lmParams; + GncParams gncParams1(lmParams); + CHECK(lmParams.equals(gncParams1.baseOptimizerParams)); + + // check also default constructor + GncParams gncParams1b; + CHECK(lmParams.equals(gncParams1b.baseOptimizerParams)); + + // and check params become different if we change lmParams + lmParams.setVerbosity("DELTA"); + CHECK(!lmParams.equals(gncParams1.baseOptimizerParams)); + + // and same for GN + GaussNewtonParams gnParams; + GncParams gncParams2(gnParams); + CHECK(gnParams.equals(gncParams2.baseOptimizerParams)); + + // check default constructor + GncParams gncParams2b; + CHECK(gnParams.equals(gncParams2b.baseOptimizerParams)); + + // change something at the gncParams level + GncParams gncParams2c(gncParams2b); + gncParams2c.setLossType(GncLossType::GM); + CHECK(!gncParams2c.equals(gncParams2b.baseOptimizerParams)); +} + +/* ************************************************************************* */ +TEST(GncOptimizer, gncConstructor) { + // has to have Gaussian noise models ! + auto fg = example::createReallyNonlinearFactorGraph(); // just a unary factor + // on a 2D point + + Point2 p0(3, 3); + Values initial; + initial.insert(X(1), p0); + + GncParams gncParams; + auto gnc = GncOptimizer>(fg, initial, + gncParams); + + CHECK(gnc.getFactors().equals(fg)); + CHECK(gnc.getState().equals(initial)); + CHECK(gnc.getParams().equals(gncParams)); +} + +/* ************************************************************************* */ +TEST(GncOptimizer, gncConstructorWithRobustGraphAsInput) { + auto fg = example::sharedNonRobustFactorGraphWithOutliers(); + // same graph with robust noise model + auto fg_robust = example::sharedRobustFactorGraphWithOutliers(); + + Point2 p0(3, 3); + Values initial; + initial.insert(X(1), p0); + + GncParams gncParams; + auto gnc = GncOptimizer>(fg_robust, + initial, + gncParams); + + // make sure that when parsing the graph is transformed into one without + // robust loss + CHECK(fg.equals(gnc.getFactors())); +} + +/* ************************************************************************* */ +TEST(GncOptimizer, initializeMu) { + auto fg = example::createReallyNonlinearFactorGraph(); + + Point2 p0(3, 3); + Values initial; + initial.insert(X(1), p0); + + // testing GM mu initialization + GncParams gncParams; + gncParams.setLossType(GncLossType::GM); + auto gnc_gm = GncOptimizer>(fg, initial, + gncParams); + // according to rmk 5 in the gnc paper: m0 = 2 rmax^2 / barcSq + // (barcSq=1 in this example) + EXPECT_DOUBLES_EQUAL(gnc_gm.initializeMu(), 2 * 198.999, 1e-3); + + // testing TLS mu initialization + gncParams.setLossType(GncLossType::TLS); + auto gnc_tls = GncOptimizer>(fg, initial, + gncParams); + // according to rmk 5 in the gnc paper: m0 = barcSq / (2 * rmax^2 - barcSq) + // (barcSq=1 in this example) + EXPECT_DOUBLES_EQUAL(gnc_tls.initializeMu(), 1 / (2 * 198.999 - 1), 1e-3); +} + +/* ************************************************************************* */ +TEST(GncOptimizer, updateMuGM) { + // has to have Gaussian noise models ! 
+ auto fg = example::createReallyNonlinearFactorGraph(); + + Point2 p0(3, 3); + Values initial; + initial.insert(X(1), p0); + + GncParams gncParams; + gncParams.setLossType(GncLossType::GM); + gncParams.setMuStep(1.4); + auto gnc = GncOptimizer>(fg, initial, + gncParams); + + double mu = 5.0; + EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu / 1.4, tol); + + // check it correctly saturates to 1 for GM + mu = 1.2; + EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), 1.0, tol); +} + +/* ************************************************************************* */ +TEST(GncOptimizer, updateMuTLS) { + // has to have Gaussian noise models ! + auto fg = example::createReallyNonlinearFactorGraph(); + + Point2 p0(3, 3); + Values initial; + initial.insert(X(1), p0); + + GncParams gncParams; + gncParams.setMuStep(1.4); + gncParams.setLossType(GncLossType::TLS); + auto gnc = GncOptimizer>(fg, initial, + gncParams); + + double mu = 5.0; + EXPECT_DOUBLES_EQUAL(gnc.updateMu(mu), mu * 1.4, tol); +} + +/* ************************************************************************* */ +TEST(GncOptimizer, checkMuConvergence) { + // has to have Gaussian noise models ! + auto fg = example::createReallyNonlinearFactorGraph(); + + Point2 p0(3, 3); + Values initial; + initial.insert(X(1), p0); + + { + GncParams gncParams; + gncParams.setLossType(GncLossType::GM); + auto gnc = GncOptimizer>(fg, initial, + gncParams); + + double mu = 1.0; + CHECK(gnc.checkMuConvergence(mu)); + } + { + GncParams gncParams; + gncParams.setLossType( + GncLossType::TLS); + auto gnc = GncOptimizer>(fg, initial, + gncParams); + + double mu = 1.0; + CHECK(!gnc.checkMuConvergence(mu)); //always false for TLS + } +} + +/* ************************************************************************* */ +TEST(GncOptimizer, checkCostConvergence) { + // has to have Gaussian noise models ! + auto fg = example::createReallyNonlinearFactorGraph(); + + Point2 p0(3, 3); + Values initial; + initial.insert(X(1), p0); + + { + GncParams gncParams; + gncParams.setRelativeCostTol(0.49); + auto gnc = GncOptimizer>(fg, initial, + gncParams); + + double prev_cost = 1.0; + double cost = 0.5; + // relative cost reduction = 0.5 > 0.49, hence checkCostConvergence = false + CHECK(!gnc.checkCostConvergence(cost, prev_cost)); + } + { + GncParams gncParams; + gncParams.setRelativeCostTol(0.51); + auto gnc = GncOptimizer>(fg, initial, + gncParams); + + double prev_cost = 1.0; + double cost = 0.5; + // relative cost reduction = 0.5 < 0.51, hence checkCostConvergence = true + CHECK(gnc.checkCostConvergence(cost, prev_cost)); + } +} + +/* ************************************************************************* */ +TEST(GncOptimizer, checkWeightsConvergence) { + // has to have Gaussian noise models ! 
+ auto fg = example::createReallyNonlinearFactorGraph(); + + Point2 p0(3, 3); + Values initial; + initial.insert(X(1), p0); + + { + GncParams gncParams; + gncParams.setLossType(GncLossType::GM); + auto gnc = GncOptimizer>(fg, initial, + gncParams); + + Vector weights = Vector::Ones(fg.size()); + CHECK(!gnc.checkWeightsConvergence(weights)); //always false for GM + } + { + GncParams gncParams; + gncParams.setLossType( + GncLossType::TLS); + auto gnc = GncOptimizer>(fg, initial, + gncParams); + + Vector weights = Vector::Ones(fg.size()); + // weights are binary, so checkWeightsConvergence = true + CHECK(gnc.checkWeightsConvergence(weights)); + } + { + GncParams gncParams; + gncParams.setLossType( + GncLossType::TLS); + auto gnc = GncOptimizer>(fg, initial, + gncParams); + + Vector weights = Vector::Ones(fg.size()); + weights[0] = 0.9; // more than weightsTol = 1e-4 from 1, hence checkWeightsConvergence = false + CHECK(!gnc.checkWeightsConvergence(weights)); + } + { + GncParams gncParams; + gncParams.setLossType( + GncLossType::TLS); + gncParams.setWeightsTol(0.1); + auto gnc = GncOptimizer>(fg, initial, + gncParams); + + Vector weights = Vector::Ones(fg.size()); + weights[0] = 0.9; // exactly weightsTol = 0.1 from 1, hence checkWeightsConvergence = true + CHECK(gnc.checkWeightsConvergence(weights)); + } +} + +/* ************************************************************************* */ +TEST(GncOptimizer, checkConvergenceTLS) { + // has to have Gaussian noise models ! + auto fg = example::createReallyNonlinearFactorGraph(); + + Point2 p0(3, 3); + Values initial; + initial.insert(X(1), p0); + + GncParams gncParams; + gncParams.setRelativeCostTol(1e-5); + gncParams.setLossType(GncLossType::TLS); + auto gnc = GncOptimizer>(fg, initial, + gncParams); + + CHECK(gnc.checkCostConvergence(1.0, 1.0)); + CHECK(!gnc.checkCostConvergence(1.0, 2.0)); +} + +/* ************************************************************************* */ +TEST(GncOptimizer, calculateWeightsGM) { + auto fg = example::sharedNonRobustFactorGraphWithOutliers(); + + Point2 p0(0, 0); + Values initial; + initial.insert(X(1), p0); + + // we have 4 factors, 3 with zero errors (inliers), 1 with error 50 = 0.5 * + // 1/sigma^2 || [1;0] - [0;0] ||^2 (outlier) + Vector weights_expected = Vector::Zero(4); + weights_expected[0] = 1.0; // zero error + weights_expected[1] = 1.0; // zero error + weights_expected[2] = 1.0; // zero error + weights_expected[3] = std::pow(1.0 / (50.0 + 1.0), 2); // outlier, error = 50 + + GaussNewtonParams gnParams; + GncParams gncParams(gnParams); + gncParams.setLossType(GncLossType::GM); + auto gnc = GncOptimizer>(fg, initial, gncParams); + double mu = 1.0; + Vector weights_actual = gnc.calculateWeights(initial, mu); + CHECK(assert_equal(weights_expected, weights_actual, tol)); + + mu = 2.0; + double barcSq = 5.0; + weights_expected[3] = std::pow(mu * barcSq / (50.0 + mu * barcSq), 2); // outlier, error = 50 + gncParams.setInlierCostThreshold(barcSq); + auto gnc2 = GncOptimizer>(fg, initial, + gncParams); + weights_actual = gnc2.calculateWeights(initial, mu); + CHECK(assert_equal(weights_expected, weights_actual, tol)); +} + +/* ************************************************************************* */ +TEST(GncOptimizer, calculateWeightsTLS) { + auto fg = example::sharedNonRobustFactorGraphWithOutliers(); + + Point2 p0(0, 0); + Values initial; + initial.insert(X(1), p0); + + // we have 4 factors, 3 with zero errors (inliers), 1 with error + Vector weights_expected = Vector::Zero(4); + weights_expected[0] 
+ +/* ************************************************************************* */ +TEST(GncOptimizer, calculateWeightsTLS) { + auto fg = example::sharedNonRobustFactorGraphWithOutliers(); + + Point2 p0(0, 0); + Values initial; + initial.insert(X(1), p0); + + // we have 4 factors, 3 with zero errors (inliers), 1 with error + Vector weights_expected = Vector::Zero(4); + weights_expected[0] = 1.0; // zero error + weights_expected[1] = 1.0; // zero error + weights_expected[2] = 1.0; // zero error + weights_expected[3] = 0; // outlier + + GaussNewtonParams gnParams; + GncParams<GaussNewtonParams> gncParams(gnParams); + gncParams.setLossType(GncLossType::TLS); + auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams); + double mu = 1.0; + Vector weights_actual = gnc.calculateWeights(initial, mu); + CHECK(assert_equal(weights_expected, weights_actual, tol)); +} + +/* ************************************************************************* */ +TEST(GncOptimizer, calculateWeightsTLS2) { + + // create values + Point2 x_val(0.0, 0.0); + Point2 x_prior(1.0, 0.0); + Values initial; + initial.insert(X(1), x_val); + + // create very simple factor graph with a single factor 0.5 * 1/sigma^2 * || x - [1;0] ||^2 + double sigma = 1; + SharedDiagonal noise = noiseModel::Diagonal::Sigmas(Vector2(sigma, sigma)); + NonlinearFactorGraph nfg; + nfg.add(PriorFactor<Point2>(X(1), x_prior, noise)); + + // cost of the factor: + DOUBLES_EQUAL(0.5 * 1 / (sigma * sigma), nfg.error(initial), tol); + + // check the TLS weights are correct: CASE 1: residual below barcsq + { + // expected: + Vector weights_expected = Vector::Zero(1); + weights_expected[0] = 1.0; // inlier + // actual: + GaussNewtonParams gnParams; + GncParams<GaussNewtonParams> gncParams(gnParams); + gncParams.setLossType(GncLossType::TLS); + gncParams.setInlierCostThreshold(0.51); // if inlier threshold is slightly larger than 0.5, then measurement is inlier + auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial, + gncParams); + double mu = 1e6; + Vector weights_actual = gnc.calculateWeights(initial, mu); + CHECK(assert_equal(weights_expected, weights_actual, tol)); + } + // check the TLS weights are correct: CASE 2: residual above barcsq + { + // expected: + Vector weights_expected = Vector::Zero(1); + weights_expected[0] = 0.0; // outlier + // actual: + GaussNewtonParams gnParams; + GncParams<GaussNewtonParams> gncParams(gnParams); + gncParams.setLossType(GncLossType::TLS); + gncParams.setInlierCostThreshold(0.49); // if inlier threshold is slightly below 0.5, then measurement is outlier + auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial, + gncParams); + double mu = 1e6; // very large mu recovers original TLS cost + Vector weights_actual = gnc.calculateWeights(initial, mu); + CHECK(assert_equal(weights_expected, weights_actual, tol)); + } + // check the TLS weights are correct: CASE 3: residual exactly at barcsq + { + // expected: + Vector weights_expected = Vector::Zero(1); + weights_expected[0] = 0.5; // undecided + // actual: + GaussNewtonParams gnParams; + GncParams<GaussNewtonParams> gncParams(gnParams); + gncParams.setLossType(GncLossType::TLS); + gncParams.setInlierCostThreshold(0.5); // inlier threshold equals the residual, so the measurement is undecided + auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(nfg, initial, + gncParams); + double mu = 1e6; // very large mu recovers original TLS cost + Vector weights_actual = gnc.calculateWeights(initial, mu); + CHECK(assert_equal(weights_expected, weights_actual, 1e-5)); + } +} + +/* ************************************************************************* */ +TEST(GncOptimizer, makeWeightedGraph) { + // create original factor + double sigma1 = 0.1; + NonlinearFactorGraph nfg = example::nonlinearFactorGraphWithGivenSigma( + sigma1); + + // create expected + double sigma2 = 10; + NonlinearFactorGraph expected = example::nonlinearFactorGraphWithGivenSigma( + sigma2); + + // create weights + Vector weights = Vector::Ones(1); // original info: 1/0.1^2 = 100. New info: 1/10^2 = 0.01. Ratio is 1e-4 + weights[0] = 1e-4; + + // create actual + Point2 p0(3, 3); + Values initial; + initial.insert(X(1), p0); + + LevenbergMarquardtParams lmParams; + GncParams<LevenbergMarquardtParams> gncParams(lmParams); + auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(nfg, initial, + gncParams); + NonlinearFactorGraph actual = gnc.makeWeightedGraph(weights); + + // check it's all good + CHECK(assert_equal(expected, actual)); +}
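The makeWeightedGraph expectation above follows because down-weighting a Gaussian factor by w is equivalent to inflating its noise sigma by 1/sqrt(w); a small sketch of that arithmetic, with a hypothetical helper name and the values taken from the test:

#include <cmath>

// weightedSigma(0.1, 1e-4) == 10: a weight of 1e-4 on a sigma = 0.1 factor
// yields the sigma = 10 factor used as the expected graph, since the
// information 1/sigma^2 scales linearly with the weight (100 * 1e-4 == 0.01).
double weightedSigma(double sigma, double w) { return sigma / std::sqrt(w); }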
+ +/* ************************************************************************* */ +TEST(GncOptimizer, optimizeSimple) { + auto fg = example::createReallyNonlinearFactorGraph(); + + Point2 p0(3, 3); + Values initial; + initial.insert(X(1), p0); + + LevenbergMarquardtParams lmParams; + GncParams<LevenbergMarquardtParams> gncParams(lmParams); + auto gnc = GncOptimizer<GncParams<LevenbergMarquardtParams>>(fg, initial, + gncParams); + + Values actual = gnc.optimize(); + DOUBLES_EQUAL(0, fg.error(actual), tol); +} + +/* ************************************************************************* */ +TEST(GncOptimizer, optimize) { + auto fg = example::sharedNonRobustFactorGraphWithOutliers(); + + Point2 p0(1, 0); + Values initial; + initial.insert(X(1), p0); + + // try with nonrobust cost function and standard GN + GaussNewtonParams gnParams; + GaussNewtonOptimizer gn(fg, initial, gnParams); + Values gn_results = gn.optimize(); + // converges to incorrect point due to lack of robustness to an outlier, ideal + // solution is Point2(0,0) + CHECK(assert_equal(Point2(0.25, 0.0), gn_results.at<Point2>(X(1)), 1e-3)); + + // try with robust loss function and standard GN + auto fg_robust = example::sharedRobustFactorGraphWithOutliers(); // same as fg, but with + // factors wrapped in + // Geman McClure losses + GaussNewtonOptimizer gn2(fg_robust, initial, gnParams); + Values gn2_results = gn2.optimize(); + // converges to incorrect point, this time due to the nonconvexity of the loss + CHECK(assert_equal(Point2(0.999706, 0.0), gn2_results.at<Point2>(X(1)), 1e-3)); + + // ... but graduated nonconvexity ensures both robustness and convergence in + // the face of nonconvexity + GncParams<GaussNewtonParams> gncParams(gnParams); + // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY); + auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, gncParams); + Values gnc_result = gnc.optimize(); + CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3)); +}
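The building blocks tested earlier (calculateWeights, makeWeightedGraph, and the convergence checks) compose into the outer GNC loop that optimize() runs above. A rough, self-contained sketch of that alternation, using hypothetical helper names and plain std types rather than GTSAM classes, intended only as an illustration and not as the GncOptimizer implementation:

#include <cstddef>
#include <functional>
#include <vector>

struct GncLoopSketch {
  // Inner solve: optimize the graph re-weighted by `weights`, return per-factor errors.
  std::function<std::vector<double>(const std::vector<double>&)> optimizeWeighted;
  // Per-factor weight update, e.g. the gmWeight / tlsWeight sketches above.
  std::function<double(double, double)> weightUpdate;
  // Graduation schedule for mu (relaxes the surrogate toward the robust cost).
  std::function<double(double)> updateMu;

  std::vector<double> run(std::size_t numFactors, double mu, std::size_t maxIters) const {
    std::vector<double> weights(numFactors, 1.0);  // start by trusting every factor
    for (std::size_t it = 0; it < maxIters; ++it) {
      const std::vector<double> errors = optimizeWeighted(weights);  // weighted solve
      for (std::size_t k = 0; k < numFactors; ++k)
        weights[k] = weightUpdate(errors[k], mu);  // re-weight each factor
      mu = updateMu(mu);  // graduate the nonconvexity
      // a real implementation would also stop on cost / weight convergence here
    }
    return weights;
  }
};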
+ +/* ************************************************************************* */ +TEST(GncOptimizer, optimizeWithKnownInliers) { + auto fg = example::sharedNonRobustFactorGraphWithOutliers(); + + Point2 p0(1, 0); + Values initial; + initial.insert(X(1), p0); + + std::vector<size_t> knownInliers; + knownInliers.push_back(0); + knownInliers.push_back(1); + knownInliers.push_back(2); + + // nonconvexity with known inliers + { + GncParams<GaussNewtonParams> gncParams; + gncParams.setKnownInliers(knownInliers); + gncParams.setLossType(GncLossType::GM); + //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY); + auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, + gncParams); + + Values gnc_result = gnc.optimize(); + CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3)); + + // check weights were actually fixed: + Vector finalWeights = gnc.getWeights(); + DOUBLES_EQUAL(1.0, finalWeights[0], tol); + DOUBLES_EQUAL(1.0, finalWeights[1], tol); + DOUBLES_EQUAL(1.0, finalWeights[2], tol); + } + { + GncParams<GaussNewtonParams> gncParams; + gncParams.setKnownInliers(knownInliers); + gncParams.setLossType(GncLossType::TLS); + // gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::SUMMARY); + auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, + gncParams); + + Values gnc_result = gnc.optimize(); + CHECK(assert_equal(Point2(0.0, 0.0), gnc_result.at<Point2>(X(1)), 1e-3)); + + // check weights were actually fixed: + Vector finalWeights = gnc.getWeights(); + DOUBLES_EQUAL(1.0, finalWeights[0], tol); + DOUBLES_EQUAL(1.0, finalWeights[1], tol); + DOUBLES_EQUAL(1.0, finalWeights[2], tol); + DOUBLES_EQUAL(0.0, finalWeights[3], tol); + } + { + // if we set the threshold large, they are all inliers + GncParams<GaussNewtonParams> gncParams; + gncParams.setKnownInliers(knownInliers); + gncParams.setLossType(GncLossType::TLS); + //gncParams.setVerbosityGNC(GncParams<GaussNewtonParams>::Verbosity::VALUES); + gncParams.setInlierCostThreshold(100.0); + auto gnc = GncOptimizer<GncParams<GaussNewtonParams>>(fg, initial, + gncParams); + + Values gnc_result = gnc.optimize(); + CHECK(assert_equal(Point2(0.25, 0.0), gnc_result.at<Point2>(X(1)), 1e-3)); + + // check weights were actually fixed: + Vector finalWeights = gnc.getWeights(); + DOUBLES_EQUAL(1.0, finalWeights[0], tol); + DOUBLES_EQUAL(1.0, finalWeights[1], tol); + DOUBLES_EQUAL(1.0, finalWeights[2], tol); + DOUBLES_EQUAL(1.0, finalWeights[3], tol); + } +} + +/* ************************************************************************* */ +TEST(GncOptimizer, optimizeSmallPoseGraph) { + /// load small pose graph + const string filename = findExampleDataFile("w100.graph"); + NonlinearFactorGraph::shared_ptr graph; + Values::shared_ptr initial; + boost::tie(graph, initial) = load2D(filename); + // Add a Gaussian prior on the first pose + Pose2 priorMean(0.0, 0.0, 0.0); // prior at origin + SharedDiagonal priorNoise = noiseModel::Diagonal::Sigmas( + Vector3(0.01, 0.01, 0.01)); + graph->addPrior(0, priorMean, priorNoise); + + /// get expected values by optimizing outlier-free graph + Values expected = LevenbergMarquardtOptimizer(*graph, *initial).optimize(); + + // add a few outliers + SharedDiagonal betweenNoise = noiseModel::Diagonal::Sigmas( + Vector3(0.1, 0.1, 0.01)); + graph->push_back(BetweenFactor<Pose2>(90, 50, Pose2(), betweenNoise)); // some arbitrary and
incorrect between factor + + /// get expected values by optimizing outlier-free graph + Values expectedWithOutliers = LevenbergMarquardtOptimizer(*graph, *initial) + .optimize(); + // as expected, the following test fails due to the presence of an outlier! + // CHECK(assert_equal(expected, expectedWithOutliers, 1e-3)); + + // GNC + // Note: in difficult instances, we set the odometry measurements to be + // inliers, but this problem is simple enought to succeed even without that + // assumption std::vector knownInliers; + GncParams gncParams; + auto gnc = GncOptimizer>(*graph, *initial, + gncParams); + Values actual = gnc.optimize(); + + // compare + CHECK(assert_equal(expected, actual, 1e-3)); // yay! we are robust to outliers! +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/tests/testManifold.cpp b/tests/testManifold.cpp index d0b4a1ffa..ac3a09e36 100644 --- a/tests/testManifold.cpp +++ b/tests/testManifold.cpp @@ -149,34 +149,6 @@ TEST(Manifold, DefaultChart) { EXPECT(assert_equal((Vector) Z_3x1, traits::Local(R, R))); } -#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4 -//****************************************************************************** -typedef ProductManifold MyPoint2Pair; - -// Define any direct product group to be a model of the multiplicative Group concept -namespace gtsam { -template<> struct traits : internal::ManifoldTraits { - static void Print(const MyPoint2Pair& m, const string& s = "") { - cout << s << "(" << m.first << "," << m.second << ")" << endl; - } - static bool Equals(const MyPoint2Pair& m1, const MyPoint2Pair& m2, double tol = 1e-8) { - return m1 == m2; - } -}; -} - -TEST(Manifold, ProductManifold) { - BOOST_CONCEPT_ASSERT((IsManifold)); - MyPoint2Pair pair1(Point2(0,0),Point2(0,0)); - Vector4 d; - d << 1,2,3,4; - MyPoint2Pair expected(Point2(1,2),Point2(3,4)); - MyPoint2Pair pair2 = pair1.retract(d); - EXPECT(assert_equal(expected,pair2,1e-9)); - EXPECT(assert_equal(d, pair1.localCoordinates(pair2),1e-9)); -} -#endif - //****************************************************************************** int main() { TestResult tr; diff --git a/tests/testNonlinearFactor.cpp b/tests/testNonlinearFactor.cpp index e1156ceba..84bba850b 100644 --- a/tests/testNonlinearFactor.cpp +++ b/tests/testNonlinearFactor.cpp @@ -233,18 +233,38 @@ TEST( NonlinearFactor, linearize_constraint2 ) CHECK(assert_equal((const GaussianFactor&)expected, *actual)); } +/* ************************************************************************* */ +TEST( NonlinearFactor, cloneWithNewNoiseModel ) +{ + // create original factor + double sigma1 = 0.1; + NonlinearFactorGraph nfg = example::nonlinearFactorGraphWithGivenSigma(sigma1); + + // create expected + double sigma2 = 10; + NonlinearFactorGraph expected = example::nonlinearFactorGraphWithGivenSigma(sigma2); + + // create actual + NonlinearFactorGraph actual; + SharedNoiseModel noise2 = noiseModel::Isotropic::Sigma(2,sigma2); + actual.push_back( boost::dynamic_pointer_cast(nfg[0])->cloneWithNewNoiseModel(noise2) ); + + // check it's all good + CHECK(assert_equal(expected, actual)); +} + /* ************************************************************************* */ class TestFactor4 : public NoiseModelFactor4 { public: typedef NoiseModelFactor4 Base; TestFactor4() : Base(noiseModel::Diagonal::Sigmas((Vector(1) << 2.0).finished()), X(1), X(2), 
X(3), X(4)) {} - virtual Vector + Vector evaluateError(const double& x1, const double& x2, const double& x3, const double& x4, boost::optional H1 = boost::none, boost::optional H2 = boost::none, boost::optional H3 = boost::none, - boost::optional H4 = boost::none) const { + boost::optional H4 = boost::none) const override { if(H1) { *H1 = (Matrix(1, 1) << 1.0).finished(); *H2 = (Matrix(1, 1) << 2.0).finished(); @@ -254,7 +274,7 @@ class TestFactor4 : public NoiseModelFactor4 { return (Vector(1) << x1 + x2 + x3 + x4).finished(); } - virtual gtsam::NonlinearFactor::shared_ptr clone() const { + gtsam::NonlinearFactor::shared_ptr clone() const override { return boost::static_pointer_cast( gtsam::NonlinearFactor::shared_ptr(new TestFactor4(*this))); } }; @@ -287,13 +307,13 @@ class TestFactor5 : public NoiseModelFactor5 Base; TestFactor5() : Base(noiseModel::Diagonal::Sigmas((Vector(1) << 2.0).finished()), X(1), X(2), X(3), X(4), X(5)) {} - virtual Vector + Vector evaluateError(const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, boost::optional H1 = boost::none, boost::optional H2 = boost::none, boost::optional H3 = boost::none, boost::optional H4 = boost::none, - boost::optional H5 = boost::none) const { + boost::optional H5 = boost::none) const override { if(H1) { *H1 = (Matrix(1, 1) << 1.0).finished(); *H2 = (Matrix(1, 1) << 2.0).finished(); @@ -336,14 +356,14 @@ class TestFactor6 : public NoiseModelFactor6 Base; TestFactor6() : Base(noiseModel::Diagonal::Sigmas((Vector(1) << 2.0).finished()), X(1), X(2), X(3), X(4), X(5), X(6)) {} - virtual Vector + Vector evaluateError(const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, boost::optional H1 = boost::none, boost::optional H2 = boost::none, boost::optional H3 = boost::none, boost::optional H4 = boost::none, boost::optional H5 = boost::none, - boost::optional H6 = boost::none) const { + boost::optional H6 = boost::none) const override { if(H1) { *H1 = (Matrix(1, 1) << 1.0).finished(); *H2 = (Matrix(1, 1) << 2.0).finished(); diff --git a/tests/testNonlinearISAM.cpp b/tests/testNonlinearISAM.cpp index 88ae508b6..4249f5bc4 100644 --- a/tests/testNonlinearISAM.cpp +++ b/tests/testNonlinearISAM.cpp @@ -288,7 +288,7 @@ TEST(testNonlinearISAM, loop_closures ) { break; // Check if vertex - const auto indexedPose = parseVertex(is, tag); + const auto indexedPose = parseVertexPose(is, tag); if (indexedPose) { Key id = indexedPose->first; initialEstimate.insert(Symbol('x', id), indexedPose->second); @@ -308,7 +308,7 @@ TEST(testNonlinearISAM, loop_closures ) { // check if edge const auto betweenPose = parseEdge(is, tag); if (betweenPose) { - Key id1, id2; + size_t id1, id2; tie(id1, id2) = betweenPose->first; graph.emplace_shared >(Symbol('x', id2), Symbol('x', id1), betweenPose->second, model); diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index 5d4c3f844..295721cc4 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -48,6 +48,19 @@ const double tol = 1e-5; using symbol_shorthand::X; using symbol_shorthand::L; +/* ************************************************************************* */ +TEST( NonlinearOptimizer, paramsEquals ) +{ + // default constructors lead to two identical params + GaussNewtonParams gnParams1; + GaussNewtonParams gnParams2; + CHECK(gnParams1.equals(gnParams2)); + + // but the params become different if we change something in gnParams2 + gnParams2.setVerbosity("DELTA"); + CHECK(!gnParams1.equals(gnParams2)); +} + 
/* ************************************************************************* */ TEST( NonlinearOptimizer, iterateLM ) { @@ -459,11 +472,12 @@ TEST(NonlinearOptimizer, RobustMeanCalculation) { init.insert(0, 100.0); expected.insert(0, 3.33333333); - LevenbergMarquardtParams params; + DoglegParams params_dl; + params_dl.setRelativeErrorTol(1e-10); auto gn_result = GaussNewtonOptimizer(fg, init).optimize(); - auto lm_result = LevenbergMarquardtOptimizer(fg, init, params).optimize(); - auto dl_result = DoglegOptimizer(fg, init).optimize(); + auto lm_result = LevenbergMarquardtOptimizer(fg, init).optimize(); + auto dl_result = DoglegOptimizer(fg, init, params_dl).optimize(); EXPECT(assert_equal(expected, gn_result, tol)); EXPECT(assert_equal(expected, lm_result, tol)); @@ -509,8 +523,8 @@ class IterativeLM : public LevenbergMarquardtOptimizer { initial_(initialValues) {} /// Solve that uses conjugate gradient - virtual VectorValues solve(const GaussianFactorGraph& gfg, - const NonlinearOptimizerParams& params) const { + VectorValues solve(const GaussianFactorGraph& gfg, + const NonlinearOptimizerParams& params) const override { VectorValues zeros = initial_.zeroVectors(); return conjugateGradientDescent(gfg, zeros, cgParams_); } @@ -540,7 +554,6 @@ TEST(NonlinearOptimizer, subclass_solver) { } /* ************************************************************************* */ -#include TEST( NonlinearOptimizer, logfile ) { NonlinearFactorGraph fg(example::createReallyNonlinearFactorGraph()); @@ -566,6 +579,58 @@ TEST( NonlinearOptimizer, logfile ) // EXPECT(actual.str()==expected.str()); } +/* ************************************************************************* */ +TEST( NonlinearOptimizer, iterationHook_LM ) +{ + NonlinearFactorGraph fg(example::createReallyNonlinearFactorGraph()); + + Point2 x0(3,3); + Values c0; + c0.insert(X(1), x0); + + // Levenberg-Marquardt + LevenbergMarquardtParams lmParams; + size_t lastIterCalled = 0; + lmParams.iterationHook = [&](size_t iteration, double oldError, double newError) + { + // Tests: + lastIterCalled = iteration; + EXPECT(newError " << newError <<"\n"; + }; + LevenbergMarquardtOptimizer(fg, c0, lmParams).optimize(); + + EXPECT(lastIterCalled>5); +} +/* ************************************************************************* */ +TEST( NonlinearOptimizer, iterationHook_CG ) +{ + NonlinearFactorGraph fg(example::createReallyNonlinearFactorGraph()); + + Point2 x0(3,3); + Values c0; + c0.insert(X(1), x0); + + // Levenberg-Marquardt + NonlinearConjugateGradientOptimizer::Parameters cgParams; + size_t lastIterCalled = 0; + cgParams.iterationHook = [&](size_t iteration, double oldError, double newError) + { + // Tests: + lastIterCalled = iteration; + EXPECT(newError " << newError <<"\n"; + }; + NonlinearConjugateGradientOptimizer(fg, c0, cgParams).optimize(); + + EXPECT(lastIterCalled>5); +} + + /* ************************************************************************* */ //// Minimal traits example struct MyType : public Vector3 { diff --git a/tests/testSerializationSLAM.cpp b/tests/testSerializationSLAM.cpp index e3252a90b..2e99aff71 100644 --- a/tests/testSerializationSLAM.cpp +++ b/tests/testSerializationSLAM.cpp @@ -18,15 +18,10 @@ #include -#if 0 - #include -//#include #include #include -//#include #include -//#include #include #include #include @@ -34,8 +29,6 @@ #include #include #include -#include -#include #include #include #include @@ -49,6 +42,7 @@ #include #include #include +#include #include @@ -57,8 +51,6 @@ using namespace gtsam; using 
namespace gtsam::serializationTestHelpers; // Creating as many permutations of factors as possible -typedef PriorFactor PriorFactorLieVector; -typedef PriorFactor PriorFactorLieMatrix; typedef PriorFactor PriorFactorPoint2; typedef PriorFactor PriorFactorStereoPoint2; typedef PriorFactor PriorFactorPoint3; @@ -69,12 +61,9 @@ typedef PriorFactor PriorFactorPose3; typedef PriorFactor PriorFactorCal3_S2; typedef PriorFactor PriorFactorCal3DS2; typedef PriorFactor PriorFactorCalibratedCamera; -typedef PriorFactor PriorFactorSimpleCamera; typedef PriorFactor PriorFactorPinholeCameraCal3_S2; typedef PriorFactor PriorFactorStereoCamera; -typedef BetweenFactor BetweenFactorLieVector; -typedef BetweenFactor BetweenFactorLieMatrix; typedef BetweenFactor BetweenFactorPoint2; typedef BetweenFactor BetweenFactorPoint3; typedef BetweenFactor BetweenFactorRot2; @@ -82,8 +71,6 @@ typedef BetweenFactor BetweenFactorRot3; typedef BetweenFactor BetweenFactorPose2; typedef BetweenFactor BetweenFactorPose3; -typedef NonlinearEquality NonlinearEqualityLieVector; -typedef NonlinearEquality NonlinearEqualityLieMatrix; typedef NonlinearEquality NonlinearEqualityPoint2; typedef NonlinearEquality NonlinearEqualityStereoPoint2; typedef NonlinearEquality NonlinearEqualityPoint3; @@ -94,7 +81,6 @@ typedef NonlinearEquality NonlinearEqualityPose3; typedef NonlinearEquality NonlinearEqualityCal3_S2; typedef NonlinearEquality NonlinearEqualityCal3DS2; typedef NonlinearEquality NonlinearEqualityCalibratedCamera; -typedef NonlinearEquality NonlinearEqualitySimpleCamera; typedef NonlinearEquality NonlinearEqualityPinholeCameraCal3_S2; typedef NonlinearEquality NonlinearEqualityStereoCamera; @@ -103,10 +89,8 @@ typedef RangeFactor RangeFactor3D; typedef RangeFactor RangeFactorPose2; typedef RangeFactor RangeFactorPose3; typedef RangeFactor RangeFactorCalibratedCameraPoint; -typedef RangeFactor RangeFactorSimpleCameraPoint; typedef RangeFactor RangeFactorPinholeCameraCal3_S2Point; typedef RangeFactor RangeFactorCalibratedCamera; -typedef RangeFactor RangeFactorSimpleCamera; typedef RangeFactor RangeFactorPinholeCameraCal3_S2; typedef BearingRangeFactor BearingRangeFactor2D; @@ -116,7 +100,7 @@ typedef GenericProjectionFactor GenericProjectionFactorC typedef GenericProjectionFactor GenericProjectionFactorCal3DS2; typedef gtsam::GeneralSFMFactor GeneralSFMFactorCal3_S2; -//typedef gtsam::GeneralSFMFactor GeneralSFMFactorCal3DS2; +typedef gtsam::GeneralSFMFactor GeneralSFMFactorCal3DS2; typedef gtsam::GeneralSFMFactor2 GeneralSFMFactor2Cal3_S2; @@ -148,8 +132,6 @@ BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal"); /* Create GUIDs for geometry */ /* ************************************************************************* */ -GTSAM_VALUE_EXPORT(gtsam::LieVector); -GTSAM_VALUE_EXPORT(gtsam::LieMatrix); GTSAM_VALUE_EXPORT(gtsam::Point2); GTSAM_VALUE_EXPORT(gtsam::StereoPoint2); GTSAM_VALUE_EXPORT(gtsam::Point3); @@ -161,7 +143,6 @@ GTSAM_VALUE_EXPORT(gtsam::Cal3_S2); GTSAM_VALUE_EXPORT(gtsam::Cal3DS2); GTSAM_VALUE_EXPORT(gtsam::Cal3_S2Stereo); GTSAM_VALUE_EXPORT(gtsam::CalibratedCamera); -GTSAM_VALUE_EXPORT(gtsam::SimpleCamera); GTSAM_VALUE_EXPORT(gtsam::PinholeCameraCal3_S2); GTSAM_VALUE_EXPORT(gtsam::StereoCamera); @@ -170,8 +151,6 @@ GTSAM_VALUE_EXPORT(gtsam::StereoCamera); BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor"); BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor"); -BOOST_CLASS_EXPORT_GUID(PriorFactorLieVector, "gtsam::PriorFactorLieVector"); 
-BOOST_CLASS_EXPORT_GUID(PriorFactorLieMatrix, "gtsam::PriorFactorLieMatrix"); BOOST_CLASS_EXPORT_GUID(PriorFactorPoint2, "gtsam::PriorFactorPoint2"); BOOST_CLASS_EXPORT_GUID(PriorFactorStereoPoint2, "gtsam::PriorFactorStereoPoint2"); BOOST_CLASS_EXPORT_GUID(PriorFactorPoint3, "gtsam::PriorFactorPoint3"); @@ -182,11 +161,8 @@ BOOST_CLASS_EXPORT_GUID(PriorFactorPose3, "gtsam::PriorFactorPose3"); BOOST_CLASS_EXPORT_GUID(PriorFactorCal3_S2, "gtsam::PriorFactorCal3_S2"); BOOST_CLASS_EXPORT_GUID(PriorFactorCal3DS2, "gtsam::PriorFactorCal3DS2"); BOOST_CLASS_EXPORT_GUID(PriorFactorCalibratedCamera, "gtsam::PriorFactorCalibratedCamera"); -BOOST_CLASS_EXPORT_GUID(PriorFactorSimpleCamera, "gtsam::PriorFactorSimpleCamera"); BOOST_CLASS_EXPORT_GUID(PriorFactorStereoCamera, "gtsam::PriorFactorStereoCamera"); -BOOST_CLASS_EXPORT_GUID(BetweenFactorLieVector, "gtsam::BetweenFactorLieVector"); -BOOST_CLASS_EXPORT_GUID(BetweenFactorLieMatrix, "gtsam::BetweenFactorLieMatrix"); BOOST_CLASS_EXPORT_GUID(BetweenFactorPoint2, "gtsam::BetweenFactorPoint2"); BOOST_CLASS_EXPORT_GUID(BetweenFactorPoint3, "gtsam::BetweenFactorPoint3"); BOOST_CLASS_EXPORT_GUID(BetweenFactorRot2, "gtsam::BetweenFactorRot2"); @@ -194,8 +170,6 @@ BOOST_CLASS_EXPORT_GUID(BetweenFactorRot3, "gtsam::BetweenFactorRot3"); BOOST_CLASS_EXPORT_GUID(BetweenFactorPose2, "gtsam::BetweenFactorPose2"); BOOST_CLASS_EXPORT_GUID(BetweenFactorPose3, "gtsam::BetweenFactorPose3"); -BOOST_CLASS_EXPORT_GUID(NonlinearEqualityLieVector, "gtsam::NonlinearEqualityLieVector"); -BOOST_CLASS_EXPORT_GUID(NonlinearEqualityLieMatrix, "gtsam::NonlinearEqualityLieMatrix"); BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPoint2, "gtsam::NonlinearEqualityPoint2"); BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoPoint2, "gtsam::NonlinearEqualityStereoPoint2"); BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPoint3, "gtsam::NonlinearEqualityPoint3"); @@ -206,7 +180,6 @@ BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPose3, "gtsam::NonlinearEqualityPose3") BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3_S2, "gtsam::NonlinearEqualityCal3_S2"); BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3DS2, "gtsam::NonlinearEqualityCal3DS2"); BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCalibratedCamera, "gtsam::NonlinearEqualityCalibratedCamera"); -BOOST_CLASS_EXPORT_GUID(NonlinearEqualitySimpleCamera, "gtsam::NonlinearEqualitySimpleCamera"); BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoCamera, "gtsam::NonlinearEqualityStereoCamera"); BOOST_CLASS_EXPORT_GUID(RangeFactor2D, "gtsam::RangeFactor2D"); @@ -214,9 +187,9 @@ BOOST_CLASS_EXPORT_GUID(RangeFactor3D, "gtsam::RangeFactor3D"); BOOST_CLASS_EXPORT_GUID(RangeFactorPose2, "gtsam::RangeFactorPose2"); BOOST_CLASS_EXPORT_GUID(RangeFactorPose3, "gtsam::RangeFactorPose3"); BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCameraPoint, "gtsam::RangeFactorCalibratedCameraPoint"); -BOOST_CLASS_EXPORT_GUID(RangeFactorSimpleCameraPoint, "gtsam::RangeFactorSimpleCameraPoint"); +BOOST_CLASS_EXPORT_GUID(RangeFactorPinholeCameraCal3_S2Point, "gtsam::RangeFactorPinholeCameraCal3_S2Point"); BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCamera, "gtsam::RangeFactorCalibratedCamera"); -BOOST_CLASS_EXPORT_GUID(RangeFactorSimpleCamera, "gtsam::RangeFactorSimpleCamera"); +BOOST_CLASS_EXPORT_GUID(RangeFactorPinholeCameraCal3_S2, "gtsam::RangeFactorPinholeCameraCal3_S2"); BOOST_CLASS_EXPORT_GUID(BearingRangeFactor2D, "gtsam::BearingRangeFactor2D"); @@ -224,7 +197,7 @@ BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3_S2, "gtsam::GenericProjectio 
BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3DS2, "gtsam::GenericProjectionFactorCal3DS2"); BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3_S2, "gtsam::GeneralSFMFactorCal3_S2"); -//BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3DS2, "gtsam::GeneralSFMFactorCal3DS2"); +BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3DS2, "gtsam::GeneralSFMFactorCal3DS2"); BOOST_CLASS_EXPORT_GUID(GeneralSFMFactor2Cal3_S2, "gtsam::GeneralSFMFactor2Cal3_S2"); @@ -286,8 +259,6 @@ TEST (testSerializationSLAM, smallExample_nonlinear) { /* ************************************************************************* */ TEST (testSerializationSLAM, factors) { - LieVector lieVector((Vector(4) << 1.0, 2.0, 3.0, 4.0).finished()); - LieMatrix lieMatrix((Matrix(2, 3) << 1.0, 2.0, 3.0, 4.0, 5.0 ,6.0).finished()); Point2 point2(1.0, 2.0); StereoPoint2 stereoPoint2(1.0, 2.0, 3.0); Point3 point3(1.0, 2.0, 3.0); @@ -311,8 +282,6 @@ TEST (testSerializationSLAM, factors) { b11('b',11), b12('b',12), b13('b',13), b14('b',14), b15('b',15); Values values; - values.insert(a01, lieVector); - values.insert(a02, lieMatrix); values.insert(a03, point2); values.insert(a04, stereoPoint2); values.insert(a05, point3); @@ -344,8 +313,6 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsDereferencedXML(robust1)); EXPECT(equalsDereferencedBinary(robust1)); - PriorFactorLieVector priorFactorLieVector(a01, lieVector, model4); - PriorFactorLieMatrix priorFactorLieMatrix(a02, lieMatrix, model6); PriorFactorPoint2 priorFactorPoint2(a03, point2, model2); PriorFactorStereoPoint2 priorFactorStereoPoint2(a04, stereoPoint2, model3); PriorFactorPoint3 priorFactorPoint3(a05, point3, model3); @@ -356,11 +323,8 @@ TEST (testSerializationSLAM, factors) { PriorFactorCal3_S2 priorFactorCal3_S2(a10, cal3_s2, model5); PriorFactorCal3DS2 priorFactorCal3DS2(a11, cal3ds2, model9); PriorFactorCalibratedCamera priorFactorCalibratedCamera(a12, calibratedCamera, model6); - PriorFactorSimpleCamera priorFactorSimpleCamera(a13, simpleCamera, model11); PriorFactorStereoCamera priorFactorStereoCamera(a14, stereoCamera, model11); - BetweenFactorLieVector betweenFactorLieVector(a01, b01, lieVector, model4); - BetweenFactorLieMatrix betweenFactorLieMatrix(a02, b02, lieMatrix, model6); BetweenFactorPoint2 betweenFactorPoint2(a03, b03, point2, model2); BetweenFactorPoint3 betweenFactorPoint3(a05, b05, point3, model3); BetweenFactorRot2 betweenFactorRot2(a06, b06, rot2, model1); @@ -368,8 +332,6 @@ TEST (testSerializationSLAM, factors) { BetweenFactorPose2 betweenFactorPose2(a08, b08, pose2, model3); BetweenFactorPose3 betweenFactorPose3(a09, b09, pose3, model6); - NonlinearEqualityLieVector nonlinearEqualityLieVector(a01, lieVector); - NonlinearEqualityLieMatrix nonlinearEqualityLieMatrix(a02, lieMatrix); NonlinearEqualityPoint2 nonlinearEqualityPoint2(a03, point2); NonlinearEqualityStereoPoint2 nonlinearEqualityStereoPoint2(a04, stereoPoint2); NonlinearEqualityPoint3 nonlinearEqualityPoint3(a05, point3); @@ -380,7 +342,6 @@ TEST (testSerializationSLAM, factors) { NonlinearEqualityCal3_S2 nonlinearEqualityCal3_S2(a10, cal3_s2); NonlinearEqualityCal3DS2 nonlinearEqualityCal3DS2(a11, cal3ds2); NonlinearEqualityCalibratedCamera nonlinearEqualityCalibratedCamera(a12, calibratedCamera); - NonlinearEqualitySimpleCamera nonlinearEqualitySimpleCamera(a13, simpleCamera); NonlinearEqualityStereoCamera nonlinearEqualityStereoCamera(a14, stereoCamera); RangeFactor2D rangeFactor2D(a08, a03, 2.0, model1); @@ -388,9 +349,9 @@ TEST (testSerializationSLAM, factors) { RangeFactorPose2 
rangeFactorPose2(a08, b08, 2.0, model1); RangeFactorPose3 rangeFactorPose3(a09, b09, 2.0, model1); RangeFactorCalibratedCameraPoint rangeFactorCalibratedCameraPoint(a12, a05, 2.0, model1); - RangeFactorSimpleCameraPoint rangeFactorSimpleCameraPoint(a13, a05, 2.0, model1); + RangeFactorPinholeCameraCal3_S2Point rangeFactorPinholeCameraCal3_S2Point(a13, a05, 2.0, model1); RangeFactorCalibratedCamera rangeFactorCalibratedCamera(a12, b12, 2.0, model1); - RangeFactorSimpleCamera rangeFactorSimpleCamera(a13, b13, 2.0, model1); + RangeFactorPinholeCameraCal3_S2 rangeFactorPinholeCameraCal3_S2(a13, b13, 2.0, model1); BearingRangeFactor2D bearingRangeFactor2D(a08, a03, rot2, 2.0, model2); @@ -405,8 +366,6 @@ TEST (testSerializationSLAM, factors) { NonlinearFactorGraph graph; - graph += priorFactorLieVector; - graph += priorFactorLieMatrix; graph += priorFactorPoint2; graph += priorFactorStereoPoint2; graph += priorFactorPoint3; @@ -417,11 +376,8 @@ TEST (testSerializationSLAM, factors) { graph += priorFactorCal3_S2; graph += priorFactorCal3DS2; graph += priorFactorCalibratedCamera; - graph += priorFactorSimpleCamera; graph += priorFactorStereoCamera; - graph += betweenFactorLieVector; - graph += betweenFactorLieMatrix; graph += betweenFactorPoint2; graph += betweenFactorPoint3; graph += betweenFactorRot2; @@ -429,8 +385,6 @@ TEST (testSerializationSLAM, factors) { graph += betweenFactorPose2; graph += betweenFactorPose3; - graph += nonlinearEqualityLieVector; - graph += nonlinearEqualityLieMatrix; graph += nonlinearEqualityPoint2; graph += nonlinearEqualityStereoPoint2; graph += nonlinearEqualityPoint3; @@ -441,7 +395,6 @@ TEST (testSerializationSLAM, factors) { graph += nonlinearEqualityCal3_S2; graph += nonlinearEqualityCal3DS2; graph += nonlinearEqualityCalibratedCamera; - graph += nonlinearEqualitySimpleCamera; graph += nonlinearEqualityStereoCamera; graph += rangeFactor2D; @@ -449,9 +402,9 @@ TEST (testSerializationSLAM, factors) { graph += rangeFactorPose2; graph += rangeFactorPose3; graph += rangeFactorCalibratedCameraPoint; - graph += rangeFactorSimpleCameraPoint; + graph += rangeFactorPinholeCameraCal3_S2Point; graph += rangeFactorCalibratedCamera; - graph += rangeFactorSimpleCamera; + graph += rangeFactorPinholeCameraCal3_S2; graph += bearingRangeFactor2D; @@ -471,8 +424,6 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsObj(values)); EXPECT(equalsObj(graph)); - EXPECT(equalsObj(priorFactorLieVector)); - EXPECT(equalsObj(priorFactorLieMatrix)); EXPECT(equalsObj(priorFactorPoint2)); EXPECT(equalsObj(priorFactorStereoPoint2)); EXPECT(equalsObj(priorFactorPoint3)); @@ -483,11 +434,8 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsObj(priorFactorCal3_S2)); EXPECT(equalsObj(priorFactorCal3DS2)); EXPECT(equalsObj(priorFactorCalibratedCamera)); - EXPECT(equalsObj(priorFactorSimpleCamera)); EXPECT(equalsObj(priorFactorStereoCamera)); - EXPECT(equalsObj(betweenFactorLieVector)); - EXPECT(equalsObj(betweenFactorLieMatrix)); EXPECT(equalsObj(betweenFactorPoint2)); EXPECT(equalsObj(betweenFactorPoint3)); EXPECT(equalsObj(betweenFactorRot2)); @@ -495,8 +443,6 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsObj(betweenFactorPose2)); EXPECT(equalsObj(betweenFactorPose3)); - EXPECT(equalsObj(nonlinearEqualityLieVector)); - EXPECT(equalsObj(nonlinearEqualityLieMatrix)); EXPECT(equalsObj(nonlinearEqualityPoint2)); EXPECT(equalsObj(nonlinearEqualityStereoPoint2)); EXPECT(equalsObj(nonlinearEqualityPoint3)); @@ -507,7 +453,6 @@ TEST (testSerializationSLAM, factors) { 
EXPECT(equalsObj(nonlinearEqualityCal3_S2)); EXPECT(equalsObj(nonlinearEqualityCal3DS2)); EXPECT(equalsObj(nonlinearEqualityCalibratedCamera)); - EXPECT(equalsObj(nonlinearEqualitySimpleCamera)); EXPECT(equalsObj(nonlinearEqualityStereoCamera)); EXPECT(equalsObj(rangeFactor2D)); @@ -515,9 +460,9 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsObj(rangeFactorPose2)); EXPECT(equalsObj(rangeFactorPose3)); EXPECT(equalsObj(rangeFactorCalibratedCameraPoint)); - EXPECT(equalsObj(rangeFactorSimpleCameraPoint)); + EXPECT(equalsObj(rangeFactorPinholeCameraCal3_S2Point)); EXPECT(equalsObj(rangeFactorCalibratedCamera)); - EXPECT(equalsObj(rangeFactorSimpleCamera)); + EXPECT(equalsObj(rangeFactorPinholeCameraCal3_S2)); EXPECT(equalsObj(bearingRangeFactor2D)); @@ -537,8 +482,6 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsXML(values)); EXPECT(equalsXML(graph)); - EXPECT(equalsXML(priorFactorLieVector)); - EXPECT(equalsXML(priorFactorLieMatrix)); EXPECT(equalsXML(priorFactorPoint2)); EXPECT(equalsXML(priorFactorStereoPoint2)); EXPECT(equalsXML(priorFactorPoint3)); @@ -549,11 +492,8 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsXML(priorFactorCal3_S2)); EXPECT(equalsXML(priorFactorCal3DS2)); EXPECT(equalsXML(priorFactorCalibratedCamera)); - EXPECT(equalsXML(priorFactorSimpleCamera)); EXPECT(equalsXML(priorFactorStereoCamera)); - EXPECT(equalsXML(betweenFactorLieVector)); - EXPECT(equalsXML(betweenFactorLieMatrix)); EXPECT(equalsXML(betweenFactorPoint2)); EXPECT(equalsXML(betweenFactorPoint3)); EXPECT(equalsXML(betweenFactorRot2)); @@ -561,8 +501,6 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsXML(betweenFactorPose2)); EXPECT(equalsXML(betweenFactorPose3)); - EXPECT(equalsXML(nonlinearEqualityLieVector)); - EXPECT(equalsXML(nonlinearEqualityLieMatrix)); EXPECT(equalsXML(nonlinearEqualityPoint2)); EXPECT(equalsXML(nonlinearEqualityStereoPoint2)); EXPECT(equalsXML(nonlinearEqualityPoint3)); @@ -573,7 +511,6 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsXML(nonlinearEqualityCal3_S2)); EXPECT(equalsXML(nonlinearEqualityCal3DS2)); EXPECT(equalsXML(nonlinearEqualityCalibratedCamera)); - EXPECT(equalsXML(nonlinearEqualitySimpleCamera)); EXPECT(equalsXML(nonlinearEqualityStereoCamera)); EXPECT(equalsXML(rangeFactor2D)); @@ -581,9 +518,9 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsXML(rangeFactorPose2)); EXPECT(equalsXML(rangeFactorPose3)); EXPECT(equalsXML(rangeFactorCalibratedCameraPoint)); - EXPECT(equalsXML(rangeFactorSimpleCameraPoint)); + EXPECT(equalsXML(rangeFactorPinholeCameraCal3_S2Point)); EXPECT(equalsXML(rangeFactorCalibratedCamera)); - EXPECT(equalsXML(rangeFactorSimpleCamera)); + EXPECT(equalsXML(rangeFactorPinholeCameraCal3_S2)); EXPECT(equalsXML(bearingRangeFactor2D)); @@ -603,8 +540,6 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsBinary(values)); EXPECT(equalsBinary(graph)); - EXPECT(equalsBinary(priorFactorLieVector)); - EXPECT(equalsBinary(priorFactorLieMatrix)); EXPECT(equalsBinary(priorFactorPoint2)); EXPECT(equalsBinary(priorFactorStereoPoint2)); EXPECT(equalsBinary(priorFactorPoint3)); @@ -615,11 +550,8 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsBinary(priorFactorCal3_S2)); EXPECT(equalsBinary(priorFactorCal3DS2)); EXPECT(equalsBinary(priorFactorCalibratedCamera)); - EXPECT(equalsBinary(priorFactorSimpleCamera)); EXPECT(equalsBinary(priorFactorStereoCamera)); - EXPECT(equalsBinary(betweenFactorLieVector)); - EXPECT(equalsBinary(betweenFactorLieMatrix)); EXPECT(equalsBinary(betweenFactorPoint2)); 
EXPECT(equalsBinary(betweenFactorPoint3)); EXPECT(equalsBinary(betweenFactorRot2)); @@ -627,8 +559,6 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsBinary(betweenFactorPose2)); EXPECT(equalsBinary(betweenFactorPose3)); - EXPECT(equalsBinary(nonlinearEqualityLieVector)); - EXPECT(equalsBinary(nonlinearEqualityLieMatrix)); EXPECT(equalsBinary(nonlinearEqualityPoint2)); EXPECT(equalsBinary(nonlinearEqualityStereoPoint2)); EXPECT(equalsBinary(nonlinearEqualityPoint3)); @@ -639,7 +569,6 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsBinary(nonlinearEqualityCal3_S2)); EXPECT(equalsBinary(nonlinearEqualityCal3DS2)); EXPECT(equalsBinary(nonlinearEqualityCalibratedCamera)); - EXPECT(equalsBinary(nonlinearEqualitySimpleCamera)); EXPECT(equalsBinary(nonlinearEqualityStereoCamera)); EXPECT(equalsBinary(rangeFactor2D)); @@ -647,9 +576,9 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsBinary(rangeFactorPose2)); EXPECT(equalsBinary(rangeFactorPose3)); EXPECT(equalsBinary(rangeFactorCalibratedCameraPoint)); - EXPECT(equalsBinary(rangeFactorSimpleCameraPoint)); + EXPECT(equalsBinary(rangeFactorPinholeCameraCal3_S2Point)); EXPECT(equalsBinary(rangeFactorCalibratedCamera)); - EXPECT(equalsBinary(rangeFactorSimpleCamera)); + EXPECT(equalsBinary(rangeFactorPinholeCameraCal3_S2)); EXPECT(equalsBinary(bearingRangeFactor2D)); @@ -663,7 +592,6 @@ TEST (testSerializationSLAM, factors) { EXPECT(equalsBinary(genericStereoFactor3D)); } -#endif /* ************************************************************************* */ int main() { TestResult tr; return TestRegistry::runAllTests(tr); } diff --git a/tests/testTranslationRecovery.cpp b/tests/testTranslationRecovery.cpp new file mode 100644 index 000000000..7260fd5af --- /dev/null +++ b/tests/testTranslationRecovery.cpp @@ -0,0 +1,246 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file testTranslationRecovery.cpp + * @author Frank Dellaert, Akshay Krishnan + * @date March 2020 + * @brief test recovering translations when rotations are given. + */ + +#include + +#include +#include + +using namespace std; +using namespace gtsam; + +// Returns the Unit3 direction as measured in the binary measurement, but +// computed from the input poses. Helper function used in the unit tests. +Unit3 GetDirectionFromPoses(const Values& poses, + const BinaryMeasurement& unitTranslation) { + const Pose3 wTa = poses.at(unitTranslation.key1()), + wTb = poses.at(unitTranslation.key2()); + const Point3 Ta = wTa.translation(), Tb = wTb.translation(); + return Unit3(Tb - Ta); +} + +/* ************************************************************************* */ +// We read the BAL file, which has 3 cameras in it, with poses. We then assume +// the rotations are correct, but translations have to be estimated from +// translation directions only. Since we have 3 cameras, A, B, and C, we can at +// most create three relative measurements, let's call them w_aZb, w_aZc, and +// bZc. These will be of type Unit3. We then call `recoverTranslations` which +// sets up an optimization problem for the three unknown translations. 
+TEST(TranslationRecovery, BAL) { + const string filename = findExampleDataFile("dubrovnik-3-7-pre"); + SfmData db; + bool success = readBAL(filename, db); + if (!success) throw runtime_error("Could not access file!"); + + // Get camera poses, as Values + size_t j = 0; + Values poses; + for (auto camera : db.cameras) { + poses.insert(j++, camera.pose()); + } + + // Simulate measurements + const auto relativeTranslations = TranslationRecovery::SimulateMeasurements( + poses, {{0, 1}, {0, 2}, {1, 2}}); + + // Check simulated measurements. + for (auto& unitTranslation : relativeTranslations) { + EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation), + unitTranslation.measured())); + } + + TranslationRecovery algorithm(relativeTranslations); + const auto graph = algorithm.buildGraph(); + EXPECT_LONGS_EQUAL(3, graph.size()); + + // Run translation recovery + const double scale = 2.0; + const auto result = algorithm.run(scale); + + // Check result for first two translations, determined by prior + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0))); + EXPECT(assert_equal( + Point3(2 * GetDirectionFromPoses(poses, relativeTranslations[0])), + result.at(1))); + + // Check that the third translations is correct + Point3 Ta = poses.at(0).translation(); + Point3 Tb = poses.at(1).translation(); + Point3 Tc = poses.at(2).translation(); + Point3 expected = (Tc - Ta) * (scale / (Tb - Ta).norm()); + EXPECT(assert_equal(expected, result.at(2), 1e-4)); + + // TODO(frank): how to get stats back? + // EXPECT_DOUBLES_EQUAL(0.0199833, actualError, 1e-5); +} + +TEST(TranslationRecovery, TwoPoseTest) { + // Create a dataset with 2 poses. + // __ __ + // \/ \/ + // 0 _____ 1 + // + // 0 and 1 face in the same direction but have a translation offset. + Values poses; + poses.insert(0, Pose3(Rot3(), Point3(0, 0, 0))); + poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0))); + + auto relativeTranslations = + TranslationRecovery::SimulateMeasurements(poses, {{0, 1}}); + + // Check simulated measurements. + for (auto& unitTranslation : relativeTranslations) { + EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation), + unitTranslation.measured())); + } + + TranslationRecovery algorithm(relativeTranslations); + const auto graph = algorithm.buildGraph(); + EXPECT_LONGS_EQUAL(1, graph.size()); + + // Run translation recovery + const auto result = algorithm.run(/*scale=*/3.0); + + // Check result for first two translations, determined by prior + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0))); + EXPECT(assert_equal(Point3(3, 0, 0), result.at(1))); +} + +TEST(TranslationRecovery, ThreePoseTest) { + // Create a dataset with 3 poses. + // __ __ + // \/ \/ + // 0 _____ 1 + // \ __ / + // \\// + // 3 + // + // 0 and 1 face in the same direction but have a translation offset. 3 is in + // the same direction as 0 and 1, in between 0 and 1, with some Y axis offset. + + Values poses; + poses.insert(0, Pose3(Rot3(), Point3(0, 0, 0))); + poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0))); + poses.insert(3, Pose3(Rot3(), Point3(1, -1, 0))); + + auto relativeTranslations = TranslationRecovery::SimulateMeasurements( + poses, {{0, 1}, {1, 3}, {3, 0}}); + + // Check simulated measurements. 
+ for (auto& unitTranslation : relativeTranslations) { + EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation), + unitTranslation.measured())); + } + + TranslationRecovery algorithm(relativeTranslations); + const auto graph = algorithm.buildGraph(); + EXPECT_LONGS_EQUAL(3, graph.size()); + + const auto result = algorithm.run(/*scale=*/3.0); + + // Check result + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0))); + EXPECT(assert_equal(Point3(3, 0, 0), result.at(1))); + EXPECT(assert_equal(Point3(1.5, -1.5, 0), result.at(3))); +} + +TEST(TranslationRecovery, ThreePosesIncludingZeroTranslation) { + // Create a dataset with 3 poses. + // __ __ + // \/ \/ + // 0 _____ 1 + // 2 <| + // + // 0 and 1 face in the same direction but have a translation offset. 2 is at + // the same point as 1 but is rotated, with little FOV overlap. + Values poses; + poses.insert(0, Pose3(Rot3(), Point3(0, 0, 0))); + poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0))); + poses.insert(2, Pose3(Rot3::RzRyRx(-M_PI / 2, 0, 0), Point3(2, 0, 0))); + + auto relativeTranslations = + TranslationRecovery::SimulateMeasurements(poses, {{0, 1}, {1, 2}}); + + // Check simulated measurements. + for (auto& unitTranslation : relativeTranslations) { + EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation), + unitTranslation.measured())); + } + + TranslationRecovery algorithm(relativeTranslations); + const auto graph = algorithm.buildGraph(); + // There is only 1 non-zero translation edge. + EXPECT_LONGS_EQUAL(1, graph.size()); + + // Run translation recovery + const auto result = algorithm.run(/*scale=*/3.0); + + // Check result + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0))); + EXPECT(assert_equal(Point3(3, 0, 0), result.at(1))); + EXPECT(assert_equal(Point3(3, 0, 0), result.at(2))); +} + +TEST(TranslationRecovery, FourPosesIncludingZeroTranslation) { + // Create a dataset with 4 poses. + // __ __ + // \/ \/ + // 0 _____ 1 + // \ __ 2 <| + // \\// + // 3 + // + // 0 and 1 face in the same direction but have a translation offset. 2 is at + // the same point as 1 but is rotated, with very little FOV overlap. 3 is in + // the same direction as 0 and 1, in between 0 and 1, with some Y axis offset. + + Values poses; + poses.insert(0, Pose3(Rot3(), Point3(0, 0, 0))); + poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0))); + poses.insert(2, Pose3(Rot3::RzRyRx(-M_PI / 2, 0, 0), Point3(2, 0, 0))); + poses.insert(3, Pose3(Rot3(), Point3(1, -1, 0))); + + auto relativeTranslations = TranslationRecovery::SimulateMeasurements( + poses, {{0, 1}, {1, 2}, {1, 3}, {3, 0}}); + + // Check simulated measurements. 
+ for (auto& unitTranslation : relativeTranslations) { + EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation), + unitTranslation.measured())); + } + + TranslationRecovery algorithm(relativeTranslations); + const auto graph = algorithm.buildGraph(); + EXPECT_LONGS_EQUAL(3, graph.size()); + + // Run translation recovery + const auto result = algorithm.run(/*scale=*/4.0); + + // Check result + EXPECT(assert_equal(Point3(0, 0, 0), result.at(0))); + EXPECT(assert_equal(Point3(4, 0, 0), result.at(1))); + EXPECT(assert_equal(Point3(4, 0, 0), result.at(2))); + EXPECT(assert_equal(Point3(2, -2, 0), result.at(3))); +} + +/* ************************************************************************* */ +int main() { + TestResult tr; + return TestRegistry::runAllTests(tr); +} +/* ************************************************************************* */ diff --git a/timing/timeFactorOverhead.cpp b/timing/timeFactorOverhead.cpp index d9c19703c..038901388 100644 --- a/timing/timeFactorOverhead.cpp +++ b/timing/timeFactorOverhead.cpp @@ -22,13 +22,14 @@ #include #include -#include +#include #include using namespace gtsam; using namespace std; -static boost::variate_generator > rg(boost::mt19937(), boost::uniform_real<>(0.0, 1.0)); +static std::mt19937 rng; +static std::uniform_real_distribution<> uniform(0.0, 1.0); int main(int argc, char *argv[]) { @@ -64,10 +65,10 @@ int main(int argc, char *argv[]) { Matrix A(blockdim, vardim); for(size_t j=0; j(key, A, b, noise)); } } @@ -111,10 +112,10 @@ int main(int argc, char *argv[]) { // Generate a random Gaussian factor for(size_t j=0; j(key, Acomb, bcomb, noiseModel::Isotropic::Sigma(blockdim*nBlocks, 1.0))); diff --git a/timing/timeMatrixOps.cpp b/timing/timeMatrixOps.cpp index d9f2ffaf6..95333fbf8 100644 --- a/timing/timeMatrixOps.cpp +++ b/timing/timeMatrixOps.cpp @@ -16,7 +16,6 @@ * @date Sep 18, 2010 */ -#include #include #include @@ -24,6 +23,7 @@ #include #include +#include #include #include @@ -33,7 +33,8 @@ using namespace std; using boost::format; using namespace boost::lambda; -static boost::variate_generator > rng(boost::mt19937(), boost::uniform_real<>(-1.0, 0.0)); +static std::mt19937 rng; +static std::uniform_real_distribution<> uniform(-1.0, 0.0); //typedef ublas::matrix matrix; //typedef ublas::matrix_range matrix_range; //typedef Eigen::Matrix matrix; @@ -53,8 +54,8 @@ int main(int argc, char* argv[]) { volatile size_t n=300; volatile size_t nReps = 1000; assert(m > n); - boost::variate_generator > rni(boost::mt19937(), boost::uniform_int(0,m-1)); - boost::variate_generator > rnj(boost::mt19937(), boost::uniform_int(0,n-1)); + std::uniform_int_distribution uniform_i(0,m-1); + std::uniform_int_distribution uniform_j(0,n-1); gtsam::Matrix mat((int)m,(int)n); gtsam::SubMatrix full = mat.block(0, 0, m, n); gtsam::SubMatrix top = mat.block(0, 0, n, n); @@ -75,13 +76,13 @@ int main(int argc, char* argv[]) { for(size_t rep=0; rep<1000; ++rep) for(size_t i=0; i<(size_t)mat.rows(); ++i) for(size_t j=0; j<(size_t)mat.cols(); ++j) - mat(i,j) = rng(); + mat(i,j) = uniform(rng); gttic_(basicTime); for(size_t rep=0; repsecs(); @@ -92,7 +93,7 @@ int main(int argc, char* argv[]) { for(size_t rep=0; repsecs(); @@ -103,7 +104,7 @@ int main(int argc, char* argv[]) { for(size_t rep=0; repsecs(); @@ -114,7 +115,7 @@ int main(int argc, char* argv[]) { for(size_t rep=0; repsecs(); @@ -133,13 +134,13 @@ int main(int argc, char* argv[]) { for(size_t rep=0; rep<1000; ++rep) for(size_t j=0; j<(size_t)mat.cols(); ++j) for(size_t i=0; i<(size_t)mat.rows(); 
++i) - mat(i,j) = rng(); + mat(i,j) = uniform(rng); gttic_(basicTime); for(size_t rep=0; repsecs(); @@ -150,7 +151,7 @@ int main(int argc, char* argv[]) { for(size_t rep=0; repsecs(); @@ -161,7 +162,7 @@ int main(int argc, char* argv[]) { for(size_t rep=0; repsecs(); @@ -172,7 +173,7 @@ int main(int argc, char* argv[]) { for(size_t rep=0; repsecs(); @@ -190,14 +191,14 @@ int main(int argc, char* argv[]) { cout << "Row-major matrix, random assignment:" << endl; // Do a few initial assignments to let any cache effects stabilize - for_each(ijs.begin(), ijs.end(), _1 = make_pair(rni(),rnj())); + for_each(ijs.begin(), ijs.end(), _1 = make_pair(uniform_i(rng),uniform_j(rng))); for(size_t rep=0; rep<1000; ++rep) - for(const ij_t& ij: ijs) { mat(ij.first, ij.second) = rng(); } + for(const ij_t& ij: ijs) { mat(ij.first, ij.second) = uniform(rng); } gttic_(basicTime); - for_each(ijs.begin(), ijs.end(), _1 = make_pair(rni(),rnj())); + for_each(ijs.begin(), ijs.end(), _1 = make_pair(uniform_i(rng),uniform_j(rng))); for(size_t rep=0; rep<1000; ++rep) - for(const ij_t& ij: ijs) { mat(ij.first, ij.second) = rng(); } + for(const ij_t& ij: ijs) { mat(ij.first, ij.second) = uniform(rng); } gttoc_(basicTime); tictoc_getNode(basicTimeNode, basicTime); basicTime = basicTimeNode->secs(); @@ -205,9 +206,9 @@ int main(int argc, char* argv[]) { cout << format(" Basic: %1% mus/element") % double(1000000 * basicTime / double(ijs.size()*nReps)) << endl; gttic_(fullTime); - for_each(ijs.begin(), ijs.end(), _1 = make_pair(rni(),rnj())); + for_each(ijs.begin(), ijs.end(), _1 = make_pair(uniform_i(rng),uniform_j(rng))); for(size_t rep=0; rep<1000; ++rep) - for(const ij_t& ij: ijs) { full(ij.first, ij.second) = rng(); } + for(const ij_t& ij: ijs) { full(ij.first, ij.second) = uniform(rng); } gttoc_(fullTime); tictoc_getNode(fullTimeNode, fullTime); fullTime = fullTimeNode->secs(); @@ -215,9 +216,9 @@ int main(int argc, char* argv[]) { cout << format(" Full: %1% mus/element") % double(1000000 * fullTime / double(ijs.size()*nReps)) << endl; gttic_(topTime); - for_each(ijs.begin(), ijs.end(), _1 = make_pair(rni()%top.rows(),rnj())); + for_each(ijs.begin(), ijs.end(), _1 = make_pair(uniform_i(rng)%top.rows(),uniform_j(rng))); for(size_t rep=0; rep<1000; ++rep) - for(const ij_t& ij: ijs) { top(ij.first, ij.second) = rng(); } + for(const ij_t& ij: ijs) { top(ij.first, ij.second) = uniform(rng); } gttoc_(topTime); tictoc_getNode(topTimeNode, topTime); topTime = topTimeNode->secs(); @@ -225,9 +226,9 @@ int main(int argc, char* argv[]) { cout << format(" Top: %1% mus/element") % double(1000000 * topTime / double(ijs.size()*nReps)) << endl; gttic_(blockTime); - for_each(ijs.begin(), ijs.end(), _1 = make_pair(rni()%block.rows(),rnj()%block.cols())); + for_each(ijs.begin(), ijs.end(), _1 = make_pair(uniform_i(rng)%block.rows(),uniform_j(rng)%block.cols())); for(size_t rep=0; rep<1000; ++rep) - for(const ij_t& ij: ijs) { block(ij.first, ij.second) = rng(); } + for(const ij_t& ij: ijs) { block(ij.first, ij.second) = uniform(rng); } gttoc_(blockTime); tictoc_getNode(blockTimeNode, blockTime); blockTime = blockTimeNode->secs(); @@ -250,7 +251,7 @@ int main(int argc, char* argv[]) { // matrix mat(5,5); // for(size_t j=0; j<(size_t)mat.cols(); ++j) // for(size_t i=0; i<(size_t)mat.rows(); ++i) -// mat(i,j) = rng(); +// mat(i,j) = uniform(rng); // // tri = ublas::triangular_adaptor(mat); // cout << " Assigned from triangular adapter: " << tri << endl; @@ -259,13 +260,13 @@ int main(int argc, char* argv[]) { // // for(size_t j=0; 
j<(size_t)mat.cols(); ++j) // for(size_t i=0; i<(size_t)mat.rows(); ++i) -// mat(i,j) = rng(); +// mat(i,j) = uniform(rng); // mat = tri; // cout << " Assign matrix from triangular: " << mat << endl; // // for(size_t j=0; j<(size_t)mat.cols(); ++j) // for(size_t i=0; i<(size_t)mat.rows(); ++i) -// mat(i,j) = rng(); +// mat(i,j) = uniform(rng); // (ublas::triangular_adaptor(mat)) = tri; // cout << " Assign triangular adaptor from triangular: " << mat << endl; // } @@ -281,7 +282,7 @@ int main(int argc, char* argv[]) { // matrix mat(5,7); // for(size_t j=0; j<(size_t)mat.cols(); ++j) // for(size_t i=0; i<(size_t)mat.rows(); ++i) -// mat(i,j) = rng(); +// mat(i,j) = uniform(rng); // // tri = ublas::triangular_adaptor(mat); // cout << " Assigned from triangular adapter: " << tri << endl; @@ -290,13 +291,13 @@ int main(int argc, char* argv[]) { // // for(size_t j=0; j<(size_t)mat.cols(); ++j) // for(size_t i=0; i<(size_t)mat.rows(); ++i) -// mat(i,j) = rng(); +// mat(i,j) = uniform(rng); // mat = tri; // cout << " Assign matrix from triangular: " << mat << endl; // // for(size_t j=0; j<(size_t)mat.cols(); ++j) // for(size_t i=0; i<(size_t)mat.rows(); ++i) -// mat(i,j) = rng(); +// mat(i,j) = uniform(rng); // mat = ublas::triangular_adaptor(mat); // cout << " Assign matrix from triangular adaptor of self: " << mat << endl; // } diff --git a/timing/timeShonanFactor.cpp b/timing/timeShonanFactor.cpp new file mode 100644 index 000000000..207d54a4d --- /dev/null +++ b/timing/timeShonanFactor.cpp @@ -0,0 +1,108 @@ +/* ---------------------------------------------------------------------------- + + * GTSAM Copyright 2010, Georgia Tech Research Corporation, + * Atlanta, Georgia 30332-0415 + * All Rights Reserved + * Authors: Frank Dellaert, et al. (see THANKS for the full author list) + + * See LICENSE for the license information + + * -------------------------------------------------------------------------- */ + +/** + * @file timeShonanFactor.cpp + * @brief time ShonanFactor with BAL file + * @author Frank Dellaert + * @date 2019 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +using namespace std; +using namespace gtsam; + +static SharedNoiseModel gNoiseModel = noiseModel::Unit::Create(2); + +int main(int argc, char* argv[]) { + // primitive argument parsing: + if (argc > 3) { + throw runtime_error("Usage: timeShonanFactor [g2oFile]"); + } + + string g2oFile; + try { + if (argc > 1) + g2oFile = argv[argc - 1]; + else + g2oFile = findExampleDataFile("sphere_smallnoise.graph"); + } catch (const exception& e) { + cerr << e.what() << '\n'; + exit(1); + } + + // Read G2O file + const auto measurements = parseMeasurements(g2oFile); + const auto poses = parseVariables(g2oFile); + + // Build graph + NonlinearFactorGraph graph; + // graph.add(NonlinearEquality(0, SOn::identity(4))); + auto priorModel = noiseModel::Isotropic::Sigma(6, 10000); + graph.add(PriorFactor(0, SOn::identity(4), priorModel)); + auto G = boost::make_shared(SOn::VectorizedGenerators(4)); + for (const auto &m : measurements) { + const auto &keys = m.keys(); + const Rot3 &Rij = m.measured(); + const auto &model = m.noiseModel(); + graph.emplace_shared( + keys[0], keys[1], Rij, 4, model, G); + } + + std::mt19937 rng(42); + + // Set parameters to be similar to ceres + LevenbergMarquardtParams params; + LevenbergMarquardtParams::SetCeresDefaults(¶ms); + params.setLinearSolverType("MULTIFRONTAL_QR"); + // 
params.setVerbosityLM("SUMMARY"); + // params.linearSolverType = LevenbergMarquardtParams::Iterative; + // auto pcg = boost::make_shared(); + // pcg->preconditioner_ = + // boost::make_shared(); + // boost::make_shared(); + // params.iterativeParams = pcg; + + // Optimize + for (size_t i = 0; i < 100; i++) { + gttic_(optimize); + Values initial; + initial.insert(0, SOn::identity(4)); + for (size_t j = 1; j < poses.size(); j++) { + initial.insert(j, SOn::Random(rng, 4)); + } + LevenbergMarquardtOptimizer lm(graph, initial, params); + Values result = lm.optimize(); + cout << "cost = " << graph.error(result) << endl; + } + + tictoc_finishedIteration_(); + tictoc_print_(); + + return 0; +} diff --git a/update_wrap.sh b/update_wrap.sh new file mode 100755 index 000000000..61f55e32e --- /dev/null +++ b/update_wrap.sh @@ -0,0 +1,13 @@ +#!/bin/bash +REF=${1-master} # branch or tag; defaults to 'master' if parameter 1 not present +REMOTE=wrap # just a name to identify the remote +REPO=git@github.com:borglab/wrap.git # replace this with your repository URL +FOLDER=wrap # where to mount the subtree + +git remote add $REMOTE --no-tags $REPO +if [[ -d $FOLDER ]]; then # update the existing subtree + git subtree pull $REMOTE $REF --prefix=$FOLDER --squash -m "Merging '$REF' into '$FOLDER'" +else # add the subtree + git subtree add $REMOTE $REF --prefix=$FOLDER --squash -m "Merging '$REF' into '$FOLDER'" +fi +git remote remove $REMOTE \ No newline at end of file diff --git a/wrap/.github/workflows/ci.yml b/wrap/.github/workflows/ci.yml new file mode 100644 index 000000000..2e38bc3dd --- /dev/null +++ b/wrap/.github/workflows/ci.yml @@ -0,0 +1,52 @@ +name: Python CI + +on: [push, pull_request] + +jobs: + build: + name: ${{ matrix.name }} 🐍 ${{ matrix.python_version }} + runs-on: ${{ matrix.os }} + + env: + PYTHON_VERSION: ${{ matrix.python_version }} + strategy: + fail-fast: false + matrix: + # Github Actions requires a single row to be added to the build matrix. + # See https://help.github.com/en/articles/workflow-syntax-for-github-actions. 
+ name: [ + ubuntu-18.04 + ] + + python_version: [3] + include: + - name: ubuntu-18.04 + os: ubuntu-18.04 + + steps: + - name: Checkout + uses: actions/checkout@master + - name: Install (Linux) + if: runner.os == 'Linux' + run: | + sudo apt-get -y update + + sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev + - name: Install (macOS) + if: runner.os == 'macOS' + run: | + brew install cmake ninja boost + - name: Build (Linux) + if: runner.os == 'Linux' + run: | + sudo pip$PYTHON_VERSION install -r requirements.txt + cd tests + python$PYTHON_VERSION test_pybind_wrapper.py + python$PYTHON_VERSION test_matlab_wrapper.py + - name: Build (macOS) + if: runner.os == 'macOS' + run: | + pip$PYTHON_VERSION install -r requirements.txt + cd tests + python$PYTHON_VERSION test_pybind_wrapper.py + python$PYTHON_VERSION test_matlab_wrapper.py \ No newline at end of file diff --git a/wrap/.gitignore b/wrap/.gitignore new file mode 100644 index 000000000..4fd660b95 --- /dev/null +++ b/wrap/.gitignore @@ -0,0 +1,5 @@ +__pycache__/ +.vscode/ +*build* +*dist* +*.egg-info diff --git a/wrap/Argument.cpp b/wrap/Argument.cpp deleted file mode 100644 index f85aed72e..000000000 --- a/wrap/Argument.cpp +++ /dev/null @@ -1,316 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Argument.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Argument.h" -#include "Class.h" - -#include - -#include -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -Argument Argument::expandTemplate(const TemplateSubstitution& ts) const { - Argument instArg = *this; - instArg.type = ts.tryToSubstitite(type); - return instArg; -} - -/* ************************************************************************* */ -ArgumentList ArgumentList::expandTemplate( - const TemplateSubstitution& ts) const { - ArgumentList instArgList; - for(const Argument& arg: *this) { - Argument instArg = arg.expandTemplate(ts); - instArgList.push_back(instArg); - } - return instArgList; -} - -/* ************************************************************************* */ -string Argument::matlabClass(const string& delim) const { - string result; - for(const string& ns: type.namespaces()) - result += ns + delim; - if (type.name() == "string" || type.name() == "unsigned char" - || type.name() == "char") - return result + "char"; - if (type.name() == "Vector" || type.name() == "Matrix") - return result + "double"; - if (type.name() == "int" || type.name() == "size_t") - return result + "numeric"; - if (type.name() == "bool") - return result + "logical"; - return result + type.name(); -} - -/* ************************************************************************* */ -void Argument::matlab_unwrap(FileWriter& file, const string& matlabName) const { - file.oss << " "; - - string cppType = type.qualifiedName("::"); - string matlabUniqueType = type.qualifiedName(); - bool isNotScalar = !type.isScalar(); - - // We cannot handle scalar non const references - if (!isNotScalar && is_ref && !is_const) { - throw 
std::runtime_error("Cannot unwrap a scalar non-const reference"); - } - - if (is_ptr && type.category != Qualified::EIGEN) - // A pointer: emit an "unwrap_shared_ptr" call which returns a pointer - file.oss << "boost::shared_ptr<" << cppType << "> " << name - << " = unwrap_shared_ptr< "; - else if (is_ref && isNotScalar && type.category != Qualified::EIGEN) - // A reference: emit an "unwrap_shared_ptr" call and de-reference the pointer - file.oss << cppType << "& " << name << " = *unwrap_shared_ptr< "; - else - // Not a pointer, or a reference to a scalar type. Therefore, emit an "unwrap" call - // unwrap is specified in matlab.h as a series of template specializations - // that know how to unpack the expected MATLAB object - // example: double tol = unwrap< double >(in[2]); - // example: Vector v = unwrap< Vector >(in[1]); - file.oss << cppType << " " << name << " = unwrap< "; - - file.oss << cppType << " >(" << matlabName; - if( (is_ptr || is_ref) && isNotScalar && type.category != Qualified::EIGEN) - file.oss << ", \"ptr_" << matlabUniqueType << "\""; - file.oss << ");" << endl; -} - -/* ************************************************************************* */ -void Argument::proxy_check(FileWriter& proxyFile, const string& s) const { - proxyFile.oss << "isa(" << s << ",'" << matlabClass(".") << "')"; - if (type.name() == "Vector") - proxyFile.oss << " && size(" << s << ",2)==1"; -} - -/* ************************************************************************* */ -void Argument::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - string cythonType = type.pxdClassName(); - if (cythonType == "This") cythonType = className; - else if (type.isEigen()) - cythonType = "const " + cythonType + "&"; - else if (type.match(templateArgs)) - cythonType = type.name(); - - // add modifier - if (!type.isEigen()) { - if (is_ptr) cythonType = "shared_ptr[" + cythonType + "]&"; - if (is_ref) cythonType = cythonType + "&"; - if (is_const) cythonType = "const " + cythonType; - } - - file.oss << cythonType << " " << name; -} - -/* ************************************************************************* */ -void Argument::emit_cython_pyx(FileWriter& file) const { - file.oss << type.pyxArgumentType() << " " << name; -} - -/* ************************************************************************* */ -std::string Argument::pyx_convertEigenTypeAndStorageOrder() const { - if (!type.isEigen()) - return ""; - return name + " = " + name + ".astype(float, order=\'F\', copy=False)"; -} - -/* ************************************************************************* */ -std::string Argument::pyx_asParam() const { - string cythonType = type.pxdClassName(); - string cythonVar; - if (type.isNonBasicType()) { - cythonVar = name + "." 
+ type.shared_pxd_obj_in_pyx(); - if (!is_ptr) cythonVar = "deref(" + cythonVar + ")"; - } else if (type.isEigen()) { - cythonVar = "<" + cythonType + ">" + "(Map[" + cythonType + "](" + name + "))"; - } else { - cythonVar = name; - } - return cythonVar; -} - -/* ************************************************************************* */ -string ArgumentList::types() const { - string str; - bool first = true; - for(Argument arg: *this) { - if (!first) - str += ","; - str += arg.type.name(); - first = false; - } - return str; -} - -/* ************************************************************************* */ -string ArgumentList::signature() const { - string sig; - bool cap = false; - - for(Argument arg: *this) { - for(char ch: arg.type.name()) - if (isupper(ch)) { - sig += ch; - //If there is a capital letter, we don't want to read it below - cap = true; - } - if (!cap) - sig += arg.type.name()[0]; - //Reset to default - cap = false; - } - - return sig; -} - -/* ************************************************************************* */ -string ArgumentList::names() const { - string str; - bool first = true; - for(Argument arg: *this) { - if (!first) - str += ","; - str += arg.name; - first = false; - } - return str; -} - -/* ************************************************************************* */ -bool ArgumentList::allScalar() const { - for(Argument arg: *this) - if (!arg.type.isScalar()) - return false; - return true; -} - -/* ************************************************************************* */ -void ArgumentList::matlab_unwrap(FileWriter& file, int start) const { - int index = start; - for(Argument arg: *this) { - stringstream buf; - buf << "in[" << index << "]"; - arg.matlab_unwrap(file, buf.str()); - index++; - } -} - -/* ************************************************************************* */ -void ArgumentList::emit_prototype(FileWriter& file, const string& name) const { - file.oss << name << "("; - bool first = true; - for(Argument arg: *this) { - if (!first) - file.oss << ", "; - file.oss << arg.type.name() << " " << arg.name; - first = false; - } - file.oss << ")"; -} - -/* ************************************************************************* */ -void ArgumentList::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - for (size_t j = 0; j(__params[" + std::to_string(j) + "])\n"; - return s; -} - -/* ************************************************************************* */ -void ArgumentList::proxy_check(FileWriter& proxyFile) const { - // Check nr of arguments - proxyFile.oss << "if length(varargin) == " << size(); - if (size() > 0) - proxyFile.oss << " && "; - // ...and their type.names - bool first = true; - for (size_t i = 0; i < size(); i++) { - if (!first) - proxyFile.oss << " && "; - string s = "varargin{" + boost::lexical_cast(i + 1) + "}"; - (*this)[i].proxy_check(proxyFile, s); - first = false; - } - proxyFile.oss << "\n"; -} - -/* ************************************************************************* */ - diff --git a/wrap/Argument.h b/wrap/Argument.h deleted file mode 100644 index c08eb0be9..000000000 --- a/wrap/Argument.h +++ /dev/null @@ -1,242 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Argument.h - * @brief arguments to constructors and methods - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#pragma once - -#include "TemplateSubstitution.h" -#include "FileWriter.h" -#include "ReturnValue.h" - -namespace wrap { - -/// Argument class -struct Argument { - Qualified type; - std::string name; - bool is_const, is_ref, is_ptr; - - Argument() : - is_const(false), is_ref(false), is_ptr(false) { - } - - Argument(const Qualified& t, const std::string& n) : - type(t), name(n), is_const(false), is_ref(false), is_ptr(false) { - } - - bool isSameSignature(const Argument& other) const { - return type == other.type - && is_const == other.is_const && is_ref == other.is_ref - && is_ptr == other.is_ptr; - } - - bool operator==(const Argument& other) const { - return type == other.type && name == other.name - && is_const == other.is_const && is_ref == other.is_ref - && is_ptr == other.is_ptr; - } - - Argument expandTemplate(const TemplateSubstitution& ts) const; - - /// return MATLAB class for use in isa(x,class) - std::string matlabClass(const std::string& delim = "") const; - - /// MATLAB code generation, MATLAB to C++ - void matlab_unwrap(FileWriter& file, const std::string& matlabName) const; - - /** - * emit checking argument to MATLAB proxy - * @param proxyFile output stream - */ - void proxy_check(FileWriter& proxyFile, const std::string& s) const; - - /** - * emit arguments for cython pxd - * @param file output stream - */ - void emit_cython_pxd(FileWriter& file, const std::string& className, - const std::vector& templateArgs) const; - void emit_cython_pyx(FileWriter& file) const; - std::string pyx_asParam() const; - std::string pyx_convertEigenTypeAndStorageOrder() const; - - friend std::ostream& operator<<(std::ostream& os, const Argument& arg) { - os << (arg.is_const ? "const " : "") << arg.type << (arg.is_ptr ? "*" : "") - << (arg.is_ref ? 
"&" : ""); - return os; - } - -}; - -/// Argument list is just a container with Arguments -struct ArgumentList: public std::vector { - - /// create a comma-separated string listing all argument types (not used) - std::string types() const; - - /// create a short "signature" string - std::string signature() const; - - /// create a comma-separated string listing all argument names, used in m-files - std::string names() const; - - /// Check if all arguments scalar - bool allScalar() const; - - ArgumentList expandTemplate(const TemplateSubstitution& ts) const; - - bool isSameSignature(const ArgumentList& other) const { - for(size_t i = 0; i& templateArgs) const; - void emit_cython_pyx(FileWriter& file) const; - std::string pyx_asParams() const; - std::string pyx_paramsList() const; - std::string pyx_castParamsToPythonType(const std::string& indent) const; - std::string pyx_convertEigenTypeAndStorageOrder(const std::string& indent) const; - - /** - * emit checking arguments to MATLAB proxy - * @param proxyFile output stream - */ - void proxy_check(FileWriter& proxyFile) const; - - /// Output stream operator - friend std::ostream& operator<<(std::ostream& os, - const ArgumentList& argList) { - os << "("; - if (argList.size() > 0) - os << argList.front(); - if (argList.size() > 1) - for (size_t i = 1; i < argList.size(); i++) - os << ", " << argList[i]; - os << ")"; - return os; - } - -}; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ArgumentGrammar: public classic::grammar { - - wrap::Argument& result_; ///< successful parse will be placed in here - TypeGrammar argument_type_g; ///< Type parser for Argument::type - - /// Construct type grammar and specify where result is placed - ArgumentGrammar(wrap::Argument& result) : - result_(result), argument_type_g(result.type) { - } - - /// Definition of type grammar - template - struct definition: BasicRules { - - typedef classic::rule Rule; - - Rule argument_p; - - definition(ArgumentGrammar const& self) { - using namespace classic; - - // NOTE: allows for pointers to all types - // Slightly more permissive than before on basis/eigen type qualification - // Also, currently parses Point2*&, can't make it work otherwise :-( - argument_p = !str_p("const")[assign_a(self.result_.is_const, T)] // - >> self.argument_type_g // - >> !ch_p('*')[assign_a(self.result_.is_ptr, T)] - >> !ch_p('&')[assign_a(self.result_.is_ref, T)] - >> BasicRules::name_p[assign_a(self.result_.name)]; - } - - Rule const& start() const { - return argument_p; - } - - }; -}; -// ArgumentGrammar - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ArgumentListGrammar: public classic::grammar { - - wrap::ArgumentList& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - ArgumentListGrammar(wrap::ArgumentList& result) : - result_(result) { - } - - /// Definition of type grammar - template - struct definition { - - const Argument arg0; ///< used to reset arg - Argument arg; ///< temporary argument for use during parsing - ArgumentGrammar argument_g; ///< single Argument parser - - classic::rule argument_p, argumentList_p; - - definition(ArgumentListGrammar const& self) : - argument_g(arg) { - using namespace classic; - - argument_p = argument_g // - [classic::push_back_a(self.result_, 
arg)] // - [assign_a(arg, arg0)]; - - argumentList_p = '(' >> !argument_p >> *(',' >> argument_p) >> ')'; - } - - classic::rule const& start() const { - return argumentList_p; - } - - }; -}; -// ArgumentListGrammar - -/* ************************************************************************* */ - -}// \namespace wrap - diff --git a/wrap/CMakeLists.txt b/wrap/CMakeLists.txt index c04a44edb..d363c7161 100644 --- a/wrap/CMakeLists.txt +++ b/wrap/CMakeLists.txt @@ -1,45 +1,59 @@ -# Build/install Wrap - -set(WRAP_BOOST_LIBRARIES - Boost::system - Boost::filesystem - Boost::thread -) - -# Allow for disabling serialization to handle errors related to Clang's linker -option(GTSAM_WRAP_SERIALIZATION "If enabled, allows for wrapped objects to be saved via boost.serialization" ON) - -# Build the executable itself -file(GLOB wrap_srcs "*.cpp") -file(GLOB wrap_headers "*.h") -list(REMOVE_ITEM wrap_srcs ${CMAKE_CURRENT_SOURCE_DIR}/wrap.cpp) -add_library(wrap_lib STATIC ${wrap_srcs} ${wrap_headers}) -target_include_directories(wrap_lib PUBLIC - $ -) -if (NOT GTSAM_WRAP_SERIALIZATION) - target_compile_definitions(wrap_lib PUBLIC -DWRAP_DISABLE_SERIALIZE) -endif() +cmake_minimum_required(VERSION 3.9) + +# Set the project name and version +project(GTwrap VERSION 1.0) -# Apply build flags: -gtsam_apply_build_flags(wrap_lib) +# ############################################################################## +# General configuration -target_link_libraries(wrap_lib ${WRAP_BOOST_LIBRARIES}) -gtsam_assign_source_folders(${wrap_srcs} ${wrap_headers}) -add_executable(wrap wrap.cpp) -target_link_libraries(wrap PRIVATE wrap_lib) +set(WRAP_PYTHON_VERSION + "Default" + CACHE STRING "The Python version to use for wrapping") -# Set folder in Visual Studio -file(RELATIVE_PATH relative_path "${PROJECT_SOURCE_DIR}" "${CMAKE_CURRENT_SOURCE_DIR}") -set_target_properties(wrap_lib wrap PROPERTIES FOLDER "${relative_path}") +include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/GtwrapUtils.cmake) +gtwrap_get_python_version(${WRAP_PYTHON_VERSION}) -# Install wrap binary and export target -install(TARGETS wrap EXPORT GTSAM-exports DESTINATION ${CMAKE_INSTALL_BINDIR}) -list(APPEND GTSAM_EXPORTED_TARGETS wrap) -set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE) +# ############################################################################## +# Install the CMake file to be used by other projects +if(WIN32 AND NOT CYGWIN) + set(SCRIPT_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/CMake") +else() + set(SCRIPT_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/lib/cmake") +endif() -# Install matlab header -install(FILES matlab.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/wrap) +# Install scripts to the standard CMake script directory. +install(FILES cmake/gtwrapConfig.cmake cmake/PybindWrap.cmake + cmake/GtwrapUtils.cmake + DESTINATION "${SCRIPT_INSTALL_DIR}/gtwrap") + +# Install wrapping scripts as binaries to `CMAKE_INSTALL_PREFIX/bin` so they can +# be invoked for wrapping. +# We use DESTINATION (instead of TYPE) so we can support older CMake versions. +install(PROGRAMS scripts/pybind_wrap.py scripts/matlab_wrap.py + DESTINATION ${CMAKE_INSTALL_BINDIR}) + +# Install pybind11 directory to `CMAKE_INSTALL_PREFIX/lib/pybind11` This will +# allow the gtwrapConfig.cmake file to load it later. +# We use DESTINATION (instead of TYPE) so we can support older CMake versions. 
+install(DIRECTORY pybind11 + DESTINATION ${CMAKE_INSTALL_LIBDIR}) + +# ############################################################################## +# Install the Python package +find_package( + Python ${WRAP_PYTHON_VERSION} + COMPONENTS Interpreter + EXACT) + +# Detect virtualenv and set Pip args accordingly +# https://www.scivision.dev/cmake-install-python-package/ +if(DEFINED ENV{VIRTUAL_ENV} OR DEFINED ENV{CONDA_PREFIX}) + set(_pip_args) +else() + set(_pip_args "--user") +endif() +#TODO add correct flags for virtualenv -# Build tests -add_subdirectory(tests) +# Finally install the gtwrap python package. +execute_process(COMMAND ${Python_EXECUTABLE} -m pip install . ${_pip_args} + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) diff --git a/wrap/Class.cpp b/wrap/Class.cpp deleted file mode 100644 index 65ce9eab7..000000000 --- a/wrap/Class.cpp +++ /dev/null @@ -1,897 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Class.cpp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Class.h" -#include "utilities.h" -#include "Argument.h" -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include // std::ostream_iterator -//#include // on Linux GCC: fails with error regarding needing C++0x std flags -//#include // same failure as above -#include // works on Linux GCC -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void Class::assignParent(const Qualified& parent) { - parentClass.reset(parent); -} - -/* ************************************************************************* */ -boost::optional Class::qualifiedParent() const { - boost::optional result = boost::none; - if (parentClass) - result = parentClass->qualifiedName("::"); - return result; -} - -/* ************************************************************************* */ -static void handleException(const out_of_range& oor, - const Class::Methods& methods) { - cerr << "Class::method: key not found: " << oor.what() << ", methods are:\n"; - using boost::adaptors::map_keys; - ostream_iterator out_it(cerr, "\n"); - boost::copy(methods | map_keys, out_it); -} - -/* ************************************************************************* */ -// Method& Class::mutableMethod(Str key) { -// try { -// return methods_.at(key); -// } catch (const out_of_range& oor) { -// handleException(oor, methods_); -// throw runtime_error("Internal error in wrap"); -// } -// } - -/* ************************************************************************* */ -const Method& Class::method(Str key) const { - try { - return methods_.at(key); - } catch (const out_of_range& oor) { - handleException(oor, methods_); - throw runtime_error("Internal error in wrap"); - } -} - -/* ************************************************************************* */ -void Class::matlab_proxy(Str toolboxPath, Str wrapperName, - const TypeAttributesTable& typeAttributes, FileWriter& wrapperFile, - vector& functionNames) const { - - // Create namespace folders - createNamespaceStructure(namespaces(), 
toolboxPath); - - // open destination classFile - string classFile = matlabName(toolboxPath); - FileWriter proxyFile(classFile, verbose_, "%"); - - // get the name of actual matlab object - const string matlabQualName = qualifiedName("."); - const string matlabUniqueName = qualifiedName(); - const string cppName = qualifiedName("::"); - - // emit class proxy code - // we want our class to inherit the handle class for memory purposes - const string parent = - parentClass ? parentClass->qualifiedName(".") : "handle"; - comment_fragment(proxyFile); - proxyFile.oss << "classdef " << name() << " < " << parent << endl; - proxyFile.oss << " properties\n"; - proxyFile.oss << " ptr_" << matlabUniqueName << " = 0\n"; - proxyFile.oss << " end\n"; - proxyFile.oss << " methods\n"; - - // Constructor - proxyFile.oss << " function obj = " << name() << "(varargin)\n"; - // Special pointer constructors - one in MATLAB to create an object and - // assign a pointer returned from a C++ function. In turn this MATLAB - // constructor calls a special C++ function that just adds the object to - // its collector. This allows wrapped functions to return objects in - // other wrap modules - to add these to their collectors the pointer is - // passed from one C++ module into matlab then back into the other C++ - // module. - pointer_constructor_fragments(proxyFile, wrapperFile, wrapperName, - functionNames); - wrapperFile.oss << "\n"; - - // Regular constructors - boost::optional cppBaseName = qualifiedParent(); - for (size_t i = 0; i < constructor.nrOverloads(); i++) { - ArgumentList args = constructor.argumentList(i); - const int id = (int) functionNames.size(); - constructor.proxy_fragment(proxyFile, wrapperName, (bool) parentClass, id, - args); - const string wrapFunctionName = constructor.wrapper_fragment(wrapperFile, - cppName, matlabUniqueName, cppBaseName, id, args); - wrapperFile.oss << "\n"; - functionNames.push_back(wrapFunctionName); - } - proxyFile.oss << " else\n"; - proxyFile.oss << " error('Arguments do not match any overload of " - << matlabQualName << " constructor');\n"; - proxyFile.oss << " end\n"; - if (parentClass) - proxyFile.oss << " obj = obj@" << parentClass->qualifiedName(".") - << "(uint64(" << ptr_constructor_key << "), base_ptr);\n"; - proxyFile.oss << " obj.ptr_" << matlabUniqueName << " = my_ptr;\n"; - proxyFile.oss << " end\n\n"; - - // Deconstructor - { - const int id = (int) functionNames.size(); - deconstructor.proxy_fragment(proxyFile, wrapperName, matlabUniqueName, id); - proxyFile.oss << "\n"; - const string functionName = deconstructor.wrapper_fragment(wrapperFile, - cppName, matlabUniqueName, id); - wrapperFile.oss << "\n"; - functionNames.push_back(functionName); - } - proxyFile.oss - << " function display(obj), obj.print(''); end\n %DISPLAY Calls print on the object\n"; - proxyFile.oss - << " function disp(obj), obj.display; end\n %DISP Calls print on the object\n"; - - // Methods - for(const Methods::value_type& name_m: methods_) { - const Method& m = name_m.second; - m.proxy_wrapper_fragments(proxyFile, wrapperFile, cppName, matlabQualName, - matlabUniqueName, wrapperName, typeAttributes, functionNames); - proxyFile.oss << "\n"; - wrapperFile.oss << "\n"; - } - if (hasSerialization) - serialization_fragments(proxyFile, wrapperFile, wrapperName, functionNames); - - proxyFile.oss << " end\n"; - proxyFile.oss << "\n"; - proxyFile.oss << " methods(Static = true)\n"; - - // Static methods - for(const StaticMethods::value_type& name_m: static_methods) { - const StaticMethod& m = 
name_m.second; - m.proxy_wrapper_fragments(proxyFile, wrapperFile, cppName, matlabQualName, - matlabUniqueName, wrapperName, typeAttributes, functionNames); - proxyFile.oss << "\n"; - wrapperFile.oss << "\n"; - } - if (hasSerialization) - deserialization_fragments(proxyFile, wrapperFile, wrapperName, - functionNames); - - proxyFile.oss << " end\n"; - proxyFile.oss << "end\n"; - - // Close file - proxyFile.emit(true); -} - -/* ************************************************************************* */ -void Class::pointer_constructor_fragments(FileWriter& proxyFile, - FileWriter& wrapperFile, Str wrapperName, - vector& functionNames) const { - - const string matlabUniqueName = qualifiedName(); - const string cppName = qualifiedName("::"); - - const int collectorInsertId = (int) functionNames.size(); - const string collectorInsertFunctionName = matlabUniqueName - + "_collectorInsertAndMakeBase_" - + boost::lexical_cast(collectorInsertId); - functionNames.push_back(collectorInsertFunctionName); - - int upcastFromVoidId; - string upcastFromVoidFunctionName; - if (isVirtual) { - upcastFromVoidId = (int) functionNames.size(); - upcastFromVoidFunctionName = matlabUniqueName + "_upcastFromVoid_" - + boost::lexical_cast(upcastFromVoidId); - functionNames.push_back(upcastFromVoidFunctionName); - } - - // MATLAB constructor that assigns pointer to matlab object then calls c++ - // function to add the object to the collector. - if (isVirtual) { - proxyFile.oss - << " if (nargin == 2 || (nargin == 3 && strcmp(varargin{3}, 'void')))"; - } else { - proxyFile.oss << " if nargin == 2"; - } - proxyFile.oss << " && isa(varargin{1}, 'uint64') && varargin{1} == uint64(" - << ptr_constructor_key << ")\n"; - if (isVirtual) { - proxyFile.oss << " if nargin == 2\n"; - proxyFile.oss << " my_ptr = varargin{2};\n"; - proxyFile.oss << " else\n"; - proxyFile.oss << " my_ptr = " << wrapperName << "(" - << upcastFromVoidId << ", varargin{2});\n"; - proxyFile.oss << " end\n"; - } else { - proxyFile.oss << " my_ptr = varargin{2};\n"; - } - if (!parentClass) // If this class has a base class, we'll get a base class pointer back - proxyFile.oss << " "; - else - proxyFile.oss << " base_ptr = "; - proxyFile.oss << wrapperName << "(" << collectorInsertId << ", my_ptr);\n"; // Call collector insert and get base class ptr - - // C++ function to add pointer from MATLAB to collector. The pointer always - // comes from a C++ return value; this mechanism allows the object to be added - // to a collector in a different wrap module. If this class has a base class, - // a new pointer to the base class is allocated and returned. 
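A minimal standalone sketch of the collector mechanism described in the comment above, assuming placeholder types Base and Derived for a wrapped class and its parent, and a free function collectorInsertAndMakeBase standing in for the generated MEX entry point (the real generated code additionally registers its cleanup with mexAtExit):

#include <boost/make_shared.hpp>
#include <boost/shared_ptr.hpp>
#include <iostream>
#include <set>

struct Base { virtual ~Base() {} };
struct Derived : Base {};

typedef boost::shared_ptr<Derived> Shared;
typedef boost::shared_ptr<Base> SharedBase;

// Each wrap module keeps a collector of heap-allocated shared_ptr handles;
// the generated wrapper empties these from a mexAtExit hook.
static std::set<Shared*> collector_Derived;
static std::set<SharedBase*> collector_Base;

static void deleteAllObjects() {
  for (std::set<Shared*>::iterator it = collector_Derived.begin();
       it != collector_Derived.end(); ++it) delete *it;
  for (std::set<SharedBase*>::iterator it = collector_Base.begin();
       it != collector_Base.end(); ++it) delete *it;
  collector_Derived.clear();
  collector_Base.clear();
}

// Take ownership of a handle produced by C++, insert it into this class's
// collector, and hand back a base-class handle for the parent's collector.
SharedBase* collectorInsertAndMakeBase(Shared* self) {
  collector_Derived.insert(self);
  return new SharedBase(*self);  // extra handle, shares ownership with *self
}

int main() {
  Shared* self = new Shared(boost::make_shared<Derived>());  // "returned from C++"
  SharedBase* base = collectorInsertAndMakeBase(self);
  collector_Base.insert(base);  // what the parent proxy constructor would do
  std::cout << "use_count = " << self->use_count() << std::endl;  // prints 2
  deleteAllObjects();
  return 0;
}
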
- wrapperFile.oss << "void " << collectorInsertFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - wrapperFile.oss << "{\n"; - wrapperFile.oss << " mexAtExit(&_deleteAllObjects);\n"; - // Typedef boost::shared_ptr - wrapperFile.oss << " typedef boost::shared_ptr<" << cppName << "> Shared;\n"; - wrapperFile.oss << "\n"; - // Get self pointer passed in - wrapperFile.oss - << " Shared *self = *reinterpret_cast (mxGetData(in[0]));\n"; - // Add to collector - wrapperFile.oss << " collector_" << matlabUniqueName << ".insert(self);\n"; - // If we have a base class, return the base class pointer (MATLAB will call the base class collectorInsertAndMakeBase to add this to the collector and recurse the heirarchy) - boost::optional cppBaseName = qualifiedParent(); - if (cppBaseName) { - wrapperFile.oss << "\n"; - wrapperFile.oss << " typedef boost::shared_ptr<" << *cppBaseName - << "> SharedBase;\n"; - wrapperFile.oss - << " out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL);\n"; - wrapperFile.oss - << " *reinterpret_cast(mxGetData(out[0])) = new SharedBase(*self);\n"; - } - wrapperFile.oss << "}\n"; - - // If this is a virtual function, C++ function to dynamic upcast it from a - // shared_ptr. This mechanism allows automatic dynamic creation of the - // real underlying derived-most class when a C++ method returns a virtual - // base class. - if (isVirtual) - wrapperFile.oss << "\n" - "void " << upcastFromVoidFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[]) {\n" - " mexAtExit(&_deleteAllObjects);\n" - " typedef boost::shared_ptr<" << cppName - << "> Shared;\n" - " boost::shared_ptr *asVoid = *reinterpret_cast**> (mxGetData(in[0]));\n" - " out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL);\n" - " Shared *self = new Shared(boost::static_pointer_cast<" << cppName - << ">(*asVoid));\n" - " *reinterpret_cast(mxGetData(out[0])) = self;\n" - "}\n"; -} - -/* ************************************************************************* */ -Class Class::expandTemplate(const TemplateSubstitution& ts) const { - Class inst = *this; - inst.methods_ = expandMethodTemplate(methods_, ts); - inst.static_methods = expandMethodTemplate(static_methods, ts); - inst.constructor = constructor.expandTemplate(ts); - inst.deconstructor.name = inst.name(); - return inst; -} - -/* ************************************************************************* */ -vector Class::expandTemplate(Str templateArg, - const vector& instantiations) const { - vector result; - for(const Qualified& instName: instantiations) { - Qualified expandedClass = (Qualified) (*this); - expandedClass.expand(instName.name()); - const TemplateSubstitution ts(templateArg, instName, expandedClass); - Class inst = expandTemplate(ts); - inst.name_ = expandedClass.name(); - inst.templateArgs.clear(); - inst.typedefName = qualifiedName("::") + "<" + instName.qualifiedName("::") - + ">"; - inst.templateInstTypeList.push_back(instName); - inst.templateClass = *this; - result.push_back(inst); - } - return result; -} - -/* ************************************************************************* */ -vector Class::expandTemplate(Str templateArg, - const vector& integers) const { - vector result; - for(int i: integers) { - Qualified expandedClass = (Qualified) (*this); - stringstream ss; ss << i; - string instName = ss.str(); - expandedClass.expand(instName); - const TemplateSubstitution ts(templateArg, instName, expandedClass); - Class inst = expandTemplate(ts); - inst.name_ = 
expandedClass.name(); - inst.templateArgs.clear(); - inst.typedefName = qualifiedName("::") + "<" + instName + ">"; - result.push_back(inst); - } - return result; -} - -/* ************************************************************************* */ -void Class::addMethod(bool verbose, bool is_const, Str methodName, - const ArgumentList& argumentList, - const ReturnValue& returnValue, const Template& tmplate) { - // Check if templated - if (tmplate.valid()) { - try { - templateMethods_[methodName].addOverload(methodName, argumentList, - returnValue, is_const, - tmplate.argName(), verbose); - } catch (const std::runtime_error& e) { - throw std::runtime_error("Class::addMethod: error adding " + name_ + - "::" + methodName + "\n" + e.what()); - } - // Create method to expand - // For all values of the template argument, create a new method - for (const Qualified& instName : tmplate.argValues()) { - const TemplateSubstitution ts(tmplate.argName(), instName, *this); - // substitute template in arguments - ArgumentList expandedArgs = argumentList.expandTemplate(ts); - // do the same for return type - ReturnValue expandedRetVal = returnValue.expandTemplate(ts); - // Now stick in new overload stack with expandedMethodName key - // but note we use the same, unexpanded methodName in overload - string expandedMethodName = methodName + instName.name(); - try { - methods_[expandedMethodName].addOverload(methodName, expandedArgs, - expandedRetVal, is_const, - instName, verbose); - } catch (const std::runtime_error& e) { - throw std::runtime_error("Class::addMethod: error adding " + name_ + - "::" + expandedMethodName + "\n" + e.what()); - } - } - } else { - try { - // just add overload - methods_[methodName].addOverload(methodName, argumentList, returnValue, - is_const, boost::none, verbose); - nontemplateMethods_[methodName].addOverload(methodName, argumentList, - returnValue, is_const, - boost::none, verbose); - } catch (const std::runtime_error& e) { - throw std::runtime_error("Class::addMethod: error adding " + name_ + - "::" + methodName + "\n" + e.what()); - } - } -} - -/* ************************************************************************* */ -void Class::erase_serialization(Methods& methods) { - Methods::iterator it = methods.find("serializable"); - if (it != methods.end()) { -#ifndef WRAP_DISABLE_SERIALIZE - isSerializable = true; -#else - // cout << "Ignoring serializable() flag in class " << name << endl; -#endif - methods.erase(it); - } - - it = methods.find("serialize"); - if (it != methods.end()) { -#ifndef WRAP_DISABLE_SERIALIZE - isSerializable = true; - hasSerialization = true; -#else - // cout << "Ignoring serialize() flag in class " << name << endl; -#endif - methods.erase(it); - } -} - -void Class::erase_serialization() { - erase_serialization(methods_); - erase_serialization(nontemplateMethods_); -} - -/* ************************************************************************* */ -void Class::verifyAll(vector& validTypes, bool& hasSerialiable) const { - - hasSerialiable |= isSerializable; - - // verify all of the function arguments - //TODO:verifyArguments(validTypes, constructor.args_list); - verifyArguments(validTypes, static_methods); - verifyArguments(validTypes, methods_); - - // verify function return types - verifyReturnTypes(validTypes, static_methods); - verifyReturnTypes(validTypes, methods_); - - // verify parents - boost::optional parent = qualifiedParent(); - if (parent - && find(validTypes.begin(), validTypes.end(), *parent) - == validTypes.end()) - throw 
DependencyMissing(*parent, qualifiedName("::")); -} - -/* ************************************************************************* */ -void Class::appendInheritedMethods(const Class& cls, - const vector& classes) { - - if (cls.parentClass) { - - // Find parent - for(const Class& parent: classes) { - // We found a parent class for our parent, TODO improve ! - if (parent.name() == cls.parentClass->name()) { - methods_.insert(parent.methods_.begin(), parent.methods_.end()); - appendInheritedMethods(parent, classes); - } - } - } -} - -/* ************************************************************************* */ -void Class::removeInheritedNontemplateMethods(vector& classes) { - if (!parentClass) return; - // Find parent - auto parentIt = std::find_if(classes.begin(), classes.end(), - [&](const Class& cls) { return cls.name() == parentClass->name(); }); - if (parentIt == classes.end()) return; // ignore if parent not found - Class& parent = *parentIt; - - // Only check nontemplateMethods_ - for(const string& methodName: nontemplateMethods_ | boost::adaptors::map_keys) { - // check if the method exists in its parent - // Check against parent's methods_ because all the methods of grand - // parent and grand-grand-parent, etc. are already included there - // This is to avoid looking into higher level grand parents... - auto it = parent.methods_.find(methodName); - if (it == parent.methods_.end()) continue; // if not: ignore! - - Method& parentMethod = it->second; - Method& method = nontemplateMethods_[methodName]; - // check if they have the same modifiers (const/static/templateArgs) - if (!method.isSameModifiers(parentMethod)) continue; // if not: ignore - - // check and remove duplicate overloads - auto methodOverloads = boost::combine(method.returnVals_, method.argLists_); - auto parentMethodOverloads = boost::combine(parentMethod.returnVals_, parentMethod.argLists_); - auto result = boost::remove_if( - methodOverloads, - [&](boost::tuple const& overload) { - bool found = std::find_if( - parentMethodOverloads.begin(), - parentMethodOverloads.end(), - [&](boost::tuple const& - parentOverload) { - return overload.get<0>() == parentOverload.get<0>() && - overload.get<1>().isSameSignature(parentOverload.get<1>()); - }) != parentMethodOverloads.end(); - return found; - }); - // remove all duplicate overloads - method.returnVals_.erase(boost::get<0>(result.get_iterator_tuple()), - method.returnVals_.end()); - method.argLists_.erase(boost::get<1>(result.get_iterator_tuple()), - method.argLists_.end()); - } - // [Optional] remove the entire method if it has no overload - for (auto it = nontemplateMethods_.begin(), ite = nontemplateMethods_.end(); it != ite;) - if (it->second.nrOverloads() == 0) it = nontemplateMethods_.erase(it); else ++it; -} - -/* ************************************************************************* */ -string Class::getTypedef() const { - string result; - for(Str namesp: namespaces()) { - result += ("namespace " + namesp + " { "); - } - result += ("typedef " + typedefName + " " + name() + ";"); - for (size_t i = 0; i < namespaces().size(); ++i) { - result += " }"; - } - return result; -} - -/* ************************************************************************* */ -void Class::comment_fragment(FileWriter& proxyFile) const { - proxyFile.oss << "%class " << name() << ", see Doxygen page for details\n"; - proxyFile.oss - << "%at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html\n"; - - constructor.comment_fragment(proxyFile); - - if (!methods_.empty()) - 
proxyFile.oss << "%\n%-------Methods-------\n"; - for(const Methods::value_type& name_m: methods_) - name_m.second.comment_fragment(proxyFile); - - if (!static_methods.empty()) - proxyFile.oss << "%\n%-------Static Methods-------\n"; - for(const StaticMethods::value_type& name_m: static_methods) - name_m.second.comment_fragment(proxyFile); - - if (hasSerialization) { - proxyFile.oss << "%\n%-------Serialization Interface-------\n"; - proxyFile.oss << "%string_serialize() : returns string\n"; - proxyFile.oss << "%string_deserialize(string serialized) : returns " - << name() << "\n"; - } - - proxyFile.oss << "%\n"; -} - -/* ************************************************************************* */ -void Class::serialization_fragments(FileWriter& proxyFile, - FileWriter& wrapperFile, Str wrapperName, - vector& functionNames) const { - -//void Point3_string_serialize_17(int nargout, mxArray *out[], int nargin, const mxArray *in[]) -//{ -// typedef boost::shared_ptr Shared; -// checkArguments("string_serialize",nargout,nargin-1,0); -// Shared obj = unwrap_shared_ptr(in[0], "ptr_Point3"); -// ostringstream out_archive_stream; -// boost::archive::text_oarchive out_archive(out_archive_stream); -// out_archive << *obj; -// out[0] = wrap< string >(out_archive_stream.str()); -//} - - int serialize_id = functionNames.size(); - const string matlabQualName = qualifiedName("."); - const string matlabUniqueName = qualifiedName(); - const string cppClassName = qualifiedName("::"); - const string wrapFunctionNameSerialize = matlabUniqueName - + "_string_serialize_" + boost::lexical_cast(serialize_id); - functionNames.push_back(wrapFunctionNameSerialize); - - // call - //void Point3_string_serialize_17(int nargout, mxArray *out[], int nargin, const mxArray *in[]) - wrapperFile.oss << "void " << wrapFunctionNameSerialize - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - wrapperFile.oss << "{\n"; - wrapperFile.oss << " typedef boost::shared_ptr<" << cppClassName - << "> Shared;" << endl; - - // check arguments - for serialize, no arguments - // example: checkArguments("string_serialize",nargout,nargin-1,0); - wrapperFile.oss - << " checkArguments(\"string_serialize\",nargout,nargin-1,0);\n"; - - // get class pointer - // example: Shared obj = unwrap_shared_ptr(in[0], "ptr_Point3"); - wrapperFile.oss << " Shared obj = unwrap_shared_ptr<" << cppClassName - << ">(in[0], \"ptr_" << matlabUniqueName << "\");" << endl; - - // Serialization boilerplate - wrapperFile.oss << " ostringstream out_archive_stream;\n"; - wrapperFile.oss - << " boost::archive::text_oarchive out_archive(out_archive_stream);\n"; - wrapperFile.oss << " out_archive << *obj;\n"; - wrapperFile.oss << " out[0] = wrap< string >(out_archive_stream.str());\n"; - - // finish - wrapperFile.oss << "}\n"; - - // Generate code for matlab function -// function varargout string_serialize(this, varargin) -// % STRING_SERIALIZE usage: string_serialize() : returns string -// % Doxygen can be found at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html -// if length(varargin) == 0 -// varargout{1} = geometry_wrapper(15, this, varargin{:}); -// else -// error('Arguments do not match any overload of function Point3.string_serialize'); -// end -// end - - proxyFile.oss - << " function varargout = string_serialize(this, varargin)\n"; - proxyFile.oss - << " % STRING_SERIALIZE usage: string_serialize() : returns string\n"; - proxyFile.oss - << " % Doxygen can be found at 
http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html\n"; - proxyFile.oss << " if length(varargin) == 0\n"; - proxyFile.oss << " varargout{1} = " << wrapperName << "(" - << boost::lexical_cast(serialize_id) << ", this, varargin{:});\n"; - proxyFile.oss << " else\n"; - proxyFile.oss - << " error('Arguments do not match any overload of function " - << matlabQualName << ".string_serialize');\n"; - proxyFile.oss << " end\n"; - proxyFile.oss << " end\n\n"; - - // Generate code for matlab save function -// function sobj = saveobj(obj) -// % SAVEOBJ Saves the object to a matlab-readable format -// sobj = obj.string_serialize(); -// end - - proxyFile.oss << " function sobj = saveobj(obj)\n"; - proxyFile.oss - << " % SAVEOBJ Saves the object to a matlab-readable format\n"; - proxyFile.oss << " sobj = obj.string_serialize();\n"; - proxyFile.oss << " end\n"; -} - -/* ************************************************************************* */ -void Class::deserialization_fragments(FileWriter& proxyFile, - FileWriter& wrapperFile, Str wrapperName, - vector& functionNames) const { - //void Point3_string_deserialize_18(int nargout, mxArray *out[], int nargin, const mxArray *in[]) - //{ - // typedef boost::shared_ptr Shared; - // checkArguments("Point3.string_deserialize",nargout,nargin,1); - // string serialized = unwrap< string >(in[0]); - // istringstream in_archive_stream(serialized); - // boost::archive::text_iarchive in_archive(in_archive_stream); - // Shared output(new Point3(0,0,0)); - // in_archive >> *output; - // out[0] = wrap_shared_ptr(output,"Point3", false); - //} - int deserialize_id = functionNames.size(); - const string matlabQualName = qualifiedName("."); - const string matlabUniqueName = qualifiedName(); - const string cppClassName = qualifiedName("::"); - const string wrapFunctionNameDeserialize = matlabUniqueName - + "_string_deserialize_" + boost::lexical_cast(deserialize_id); - functionNames.push_back(wrapFunctionNameDeserialize); - - // call - wrapperFile.oss << "void " << wrapFunctionNameDeserialize - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - wrapperFile.oss << "{\n"; - wrapperFile.oss << " typedef boost::shared_ptr<" << cppClassName - << "> Shared;" << endl; - - // check arguments - for deserialize, 1 string argument - wrapperFile.oss << " checkArguments(\"" << matlabUniqueName - << ".string_deserialize\",nargout,nargin,1);\n"; - - // string argument with deserialization boilerplate - wrapperFile.oss << " string serialized = unwrap< string >(in[0]);\n"; - wrapperFile.oss << " istringstream in_archive_stream(serialized);\n"; - wrapperFile.oss - << " boost::archive::text_iarchive in_archive(in_archive_stream);\n"; - wrapperFile.oss << " Shared output(new " << cppClassName << "());\n"; - wrapperFile.oss << " in_archive >> *output;\n"; - wrapperFile.oss << " out[0] = wrap_shared_ptr(output,\"" << matlabQualName - << "\", false);\n"; - wrapperFile.oss << "}\n"; - - // Generate matlab function -// function varargout = string_deserialize(varargin) -// % STRING_DESERIALIZE usage: string_deserialize() : returns Point3 -// % Doxygen can be found at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html -// if length(varargin) == 1 -// varargout{1} = geometry_wrapper(18, varargin{:}); -// else -// error('Arguments do not match any overload of function Point3.string_deserialize'); -// end -// end - - proxyFile.oss << " function varargout = string_deserialize(varargin)\n"; - proxyFile.oss - << " % STRING_DESERIALIZE usage: 
string_deserialize() : returns " - << matlabQualName << "\n"; - proxyFile.oss - << " % Doxygen can be found at http://research.cc.gatech.edu/borg/sites/edu.borg/html/index.html\n"; - proxyFile.oss << " if length(varargin) == 1\n"; - proxyFile.oss << " varargout{1} = " << wrapperName << "(" - << boost::lexical_cast(deserialize_id) << ", varargin{:});\n"; - proxyFile.oss << " else\n"; - proxyFile.oss - << " error('Arguments do not match any overload of function " - << matlabQualName << ".string_deserialize');\n"; - proxyFile.oss << " end\n"; - proxyFile.oss << " end\n\n"; - - // Generate matlab load function -// function obj = loadobj(sobj) -// % LOADOBJ Saves the object to a matlab-readable format -// obj = Point3.string_deserialize(sobj); -// end - - proxyFile.oss << " function obj = loadobj(sobj)\n"; - proxyFile.oss - << " % LOADOBJ Saves the object to a matlab-readable format\n"; - proxyFile.oss << " obj = " << matlabQualName - << ".string_deserialize(sobj);\n"; - proxyFile.oss << " end" << endl; -} - -/* ************************************************************************* */ -string Class::getSerializationExport() const { - //BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsamSharedDiagonal"); - return "BOOST_CLASS_EXPORT_GUID(" + qualifiedName("::") + ", \"" - + qualifiedName() + "\");"; -} - -/* ************************************************************************* */ -void Class::python_wrapper(FileWriter& wrapperFile) const { - wrapperFile.oss << "class_<" << name() << ">(\"" << name() << "\")\n"; - constructor.python_wrapper(wrapperFile, name()); - for(const StaticMethod& m: static_methods | boost::adaptors::map_values) - m.python_wrapper(wrapperFile, name()); - for(const Method& m: methods_ | boost::adaptors::map_values) - m.python_wrapper(wrapperFile, name()); - wrapperFile.oss << ";\n\n"; -} - -/* ************************************************************************* */ -void Class::emit_cython_pxd(FileWriter& pxdFile) const { - pxdFile.oss << "cdef extern from \"" << includeFile << "\""; - string ns = qualifiedNamespaces("::"); - if (!ns.empty()) - pxdFile.oss << " namespace \"" << ns << "\""; - pxdFile.oss << ":" << endl; - pxdFile.oss << " cdef cppclass " << pxdClassName() << " \"" << qualifiedName("::") << "\""; - if (templateArgs.size()>0) { - pxdFile.oss << "["; - for(size_t i = 0; ipxdClassName() << ")"; - pxdFile.oss << ":\n"; - - constructor.emit_cython_pxd(pxdFile, *this); - if (constructor.nrOverloads()>0) pxdFile.oss << "\n"; - - for(const StaticMethod& m: static_methods | boost::adaptors::map_values) - m.emit_cython_pxd(pxdFile, *this); - if (static_methods.size()>0) pxdFile.oss << "\n"; - - for(const Method& m: nontemplateMethods_ | boost::adaptors::map_values) - m.emit_cython_pxd(pxdFile, *this); - - for(const TemplateMethod& m: templateMethods_ | boost::adaptors::map_values) - m.emit_cython_pxd(pxdFile, *this); - size_t numMethods = constructor.nrOverloads() + static_methods.size() + - methods_.size() + templateMethods_.size(); - if (numMethods == 0) - pxdFile.oss << " pass\n"; -} -/* ************************************************************************* */ -void Class::emit_cython_wrapper_pxd(FileWriter& pxdFile) const { - pxdFile.oss << "\ncdef class " << pyxClassName(); - if (getParent()) - pxdFile.oss << "(" << getParent()->pyxClassName() << ")"; - pxdFile.oss << ":\n"; - pxdFile.oss << " cdef " << shared_pxd_class_in_pyx() << " " - << shared_pxd_obj_in_pyx() << "\n"; - // cyCreateFromShared - pxdFile.oss << " @staticmethod\n"; - pxdFile.oss 
<< " cdef " << pyxClassName() << " cyCreateFromShared(const " - << shared_pxd_class_in_pyx() << "& other)\n"; - for(const StaticMethod& m: static_methods | boost::adaptors::map_values) - m.emit_cython_wrapper_pxd(pxdFile, *this); - if (static_methods.size()>0) pxdFile.oss << "\n"; -} - -/* ************************************************************************* */ -void Class::pyxInitParentObj(FileWriter& pyxFile, const std::string& pyObj, - const std::string& cySharedObj, - const std::vector& allClasses) const { - if (parentClass) { - pyxFile.oss << pyObj << "." << parentClass->shared_pxd_obj_in_pyx() << " = " - << "<" << parentClass->shared_pxd_class_in_pyx() << ">(" - << cySharedObj << ")\n"; - // Find the parent class with name "parentClass" and point its cython obj - // to the same pointer - auto parent_it = find_if(allClasses.begin(), allClasses.end(), - [this](const Class& cls) { - return cls.pxdClassName() == - this->parentClass->pxdClassName(); - }); - if (parent_it == allClasses.end()) { - cerr << "Can't find parent class: " << parentClass->pxdClassName(); - throw std::runtime_error("Parent class not found!"); - } - parent_it->pyxInitParentObj(pyxFile, pyObj, cySharedObj, allClasses); - } -} - -/* ************************************************************************* */ -void Class::pyxDynamicCast(FileWriter& pyxFile, const Class& curLevel, - const std::vector& allClasses) const { - std::string me = this->pyxClassName(), sharedMe = this->shared_pxd_class_in_pyx(); - if (curLevel.parentClass) { - std::string parent = curLevel.parentClass->pyxClassName(), - parentObj = curLevel.parentClass->shared_pxd_obj_in_pyx(), - parentCythonClass = curLevel.parentClass->pxd_class_in_pyx(); - pyxFile.oss << "def dynamic_cast_" << me << "_" << parent << "(" << parent - << " parent):\n"; - pyxFile.oss << " try:\n"; - pyxFile.oss << " return " << me << ".cyCreateFromShared(<" << sharedMe - << ">dynamic_pointer_cast[" << pxd_class_in_pyx() << "," - << parentCythonClass << "](parent." << parentObj - << "))\n"; - pyxFile.oss << " except:\n"; - pyxFile.oss << " raise TypeError('dynamic cast failed!')\n"; - // Move up higher to one level: Find the parent class with name "parentClass" - auto parent_it = find_if(allClasses.begin(), allClasses.end(), - [&curLevel](const Class& cls) { - return cls.pxdClassName() == - curLevel.parentClass->pxdClassName(); - }); - if (parent_it == allClasses.end()) { - cerr << "Can't find parent class: " << parentClass->pxdClassName(); - throw std::runtime_error("Parent class not found!"); - } - pyxDynamicCast(pyxFile, *parent_it, allClasses); - } -} - -/* ************************************************************************* */ -void Class::emit_cython_pyx(FileWriter& pyxFile, const std::vector& allClasses) const { - pyxFile.oss << "cdef class " << pyxClassName(); - if (parentClass) pyxFile.oss << "(" << parentClass->pyxClassName() << ")"; - pyxFile.oss << ":\n"; - - // __init___ - pyxFile.oss << " def __init__(self, *args, **kwargs):\n"; - pyxFile.oss << " cdef list __params\n"; - pyxFile.oss << " self." << shared_pxd_obj_in_pyx() << " = " << shared_pxd_class_in_pyx() << "()\n"; - pyxFile.oss << " if len(args)==0 and len(kwargs)==1 and kwargs.has_key('cyCreateFromShared'):\n return\n"; - - // Constructors - constructor.emit_cython_pyx(pyxFile, *this); - pyxFile.oss << " if (self." << shared_pxd_obj_in_pyx() << ".use_count()==0):\n"; - pyxFile.oss << " raise TypeError('" << pyxClassName() - << " construction failed!')\n"; - pyxInitParentObj(pyxFile, " self", "self." 
+ shared_pxd_obj_in_pyx(), allClasses); - pyxFile.oss << "\n"; - - // cyCreateFromShared - pyxFile.oss << " @staticmethod\n"; - pyxFile.oss << " cdef " << pyxClassName() << " cyCreateFromShared(const " - << shared_pxd_class_in_pyx() << "& other):\n" - << " if other.get() == NULL:\n" - << " raise RuntimeError('Cannot create object from a nullptr!')\n" - << " cdef " << pyxClassName() << " return_value = " << pyxClassName() << "(cyCreateFromShared=True)\n" - << " return_value." << shared_pxd_obj_in_pyx() << " = other\n"; - pyxInitParentObj(pyxFile, " return_value", "other", allClasses); - pyxFile.oss << " return return_value" << "\n\n"; - - for(const StaticMethod& m: static_methods | boost::adaptors::map_values) - m.emit_cython_pyx(pyxFile, *this); - if (static_methods.size()>0) pyxFile.oss << "\n"; - - for(const Method& m: methods_ | boost::adaptors::map_values) - m.emit_cython_pyx(pyxFile, *this); - - pyxDynamicCast(pyxFile, *this, allClasses); - - pyxFile.oss << "\n\n"; -} - -/* ************************************************************************* */ diff --git a/wrap/Class.h b/wrap/Class.h deleted file mode 100644 index 3df37fe67..000000000 --- a/wrap/Class.h +++ /dev/null @@ -1,315 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Class.h - * @brief describe the C++ class that is being wrapped - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#pragma once - -#include "spirit.h" -#include "Template.h" -#include "Constructor.h" -#include "Deconstructor.h" -#include "Method.h" -#include "StaticMethod.h" -#include "TemplateMethod.h" -#include "TypeAttributesTable.h" - -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-variable" -#endif -#include -#include -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif - -namespace bl = boost::lambda; - -#include -#include - -#include -#include - -namespace wrap { - -/// Class has name, constructors, methods -class Class: public Qualified { - -public: - typedef const std::string& Str; - typedef std::map Methods; - typedef std::map StaticMethods; - typedef std::map TemplateMethods; - -private: - - boost::optional parentClass; ///< The *single* parent - Methods methods_; ///< Class methods, including all expanded/instantiated template methods -- to be serialized to matlab and Python classes in Cython pyx - Methods nontemplateMethods_; ///< only nontemplate methods -- to be serialized into Cython pxd - TemplateMethods templateMethods_; ///< only template methods -- to be serialized into Cython pxd - // Method& mutableMethod(Str key); - -public: - - StaticMethods static_methods; ///< Static methods - - // Then the instance variables are set directly by the Module constructor - std::vector templateArgs; ///< Template arguments - std::string typedefName; ///< The name to typedef *from*, if this class is actually a typedef, i.e. typedef [typedefName] [name] - std::vector templateInstTypeList; ///< the original typelist used to instantiate this class from a template. - ///< Empty if it's not an instantiation. Needed for template classes in Cython pxd. 
- boost::optional templateClass = boost::none; ///< qualified name of the original template class from which this class was instantiated. - ///< boost::none if not an instantiation. Needed for template classes in Cython pxd. - bool isVirtual; ///< Whether the class is part of a virtual inheritance chain - bool isSerializable; ///< Whether we can use boost.serialization to serialize the class - creates exports - bool hasSerialization; ///< Whether we should create the serialization functions - Constructor constructor; ///< Class constructors - Deconstructor deconstructor; ///< Deconstructor to deallocate C++ object - bool verbose_; ///< verbose flag - std::string includeFile; - - /// Constructor creates an empty class - Class(bool verbose = true) : - parentClass(boost::none), isVirtual(false), isSerializable(false), hasSerialization( - false), deconstructor(verbose), verbose_(verbose) { - } - - Class(const std::string& name, bool verbose = true) - : Qualified(name, Qualified::Category::CLASS), - parentClass(boost::none), - isVirtual(false), - isSerializable(false), - hasSerialization(false), - deconstructor(verbose), - verbose_(verbose) {} - - void assignParent(const Qualified& parent); - - boost::optional qualifiedParent() const; - boost::optional getParent() const { return parentClass; } - - size_t nrMethods() const { - return methods_.size(); - } - - const Method& method(Str key) const; - - bool exists(Str name) const { - return methods_.find(name) != methods_.end(); - } - - // And finally MATLAB code is emitted, methods below called by Module::matlab_code - void matlab_proxy(Str toolboxPath, Str wrapperName, - const TypeAttributesTable& typeAttributes, FileWriter& wrapperFile, - std::vector& functionNames) const; ///< emit proxy class - - Class expandTemplate(const TemplateSubstitution& ts) const; - - std::vector expandTemplate(Str templateArg, - const std::vector& instantiations) const; - - // Create new classes with integer template arguments - std::vector expandTemplate(Str templateArg, - const std::vector& integers) const; - - /// Add potentially overloaded, potentially templated method - void addMethod(bool verbose, bool is_const, Str methodName, - const ArgumentList& argumentList, const ReturnValue& returnValue, - const Template& tmplate); - - /// Post-process classes for serialization markers - void erase_serialization(); // non-const ! - void erase_serialization(Methods& methods); // non-const ! - - /// verify all of the function arguments - void verifyAll(std::vector& functionNames, - bool& hasSerialiable) const; - - void appendInheritedMethods(const Class& cls, - const std::vector& classes); - - void removeInheritedNontemplateMethods(std::vector& classes); - - /// The typedef line for this class, if this class is a typedef, otherwise returns an empty string. 
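As a side note, a small sketch of the query accessors declared above (`nrMethods`, `exists`, `method`); the `inspect` helper and the method name "equals" are purely illustrative:

    #include <iostream>
    #include "Class.h"

    void inspect(const wrap::Class& cls) {
      std::cout << cls.name() << " wraps " << cls.nrMethods() << " methods\n";
      if (cls.exists("equals")) {                       // name lookup into the methods_ map
        const wrap::Method& m = cls.method("equals");   // guarded by exists() above
        std::cout << "  equals is " << (m.isConst() ? "const" : "non-const") << "\n";
      }
    }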
- std::string getTypedef() const; - - /// Returns the string for an export flag - std::string getSerializationExport() const; - - /// Creates a member function that performs serialization - void serialization_fragments(FileWriter& proxyFile, FileWriter& wrapperFile, - Str wrapperName, std::vector& functionNames) const; - - /// Creates a static member function that performs deserialization - void deserialization_fragments(FileWriter& proxyFile, FileWriter& wrapperFile, - Str wrapperName, std::vector& functionNames) const; - - // emit python wrapper - void python_wrapper(FileWriter& wrapperFile) const; - - // emit cython wrapper - void emit_cython_pxd(FileWriter& pxdFile) const; - void emit_cython_wrapper_pxd(FileWriter& pxdFile) const; - void emit_cython_pyx(FileWriter& pyxFile, - const std::vector& allClasses) const; - void pyxInitParentObj(FileWriter& pyxFile, const std::string& pyObj, - const std::string& cySharedObj, - const std::vector& allClasses) const; - void pyxDynamicCast(FileWriter& pyxFile, const Class& curLevel, - const std::vector& allClasses) const; - - friend std::ostream& operator<<(std::ostream& os, const Class& cls) { - os << "class " << cls.name() << "{\n"; - os << cls.constructor << ";\n"; - for(const StaticMethod& m: cls.static_methods | boost::adaptors::map_values) - os << m << ";\n"; - for(const Method& m: cls.methods_ | boost::adaptors::map_values) - os << m << ";\n"; - os << "};" << std::endl; - return os; - } - -private: - - void pointer_constructor_fragments(FileWriter& proxyFile, - FileWriter& wrapperFile, Str wrapperName, - std::vector& functionNames) const; - - void comment_fragment(FileWriter& proxyFile) const; -}; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ClassGrammar: public classic::grammar { - - Class& cls_; ///< successful parse will be placed in here - Template& template_; ///< result needs to be visible outside - - /// Construct type grammar and specify where result is placed - ClassGrammar(Class& cls, Template& t) : - cls_(cls), template_(t) { - } - - /// Definition of type grammar - template - struct definition: BasicRules { - - using BasicRules::name_p; - using BasicRules::className_p; - using BasicRules::comments_p; - - // NOTE: allows for pointers to all types - ArgumentList args; - ArgumentListGrammar argumentList_g; - - Constructor constructor0, constructor; - - ReturnValue retVal0, retVal; - ReturnValueGrammar returnValue_g; - - Template methodTemplate; - TemplateGrammar methodTemplate_g, classTemplate_g; - - std::string methodName; - bool isConst, T, F; - - // Parent class - Qualified possibleParent; - TypeGrammar classParent_g; - - classic::rule constructor_p, methodName_p, method_p, - staticMethodName_p, static_method_p, templateList_p, classParent_p, - functions_p, class_p; - - definition(ClassGrammar const& self) : - argumentList_g(args), returnValue_g(retVal), // - methodTemplate_g(methodTemplate), classTemplate_g(self.template_), // - T(true), F(false), classParent_g(possibleParent) { - - using namespace classic; - bool verbose = false; // TODO - - // ConstructorGrammar - constructor_p = (className_p >> argumentList_g >> ';' >> !comments_p) // - [bl::bind(&Constructor::push_back, bl::var(constructor), - bl::var(args))] // - [clear_a(args)]; - - // MethodGrammar - methodName_p = lexeme_d[(upper_p | lower_p) >> *(alnum_p | '_')]; - - // gtsam::Values retract(const gtsam::VectorValues& delta) const; - method_p = 
!methodTemplate_g - >> (returnValue_g >> methodName_p[assign_a(methodName)] - >> argumentList_g >> !str_p("const")[assign_a(isConst, T)] >> ';' - >> *comments_p) // - [bl::bind(&Class::addMethod, bl::var(self.cls_), verbose, - bl::var(isConst), bl::var(methodName), bl::var(args), - bl::var(retVal), bl::var(methodTemplate))] // - [assign_a(retVal, retVal0)][clear_a(args)] // - [clear_a(methodTemplate)][assign_a(isConst, F)]; - - // StaticMethodGrammar - staticMethodName_p = lexeme_d[(upper_p | lower_p) >> *(alnum_p | '_')]; - - static_method_p = (str_p("static") >> returnValue_g - >> staticMethodName_p[assign_a(methodName)] >> argumentList_g >> ';' - >> *comments_p) // - [bl::bind(&StaticMethod::addOverload, - bl::var(self.cls_.static_methods)[bl::var(methodName)], - bl::var(methodName), bl::var(args), bl::var(retVal), boost::none, - verbose)] // - [assign_a(retVal, retVal0)][clear_a(args)]; - - // template - templateList_p = (str_p("template") >> '<' - >> name_p[push_back_a(self.cls_.templateArgs)] - >> *(',' >> name_p[push_back_a(self.cls_.templateArgs)]) >> '>'); - - // parse a full class - classParent_p = (':' >> classParent_g >> '{') // - [bl::bind(&Class::assignParent, bl::var(self.cls_), - bl::var(possibleParent))][clear_a(possibleParent)]; - - functions_p = constructor_p | method_p | static_method_p; - - // parse a full class - class_p = (!(classTemplate_g[push_back_a(self.cls_.templateArgs, - self.template_.argName())] | templateList_p) - >> !(str_p("virtual")[assign_a(self.cls_.isVirtual, T)]) - >> str_p("class") >> className_p[assign_a(self.cls_.name_)] - >> (classParent_p | '{') >> // - *(functions_p | comments_p) >> str_p("};")) // - [bl::bind(&Constructor::initializeOrCheck, bl::var(constructor), - bl::var(self.cls_.name_), boost::none, verbose)][assign_a( - self.cls_.constructor, constructor)] // - [assign_a(self.cls_.deconstructor.name, self.cls_.name_)] // - [assign_a(constructor, constructor0)]; - } - - classic::rule const& start() const { - return class_p; - } - - }; -}; -// ClassGrammar - -}// \namespace wrap - diff --git a/wrap/Constructor.cpp b/wrap/Constructor.cpp deleted file mode 100644 index 74719b289..000000000 --- a/wrap/Constructor.cpp +++ /dev/null @@ -1,160 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Constructor.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include -#include -#include - -#include - -#include "utilities.h" -#include "Constructor.h" -#include "Class.h" - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -string Constructor::matlab_wrapper_name(Str className) const { - string str = "new_" + className; - return str; -} - -/* ************************************************************************* */ -void Constructor::proxy_fragment(FileWriter& file, - const std::string& wrapperName, bool hasParent, - const int id, const ArgumentList args) const { - size_t nrArgs = args.size(); - // check for number of arguments... 
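For context, a hedged sketch of feeding a one-class interface snippet to `ClassGrammar`; the `classic` namespace alias (presumed to come from spirit.h), the `space_p` skipper, and the driver code are assumptions about how Module invokes the grammar:

    #include <iostream>
    #include <string>
    #include "Class.h"

    void parseExample() {
      wrap::Class cls(false);
      wrap::Template classTemplate;
      wrap::ClassGrammar grammar(cls, classTemplate);
      const std::string decl = "class Point2 { Point2(); double x() const; };";
      // boost::spirit::classic parse with a whitespace skipper (skipper choice is an assumption)
      classic::parse_info<> result = classic::parse(decl.c_str(), grammar, classic::space_p);
      if (result.full)
        std::cout << "parsed " << cls.name() << " with " << cls.nrMethods() << " methods\n";
    }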
- file.oss << " elseif nargin == " << nrArgs; - if (nrArgs > 0) file.oss << " && "; - // ...and their types - bool first = true; - for (size_t i = 0; i < nrArgs; i++) { - if (!first) file.oss << " && "; - file.oss << "isa(varargin{" << i + 1 << "},'" << args[i].matlabClass(".") - << "')"; - first = false; - } - // emit code for calling constructor - if (hasParent) - file.oss << "\n [ my_ptr, base_ptr ] = "; - else - file.oss << "\n my_ptr = "; - file.oss << wrapperName << "(" << id; - // emit constructor arguments - for (size_t i = 0; i < nrArgs; i++) { - file.oss << ", "; - file.oss << "varargin{" << i + 1 << "}"; - } - file.oss << ");\n"; -} - -/* ************************************************************************* */ -string Constructor::wrapper_fragment(FileWriter& file, Str cppClassName, - Str matlabUniqueName, - boost::optional cppBaseClassName, - int id, const ArgumentList& al) const { - const string wrapFunctionName = - matlabUniqueName + "_constructor_" + boost::lexical_cast(id); - - file.oss << "void " << wrapFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])" - << endl; - file.oss << "{\n"; - file.oss << " mexAtExit(&_deleteAllObjects);\n"; - // Typedef boost::shared_ptr - file.oss << " typedef boost::shared_ptr<" << cppClassName << "> Shared;\n"; - file.oss << "\n"; - - // Check to see if there will be any arguments and remove {} for consiseness - if (al.size() > 0) al.matlab_unwrap(file); // unwrap arguments - file.oss << " Shared *self = new Shared(new " << cppClassName << "(" - << al.names() << "));" << endl; - file.oss << " collector_" << matlabUniqueName << ".insert(self);\n"; - - if (verbose_) - file.oss << " std::cout << \"constructed \" << self << std::endl;" << endl; - file.oss - << " out[0] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, mxREAL);" - << endl; - file.oss << " *reinterpret_cast (mxGetData(out[0])) = self;" - << endl; - - // If we have a base class, return the base class pointer (MATLAB will call - // the base class collectorInsertAndMakeBase to add this to the collector and - // recurse the heirarchy) - if (cppBaseClassName) { - file.oss << "\n"; - file.oss << " typedef boost::shared_ptr<" << *cppBaseClassName - << "> SharedBase;\n"; - file.oss << " out[1] = mxCreateNumericMatrix(1, 1, mxUINT32OR64_CLASS, " - "mxREAL);\n"; - file.oss << " *reinterpret_cast(mxGetData(out[1])) = new " - "SharedBase(*self);\n"; - } - - file.oss << "}" << endl; - - return wrapFunctionName; -} - -/* ************************************************************************* */ -void Constructor::python_wrapper(FileWriter& wrapperFile, Str className) const { - wrapperFile.oss << " .def(\"" << name_ << "\", &" << className - << "::" << name_ << ");\n"; -} - -/* ************************************************************************* */ -bool Constructor::hasDefaultConstructor() const { - for (size_t i = 0; i < nrOverloads(); i++) { - if (argumentList(i).size() == 0) return true; - } - return false; -} - -/* ************************************************************************* */ -void Constructor::emit_cython_pxd(FileWriter& pxdFile, const Class& cls) const { - for (size_t i = 0; i < nrOverloads(); i++) { - ArgumentList args = argumentList(i); - - // generate the constructor - pxdFile.oss << " " << cls.pxdClassName() << "("; - args.emit_cython_pxd(pxdFile, cls.pxdClassName(), cls.templateArgs); - pxdFile.oss << ") " << "except +\n"; - } -} - -/* ************************************************************************* */ -void 
Constructor::emit_cython_pyx(FileWriter& pyxFile, const Class& cls) const { - for (size_t i = 0; i < nrOverloads(); i++) { - ArgumentList args = argumentList(i); - pyxFile.oss << " try:\n"; - pyxFile.oss << pyx_resolveOverloadParams(args, true, 3); - pyxFile.oss - << argumentList(i).pyx_convertEigenTypeAndStorageOrder(" "); - - pyxFile.oss << " self." << cls.shared_pxd_obj_in_pyx() << " = " - << cls.shared_pxd_class_in_pyx() << "(new " << cls.pxd_class_in_pyx() - << "(" << args.pyx_asParams() << "))\n"; - pyxFile.oss << " except (AssertionError, ValueError):\n"; - pyxFile.oss << " pass\n"; - } -} - -/* ************************************************************************* */ diff --git a/wrap/Constructor.h b/wrap/Constructor.h deleted file mode 100644 index 172cd24a4..000000000 --- a/wrap/Constructor.h +++ /dev/null @@ -1,99 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Constructor.h - * @brief class describing a constructor + code generation - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#pragma once - -#include "OverloadedFunction.h" -#include -#include -#include - -namespace wrap { - -// Forward declaration -class Class; - -// Constructor class -struct Constructor: public OverloadedFunction { - - typedef const std::string& Str; - - /// Constructor creates an empty class - Constructor(bool verbose = false) { - verbose_ = verbose; - } - - Constructor expandTemplate(const TemplateSubstitution& ts) const { - Constructor inst = *this; - inst.argLists_ = expandArgumentListsTemplate(ts); - inst.name_ = ts.expandedClassName(); - return inst; - } - - /// return true if the default constructor exists - bool hasDefaultConstructor() const; - - // MATLAB code generation - // toolboxPath is main toolbox directory, e.g., ../matlab - // classFile is class proxy file, e.g., ../matlab/@Point2/Point2.m - - /// wrapper name - std::string matlab_wrapper_name(Str className) const; - - void comment_fragment(FileWriter& proxyFile) const { - if (nrOverloads() > 0) - proxyFile.oss << "%\n%-------Constructors-------\n"; - for (size_t i = 0; i < nrOverloads(); i++) { - proxyFile.oss << "%"; - argumentList(i).emit_prototype(proxyFile, name_); - proxyFile.oss << "\n"; - } - } - - /** - * Create fragment to select constructor in proxy class, e.g., - * if nargin == 2, obj.self = new_Pose3_RP(varargin{1},varargin{2}); end - */ - void proxy_fragment(FileWriter& file, Str wrapperName, bool hasParent, - const int id, const ArgumentList args) const; - - /// cpp wrapper - std::string wrapper_fragment(FileWriter& file, Str cppClassName, - Str matlabUniqueName, boost::optional cppBaseClassName, int id, - const ArgumentList& al) const; - - /// constructor function - void generate_construct(FileWriter& file, Str cppClassName, - std::vector& args_list) const; - - // emit python wrapper - void python_wrapper(FileWriter& wrapperFile, Str className) const; - - // emit cython wrapper - void emit_cython_pxd(FileWriter& pxdFile, const Class& cls) const; - void emit_cython_pyx(FileWriter& pyxFile, const Class& cls) const; - - friend std::ostream& operator<<(std::ostream& os, const Constructor& m) { - for (size_t i = 0; i < m.nrOverloads(); 
i++) - os << m.name_ << m.argLists_[i]; - return os; - } - -}; - -} // \namespace wrap diff --git a/wrap/Deconstructor.cpp b/wrap/Deconstructor.cpp deleted file mode 100644 index 7bb366e3f..000000000 --- a/wrap/Deconstructor.cpp +++ /dev/null @@ -1,73 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Deconstructor.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include -#include - -#include - -#include "utilities.h" -#include "Deconstructor.h" - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -string Deconstructor::matlab_wrapper_name(const string& className) const { - string str = "delete_" + className; - return str; -} - -/* ************************************************************************* */ -void Deconstructor::proxy_fragment(FileWriter& file, - const std::string& wrapperName, - const std::string& matlabUniqueName, int id) const { - - file.oss << " function delete(obj)\n"; - file.oss << " " << wrapperName << "(" << id << ", obj.ptr_" << matlabUniqueName << ");\n"; - file.oss << " end\n"; -} - -/* ************************************************************************* */ -string Deconstructor::wrapper_fragment(FileWriter& file, - const string& cppClassName, - const string& matlabUniqueName, - int id) const { - - const string matlabName = matlab_wrapper_name(matlabUniqueName); - - const string wrapFunctionName = matlabUniqueName + "_deconstructor_" + boost::lexical_cast(id); - - file.oss << "void " << wrapFunctionName << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])" << endl; - file.oss << "{" << endl; - file.oss << " typedef boost::shared_ptr<" << cppClassName << "> Shared;" << endl; - //Deconstructor takes 1 arg, the mxArray obj - file.oss << " checkArguments(\"" << matlabName << "\",nargout,nargin," << "1" << ");" << endl; - file.oss << " Shared *self = *reinterpret_cast(mxGetData(in[0]));\n"; - file.oss << " Collector_" << matlabUniqueName << "::iterator item;\n"; - file.oss << " item = collector_" << matlabUniqueName << ".find(self);\n"; - file.oss << " if(item != collector_" << matlabUniqueName << ".end()) {\n"; - file.oss << " delete self;\n"; - file.oss << " collector_" << matlabUniqueName << ".erase(item);\n"; - file.oss << " }\n"; - file.oss << "}" << endl; - - return wrapFunctionName; -} - -/* ************************************************************************* */ diff --git a/wrap/Deconstructor.h b/wrap/Deconstructor.h deleted file mode 100644 index ee2f4ea19..000000000 --- a/wrap/Deconstructor.h +++ /dev/null @@ -1,61 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Deconstructor.h - * @brief class describing a constructor + code generation - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#pragma once - -#include -#include - -#include "Argument.h" - -namespace wrap { - -// Deconstructor class -struct Deconstructor { - - /// Deconstructor creates an empty class - Deconstructor(bool verbose = true) : - verbose_(verbose) { - } - - // Then the instance variables are set directly by the Module deconstructor - std::string name; - bool verbose_; - - // MATLAB code generation - // toolboxPath is main toolbox directory, e.g., ../matlab - // classFile is class proxy file, e.g., ../matlab/@Point2/Point2.m - - /// wrapper name - std::string matlab_wrapper_name(const std::string& className) const; - - /// m-file - void proxy_fragment(FileWriter& file, - const std::string& wrapperName, - const std::string& matlabUniqueName, int id) const; - - /// cpp wrapper - std::string wrapper_fragment(FileWriter& file, - const std::string& cppClassName, - const std::string& matlabUniqueName, - int id) const; -}; - -} // \namespace wrap - diff --git a/wrap/FileWriter.cpp b/wrap/FileWriter.cpp deleted file mode 100644 index c07de0eb0..000000000 --- a/wrap/FileWriter.cpp +++ /dev/null @@ -1,52 +0,0 @@ -/** - * @file FileWriter.cpp - * - * @date Jan 15, 2012 - * @author Alex Cunningham - */ - -#include "FileWriter.h" -#include "utilities.h" - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -FileWriter::FileWriter(const string& filename, bool verbose, - const string& comment_str) : - verbose_(verbose), filename_(filename), comment_str_(comment_str) { -} - -/* ************************************************************************* */ -void FileWriter::emit(bool add_header, bool force_overwrite) const { - if (verbose_) - cerr << "generating " << filename_ << " "; - // read in file if it exists - string existing_contents; - bool file_exists = true; - try { - existing_contents = file_contents(filename_.c_str(), add_header); - } catch (const CantOpenFile& ) { - file_exists = false; - } - - // Only write a file if it is new, an update, or overwrite is forced - string new_contents = oss.str(); - if (force_overwrite || !file_exists || existing_contents != new_contents) { - // Binary to use LF line endings instead of CRLF - ofstream ofs(filename_.c_str(), ios::binary); - if (!ofs) - throw CantOpenFile(filename_); - - // dump in stringstream - ofs << new_contents; - ofs.close(); - } - if (verbose_) - cerr << " ...no update" << endl; -} -/* ************************************************************************* */ - diff --git a/wrap/FileWriter.h b/wrap/FileWriter.h deleted file mode 100644 index 12e033fdf..000000000 --- a/wrap/FileWriter.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * @file FileWriter.h - * - * @brief Wrapper for writing files and avoiding overwriting existing files - * This class wraps a stream object and will check that the file is - * actually different to write the new generated file. 
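By way of illustration, the write-if-changed pattern used by FileWriter above in a minimal, self-contained form; the file name and contents are placeholders:

    #include "FileWriter.h"

    void writeWrapper() {
      wrap::FileWriter file("gtsam_wrapper.cpp", /*verbose=*/false, "//");
      file.oss << "// automatically generated, do not edit\n";
      file.emit(/*add_header=*/true);       // compares against the file on disk; rewrites only on a real change
      // file.emit(true, /*force=*/true);   // would skip the comparison and always rewrite
    }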
- * - * @date Jan 15, 2012 - * @author Alex Cunningham - */ - -#pragma once - -#include - -namespace wrap { - -class FileWriter { -protected: - bool verbose_; - std::string filename_; - std::string comment_str_; - -public: - std::ostringstream oss; ///< Primary stream for operating on the file - - /** Create a writer with a filename and delimiter for the header comment */ - FileWriter(const std::string& filename, bool verbose, const std::string& comment_str); - - /** Writes the contents of the stringstream to the file, checking if actually new */ - void emit(bool add_header, bool force=false) const; - -}; - -} // \namespace wrap diff --git a/wrap/ForwardDeclaration.h b/wrap/ForwardDeclaration.h deleted file mode 100644 index 190387ecc..000000000 --- a/wrap/ForwardDeclaration.h +++ /dev/null @@ -1,36 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Class.h - * @brief describe the C++ class that is being wrapped - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#pragma once - -#include - -namespace wrap { - - class Class; - - struct ForwardDeclaration { - Class cls; - bool isVirtual; - ForwardDeclaration() : isVirtual(false) {} - explicit ForwardDeclaration(const std::string& s) : cls(s), isVirtual(false) {} - std::string name() const { return cls.qualifiedName("::"); } - }; - -} diff --git a/wrap/FullyOverloadedFunction.cpp b/wrap/FullyOverloadedFunction.cpp deleted file mode 100644 index 4db4c8713..000000000 --- a/wrap/FullyOverloadedFunction.cpp +++ /dev/null @@ -1,34 +0,0 @@ -#include "FullyOverloadedFunction.h" - -using namespace std; - -namespace wrap { -const std::array FullyOverloadedFunction::pythonKeywords{ - {"print", "lambda"}}; - -/* ************************************************************************* */ -std::string FullyOverloadedFunction::pyx_functionCall( - const std::string& caller, - const std::string& funcName, size_t iOverload) const { - - string ret; - if (!returnVals_[iOverload].isPair && !returnVals_[iOverload].type1.isPtr && - returnVals_[iOverload].type1.isNonBasicType()) { - ret = returnVals_[iOverload].type1.make_shared_pxd_class_in_pyx() + "("; - } - - // actual function call ... - if (!caller.empty()) ret += caller + "."; - ret += funcName; - if (templateArgValue_) ret += "[" + templateArgValue_->pxd_class_in_pyx() + "]"; - //... with argument list - ret += "(" + argumentList(iOverload).pyx_asParams() + ")"; - - if (!returnVals_[iOverload].isPair && !returnVals_[iOverload].type1.isPtr && - returnVals_[iOverload].type1.isNonBasicType()) - ret += ")"; - - return ret; -} - -} diff --git a/wrap/FullyOverloadedFunction.h b/wrap/FullyOverloadedFunction.h deleted file mode 100644 index 6b40f6a70..000000000 --- a/wrap/FullyOverloadedFunction.h +++ /dev/null @@ -1,147 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file FullyOverloadedFunction.h - * @brief Function that can be fully overloaded: arguments and return values - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#pragma once - -#include "OverloadedFunction.h" -#include - -namespace wrap { - -/** - * Signature Overload (including return value) - */ -class SignatureOverloads: public ArgumentOverloads { - -public: - - std::vector returnVals_; - -public: - - const ReturnValue& returnValue(size_t i) const { - return returnVals_.at(i); - } - - void push_back(const ArgumentList& args, const ReturnValue& retVal) { - argLists_.push_back(args); - returnVals_.push_back(retVal); - } - - void verifyReturnTypes(const std::vector& validtypes, - const std::string& s) const { - for(const ReturnValue& retval: returnVals_) { - retval.type1.verify(validtypes, s); - if (retval.isPair) - retval.type2.verify(validtypes, s); - } - } - - // TODO use transform ? - std::vector expandReturnValuesTemplate( - const TemplateSubstitution& ts) const { - std::vector result; - for(const ReturnValue& retVal: returnVals_) { - ReturnValue instRetVal = retVal.expandTemplate(ts); - result.push_back(instRetVal); - } - return result; - } - - /// Expand templates, imperative ! - void expandTemplate(const TemplateSubstitution& ts) { - // substitute template in arguments - argLists_ = expandArgumentListsTemplate(ts); - // do the same for return types - returnVals_ = expandReturnValuesTemplate(ts); - } - - // emit a list of comments, one for each overload - void usage_fragment(FileWriter& proxyFile, const std::string& name) const { - unsigned int argLCount = 0; - for(ArgumentList argList: argLists_) { - argList.emit_prototype(proxyFile, name); - if (argLCount != nrOverloads() - 1) - proxyFile.oss << ", "; - else - proxyFile.oss << " : returns " << returnValue(0).returnType() - << std::endl; - argLCount++; - } - } - - // emit a list of comments, one for each overload - void comment_fragment(FileWriter& proxyFile, const std::string& name) const { - size_t i = 0; - for(ArgumentList argList: argLists_) { - proxyFile.oss << "%"; - argList.emit_prototype(proxyFile, name); - proxyFile.oss << " : returns " << returnVals_[i++].returnType() - << std::endl; - } - } - - friend std::ostream& operator<<(std::ostream& os, - const SignatureOverloads& overloads) { - for (size_t i = 0; i < overloads.nrOverloads(); i++) - os << overloads.returnVals_[i] << overloads.argLists_[i] << std::endl; - return os; - } - -}; - -class FullyOverloadedFunction: public Function, public SignatureOverloads { - -public: - - bool addOverload(const std::string& name, const ArgumentList& args, - const ReturnValue& retVal, boost::optional instName = - boost::none, bool verbose = false) { - bool first = initializeOrCheck(name, instName, verbose); - SignatureOverloads::push_back(args, retVal); - return first; - } - - // emit cython pyx function call - std::string pyx_functionCall(const std::string& caller, const std::string& funcName, - size_t iOverload) const; - - /// Cython: Rename functions which names are python keywords - static const std::array pythonKeywords; - static std::string pyRename(const std::string& name) { - if (std::find(pythonKeywords.begin(), pythonKeywords.end(), name) == - pythonKeywords.end()) - return name; - else - return name + "_"; - } -}; - -// Templated checking functions -// TODO: do this via polymorphism, use 
transform ? - -template -inline void verifyReturnTypes(const std::vector& validTypes, - const std::map& vt) { - typedef typename std::map::value_type NamedMethod; - for(const NamedMethod& namedMethod: vt) - namedMethod.second.verifyReturnTypes(validTypes); -} - -} // \namespace wrap - diff --git a/wrap/Function.cpp b/wrap/Function.cpp deleted file mode 100644 index 80b0adbbe..000000000 --- a/wrap/Function.cpp +++ /dev/null @@ -1,78 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Function.ccp - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#include "Function.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -bool Function::initializeOrCheck(const string& name, - boost::optional instName, bool verbose) { - - if (name.empty()) - throw runtime_error("Function::initializeOrCheck called with empty name"); - - // Check if this overload is give to the correct method - if (name_.empty()) { - name_ = name; - templateArgValue_ = instName; - verbose_ = verbose; - return true; - } else { - if (name_ != name || verbose_ != verbose - || ((bool) templateArgValue_ != (bool) instName) - || ((bool) templateArgValue_ && (bool) instName - && !(*templateArgValue_ == *instName))) - throw runtime_error( - "Function::initializeOrCheck called with different arguments"); - - return false; - } -} - -/* ************************************************************************* */ -void Function::emit_call(FileWriter& proxyFile, const ReturnValue& returnVal, - const string& wrapperName, int id) const { - returnVal.emit_matlab(proxyFile); - proxyFile.oss << wrapperName << "(" << id; - if (!isStatic()) - proxyFile.oss << ", this"; - proxyFile.oss << ", varargin{:});\n"; -} - -/* ************************************************************************* */ -void Function::emit_conditional_call(FileWriter& proxyFile, - const ReturnValue& returnVal, const ArgumentList& args, - const string& wrapperName, int id) const { - - // Check all arguments - args.proxy_check(proxyFile); - - // output call to C++ wrapper - proxyFile.oss << " "; - emit_call(proxyFile, returnVal, wrapperName, id); -} - -/* ************************************************************************* */ diff --git a/wrap/Function.h b/wrap/Function.h deleted file mode 100644 index c39b3231c..000000000 --- a/wrap/Function.h +++ /dev/null @@ -1,73 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Function.h - * @brief Base class for global functions and methods - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#pragma once - -#include "Argument.h" -#include - -namespace wrap { - -/// Function class -class Function { - -protected: - - std::string name_; ///< name of method - boost::optional templateArgValue_; ///< value of template argument if applicable - bool verbose_; - -public: - - /** - * @brief first time, fill in instance variables, otherwise check if same - * @return true if first time, false thereafter - */ - bool initializeOrCheck(const std::string& name, - boost::optional instName = boost::none, bool verbose = - false); - - std::string name() const { - return name_; - } - - /// Only Methods are non-static - virtual bool isStatic() const { - return true; - } - - std::string matlabName() const { - if (templateArgValue_) - return name_ + templateArgValue_->name(); - else - return name_; - } - - /// Emit function call to MATLAB (no argument checking) - void emit_call(FileWriter& proxyFile, const ReturnValue& returnVal, - const std::string& wrapperName, int id) const; - - /// Emit checking arguments and function call to MATLAB - void emit_conditional_call(FileWriter& proxyFile, - const ReturnValue& returnVal, const ArgumentList& args, - const std::string& wrapperName, int id) const; - -}; - -} // \namespace wrap - diff --git a/wrap/GlobalFunction.cpp b/wrap/GlobalFunction.cpp deleted file mode 100644 index 02ab19657..000000000 --- a/wrap/GlobalFunction.cpp +++ /dev/null @@ -1,227 +0,0 @@ -/** - * @file GlobalFunction.cpp - * - * @date Jul 22, 2012 - * @author Alex Cunningham - */ - -#include "GlobalFunction.h" -#include "Class.h" -#include "utilities.h" - -#include - -namespace wrap { - -using namespace std; - -/* ************************************************************************* */ -void GlobalFunction::addOverload(const Qualified& overload, - const ArgumentList& args, const ReturnValue& retVal, const std::string& _includeFile, - boost::optional instName, bool verbose) { - FullyOverloadedFunction::addOverload(overload.name(), args, retVal, instName, - verbose); - overloads.push_back(overload); - includeFile = _includeFile; -} - -/* ************************************************************************* */ -void GlobalFunction::matlab_proxy(const string& toolboxPath, - const string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, vector& functionNames) const { - - // cluster overloads with same namespace - // create new GlobalFunction structures around namespaces - same namespaces and names are overloads - // map of namespace to global function - typedef map GlobalFunctionMap; - GlobalFunctionMap grouped_functions; - for (size_t i = 0; i < overloads.size(); ++i) { - Qualified overload = overloads.at(i); - // use concatenated namespaces as key - string str_ns = qualifiedName("", overload.namespaces()); - const ReturnValue& ret = returnValue(i); - const ArgumentList& args = argumentList(i); - grouped_functions[str_ns].addOverload(overload, args, ret); - } - - size_t lastcheck = grouped_functions.size(); - for(const GlobalFunctionMap::value_type& p: grouped_functions) { - p.second.generateSingleFunction(toolboxPath, wrapperName, typeAttributes, - file, functionNames); - if (--lastcheck != 0) - file.oss << endl; - } -} - -/* 
************************************************************************* */ -void GlobalFunction::generateSingleFunction(const string& toolboxPath, - const string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, vector& functionNames) const { - - // create the folder for the namespace - const Qualified& overload1 = overloads.front(); - createNamespaceStructure(overload1.namespaces(), toolboxPath); - - // open destination mfunctionFileName - string mfunctionFileName = overload1.matlabName(toolboxPath); - FileWriter mfunctionFile(mfunctionFileName, verbose_, "%"); - - // get the name of actual matlab object - const string matlabQualName = overload1.qualifiedName("."); - const string matlabUniqueName = overload1.qualifiedName(""); - const string cppName = overload1.qualifiedName("::"); - - mfunctionFile.oss << "function varargout = " << name_ << "(varargin)\n"; - - for (size_t i = 0; i < nrOverloads(); ++i) { - const ArgumentList& args = argumentList(i); - const ReturnValue& returnVal = returnValue(i); - - const int id = functionNames.size(); - - // Output proxy matlab code - mfunctionFile.oss << " " << (i == 0 ? "" : "else"); - emit_conditional_call(mfunctionFile, returnVal, args, wrapperName, id); - - // Output C++ wrapper code - - const string wrapFunctionName = matlabUniqueName + "_" - + boost::lexical_cast(id); - - // call - file.oss << "void " << wrapFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - // start - file.oss << "{\n"; - - // check arguments - // NOTE: for static functions, there is no object passed - file.oss << " checkArguments(\"" << matlabUniqueName - << "\",nargout,nargin," << args.size() << ");\n"; - - // unwrap arguments, see Argument.cpp - args.matlab_unwrap(file, 0); // We start at 0 because there is no self object - - // call method with default type and wrap result - if (returnVal.type1.name() != "void") - returnVal.wrap_result(cppName + "(" + args.names() + ")", file, - typeAttributes); - else - file.oss << cppName + "(" + args.names() + ");\n"; - - // finish - file.oss << "}\n"; - - // Add to function list - functionNames.push_back(wrapFunctionName); - } - - mfunctionFile.oss << " else\n"; - mfunctionFile.oss - << " error('Arguments do not match any overload of function " - << matlabQualName << "');" << endl; - mfunctionFile.oss << " end" << endl; - - // Close file - mfunctionFile.emit(true); -} - -/* ************************************************************************* */ -void GlobalFunction::python_wrapper(FileWriter& wrapperFile) const { - wrapperFile.oss << "def(\"" << name_ << "\", " << name_ << ");\n"; -} - -/* ************************************************************************* */ -void GlobalFunction::emit_cython_pxd(FileWriter& file) const { - file.oss << "cdef extern from \"" << includeFile << "\" namespace \"" - << overloads[0].qualifiedNamespaces("::") - << "\":" << endl; - for (size_t i = 0; i < nrOverloads(); ++i) { - file.oss << " "; - returnVals_[i].emit_cython_pxd(file, "", vector()); - file.oss << pxdName() + " \"" + overloads[0].qualifiedName("::") + - "\"("; - argumentList(i).emit_cython_pxd(file, "", vector()); - file.oss << ")"; - file.oss << " except +"; - file.oss << "\n"; - } -} - -/* ************************************************************************* */ -void GlobalFunction::emit_cython_pyx_no_overload(FileWriter& file) const { - string funcName = pyxName(); - - // Function definition - file.oss << "def " << funcName; - - // modify name of function 
instantiation as python doesn't allow overloads - // e.g. template funcName(...) --> funcNameA, funcNameB, funcNameC - if (templateArgValue_) file.oss << templateArgValue_->pyxClassName(); - - // funtion arguments - file.oss << "("; - argumentList(0).emit_cython_pyx(file); - file.oss << "):\n"; - - /// Call cython corresponding function and return - file.oss << argumentList(0).pyx_convertEigenTypeAndStorageOrder(" "); - string ret = pyx_functionCall("", pxdName(), 0); - if (!returnVals_[0].isVoid()) { - file.oss << " cdef " << returnVals_[0].pyx_returnType() - << " ret = " << ret << "\n"; - file.oss << " return " << returnVals_[0].pyx_casting("ret") << "\n"; - } else { - file.oss << " " << ret << "\n"; - } -} - -/* ************************************************************************* */ -void GlobalFunction::emit_cython_pyx(FileWriter& file) const { - string funcName = pyxName(); - - size_t N = nrOverloads(); - if (N == 1) { - emit_cython_pyx_no_overload(file); - return; - } - - // Dealing with overloads.. - file.oss << "def " << funcName << "(*args, **kwargs):\n"; - for (size_t i = 0; i < N; ++i) { - file.oss << " success, results = " << funcName << "_" << i - << "(args, kwargs)\n"; - file.oss << " if success:\n return results\n"; - } - file.oss << " raise TypeError('Could not find the correct overload')\n"; - - for (size_t i = 0; i < N; ++i) { - ArgumentList args = argumentList(i); - file.oss << "def " + funcName + "_" + to_string(i) + "(args, kwargs):\n"; - file.oss << " cdef list __params\n"; - if (!returnVals_[i].isVoid()) { - file.oss << " cdef " << returnVals_[i].pyx_returnType() << " return_value\n"; - } - file.oss << " try:\n"; - file.oss << pyx_resolveOverloadParams(args, false, 2); // lazy: always return None even if it's a void function - - /// Call corresponding cython function - file.oss << argumentList(i).pyx_convertEigenTypeAndStorageOrder(" "); - // catch exception which indicates the parameters passed are incorrect. 
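For reference, the keyword-renaming rule declared in FullyOverloadedFunction.h and used by the pxd/pyx name helpers in this file, shown stand-alone; the printed values follow from pythonKeywords = {"print", "lambda"}:

    #include <iostream>
    #include "FullyOverloadedFunction.h"

    int main() {
      // names colliding with Python keywords get a trailing underscore, others pass through
      std::cout << wrap::FullyOverloadedFunction::pyRename("print") << "\n";   // prints "print_"
      std::cout << wrap::FullyOverloadedFunction::pyRename("lambda") << "\n";  // prints "lambda_"
      std::cout << wrap::FullyOverloadedFunction::pyRename("norm") << "\n";    // prints "norm"
      return 0;
    }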
- file.oss << " except:\n"; - file.oss << " return False, None\n\n"; - - string call = pyx_functionCall("", pxdName(), i); - if (!returnVals_[i].isVoid()) { - file.oss << " return_value = " << call << "\n"; - file.oss << " return True, " << returnVals_[i].pyx_casting("return_value") << "\n"; - } else { - file.oss << " " << call << "\n"; - file.oss << " return True, None\n"; - } - } -} -/* ************************************************************************* */ - -} // \namespace wrap - diff --git a/wrap/GlobalFunction.h b/wrap/GlobalFunction.h deleted file mode 100644 index 099cefa70..000000000 --- a/wrap/GlobalFunction.h +++ /dev/null @@ -1,148 +0,0 @@ -/** - * @file GlobalFunction.h - * - * @brief Implements codegen for a global function wrapped in matlab - * - * @date Jul 22, 2012 - * @author Alex Cunningham - */ - -#pragma once - -#include "FullyOverloadedFunction.h" - -#ifdef __GNUC__ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-variable" -#endif -#include -#include -#ifdef __GNUC__ -#pragma GCC diagnostic pop -#endif - -namespace bl = boost::lambda; - -namespace wrap { - -struct GlobalFunction: public FullyOverloadedFunction { - - std::vector overloads; ///< Stack of qualified names - std::string includeFile; - - // adds an overloaded version of this function, - void addOverload(const Qualified& overload, const ArgumentList& args, - const ReturnValue& retVal, const std::string& _includeFile = "", boost::optional instName = - boost::none, bool verbose = false); - - void verifyArguments(const std::vector& validArgs) const { - SignatureOverloads::verifyArguments(validArgs, name_); - } - - void verifyReturnTypes(const std::vector& validtypes) const { - SignatureOverloads::verifyReturnTypes(validtypes, name_); - } - - // codegen function called from Module to build the cpp and matlab versions of the function - void matlab_proxy(const std::string& toolboxPath, - const std::string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, std::vector& functionNames) const; - - // emit python wrapper - void python_wrapper(FileWriter& wrapperFile) const; - - // function name in Cython pxd - std::string pxdName() const { return "pxd_" + pyRename(name_); } - // function name in Python pyx - std::string pyxName() const { - std::string result = ""; - for(size_t i=0; i= 1) { - result += (overloads[0].namespaces_[i] + "_"); - } - } - result += pyRename(name_); - return result; - } - - // emit cython wrapper - void emit_cython_pxd(FileWriter& pxdFile) const; - void emit_cython_pyx(FileWriter& pyxFile) const; - void emit_cython_pyx_no_overload(FileWriter& pyxFile) const; - -private: - - // Creates a single global function - all in same namespace - void generateSingleFunction(const std::string& toolboxPath, - const std::string& wrapperName, const TypeAttributesTable& typeAttributes, - FileWriter& file, std::vector& functionNames) const; - -}; - -typedef std::map GlobalFunctions; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct GlobalFunctionGrammar: public classic::grammar { - - GlobalFunctions& global_functions_; ///< successful parse will be placed in here - std::vector& namespaces_; - std::string& includeFile; - - /// Construct type grammar and specify where result is placed - GlobalFunctionGrammar(GlobalFunctions& global_functions, - std::vector& namespaces, - std::string& includeFile) - : global_functions_(global_functions), - 
namespaces_(namespaces), - includeFile(includeFile) {} - - /// Definition of type grammar - template - struct definition: BasicRules { - -// using BasicRules::name_p; -// using BasicRules::className_p; - using BasicRules::comments_p; - - ArgumentList args; - ArgumentListGrammar argumentList_g; - - ReturnValue retVal0, retVal; - ReturnValueGrammar returnValue_g; - - Qualified globalFunction; - - classic::rule globalFunctionName_p, global_function_p; - - definition(GlobalFunctionGrammar const& self) : - argumentList_g(args), returnValue_g(retVal) { - - using namespace classic; - bool verbose = false; // TODO - - globalFunctionName_p = lexeme_d[(upper_p | lower_p) >> *(alnum_p | '_')]; - - // parse a global function - global_function_p = (returnValue_g >> globalFunctionName_p[assign_a( - globalFunction.name_)] >> - argumentList_g >> ';' >> *comments_p) // - [assign_a(globalFunction.namespaces_, self.namespaces_)] // - [bl::bind( - &GlobalFunction::addOverload, - bl::var(self.global_functions_)[bl::var(globalFunction.name_)], - bl::var(globalFunction), bl::var(args), bl::var(retVal), bl::var(self.includeFile), - boost::none, verbose)] // - [assign_a(retVal, retVal0)][clear_a(globalFunction)][clear_a(args)]; - } - - classic::rule const& start() const { - return global_function_p; - } - - }; -}; -// GlobalFunctionGrammar - -}// \namespace wrap - diff --git a/wrap/LICENSE b/wrap/LICENSE new file mode 100644 index 000000000..406b266b7 --- /dev/null +++ b/wrap/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2010, Georgia Tech Research Corporation +Atlanta, Georgia 30332-0415 +All Rights Reserved + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/wrap/Method.cpp b/wrap/Method.cpp deleted file mode 100644 index 2a4b0b3af..000000000 --- a/wrap/Method.cpp +++ /dev/null @@ -1,205 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Method.ccp - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#include "Method.h" -#include "Class.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -bool Method::addOverload(Str name, const ArgumentList& args, - const ReturnValue& retVal, bool is_const, - boost::optional instName, - bool verbose) { - bool first = MethodBase::addOverload(name, args, retVal, instName, verbose); - if (first) - is_const_ = is_const; - else if (is_const && !is_const_) - throw std::runtime_error( - "Method::addOverload: " + name + - " now designated as const whereas before it was not"); - else if (!is_const && is_const_) - throw std::runtime_error( - "Method::addOverload: " + name + - " now designated as non-const whereas before it was"); - return first; -} - -/* ************************************************************************* */ -void Method::proxy_header(FileWriter& proxyFile) const { - proxyFile.oss << " function varargout = " << matlabName() - << "(this, varargin)\n"; -} - -/* ************************************************************************* */ -string Method::wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, - const ArgumentList& args) const { - // check arguments - // extra argument obj -> nargin-1 is passed ! - // example: checkArguments("equals",nargout,nargin-1,2); - wrapperFile.oss << " checkArguments(\"" << matlabName() - << "\",nargout,nargin-1," << args.size() << ");\n"; - - // get class pointer - // example: auto obj = unwrap_shared_ptr< Test >(in[0], "Test"); - wrapperFile.oss << " auto obj = unwrap_shared_ptr<" << cppClassName - << ">(in[0], \"ptr_" << matlabUniqueName << "\");" << endl; - - // unwrap arguments, see Argument.cpp, we start at 1 as first is obj - args.matlab_unwrap(wrapperFile, 1); - - // call method and wrap result - // example: out[0]=wrap(obj->return_field(t)); - string expanded = "obj->" + name_; - if (templateArgValue_) - expanded += ("<" + templateArgValue_->qualifiedName("::") + ">"); - - return expanded; -} - -/* ************************************************************************* */ -void Method::emit_cython_pxd(FileWriter& file, const Class& cls) const { - for (size_t i = 0; i < nrOverloads(); ++i) { - file.oss << " "; - returnVals_[i].emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - const string renamed = pyRename(name_); - if (renamed != name_) { - file.oss << pyRename(name_) + " \"" + name_ + "\"" << "("; - } else { - file.oss << name_ << "("; - } - argumentList(i).emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - file.oss << ")"; - // if (is_const_) file.oss << " const"; - file.oss << " except +"; - file.oss << "\n"; - } -} - -/* ************************************************************************* */ -void Method::emit_cython_pyx_no_overload(FileWriter& file, - const Class& cls) const { - string funcName = pyRename(name_); - - // leverage python's special treatment for print - if (funcName == "print_") { - file.oss << " def __repr__(self):\n"; - file.oss << " strBuf = RedirectCout()\n"; - file.oss << " self.print_('')\n"; - file.oss << " return strBuf.str()\n"; - } - - // Function definition - file.oss << " def " << funcName; - - // modify 
name of function instantiation as python doesn't allow overloads - // e.g. template funcName(...) --> funcNameA, funcNameB, funcNameC - if (templateArgValue_) file.oss << templateArgValue_->pyxClassName(); - - // function arguments - file.oss << "(self"; - if (argumentList(0).size() > 0) file.oss << ", "; - argumentList(0).emit_cython_pyx(file); - file.oss << "):\n"; - - /// Call cython corresponding function and return - file.oss << argumentList(0).pyx_convertEigenTypeAndStorageOrder(" "); - string caller = "self." + cls.shared_pxd_obj_in_pyx() + ".get()"; - string ret = pyx_functionCall(caller, funcName, 0); - if (!returnVals_[0].isVoid()) { - file.oss << " cdef " << returnVals_[0].pyx_returnType() - << " ret = " << ret << "\n"; - file.oss << " return " << returnVals_[0].pyx_casting("ret") << "\n"; - } else { - file.oss << " " << ret << "\n"; - } -} - -/* ************************************************************************* */ -void Method::emit_cython_pyx(FileWriter& file, const Class& cls) const { - string funcName = pyRename(name_); - // For template function: modify name of function instantiation as python - // doesn't allow overloads - // e.g. template funcName(...) --> funcNameA, funcNameB, funcNameC - string instantiatedName = - (templateArgValue_) ? funcName + templateArgValue_->pyxClassName() : - funcName; - - size_t N = nrOverloads(); - // It's easy if there's no overload - if (N == 1) { - emit_cython_pyx_no_overload(file, cls); - return; - } - - // Dealing with overloads.. - file.oss << " def " << instantiatedName << "(self, *args, **kwargs):\n"; - file.oss << " cdef list __params\n"; - - // Define return values for all possible overloads - vector return_type; // every overload has a return type, possibly void - map return_value; // we only define one return value for every distinct type - size_t j = 1; - for (size_t i = 0; i < nrOverloads(); ++i) { - if (returnVals_[i].isVoid()) { - return_type.push_back("void"); - } else { - const string type = returnVals_[i].pyx_returnType(); - return_type.push_back(type); - if (return_value.count(type) == 0) { - const string value = "return_value_" + to_string(j++); - return_value[type] = value; - file.oss << " cdef " << type << " " << value << "\n"; - } - } - } - - for (size_t i = 0; i < nrOverloads(); ++i) { - ArgumentList args = argumentList(i); - file.oss << " try:\n"; - file.oss << pyx_resolveOverloadParams(args, false, 3); // lazy: always return None even if it's a void function - - /// Call corresponding cython function - file.oss << args.pyx_convertEigenTypeAndStorageOrder(" "); - string caller = "self." 
+ cls.shared_pxd_obj_in_pyx() + ".get()"; - string call = pyx_functionCall(caller, funcName, i); - if (!returnVals_[i].isVoid()) { - const string type = return_type[i]; - const string value = return_value[type]; - file.oss << " " << value << " = " << call << "\n"; - file.oss << " return " << returnVals_[i].pyx_casting(value) - << "\n"; - } else { - file.oss << " " << call << "\n"; - file.oss << " return\n"; - } - file.oss << " except (AssertionError, ValueError):\n"; - file.oss << " pass\n"; - } - file.oss - << " raise TypeError('Incorrect arguments or types for method call.')\n\n"; -} -/* ************************************************************************* */ diff --git a/wrap/Method.h b/wrap/Method.h deleted file mode 100644 index 88a73cd80..000000000 --- a/wrap/Method.h +++ /dev/null @@ -1,74 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Method.h - * @brief describes and generates code for methods - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#pragma once - -#include "MethodBase.h" - -namespace wrap { - -/// Method class -class Method: public MethodBase { - -protected: - bool is_const_; - -public: - - typedef const std::string& Str; - - bool addOverload(Str name, const ArgumentList& args, - const ReturnValue& retVal, bool is_const, - boost::optional instName = boost::none, bool verbose = - false); - - virtual bool isStatic() const { - return false; - } - - virtual bool isConst() const { - return is_const_; - } - - bool isSameModifiers(const Method& other) const { - return is_const_ == other.is_const_ && - ((templateArgValue_ && other.templateArgValue_) || - (!templateArgValue_ && !other.templateArgValue_)); - } - - friend std::ostream& operator<<(std::ostream& os, const Method& m) { - for (size_t i = 0; i < m.nrOverloads(); i++) - os << m.returnVals_[i] << " " << m.name_ << m.argLists_[i]; - return os; - } - - void emit_cython_pxd(FileWriter& file, const Class& cls) const; - void emit_cython_pyx(FileWriter& file, const Class& cls) const; - void emit_cython_pyx_no_overload(FileWriter& file, const Class& cls) const; - -private: - - // Emit method header - void proxy_header(FileWriter& proxyFile) const; - - virtual std::string wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, const ArgumentList& args) const; -}; - -} // \namespace wrap - diff --git a/wrap/MethodBase.cpp b/wrap/MethodBase.cpp deleted file mode 100644 index a2ed68780..000000000 --- a/wrap/MethodBase.cpp +++ /dev/null @@ -1,135 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file MethodBase.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Method.h" -#include "Class.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void MethodBase::proxy_wrapper_fragments( - FileWriter& proxyFile, FileWriter& wrapperFile, Str cppClassName, - Str matlabQualName, Str matlabUniqueName, Str wrapperName, - const TypeAttributesTable& typeAttributes, - vector& functionNames) const { - // emit header, e.g., function varargout = templatedMethod(this, varargin) - proxy_header(proxyFile); - - // Emit comments for documentation - string up_name = boost::to_upper_copy(matlabName()); - proxyFile.oss << " % " << up_name << " usage: "; - usage_fragment(proxyFile, matlabName()); - - // Emit URL to Doxygen page - proxyFile.oss << " % " - << "Doxygen can be found at " - "http://research.cc.gatech.edu/borg/sites/edu.borg/html/" - "index.html" << endl; - - // Handle special case of single overload with all numeric arguments - if (nrOverloads() == 1 && argumentList(0).allScalar()) { - // Output proxy matlab code - // TODO: document why is it OK to not check arguments in this case - proxyFile.oss << " "; - const int id = (int)functionNames.size(); - emit_call(proxyFile, returnValue(0), wrapperName, id); - - // Output C++ wrapper code - const string wrapFunctionName = wrapper_fragment( - wrapperFile, cppClassName, matlabUniqueName, 0, id, typeAttributes); - - // Add to function list - functionNames.push_back(wrapFunctionName); - } else { - // Check arguments for all overloads - for (size_t i = 0; i < nrOverloads(); ++i) { - // Output proxy matlab code - proxyFile.oss << " " << (i == 0 ? "" : "else"); - const int id = (int)functionNames.size(); - emit_conditional_call(proxyFile, returnValue(i), argumentList(i), - wrapperName, id); - - // Output C++ wrapper code - const string wrapFunctionName = wrapper_fragment( - wrapperFile, cppClassName, matlabUniqueName, i, id, typeAttributes); - - // Add to function list - functionNames.push_back(wrapFunctionName); - } - proxyFile.oss << " else\n"; - proxyFile.oss - << " error('Arguments do not match any overload of function " - << matlabQualName << "." 
<< name_ << "');" << endl; - proxyFile.oss << " end\n"; - } - - proxyFile.oss << " end\n"; -} - -/* ************************************************************************* */ -string MethodBase::wrapper_fragment( - FileWriter& wrapperFile, Str cppClassName, Str matlabUniqueName, - int overload, int id, const TypeAttributesTable& typeAttributes) const { - // generate code - - const string wrapFunctionName = - matlabUniqueName + "_" + name_ + "_" + boost::lexical_cast(id); - - const ArgumentList& args = argumentList(overload); - const ReturnValue& returnVal = returnValue(overload); - - // call - wrapperFile.oss - << "void " << wrapFunctionName - << "(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - // start - wrapperFile.oss << "{\n"; - - // get call - // for static methods: cppClassName::staticMethod - // for instance methods: obj->instanceMethod - string expanded = - wrapper_call(wrapperFile, cppClassName, matlabUniqueName, args); - - expanded += ("(" + args.names() + ")"); - if (returnVal.type1.name() != "void") - returnVal.wrap_result(expanded, wrapperFile, typeAttributes); - else - wrapperFile.oss << " " + expanded + ";\n"; - - // finish - wrapperFile.oss << "}\n"; - - return wrapFunctionName; -} - -/* ************************************************************************* */ -void MethodBase::python_wrapper(FileWriter& wrapperFile, Str className) const { - wrapperFile.oss << " .def(\"" << name_ << "\", &" << className - << "::" << name_ << ");\n"; -} - -/* ************************************************************************* */ diff --git a/wrap/MethodBase.h b/wrap/MethodBase.h deleted file mode 100644 index ee72a6a53..000000000 --- a/wrap/MethodBase.h +++ /dev/null @@ -1,70 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file MethodBase.h - * @brief describes and generates code for static methods - * @author Frank Dellaert - * @author Alex Cunningham - * @author Richard Roberts - **/ - -#pragma once - -#include "FullyOverloadedFunction.h" - -namespace wrap { - -// Forward declaration -class Class; - -/// MethodBase class -struct MethodBase : public FullyOverloadedFunction { - typedef const std::string& Str; - - // emit a list of comments, one for each overload - void comment_fragment(FileWriter& proxyFile) const { - SignatureOverloads::comment_fragment(proxyFile, matlabName()); - } - - void verifyArguments(const std::vector& validArgs) const { - SignatureOverloads::verifyArguments(validArgs, name_); - } - - void verifyReturnTypes(const std::vector& validtypes) const { - SignatureOverloads::verifyReturnTypes(validtypes, name_); - } - - // MATLAB code generation - // classPath is class directory, e.g., ../matlab/@Point2 - void proxy_wrapper_fragments(FileWriter& proxyFile, FileWriter& wrapperFile, - Str cppClassName, Str matlabQualName, - Str matlabUniqueName, Str wrapperName, - const TypeAttributesTable& typeAttributes, - std::vector& functionNames) const; - - // emit python wrapper - void python_wrapper(FileWriter& wrapperFile, Str className) const; - -protected: - virtual void proxy_header(FileWriter& proxyFile) const = 0; - - std::string wrapper_fragment( - FileWriter& wrapperFile, Str cppClassName, Str matlabUniqueName, - int overload, int id, - const TypeAttributesTable& typeAttributes) const; ///< cpp wrapper - - virtual std::string wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, - const ArgumentList& args) const = 0; -}; - -} // \namespace wrap diff --git a/wrap/Module.cpp b/wrap/Module.cpp deleted file mode 100644 index a7db9e1f6..000000000 --- a/wrap/Module.cpp +++ /dev/null @@ -1,646 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Module.ccp - * @author Frank Dellaert - * @author Alex Cunningham - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "Module.h" -#include "FileWriter.h" -#include "TypeAttributesTable.h" -#include "utilities.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; -using namespace BOOST_SPIRIT_CLASSIC_NS; -namespace bl = boost::lambda; -namespace fs = boost::filesystem; - -/* ************************************************************************* */ -// We parse an interface file into a Module object. -// The grammar is defined using the boost/spirit combinatorial parser. -// For example, str_p("const") parses the string "const", and the >> -// operator creates a sequence parser. The grammar below, composed of rules -// and with start rule [class_p], doubles as the specs for our interface files. 
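The comment above is the only description of the parsing approach in this file; as a minimal, self-contained sketch (not taken from the wrap sources, input string is illustrative), the boost::spirit classic combinators it mentions compose like this:

```cpp
// Minimal sketch of the spirit classic combinator style described above;
// not part of the wrap sources. str_p matches a literal token, operator>>
// builds a sequence parser, and space_p is used as the skip parser.
#include <boost/spirit/include/classic_core.hpp>
#include <cassert>

int main() {
  using namespace BOOST_SPIRIT_CLASSIC_NS;
  parse_info<> info =
      parse("const int", str_p("const") >> str_p("int"), space_p);
  assert(info.full);  // the whole input matched the sequence
  return 0;
}
```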
-/* ************************************************************************* */ - -/* ************************************************************************* */ -// If a number of template arguments were given, generate a number of expanded -// class names, e.g., PriorFactor -> PriorFactorPose2, and add those classes -static void handle_possible_template(vector& classes, - vector& uninstantiatedClasses, - const Class& cls, const Template& t) { - uninstantiatedClasses.push_back(cls); - if (cls.templateArgs.empty() || t.empty()) { - classes.push_back(cls); - } else { - if (cls.templateArgs.size() != 1) - throw std::runtime_error( - "In-line template instantiations only handle a single template argument"); - string arg = cls.templateArgs.front(); - vector classInstantiations = - (t.nrValues() > 0) ? cls.expandTemplate(arg, t.argValues()) : - cls.expandTemplate(arg, t.intList()); - for(const Class& c: classInstantiations) - classes.push_back(c); - } -} - -static void push_typedef_pair(vector& typedefs, - const Qualified& oldType, - const Qualified& newType, - const string& includeFile) { - typedefs.push_back(TypedefPair(oldType, newType, includeFile)); -} - -/* ************************************************************************* */ -Module::Module(const std::string& moduleName, bool enable_verbose) -: name(moduleName), verbose(enable_verbose) -{ -} - -/* ************************************************************************* */ -Module::Module(const string& interfacePath, - const string& moduleName, bool enable_verbose) -: name(moduleName), verbose(enable_verbose) -{ - // read interface file - string interfaceFile = interfacePath + "/" + moduleName + ".h"; - string contents = file_contents(interfaceFile); - - // execute parsing - parseMarkup(contents); -} - -/* ************************************************************************* */ -void Module::parseMarkup(const std::string& data) { - // The parse imperatively :-( updates variables gradually during parse - // The one with postfix 0 are used to reset the variables after parse. - - //---------------------------------------------------------------------------- - // Grammar with actions that build the Class object. Actions are - // defined within the square brackets [] and are executed whenever a - // rule is successfully parsed. Define BOOST_SPIRIT_DEBUG to debug. 
- // The grammar is allows a very restricted C++ header - // lexeme_d turns off white space skipping - // http://www.boost.org/doc/libs/1_37_0/libs/spirit/classic/doc/directives.html - // ---------------------------------------------------------------------------- - - // Define Rule and instantiate basic rules - typedef rule Rule; - BasicRules basic; - - vector namespaces; // current namespace tag - string currentInclude; - - // parse a full class - Class cls0(verbose),cls(verbose); - Template classTemplate; - ClassGrammar class_g(cls,classTemplate); - Rule class_p = class_g // - [assign_a(cls.namespaces_, namespaces)] - [assign_a(cls.includeFile, currentInclude)][bl::bind( - &handle_possible_template, bl::var(classes), - bl::var(uninstantiatedClasses), bl::var(cls), - bl::var(classTemplate))][clear_a(classTemplate)] // - [assign_a(cls, cls0)]; - - // parse "gtsam::Pose2" and add to singleInstantiation.typeList - TemplateInstantiationTypedef singleInstantiation, singleInstantiation0; - TypeListGrammar<'<','>'> typelist_g(singleInstantiation.typeList); - - // typedef gtsam::RangeFactor RangeFactor2D; - TypeGrammar instantiationClass_g(singleInstantiation.class_); - Rule templateSingleInstantiation_p = - (str_p("typedef") >> instantiationClass_g >> - typelist_g >> - basic.className_p[assign_a(singleInstantiation.name_)] >> - ';') - [assign_a(singleInstantiation.namespaces_, namespaces)] - [push_back_a(templateInstantiationTypedefs, singleInstantiation)] - [assign_a(singleInstantiation, singleInstantiation0)]; - - Qualified oldType, newType; - TypeGrammar typedefOldClass_g(oldType), typedefNewClass_g(newType); - Rule typedef_p = - (str_p("typedef") >> typedefOldClass_g >> typedefNewClass_g >> - ';') - [assign_a(oldType.namespaces_, namespaces)] - [assign_a(newType.namespaces_, namespaces)] - [bl::bind(&push_typedef_pair, bl::var(typedefs), bl::var(oldType), - bl::var(newType), bl::var(currentInclude))]; - - // Create grammar for global functions - GlobalFunctionGrammar global_function_g(global_functions, namespaces, - currentInclude); - - Rule include_p = str_p("#include") >> ch_p('<') >> - (*(anychar_p - '>'))[push_back_a(includes)] - [assign_a(currentInclude)] >> - ch_p('>'); - -#ifdef __clang__ -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wuninitialized" -#endif - - Rule namespace_def_p = - (str_p("namespace") - >> basic.namespace_p[push_back_a(namespaces)] - >> ch_p('{') - >> *(include_p | class_p | templateSingleInstantiation_p | typedef_p | global_function_g | namespace_def_p | basic.comments_p) - >> ch_p('}')) - [pop_a(namespaces)]; - -#ifdef __clang__ -#pragma clang diagnostic pop -#endif - - // parse forward declaration - ForwardDeclaration fwDec0, fwDec; - Class fwParentClass; - TypeGrammar className_g(fwDec.cls); - TypeGrammar classParent_g(fwParentClass); - Rule classParent_p = (':' >> classParent_g >> ';') // - [bl::bind(&Class::assignParent, bl::var(fwDec.cls), - bl::var(fwParentClass))][clear_a(fwParentClass)]; - - Rule forward_declaration_p = - !(str_p("virtual")[assign_a(fwDec.isVirtual, T)]) - >> str_p("class") >> className_g - >> (classParent_p | ';') - [push_back_a(forward_declarations, fwDec)] - [assign_a(cls,cls0)] // also clear class to avoid partial parse - [assign_a(fwDec, fwDec0)]; - - Rule module_content_p = basic.comments_p | include_p | class_p - | templateSingleInstantiation_p | forward_declaration_p - | global_function_g | namespace_def_p; - - Rule module_p = *module_content_p >> !end_p; - - // and parse contents - parse_info info = 
parse(data.c_str(), module_p, space_p); - if(!info.full) { - printf("parsing stopped at \n%.20s\n",info.stop); - cout << "Stopped in:\n" - "class '" << cls.name_ << "'" << endl; - throw ParseFailed((int)info.length); - } - - // Post-process classes for serialization markers - for(Class& cls: classes) - cls.erase_serialization(); - - for(Class& cls: uninstantiatedClasses) - cls.erase_serialization(); - - // Explicitly add methods to the classes from parents so it shows in documentation - for(Class& cls: classes) - cls.appendInheritedMethods(cls, classes); - - // - Remove inherited methods for Cython classes in the pxd, otherwise Cython can't decide which one to call. - // - Only inherited nontemplateMethods_ in uninstantiatedClasses need to be removed - // because that what we serialized to the pxd. - // - However, we check against the class parent's *methods_* to avoid looking into - // its grand parent and grand-grand parent, etc., because all those are already - // added in its direct parent. - // - So this must be called *after* the above code appendInheritedMethods!! - for(Class& cls: uninstantiatedClasses) - cls.removeInheritedNontemplateMethods(uninstantiatedClasses); - - // Expand templates - This is done first so that template instantiations are - // counted in the list of valid types, have their attributes and dependencies - // checked, etc. - expandedClasses = ExpandTypedefInstantiations(classes, - templateInstantiationTypedefs); - - // Dependency check list - vector validTypes = GenerateValidTypes(expandedClasses, - forward_declarations, typedefs); - - // Check that all classes have been defined somewhere - verifyArguments(validTypes, global_functions); - verifyReturnTypes(validTypes, global_functions); - - hasSerialiable = false; - for(const Class& cls: expandedClasses) - cls.verifyAll(validTypes,hasSerialiable); - - // Create type attributes table and check validity - typeAttributes.addClasses(expandedClasses); - typeAttributes.addForwardDeclarations(forward_declarations); - for (const TypedefPair p: typedefs) - typeAttributes.addType(p.newType); - // add Eigen types as template arguments are also checked ? 
- vector eigen; - eigen.push_back(ForwardDeclaration("Vector")); - eigen.push_back(ForwardDeclaration("Matrix")); - typeAttributes.addForwardDeclarations(eigen); - typeAttributes.checkValidity(expandedClasses); -} - -/* ************************************************************************* */ -void Module::generate_matlab_wrapper(const string& toolboxPath) const { - - fs::create_directories(toolboxPath); - - // create the unified .cpp switch file - const string wrapperName = name + "_wrapper"; - string wrapperFileName = toolboxPath + "/" + wrapperName + ".cpp"; - FileWriter wrapperFile(wrapperFileName, verbose, "//"); - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "\n"; - - // Include boost.serialization archive headers before other class headers - if (hasSerialiable) { - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "#include \n"; - wrapperFile.oss << "#include \n\n"; - } - - // Generate includes while avoiding redundant includes - generateIncludes(wrapperFile); - - // create typedef classes - we put this at the top of the wrap file so that - // collectors and method arguments can use these typedefs - for(const Class& cls: expandedClasses) - if(!cls.typedefName.empty()) - wrapperFile.oss << cls.getTypedef() << "\n"; - wrapperFile.oss << "\n"; - - // Generate boost.serialization export flags (needs typedefs from above) - if (hasSerialiable) { - for(const Class& cls: expandedClasses) - if(cls.isSerializable) - wrapperFile.oss << cls.getSerializationExport() << "\n"; - wrapperFile.oss << "\n"; - } - - // Generate collectors and cleanup function to be called from mexAtExit - WriteCollectorsAndCleanupFcn(wrapperFile, name, expandedClasses); - - // generate RTTI registry (for returning derived-most types) - WriteRTTIRegistry(wrapperFile, name, expandedClasses); - - vector functionNames; // Function names stored by index for switch - - // create proxy class and wrapper code - for(const Class& cls: expandedClasses) - cls.matlab_proxy(toolboxPath, wrapperName, typeAttributes, wrapperFile, functionNames); - - // create matlab files and wrapper code for global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.matlab_proxy(toolboxPath, wrapperName, typeAttributes, wrapperFile, functionNames); - - // finish wrapper file - wrapperFile.oss << "\n"; - finish_wrapper(wrapperFile, functionNames); - - wrapperFile.emit(true); -} - -/* ************************************************************************* */ -void Module::generate_cython_wrapper(const string& toolboxPath, const std::string& pxdImports) const { - fs::create_directories(toolboxPath); - string pxdFileName = toolboxPath + "/" + name + ".pxd"; - FileWriter pxdFile(pxdFileName, verbose, "#"); - pxdFile.oss << pxdImports << "\n"; - emit_cython_pxd(pxdFile); - string pyxFileName = toolboxPath + "/" + name + ".pyx"; - FileWriter pyxFile(pyxFileName, verbose, "#"); - emit_cython_pyx(pyxFile); -} - -/* ************************************************************************* */ -void Module::emit_cython_pxd(FileWriter& pxdFile) const { - // headers - pxdFile.oss << "from gtsam_eigency.core cimport *\n" - "from libcpp.string cimport string\n" - "from libcpp.vector cimport vector\n" - "from libcpp.pair cimport pair\n" - "from libcpp.set cimport set\n" - "from libcpp.map cimport map\n" - "from libcpp cimport bool\n\n"; - - // boost shared_ptr - pxdFile.oss << "cdef extern from \"boost/shared_ptr.hpp\" namespace \"boost\":\n" - " cppclass shared_ptr[T]:\n" - " 
shared_ptr()\n" - " shared_ptr(T*)\n" - " T* get()\n" - " long use_count() const\n" - " T& operator*()\n\n" - " cdef shared_ptr[T] dynamic_pointer_cast[T,U](const shared_ptr[U]& r)\n" - " cdef shared_ptr[T] make_shared[T](const T& r)\n\n"; - - for(const TypedefPair& types: typedefs) - types.emit_cython_pxd(pxdFile); - - //... wrap all classes - for (const Class& cls : uninstantiatedClasses) { - cls.emit_cython_pxd(pxdFile); - - for (const Class& expCls : expandedClasses) { - bool matchingNonTemplated = !expCls.templateClass - && expCls.pxdClassName() == cls.pxdClassName(); - bool isTemplatedFromCls = expCls.templateClass - && expCls.templateClass->pxdClassName() == cls.pxdClassName(); - - // ctypedef for template instantiations - if (isTemplatedFromCls) { - pxdFile.oss << "\n"; - pxdFile.oss << "ctypedef " << expCls.templateClass->pxdClassName() - << "["; - for (size_t i = 0; i < expCls.templateInstTypeList.size(); ++i) - pxdFile.oss << expCls.templateInstTypeList[i].pxdClassName() - << ((i == expCls.templateInstTypeList.size() - 1) ? "" : ", "); - pxdFile.oss << "] " << expCls.pxdClassName() << "\n"; - } - - // Python wrapper class - if (isTemplatedFromCls || matchingNonTemplated) { - expCls.emit_cython_wrapper_pxd(pxdFile); - } - } - pxdFile.oss << "\n\n"; - } - - //... wrap global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.emit_cython_pxd(pxdFile); - - pxdFile.emit(true); -} - -/* ************************************************************************* */ -void Module::emit_cython_pyx(FileWriter& pyxFile) const { - // directives... - // allow str to automatically coerce to std::string and back (for python3) - pyxFile.oss << "# cython: c_string_type=str, c_string_encoding=ascii\n\n"; - - // headers... - string pxdHeader = name; - pyxFile.oss << "cimport numpy as np\n" - "import numpy as npp\n" - "cimport " << pxdHeader << "\n" - "from ."<< pxdHeader << " cimport shared_ptr\n" - "from ."<< pxdHeader << " cimport dynamic_pointer_cast\n" - "from ."<< pxdHeader << " cimport make_shared\n"; - - pyxFile.oss << "# C helper function that copies all arguments into a positional list.\n" - "cdef list process_args(list keywords, tuple args, dict kwargs):\n" - " cdef str keyword\n" - " cdef int n = len(args), m = len(keywords)\n" - " cdef list params = list(args)\n" - " assert len(args)+len(kwargs) == m, 'Expected {} arguments'.format(m)\n" - " try:\n" - " return params + [kwargs[keyword] for keyword in keywords[n:]]\n" - " except:\n" - " raise ValueError('Epected arguments ' + str(keywords))\n"; - - // import all typedefs, e.g. from gtsam_wrapper cimport Key, so we don't need to say gtsam.Key - for(const Qualified& q: Qualified::BasicTypedefs) { - pyxFile.oss << "from " << pxdHeader << " cimport " << q.pxdClassName() << "\n"; - } - pyxFile.oss << "from gtsam_eigency.core cimport *\n" - "from libcpp cimport bool\n\n" - "from libcpp.pair cimport pair\n" - "from libcpp.string cimport string\n" - "from cython.operator cimport dereference as deref\n\n\n"; - - // all classes include all forward declarations - std::vector allClasses = expandedClasses; - for(const ForwardDeclaration& fd: forward_declarations) - allClasses.push_back(fd.cls); - - for(const Class& cls: expandedClasses) - cls.emit_cython_pyx(pyxFile, allClasses); - pyxFile.oss << "\n"; - - //... 
wrap global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.emit_cython_pyx(pyxFile); - pyxFile.emit(true); -} - -/* ************************************************************************* */ -void Module::generateIncludes(FileWriter& file) const { - - // collect includes - vector all_includes(includes); - - // sort and remove duplicates - sort(all_includes.begin(), all_includes.end()); - vector::const_iterator last_include = unique(all_includes.begin(), all_includes.end()); - vector::const_iterator it = all_includes.begin(); - // add includes to file - for (; it != last_include; ++it) - file.oss << "#include <" << *it << ">" << endl; - file.oss << "\n"; -} - - -/* ************************************************************************* */ - void Module::finish_wrapper(FileWriter& file, const std::vector& functionNames) const { - file.oss << "void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])\n"; - file.oss << "{\n"; - file.oss << " mstream mout;\n"; // Send stdout to MATLAB console - file.oss << " std::streambuf *outbuf = std::cout.rdbuf(&mout);\n\n"; - file.oss << " _" << name << "_RTTIRegister();\n\n"; - file.oss << " int id = unwrap(in[0]);\n\n"; - file.oss << " try {\n"; - file.oss << " switch(id) {\n"; - for(size_t id = 0; id < functionNames.size(); ++id) { - file.oss << " case " << id << ":\n"; - file.oss << " " << functionNames[id] << "(nargout, out, nargin-1, in+1);\n"; - file.oss << " break;\n"; - } - file.oss << " }\n"; - file.oss << " } catch(const std::exception& e) {\n"; - file.oss << " mexErrMsgTxt((\"Exception from gtsam:\\n\" + std::string(e.what()) + \"\\n\").c_str());\n"; - file.oss << " }\n"; - file.oss << "\n"; - file.oss << " std::cout.rdbuf(outbuf);\n"; // Restore cout - file.oss << "}\n"; - } - -/* ************************************************************************* */ -vector Module::ExpandTypedefInstantiations(const vector& classes, const vector instantiations) { - - vector expandedClasses = classes; - - for(const TemplateInstantiationTypedef& inst: instantiations) { - // Add the new class to the list - expandedClasses.push_back(inst.findAndExpand(classes)); - } - - // Remove all template classes - for(size_t i = 0; i < expandedClasses.size(); ++i) - if(!expandedClasses[i].templateArgs.empty()) { - expandedClasses.erase(expandedClasses.begin() + size_t(i)); - -- i; - } - - return expandedClasses; -} - -/* ************************************************************************* */ -vector Module::GenerateValidTypes(const vector& classes, const vector& forwardDeclarations, const vector& typedefs) { - vector validTypes; - for(const ForwardDeclaration& fwDec: forwardDeclarations) { - validTypes.push_back(fwDec.name()); - } - validTypes.push_back("void"); - validTypes.push_back("string"); - validTypes.push_back("int"); - validTypes.push_back("bool"); - validTypes.push_back("char"); - validTypes.push_back("unsigned char"); - validTypes.push_back("size_t"); - validTypes.push_back("double"); - validTypes.push_back("Vector"); - validTypes.push_back("Matrix"); - //Create a list of parsed classes for dependency checking - for(const Class& cls: classes) { - validTypes.push_back(cls.qualifiedName("::")); - } - for(const TypedefPair& p: typedefs) { - validTypes.push_back(p.newType.qualifiedName("::")); - } - - return validTypes; -} - -/* ************************************************************************* */ -void Module::WriteCollectorsAndCleanupFcn(FileWriter& wrapperFile, const std::string& 
moduleName, const std::vector& classes) { - // Generate all collectors - for(const Class& cls: classes) { - const string matlabUniqueName = cls.qualifiedName(), - cppName = cls.qualifiedName("::"); - wrapperFile.oss << "typedef std::set*> " - << "Collector_" << matlabUniqueName << ";\n"; - wrapperFile.oss << "static Collector_" << matlabUniqueName << - " collector_" << matlabUniqueName << ";\n"; - } - - // generate mexAtExit cleanup function - wrapperFile.oss << - "\nvoid _deleteAllObjects()\n" - "{\n" - " mstream mout;\n" // Send stdout to MATLAB console - " std::streambuf *outbuf = std::cout.rdbuf(&mout);\n\n" - " bool anyDeleted = false;\n"; - for(const Class& cls: classes) { - const string matlabUniqueName = cls.qualifiedName(); - const string cppName = cls.qualifiedName("::"); - const string collectorType = "Collector_" + matlabUniqueName; - const string collectorName = "collector_" + matlabUniqueName; - // The extra curly-braces around the for loops work around a limitation in MSVC (existing - // since 2005!) preventing more than 248 blocks. - wrapperFile.oss << - " { for(" << collectorType << "::iterator iter = " << collectorName << ".begin();\n" - " iter != " << collectorName << ".end(); ) {\n" - " delete *iter;\n" - " " << collectorName << ".erase(iter++);\n" - " anyDeleted = true;\n" - " } }\n"; - } - wrapperFile.oss << - " if(anyDeleted)\n" - " cout <<\n" - " \"WARNING: Wrap modules with variables in the workspace have been reloaded due to\\n\"\n" - " \"calling destructors, call 'clear all' again if you plan to now recompile a wrap\\n\"\n" - " \"module, so that your recompiled module is used instead of the old one.\" << endl;\n" - " std::cout.rdbuf(outbuf);\n" // Restore cout - "}\n\n"; -} - -/* ************************************************************************* */ -void Module::WriteRTTIRegistry(FileWriter& wrapperFile, const std::string& moduleName, const std::vector& classes) { - wrapperFile.oss << - "void _" << moduleName << "_RTTIRegister() {\n" - " const mxArray *alreadyCreated = mexGetVariablePtr(\"global\", \"gtsam_" + moduleName + "_rttiRegistry_created\");\n" - " if(!alreadyCreated) {\n" - " std::map types;\n"; - for(const Class& cls: classes) { - if(cls.isVirtual) - wrapperFile.oss << - " types.insert(std::make_pair(typeid(" << cls.qualifiedName("::") << ").name(), \"" << cls.qualifiedName(".") << "\"));\n"; - } - wrapperFile.oss << "\n"; - - wrapperFile.oss << - " mxArray *registry = mexGetVariable(\"global\", \"gtsamwrap_rttiRegistry\");\n" - " if(!registry)\n" - " registry = mxCreateStructMatrix(1, 1, 0, NULL);\n" - " typedef std::pair StringPair;\n" - " for(const StringPair& rtti_matlab: types) {\n" - " int fieldId = mxAddField(registry, rtti_matlab.first.c_str());\n" - " if(fieldId < 0)\n" - " mexErrMsgTxt(\"gtsam wrap: Error indexing RTTI types, inheritance will not work correctly\");\n" - " mxArray *matlabName = mxCreateString(rtti_matlab.second.c_str());\n" - " mxSetFieldByNumber(registry, 0, fieldId, matlabName);\n" - " }\n" - " if(mexPutVariable(\"global\", \"gtsamwrap_rttiRegistry\", registry) != 0)\n" - " mexErrMsgTxt(\"gtsam wrap: Error indexing RTTI types, inheritance will not work correctly\");\n" - " mxDestroyArray(registry);\n" - " \n" - " mxArray *newAlreadyCreated = mxCreateNumericMatrix(0, 0, mxINT8_CLASS, mxREAL);\n" - " if(mexPutVariable(\"global\", \"gtsam_" + moduleName + "_rttiRegistry_created\", newAlreadyCreated) != 0)\n" - " mexErrMsgTxt(\"gtsam wrap: Error indexing RTTI types, inheritance will not work correctly\");\n" - " 
mxDestroyArray(newAlreadyCreated);\n" - " }\n" - "}\n" - "\n"; -} - -/* ************************************************************************* */ -void Module::generate_python_wrapper(const string& toolboxPath) const { - - fs::create_directories(toolboxPath); - - // create the unified .cpp switch file - const string wrapperName = name + "_python"; - string wrapperFileName = toolboxPath + "/" + wrapperName + ".cpp"; - FileWriter wrapperFile(wrapperFileName, verbose, "//"); - wrapperFile.oss << "#include \n\n"; - wrapperFile.oss << "using namespace boost::python;\n"; - wrapperFile.oss << "BOOST_PYTHON_MODULE(" + name + ")\n"; - wrapperFile.oss << "{\n"; - - // write out classes - for(const Class& cls: expandedClasses) { - cls.python_wrapper(wrapperFile); - } - - // write out global functions - for(const GlobalFunctions::value_type& p: global_functions) - p.second.python_wrapper(wrapperFile); - - // finish wrapper file - wrapperFile.oss << "}\n"; - - wrapperFile.emit(true); -} - -/* ************************************************************************* */ diff --git a/wrap/Module.h b/wrap/Module.h deleted file mode 100644 index 2a8344551..000000000 --- a/wrap/Module.h +++ /dev/null @@ -1,95 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Module.h - * @brief describes module to be wrapped - * @author Frank Dellaert - * @author Richard Roberts - **/ - -#pragma once - -#include "Class.h" -#include "GlobalFunction.h" -#include "TemplateInstantiationTypedef.h" -#include "ForwardDeclaration.h" -#include "TypedefPair.h" - -#include -#include -#include - -namespace wrap { - -/** - * A module just has a name and a list of classes - */ -struct Module { - - // Filled during parsing: - std::string name; ///< module name - bool verbose; ///< verbose flag - std::vector classes; ///< list of classes - std::vector uninstantiatedClasses; ///< list of template classes after instantiated - std::vector templateInstantiationTypedefs; ///< list of template instantiations - std::vector forward_declarations; - std::vector includes; ///< Include statements - GlobalFunctions global_functions; - std::vector typedefs; - - // After parsing: - std::vector expandedClasses; - bool hasSerialiable; - TypeAttributesTable typeAttributes; - - /// constructor that parses interface file - Module(const std::string& interfacePath, const std::string& moduleName, - bool enable_verbose = true); - - /// Dummy constructor that does no parsing - use only for testing - Module(const std::string& moduleName, bool enable_verbose = true); - - /// non-const function that performs parsing - typically called by constructor - /// Throws exception on failure - void parseMarkup(const std::string& data); - - /// MATLAB code generation: - void generate_matlab_wrapper(const std::string& path) const; - - /// Cython code generation: - void generate_cython_wrapper(const std::string& path, const std::string& pxdImports = "") const; - void emit_cython_pxd(FileWriter& file) const; - void emit_cython_pyx(FileWriter& file) const; - - void generateIncludes(FileWriter& file) const; - - void finish_wrapper(FileWriter& file, - const std::vector& functionNames) const; - - /// Python 
code generation: - void generate_python_wrapper(const std::string& path) const; - -private: - static std::vector ExpandTypedefInstantiations( - const std::vector& classes, - const std::vector instantiations); - static std::vector GenerateValidTypes( - const std::vector& classes, - const std::vector& forwardDeclarations, - const std::vector& typedefs); - static void WriteCollectorsAndCleanupFcn(FileWriter& wrapperFile, - const std::string& moduleName, const std::vector& classes); - static void WriteRTTIRegistry(FileWriter& wrapperFile, - const std::string& moduleName, const std::vector& classes); -}; - -} // \namespace wrap diff --git a/wrap/OverloadedFunction.h b/wrap/OverloadedFunction.h deleted file mode 100644 index 6bcb72d94..000000000 --- a/wrap/OverloadedFunction.h +++ /dev/null @@ -1,140 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file OverloadedFunction.h - * @brief Function that can overload its arguments only - * @author Frank Dellaert - * @date Nov 13, 2014 - **/ - -#pragma once - -#include "Function.h" -#include "Argument.h" -#include -namespace wrap { - -/** - * ArgumentList Overloads - */ -class ArgumentOverloads { -public: - std::vector argLists_; - -public: - size_t nrOverloads() const { return argLists_.size(); } - - const ArgumentList& argumentList(size_t i) const { return argLists_.at(i); } - - void push_back(const ArgumentList& args) { argLists_.push_back(args); } - - std::vector expandArgumentListsTemplate( - const TemplateSubstitution& ts) const { - std::vector result; - for (const ArgumentList& argList : argLists_) { - ArgumentList instArgList = argList.expandTemplate(ts); - result.push_back(instArgList); - } - return result; - } - - /// Expand templates, imperative ! 
- virtual void ExpandTemplate(const TemplateSubstitution& ts) { - argLists_ = expandArgumentListsTemplate(ts); - } - - void verifyArguments(const std::vector& validArgs, - const std::string s) const { - for (const ArgumentList& argList : argLists_) { - for (Argument arg : argList) { - std::string fullType = arg.type.qualifiedName("::"); - if (find(validArgs.begin(), validArgs.end(), fullType) == - validArgs.end()) - throw DependencyMissing(fullType, "checking argument of " + s); - } - } - } - - friend std::ostream& operator<<(std::ostream& os, - const ArgumentOverloads& overloads) { - for (const ArgumentList& argList : overloads.argLists_) - os << argList << std::endl; - return os; - } - - std::string pyx_resolveOverloadParams(const ArgumentList& args, bool isVoid, - size_t indentLevel = 2) const { - std::string indent; - for (size_t i = 0; i < indentLevel; ++i) - indent += " "; - std::string s; - s += indent + "__params = process_args([" + args.pyx_paramsList() - + "], args, kwargs)\n"; - s += args.pyx_castParamsToPythonType(indent); - if (args.size() > 0) { - for (size_t i = 0; i < args.size(); ++i) { - // For python types we can do the assert after the assignment and save list accesses - if (args[i].type.isNonBasicType() || args[i].type.isEigen()) { - std::string param = args[i].name; - s += indent + "assert isinstance(" + param + ", " - + args[i].type.pyxArgumentType() + ")"; - if (args[i].type.isEigen()) { - s += " and " + param + ".ndim == " - + ((args[i].type.pyxClassName() == "Vector") ? "1" : "2"); - } - s += "\n"; - } - } - } - return s; - } -}; - -class OverloadedFunction : public Function, public ArgumentOverloads { -public: - bool addOverload(const std::string& name, const ArgumentList& args, - boost::optional instName = boost::none, - bool verbose = false) { - bool first = initializeOrCheck(name, instName, verbose); - ArgumentOverloads::push_back(args); - return first; - } - -private: -}; - -// Templated checking functions -// TODO: do this via polymorphism, use transform ? - -template -static std::map expandMethodTemplate( - const std::map& methods, const TemplateSubstitution& ts) { - std::map result; - typedef std::pair NamedMethod; - for (NamedMethod namedMethod : methods) { - F instMethod = namedMethod.second; - instMethod.expandTemplate(ts); - namedMethod.second = instMethod; - result.insert(namedMethod); - } - return result; -} - -template -inline void verifyArguments(const std::vector& validArgs, - const std::map& vt) { - typedef typename std::map::value_type NamedMethod; - for (const NamedMethod& namedMethod : vt) - namedMethod.second.verifyArguments(validArgs); -} - -} // \namespace wrap diff --git a/wrap/Qualified.cpp b/wrap/Qualified.cpp deleted file mode 100644 index 947e51d54..000000000 --- a/wrap/Qualified.cpp +++ /dev/null @@ -1,5 +0,0 @@ -#include - -namespace wrap { - std::vector Qualified::BasicTypedefs; -} diff --git a/wrap/Qualified.h b/wrap/Qualified.h deleted file mode 100644 index 416db239d..000000000 --- a/wrap/Qualified.h +++ /dev/null @@ -1,370 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Qualified.h - * @brief Qualified name - * @author Frank Dellaert - * @date Nov 11, 2014 - **/ - -#pragma once - -#include -#include -#include -#include - -namespace wrap { - -/** - * Class to encapuslate a qualified name, i.e., with (nested) namespaces - */ -class Qualified { - -//protected: -public: - - std::vector namespaces_; ///< Stack of namespaces - std::string name_; ///< type name - static std::vector BasicTypedefs; - - friend struct TypeGrammar; - friend class TemplateSubstitution; - -public: - - /// the different categories - typedef enum { - CLASS = 1, EIGEN = 2, BASIS = 3, VOID = 4 - } Category; - Category category; - - /// Default constructor - Qualified() : - category(VOID) { - } - - /// Construct from name and optional category - Qualified(const std::string& n, Category c = CLASS) : - name_(n), category(c) { - } - - /// Construct from scoped name and optional category - Qualified(const std::string& ns1, const std::string& n, Category c = CLASS) : - name_(n), category(c) { - namespaces_.push_back(ns1); - } - - /// Construct from doubly scoped name and optional category - Qualified(const std::string& ns1, const std::string& ns2, - const std::string& n, Category c = CLASS) : - name_(n), category(c) { - namespaces_.push_back(ns1); - namespaces_.push_back(ns2); - } - - /// Construct from arbitrarily scoped name - Qualified(std::vector ns, const std::string& name) : - namespaces_(ns), name_(name), category(CLASS) { - } - - // Destructor - virtual ~Qualified() {} - - std::string name() const { - return name_; - } - - std::vector namespaces() const { - return namespaces_; - } - - // Qualified is 'abused' as template argument name as well - // this function checks whether *this matches with templateArg - bool match(const std::string& templateArg) const { - return (name_ == templateArg && namespaces_.empty()); //TODO && category == CLASS); - } - - bool match(const std::vector& templateArgs) const { - for(const std::string& s: templateArgs) - if (match(s)) return true; - return false; - } - - void rename(const Qualified& q) { - namespaces_ = q.namespaces_; - name_ = q.name_; - category = q.category; - } - - void expand(const std::string& expansion) { - name_ += expansion; - } - - bool operator==(const Qualified& other) const { - return namespaces_ == other.namespaces_ && name_ == other.name_ - && category == other.category; - } - - bool empty() const { - return namespaces_.empty() && name_.empty(); - } - - virtual void clear() { - namespaces_.clear(); - name_.clear(); - category = VOID; - } - - bool isScalar() const { - return (name() == "bool" || name() == "char" - || name() == "unsigned char" || name() == "int" - || name() == "size_t" || name() == "double"); - } - - bool isVoid() const { - return name() == "void"; - } - - bool isString() const { - return name() == "string"; - } - - bool isEigen() const { - return name() == "Vector" || name() == "Matrix"; - } - - bool isBasicTypedef() const { - return std::find(Qualified::BasicTypedefs.begin(), - Qualified::BasicTypedefs.end(), - *this) != Qualified::BasicTypedefs.end(); - } - - bool isNonBasicType() const { - return name() != "This" && !isString() && !isScalar() && !isEigen() && - !isVoid() && !isBasicTypedef(); - } - -public: - - static Qualified MakeClass(std::vector namespaces, - const std::string& name) { - return Qualified(namespaces, name); - } - 
- static Qualified MakeEigen(const std::string& name) { - return Qualified(name, EIGEN); - } - - static Qualified MakeBasis(const std::string& name) { - return Qualified(name, BASIS); - } - - static Qualified MakeVoid() { - return Qualified("void", VOID); - } - - /// Return a qualified namespace using given delimiter - std::string qualifiedNamespaces(const std::string& delimiter = "") const { - std::string result; - for (std::size_t i = 0; i < namespaces_.size(); ++i) - result += (namespaces_[i] + ((i VectorXd, Matrix --> MatrixXd - std::string pxdClassName() const { - if (isEigen()) - return name_ + "Xd"; - else if (isNonBasicType()) - return "C" + qualifiedName("_", 1); - else return name_; - } - - /// name of Python classes in pyx - /// They have the same name with the corresponding Cython classes in pxd - /// But note that they are different: These are Python classes in the pyx file - /// To refer to a Cython class in pyx, we need to add "pxd.", e.g. pxd.noiseModel_Gaussian - /// see the other function pxd_class_in_pyx for that purpose. - std::string pyxClassName() const { - if (isEigen()) - return name_; - else - return qualifiedName("_", 1); - } - - /// Python type of function arguments in pyx to interface with normal python scripts - /// Eigen types become np.ndarray (There's no Eigen types, e.g. VectorXd, in - /// Python. We have to pass in numpy array in the arguments, which will then be - /// converted to Eigen types in Cython) - std::string pyxArgumentType() const { - if (isEigen()) - return "np.ndarray"; - else - return qualifiedName("_", 1); - } - - /// return the Cython class in pxd corresponding to a Python class in pyx - std::string pxd_class_in_pyx() const { - if (isNonBasicType()) { - return pxdClassName(); - } else if (isEigen()) { - return name_ + "Xd"; - } else // basic types and not Eigen - return name_; - } - - /// the internal Cython shared obj in a Python class wrappper - std::string shared_pxd_obj_in_pyx() const { - return pxdClassName() + "_"; - } - - std::string make_shared_pxd_class_in_pyx() const { - return "make_shared[" + pxd_class_in_pyx() + "]"; - } - - std::string shared_pxd_class_in_pyx() const { - return "shared_ptr[" + pxd_class_in_pyx() + "]"; - } - - friend std::ostream& operator<<(std::ostream& os, const Qualified& q) { - os << q.qualifiedName("::"); - return os; - } -}; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct TypeGrammar: classic::grammar { - - wrap::Qualified& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - TypeGrammar(wrap::Qualified& result) : - result_(result) { - } - - /// Definition of type grammar - template - struct definition: BasicRules { - - typedef classic::rule Rule; - - Rule void_p, basisType_p, eigenType_p, namespace_del_p, class_p, type_p; - - definition(TypeGrammar const& self) { - - using namespace wrap; - using namespace classic; - typedef BasicRules Basic; - - // HACK: use const values instead of using enums themselves - somehow this doesn't result in values getting assigned to gibberish - static const Qualified::Category EIGEN = Qualified::EIGEN; - static const Qualified::Category BASIS = Qualified::BASIS; - static const Qualified::Category CLASS = Qualified::CLASS; - static const Qualified::Category VOID = Qualified::VOID; - - void_p = str_p("void") // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, 
VOID)]; - - basisType_p = Basic::basisType_p // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, BASIS)]; - - eigenType_p = Basic::eigenType_p // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, EIGEN)]; - - namespace_del_p = Basic::namespace_p // - [push_back_a(self.result_.namespaces_)] >> str_p("::"); - - class_p = *namespace_del_p >> Basic::className_p // - [assign_a(self.result_.name_)] // - [assign_a(self.result_.category, CLASS)]; - - type_p = void_p | basisType_p | class_p | eigenType_p; - } - - Rule const& start() const { - return type_p; - } - - }; -}; -// type_grammar - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -template -struct TypeListGrammar: public classic::grammar > { - - typedef std::vector TypeList; - TypeList& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - TypeListGrammar(TypeList& result) : - result_(result) { - } - - /// Definition of type grammar - template - struct definition { - - wrap::Qualified type; ///< temporary for use during parsing - TypeGrammar type_g; ///< Individual Type grammars - - classic::rule type_p, typeList_p; - - definition(TypeListGrammar const& self) : - type_g(type) { - using namespace classic; - - type_p = type_g[push_back_a(self.result_, type)][clear_a(type)]; - - typeList_p = OPEN >> !type_p >> *(',' >> type_p) >> CLOSE; - } - - classic::rule const& start() const { - return typeList_p; - } - - }; -}; -// TypeListGrammar - -/* ************************************************************************* */ -// Needed for other parsers in Argument.h and ReturnType.h -static const bool T = true; - -} // \namespace wrap - diff --git a/wrap/README.md b/wrap/README.md index 014577b5a..347cca601 100644 --- a/wrap/README.md +++ b/wrap/README.md @@ -1,27 +1,78 @@ -# WRAP README -The wrap library wraps the GTSAM library into a MATLAB toolbox. +# WRAP -It was designed to be more general than just wrapping GTSAM, but a small amount of GTSAM specific code exists in matlab.h, the include file that is included by the mex files. The GTSAM-specific functionality consists primarily of handling of Eigen Matrix and Vector classes. +The wrap library wraps the GTSAM library into a Python library or MATLAB toolbox. +It was designed to be more general than just wrapping GTSAM. For notes on creating a wrap interface, see `gtsam.h` for what features can be wrapped into a toolbox, as well as the current state of the toolbox for GTSAM. -For notes on creating a wrap interface, see gtsam.h for what features can be wrapped into a toolbox, as well as the current state of the toolbox for gtsam. For more technical details on the interface, please read comments in matlab.h +## Prerequisites: Pybind11 and pyparsing -Some good things to know: +1. This library uses `pybind11`, which is included as a subdirectory in GTSAM. +2. The `interface_parser.py` in this library uses `pyparsing` to parse the interface file `gtsam.h`. Please install it first in your current Python environment before attempting the build. -OBJECT CREATION +``` +python3 -m pip install pyparsing +``` -- Classes are created by special constructors, e.g., new_GaussianFactorGraph_.cpp. - These constructors are called from the MATLAB class @GaussianFactorGraph. 
- new_GaussianFactorGraph_ calls wrap_constructed in matlab.h, see documentation there - -METHOD (AND CONSTRUCTOR) ARGUMENTS +## Getting Started -- Simple argument types of methods, such as "double", will be converted in the - mex wrappers by calling unwrap, defined in matlab.h -- Vector and Matrix arguments are normally passed by reference in GTSAM, but - in gtsam.h you need to pretend they are passed by value, to trigger the - generation of the correct conversion routines unwrap and unwrap -- passing classes as arguments works, provided they are passed by reference. - This triggers a call to unwrap_shared_ptr +Clone this repository to your local machine and perform the standard CMake install: - \ No newline at end of file +```sh +mkdir build && cd build +cmake .. +make install # use sudo if needed +``` + +Using `wrap` in your project is straightforward from here. In you `CMakeLists.txt` file, you just need to add the following: + +```cmake +include(PybindWrap) + +pybind_wrap(${PROJECT_NAME}_py # target + ${PROJECT_SOURCE_DIR}/cpp/${PROJECT_NAME}.h # interface header file + "${PROJECT_NAME}.cpp" # the generated cpp + "${PROJECT_NAME}" # module_name + "gtsam" # top namespace in the cpp file + "${ignore}" # ignore classes + ${PROJECT_BINARY_DIR}/${PROJECT_NAME}.tpl + ${PROJECT_NAME} # libs + "${PROJECT_NAME}" # dependencies + ON # use boost + ) +``` + +For more information, please follow our [tutorial](https://github.com/borglab/gtsam-project-python). + +## GTSAM Python wrapper + +**WARNING: On macOS, you have to statically build GTSAM to use the wrapper.** + +1. Set `GTSAM_BUILD_PYTHON=ON` while configuring the build with `cmake`. +1. What you can do in the `build` folder: + 1. Just run python then import GTSAM and play around: + ``` + + import gtsam + gtsam.__dir__() + ``` + + 1. Run the unittests: + ``` + python -m unittest discover + ``` + 1. Edit the unittests in `python/gtsam/*.py` and simply rerun the test. + They were symlinked to `/gtsam/*.py` to facilitate fast development. + ``` + python -m unittest gtsam/tests/test_Pose3.py + ``` + - NOTE: You might need to re-run `cmake ..` if files are deleted or added. +1. Do `make install` and `cd /python`. Here, you can: + 1. Run the unittests: + ``` + python setup.py test + ``` + 2. Install `gtsam` to your current Python environment. + ``` + python setup.py install + ``` + - NOTE: It's a good idea to create a virtual environment otherwise it will be installed in your system Python's site-packages. 
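As a complement to the CMake snippet above, here is a small, purely illustrative interface header of the kind `pybind_wrap` consumes; the class, method, and file names are hypothetical, and `gtsam.h` remains the reference for the full set of supported features:

```cpp
// Hypothetical interface header (e.g. cpp/example.h); names are illustrative.
// Only the declarations listed here are exposed to Python.
#include <cpp/greeting.h>   // the C++ header that actually defines the class

namespace example {

class Greeting {
  Greeting();                 // wrapped constructor
  void sayHello() const;      // wrapped method
  Matrix covariance() const;  // Eigen Matrix/Vector map to numpy arrays
};

void globalHello();           // free functions can be wrapped as well

}
```

Under the CMake call shown earlier, such a header would be picked up as `${PROJECT_SOURCE_DIR}/cpp/${PROJECT_NAME}.h` and wrapped into a module named `${PROJECT_NAME}`.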
diff --git a/wrap/ReturnType.cpp b/wrap/ReturnType.cpp deleted file mode 100644 index fdf86d975..000000000 --- a/wrap/ReturnType.cpp +++ /dev/null @@ -1,101 +0,0 @@ -/** - * @file ReturnType.cpp - * @date Nov 13, 2014 - * @author Frank Dellaert - */ - -#include "ReturnType.h" -#include "Class.h" -#include "utilities.h" -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void ReturnType::wrap_result(const string& out, const string& result, - FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const { - string cppType = qualifiedName("::"), matlabType = qualifiedName("."); - - if (category == CLASS) { - // Handle Classes - string objCopy, ptrType; - const bool isVirtual = typeAttributes.attributes(cppType).isVirtual; - if (isPtr) - objCopy = result; // a shared pointer can always be passed as is - else { - // but if we want an actual new object, things get more complex - if (isVirtual) - // A virtual class needs to be cloned, so the whole hierarchy is - // returned - objCopy = result + ".clone()"; - else { - // ...but a non-virtual class can just be copied - objCopy = "boost::make_shared<" + cppType + ">(" + result + ")"; - } - } - // e.g. out[1] = wrap_shared_ptr(pairResult.second,"gtsam.Point3", false); - wrapperFile.oss << out << " = wrap_shared_ptr(" << objCopy << ",\"" - << matlabType << "\", " << (isVirtual ? "true" : "false") - << ");\n"; - - } else if (isPtr) { - // Handle shared pointer case for BASIS/EIGEN/VOID - // This case does not actually occur in GTSAM wrappers, so untested! - wrapperFile.oss << " {\n boost::shared_ptr<" << qualifiedName("::") - << "> shared(" << result << ");" << endl; - wrapperFile.oss << out << " = wrap_shared_ptr(shared,\"" << matlabType - << "\");\n }\n"; - - } else if (matlabType != "void") - // Handle normal case case for BASIS/EIGEN - wrapperFile.oss << out << " = wrap< " << qualifiedName("::") << " >(" << result - << ");\n"; -} - -/* ************************************************************************* */ -void ReturnType::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - string cythonType; - if (name() == "This") - cythonType = className; - else if (match(templateArgs)) - cythonType = name(); - else - cythonType = pxdClassName(); - if (isPtr) cythonType = "shared_ptr[" + cythonType + "]"; - file.oss << cythonType; -} - -/* ************************************************************************* */ -std::string ReturnType::pyx_returnType(bool addShared) const { - string retType = pxd_class_in_pyx(); - if (isPtr || (isNonBasicType() && addShared)) - retType = "shared_ptr[" + retType + "]"; - return retType; -} - -/* ************************************************************************* */ -std::string ReturnType::pyx_casting(const std::string& var, - bool isSharedVar) const { - if (isEigen()) { - string s = "ndarray_copy(" + var + ")"; - if (pyxClassName() == "Vector") - return s + ".squeeze()"; - else return s; - } - else if (isNonBasicType()) { - if (isPtr || isSharedVar) - return pyxClassName() + ".cyCreateFromShared(" + var + ")"; - else { - // construct a shared_ptr if var is not a shared ptr - return pyxClassName() + ".cyCreateFromShared(" + make_shared_pxd_class_in_pyx() + - + "(" + var + "))"; - } - } else - return var; -} - -/* ************************************************************************* */ diff --git a/wrap/ReturnType.h b/wrap/ReturnType.h deleted file 
mode 100644 index ba5086507..000000000 --- a/wrap/ReturnType.h +++ /dev/null @@ -1,89 +0,0 @@ -/** - * @file ReturnValue.h - * @brief Encapsulates a return type of a method - * @date Nov 13, 2014 - * @author Frank Dellaert - */ - -#include "Qualified.h" -#include "FileWriter.h" -#include "TypeAttributesTable.h" -#include "utilities.h" -#include - -#pragma once - -namespace wrap { - -/** - * Encapsulates return value of a method or function - */ -struct ReturnType : public Qualified { - bool isPtr; - - friend struct ReturnValueGrammar; - - /// Makes a void type - ReturnType() : isPtr(false) {} - - /// Constructor, no namespaces - ReturnType(const std::string& name, Category c = CLASS, bool ptr = false) - : Qualified(name, c), isPtr(ptr) {} - - virtual void clear() { - Qualified::clear(); - isPtr = false; - } - - /// Check if this type is in a set of valid types - template - void verify(TYPES validtypes, const std::string& s) const { - std::string key = qualifiedName("::"); - if (find(validtypes.begin(), validtypes.end(), key) == validtypes.end()) - throw DependencyMissing(key, "checking return type of " + s); - } - - /// @param className the actual class name to use when "This" is specified - void emit_cython_pxd(FileWriter& file, const std::string& className, - const std::vector& templateArgs) const; - - std::string pyx_returnType(bool addShared = true) const; - std::string pyx_casting(const std::string& var, - bool isSharedVar = true) const; - -private: - friend struct ReturnValue; - - /// Example: out[1] = wrap_shared_ptr(pairResult.second,"Test", false); - void wrap_result(const std::string& out, const std::string& result, - FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const; -}; - -//****************************************************************************** -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ReturnTypeGrammar : public classic::grammar { - wrap::ReturnType& result_; ///< successful parse will be placed in here - - TypeGrammar type_g; - - /// Construct ReturnType grammar and specify where result is placed - ReturnTypeGrammar(wrap::ReturnType& result) - : result_(result), type_g(result_) {} - - /// Definition of type grammar - template - struct definition { - classic::rule type_p; - - definition(ReturnTypeGrammar const& self) { - using namespace classic; - type_p = self.type_g >> !ch_p('*')[assign_a(self.result_.isPtr, T)]; - } - - classic::rule const& start() const { return type_p; } - }; -}; -// ReturnTypeGrammar - -} // \namespace wrap diff --git a/wrap/ReturnValue.cpp b/wrap/ReturnValue.cpp deleted file mode 100644 index e58e85602..000000000 --- a/wrap/ReturnValue.cpp +++ /dev/null @@ -1,102 +0,0 @@ -/** - * @file ReturnValue.cpp - * @date Dec 1, 2011 - * @author Alex Cunningham - * @author Andrew Melim - * @author Richard Roberts - */ - -#include "ReturnValue.h" -#include "utilities.h" -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -ReturnValue ReturnValue::expandTemplate(const TemplateSubstitution& ts) const { - ReturnValue instRetVal = *this; - instRetVal.type1 = ts.tryToSubstitite(type1); - if (isPair) instRetVal.type2 = ts.tryToSubstitite(type2); - return instRetVal; -} - -/* ************************************************************************* */ -string ReturnValue::returnType() const { - if (isPair) - return "pair< " + type1.qualifiedName("::") + ", " + - type2.qualifiedName("::") + " >"; - else - 
return type1.qualifiedName("::"); -} - -/* ************************************************************************* */ -string ReturnValue::matlab_returnType() const { - return isPair ? "[first,second]" : "result"; -} - -/* ************************************************************************* */ -void ReturnValue::wrap_result(const string& result, FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const { - if (isPair) { - // For a pair, store the returned pair so we do not evaluate the function - // twice - wrapperFile.oss << " auto pairResult = " << result - << ";\n"; - type1.wrap_result(" out[0]", "pairResult.first", wrapperFile, - typeAttributes); - type2.wrap_result(" out[1]", "pairResult.second", wrapperFile, - typeAttributes); - } else { // Not a pair - type1.wrap_result(" out[0]", result, wrapperFile, typeAttributes); - } -} - -/* ************************************************************************* */ -void ReturnValue::emit_matlab(FileWriter& proxyFile) const { - string output; - if (isPair) - proxyFile.oss << "[ varargout{1} varargout{2} ] = "; - else if (type1.category != ReturnType::VOID) - proxyFile.oss << "varargout{1} = "; -} - -/* ************************************************************************* */ -void ReturnValue::emit_cython_pxd( - FileWriter& file, const std::string& className, - const std::vector& templateArgs) const { - if (isPair) { - file.oss << "pair["; - type1.emit_cython_pxd(file, className, templateArgs); - file.oss << ","; - type2.emit_cython_pxd(file, className, templateArgs); - file.oss << "] "; - } else { - type1.emit_cython_pxd(file, className, templateArgs); - file.oss << " "; - } -} - -/* ************************************************************************* */ -std::string ReturnValue::pyx_returnType() const { - if (isVoid()) return ""; - if (isPair) { - return "pair [" + type1.pyx_returnType(false) + "," + - type2.pyx_returnType(false) + "]"; - } else { - return type1.pyx_returnType(true); - } -} - -/* ************************************************************************* */ -std::string ReturnValue::pyx_casting(const std::string& var) const { - if (isVoid()) return ""; - if (isPair) { - return "(" + type1.pyx_casting(var + ".first", false) + "," + - type2.pyx_casting(var + ".second", false) + ")"; - } else { - return type1.pyx_casting(var); - } -} - -/* ************************************************************************* */ diff --git a/wrap/ReturnValue.h b/wrap/ReturnValue.h deleted file mode 100644 index 721132797..000000000 --- a/wrap/ReturnValue.h +++ /dev/null @@ -1,126 +0,0 @@ -/** - * @file ReturnValue.h - * - * @brief Encapsulates a return value from a method - * @date Dec 1, 2011 - * @author Alex Cunningham - * @author Richard Roberts - */ - -#include "ReturnType.h" -#include "TemplateSubstitution.h" -#include "FileWriter.h" -#include "TypeAttributesTable.h" -#include "utilities.h" - -#pragma once - -namespace wrap { - -/** - * Encapsulates return type of a method or function, possibly a pair - */ -struct ReturnValue { - - bool isPair; - ReturnType type1, type2; - - friend struct ReturnValueGrammar; - - /// Default constructor - ReturnValue() : - isPair(false) { - } - - /// Construct from type - ReturnValue(const ReturnType& type) : - isPair(false), type1(type) { - } - - /// Construct from pair type arguments - ReturnValue(const ReturnType& t1, const ReturnType& t2) : - isPair(true), type1(t1), type2(t2) { - } - - /// Destructor - virtual ~ReturnValue() {} - - virtual void clear() { - 
type1.clear(); - type2.clear(); - isPair = false; - } - - bool isVoid() const { - return !isPair && !type1.isPtr && (type1.name() == "void"); - } - - bool operator==(const ReturnValue& other) const { - return isPair == other.isPair && type1 == other.type1 - && type2 == other.type2; - } - - /// Substitute template argument - ReturnValue expandTemplate(const TemplateSubstitution& ts) const; - - std::string returnType() const; - - std::string matlab_returnType() const; - - void wrap_result(const std::string& result, FileWriter& wrapperFile, - const TypeAttributesTable& typeAttributes) const; - - void emit_matlab(FileWriter& proxyFile) const; - - /// @param className the actual class name to use when "This" is specified - void emit_cython_pxd(FileWriter& file, const std::string& className, - const std::vector& templateArgs) const; - std::string pyx_returnType() const; - std::string pyx_casting(const std::string& var) const; - - friend std::ostream& operator<<(std::ostream& os, const ReturnValue& r) { - if (!r.isPair && r.type1.category == ReturnType::VOID) - os << "void"; - else - os << r.returnType(); - return os; - } - -}; - -//****************************************************************************** -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct ReturnValueGrammar: public classic::grammar { - - wrap::ReturnValue& result_; ///< successful parse will be placed in here - ReturnTypeGrammar returnType1_g, returnType2_g; ///< Type parsers - - /// Construct type grammar and specify where result is placed - ReturnValueGrammar(wrap::ReturnValue& result) : - result_(result), returnType1_g(result.type1), returnType2_g(result.type2) { - } - - /// Definition of type grammar - template - struct definition { - - classic::rule pair_p, returnValue_p; - - definition(ReturnValueGrammar const& self) { - using namespace classic; - - pair_p = (str_p("pair") >> '<' >> self.returnType1_g >> ',' - >> self.returnType2_g >> '>')[assign_a(self.result_.isPair, T)]; - - returnValue_p = pair_p | self.returnType1_g; - } - - classic::rule const& start() const { - return returnValue_p; - } - - }; -}; -// ReturnValueGrammar - -}// \namespace wrap diff --git a/wrap/StaticMethod.cpp b/wrap/StaticMethod.cpp deleted file mode 100644 index 0f812ea61..000000000 --- a/wrap/StaticMethod.cpp +++ /dev/null @@ -1,151 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file StaticMethod.ccp - * @author Frank Dellaert - * @author Andrew Melim - * @author Richard Roberts - **/ - -#include "StaticMethod.h" -#include "utilities.h" -#include "Class.h" - -#include -#include - -#include -#include - -using namespace std; -using namespace wrap; - -/* ************************************************************************* */ -void StaticMethod::proxy_header(FileWriter& proxyFile) const { - string upperName = matlabName(); - upperName[0] = toupper(upperName[0], locale()); - proxyFile.oss << " function varargout = " << upperName << "(varargin)\n"; -} - -/* ************************************************************************* */ -string StaticMethod::wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, const ArgumentList& args) const { - // check arguments - // NOTE: for static functions, there is no object passed - wrapperFile.oss << " checkArguments(\"" << matlabUniqueName << "." << name_ - << "\",nargout,nargin," << args.size() << ");\n"; - - // unwrap arguments, see Argument.cpp - args.matlab_unwrap(wrapperFile, 0); // We start at 0 because there is no self object - - // call method and wrap result - // example: out[0]=wrap(staticMethod(t)); - string expanded = cppClassName + "::" + name_; - if (templateArgValue_) - expanded += ("<" + templateArgValue_->qualifiedName("::") + ">"); - - return expanded; -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_pxd(FileWriter& file, const Class& cls) const { - for(size_t i = 0; i < nrOverloads(); ++i) { - file.oss << " @staticmethod\n"; - file.oss << " "; - returnVals_[i].emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - file.oss << name_ + ((i>0)?"_" + to_string(i):"") << " \"" << name_ << "\"" << "("; - argumentList(i).emit_cython_pxd(file, cls.pxdClassName(), cls.templateArgs); - file.oss << ") except +\n"; - } -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_wrapper_pxd(FileWriter& file, - const Class& cls) const { - if (nrOverloads() > 1) { - for (size_t i = 0; i < nrOverloads(); ++i) { - string funcName = name_ + "_" + to_string(i); - file.oss << " @staticmethod\n"; - file.oss << " cdef tuple " + funcName + "(tuple args, dict kwargs)\n"; - } - } -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_pyx_no_overload(FileWriter& file, - const Class& cls) const { - assert(nrOverloads() == 1); - file.oss << " @staticmethod\n"; - file.oss << " def " << name_ << "("; - argumentList(0).emit_cython_pyx(file); - file.oss << "):\n"; - - /// Call cython corresponding function and return - file.oss << argumentList(0).pyx_convertEigenTypeAndStorageOrder(" "); - string call = pyx_functionCall(cls.pxd_class_in_pyx(), name_, 0); - file.oss << " "; - if (!returnVals_[0].isVoid()) { - file.oss << "return " << returnVals_[0].pyx_casting(call) << "\n"; - } else - file.oss << call << "\n"; - file.oss << "\n"; -} - -/* ************************************************************************* */ -void StaticMethod::emit_cython_pyx(FileWriter& file, const Class& cls) const { - size_t N = nrOverloads(); - if (N == 1) { - emit_cython_pyx_no_overload(file, cls); - return; - } - - // Dealing with overloads.. 
- file.oss << " @staticmethod # overloaded\n"; - file.oss << " def " << name_ << "(*args, **kwargs):\n"; - for (size_t i = 0; i < N; ++i) { - string funcName = name_ + "_" + to_string(i); - file.oss << " success, results = " << cls.pyxClassName() << "." - << funcName << "(args, kwargs)\n"; - file.oss << " if success:\n return results\n"; - } - file.oss << " raise TypeError('Could not find the correct overload')\n\n"; - - // Create cdef methods for all overloaded methods - for(size_t i = 0; i < N; ++i) { - string funcName = name_ + "_" + to_string(i); - file.oss << " @staticmethod\n"; - file.oss << " cdef tuple " + funcName + "(tuple args, dict kwargs):\n"; - file.oss << " cdef list __params\n"; - if (!returnVals_[i].isVoid()) { - file.oss << " cdef " << returnVals_[i].pyx_returnType() << " return_value\n"; - } - file.oss << " try:\n"; - ArgumentList args = argumentList(i); - file.oss << pyx_resolveOverloadParams(args, false, 3); - - /// Call cython corresponding function and return - file.oss << args.pyx_convertEigenTypeAndStorageOrder(" "); - string pxdFuncName = name_ + ((i>0)?"_" + to_string(i):""); - string call = pyx_functionCall(cls.pxd_class_in_pyx(), pxdFuncName, i); - if (!returnVals_[i].isVoid()) { - file.oss << " return_value = " << call << "\n"; - file.oss << " return True, " << returnVals_[i].pyx_casting("return_value") << "\n"; - } else { - file.oss << " " << call << "\n"; - file.oss << " return True, None\n"; - } - file.oss << " except:\n"; - file.oss << " return False, None\n\n"; - } -} - -/* ************************************************************************* */ diff --git a/wrap/StaticMethod.h b/wrap/StaticMethod.h deleted file mode 100644 index 8392a1cc5..000000000 --- a/wrap/StaticMethod.h +++ /dev/null @@ -1,51 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file StaticMethod.h - * @brief describes and generates code for static methods - * @author Frank Dellaert - * @author Alex Cunningham - * @author Richard Roberts - **/ - -#pragma once - -#include "MethodBase.h" - -namespace wrap { - -/// StaticMethod class -struct StaticMethod: public MethodBase { - - typedef const std::string& Str; - - friend std::ostream& operator<<(std::ostream& os, const StaticMethod& m) { - for (size_t i = 0; i < m.nrOverloads(); i++) - os << "static " << m.returnVals_[i] << " " << m.name_ << m.argLists_[i]; - return os; - } - - void emit_cython_pxd(FileWriter& file, const Class& cls) const; - void emit_cython_wrapper_pxd(FileWriter& file, const Class& cls) const; - void emit_cython_pyx(FileWriter& file, const Class& cls) const; - void emit_cython_pyx_no_overload(FileWriter& file, const Class& cls) const; - -protected: - - virtual void proxy_header(FileWriter& proxyFile) const; - - virtual std::string wrapper_call(FileWriter& wrapperFile, Str cppClassName, - Str matlabUniqueName, const ArgumentList& args) const; -}; - -} // \namespace wrap - diff --git a/wrap/Template.h b/wrap/Template.h deleted file mode 100644 index 32f8e9761..000000000 --- a/wrap/Template.h +++ /dev/null @@ -1,146 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. (see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Template.h - * @brief Template name - * @author Frank Dellaert - * @date Nov 11, 2014 - **/ - -#pragma once - -#include - -namespace wrap { - -/// The template specification that goes before a method or a class -class Template { - std::string argName_; - std::vector argValues_; - std::vector intList_; - friend struct TemplateGrammar; -public: - /// The only way to get values into a Template is via our friendly Grammar - Template() { - } - void clear() { - argName_.clear(); - argValues_.clear(); - intList_.clear(); - } - const std::string& argName() const { - return argName_; - } - const std::vector& intList() const { - return intList_; - } - const std::vector& argValues() const { - return argValues_; - } - bool empty() const { - return argValues_.empty() && intList_.empty(); - } - size_t nrValues() const { - return argValues_.size(); - } - const Qualified& operator[](size_t i) const { - return argValues_[i]; - } - bool valid() const { - return !argName_.empty() && argValues_.size() > 0; - } - -}; - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct IntListGrammar: public classic::grammar { - - typedef std::vector IntList; - IntList& result_; ///< successful parse will be placed in here - - /// Construct type grammar and specify where result is placed - IntListGrammar(IntList& result) : - result_(result) { - } - - /// Definition of type grammar - template - struct definition { - - classic::rule integer_p, intList_p; - - definition(IntListGrammar const& self) { - using namespace classic; - - integer_p = int_p[push_back_a(self.result_)]; - - intList_p = '{' >> !integer_p >> *(',' 
>> integer_p) >> '}'; - } - - classic::rule const& start() const { - return intList_p; - } - - }; -}; -// IntListGrammar - -/* ************************************************************************* */ -// http://boost-spirit.com/distrib/spirit_1_8_2/libs/spirit/doc/grammar.html -struct TemplateGrammar: public classic::grammar { - - Template& result_; ///< successful parse will be placed in here - TypeListGrammar<'{', '}'> argValues_g; ///< TypeList parser - IntListGrammar intList_g; ///< TypeList parser - - /// Construct type grammar and specify where result is placed - TemplateGrammar(Template& result) : - result_(result), argValues_g(result.argValues_), // - intList_g(result.intList_) { - } - - /// Definition of type grammar - template - struct definition: BasicRules { - - classic::rule templateArgValues_p; - - definition(TemplateGrammar const& self) { - using classic::str_p; - using classic::assign_a; - templateArgValues_p = (str_p("template") >> '<' - >> (BasicRules::name_p)[assign_a(self.result_.argName_)] - >> '=' >> (self.argValues_g | self.intList_g) >> '>'); - } - - classic::rule const& start() const { - return templateArgValues_p; - } - - }; -}; -// TemplateGrammar - -/// Cool initializer for tests -static inline boost::optional