diff --git a/.cproject b/.cproject
deleted file mode 100644
index 10b16f91c..000000000
--- a/.cproject
+++ /dev/null
@@ -1,3563 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- make
- -j5
- SmartProjectionFactorExample_kitti_nonbatch.run
- true
- true
- true
-
-
- make
- -j5
- SmartProjectionFactorExample_kitti.run
- true
- true
- true
-
-
- make
- -j5
- SmartProjectionFactorTesting.run
- true
- true
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- tests/testSPQRUtil.run
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j4
- testSimilarity3.run
- true
- true
- true
-
-
- make
- -j5
- testInvDepthCamera3.run
- true
- true
- true
-
-
- make
- -j5
- testTriangulation.run
- true
- true
- true
-
-
- make
- -j4
- testEvent.run
- true
- true
- true
-
-
- make
- -j2
- all
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -k
- check
- true
- false
- true
-
-
- make
-
- tests/testBayesTree.run
- true
- false
- true
-
-
- make
-
- testBinaryBayesNet.run
- true
- false
- true
-
-
- make
- -j2
- testFactorGraph.run
- true
- true
- true
-
-
- make
- -j2
- testISAM.run
- true
- true
- true
-
-
- make
- -j2
- testJunctionTree.run
- true
- true
- true
-
-
- make
- -j2
- testKey.run
- true
- true
- true
-
-
- make
- -j2
- testOrdering.run
- true
- true
- true
-
-
- make
-
- testSymbolicBayesNet.run
- true
- false
- true
-
-
- make
-
- tests/testSymbolicFactor.run
- true
- false
- true
-
-
- make
-
- testSymbolicFactorGraph.run
- true
- false
- true
-
-
- make
- -j2
- timeSymbolMaps.run
- true
- true
- true
-
-
- make
-
- tests/testBayesTree
- true
- false
- true
-
-
- make
- -j2
- tests/testPose2.run
- true
- true
- true
-
-
- make
- -j2
- tests/testPose3.run
- true
- true
- true
-
-
- make
- -j2
- all
- true
- true
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j5
- testAHRS.run
- true
- true
- true
-
-
- make
- -j5
- testInvDepthFactor3.run
- true
- true
- true
-
-
- make
- -j5
- testMultiProjectionFactor.run
- true
- true
- true
-
-
- make
- -j5
- testPoseRotationPrior.run
- true
- true
- true
-
-
- make
- -j5
- testPoseTranslationPrior.run
- true
- true
- true
-
-
- make
- -j5
- testReferenceFrameFactor.run
- true
- true
- true
-
-
- make
- -j5
- testSmartProjectionFactor.run
- true
- true
- true
-
-
- make
- -j5
- testTSAMFactors.run
- true
- true
- true
-
-
- make
- -j5
- testInertialNavFactor_GlobalVelocity.run
- true
- true
- true
-
-
- make
- -j5
- testInvDepthFactorVariant3.run
- true
- true
- true
-
-
- make
- -j5
- testInvDepthFactorVariant1.run
- true
- true
- true
-
-
- make
- -j5
- testEquivInertialNavFactor_GlobalVel.run
- true
- true
- true
-
-
- make
- -j5
- testInvDepthFactorVariant2.run
- true
- true
- true
-
-
- make
- -j5
- testRelativeElevationFactor.run
- true
- true
- true
-
-
- make
- -j5
- testPoseBetweenFactor.run
- true
- true
- true
-
-
- make
- -j5
- testGaussMarkov1stOrderFactor.run
- true
- true
- true
-
-
- make
- -j4
- testSmartStereoProjectionPoseFactor.run
- true
- true
- true
-
-
- make
- -j4
- testTOAFactor.run
- true
- true
- true
-
-
- make
- -j5
- testGaussianFactorGraphUnordered.run
- true
- true
- true
-
-
- make
- -j5
- testGaussianBayesNetUnordered.run
- true
- true
- true
-
-
- make
- -j5
- testGaussianConditional.run
- true
- true
- true
-
-
- make
- -j5
- testGaussianDensity.run
- true
- true
- true
-
-
- make
- -j5
- testGaussianJunctionTree.run
- true
- true
- true
-
-
- make
- -j5
- testHessianFactor.run
- true
- true
- true
-
-
- make
- -j5
- testJacobianFactor.run
- true
- true
- true
-
-
- make
- -j5
- testKalmanFilter.run
- true
- true
- true
-
-
- make
- -j5
- testNoiseModel.run
- true
- true
- true
-
-
- make
- -j5
- testSampler.run
- true
- true
- true
-
-
- make
- -j5
- testSerializationLinear.run
- true
- true
- true
-
-
- make
- -j5
- testVectorValues.run
- true
- true
- true
-
-
- make
- -j5
- testGaussianBayesTree.run
- true
- true
- true
-
-
- make
- -j5
- testCombinedImuFactor.run
- true
- true
- true
-
-
- make
- -j5
- testImuFactor.run
- true
- true
- true
-
-
- make
- -j5
- testAHRSFactor.run
- true
- true
- true
-
-
- make
- -j8
- testAttitudeFactor.run
- true
- true
- true
-
-
- make
- -j5
- clean
- true
- true
- true
-
-
- make
- -j5
- all
- true
- true
- true
-
-
- make
- -j2
- all
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- testGaussianConditional.run
- true
- true
- true
-
-
- make
- -j2
- testGaussianFactor.run
- true
- true
- true
-
-
- make
- -j2
- timeGaussianFactor.run
- true
- true
- true
-
-
- make
- -j2
- timeVectorConfig.run
- true
- true
- true
-
-
- make
- -j2
- testVectorBTree.run
- true
- true
- true
-
-
- make
- -j2
- testVectorMap.run
- true
- true
- true
-
-
- make
- -j2
- testNoiseModel.run
- true
- true
- true
-
-
- make
- -j2
- testBayesNetPreconditioner.run
- true
- true
- true
-
-
- make
-
- testErrors.run
- true
- false
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- tests/testGaussianJunctionTree.run
- true
- true
- true
-
-
- make
- -j2
- tests/testGaussianFactor.run
- true
- true
- true
-
-
- make
- -j2
- tests/testGaussianConditional.run
- true
- true
- true
-
-
- make
- -j2
- tests/timeSLAMlike.run
- true
- true
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j2
- testBTree.run
- true
- true
- true
-
-
- make
- -j2
- testDSF.run
- true
- true
- true
-
-
- make
- -j2
- testDSFVector.run
- true
- true
- true
-
-
- make
- -j2
- testMatrix.run
- true
- true
- true
-
-
- make
- -j2
- testSPQRUtil.run
- true
- true
- true
-
-
- make
- -j2
- testVector.run
- true
- true
- true
-
-
- make
- -j2
- timeMatrix.run
- true
- true
- true
-
-
- make
- -j2
- all
- true
- true
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- testClusterTree.run
- true
- true
- true
-
-
- make
- -j2
- testJunctionTree.run
- true
- true
- true
-
-
- make
- -j2
- tests/testEliminationTree.run
- true
- true
- true
-
-
- make
- -j2
- tests/testSymbolicFactor.run
- true
- true
- true
-
-
- make
- -j2
- tests/testVariableSlots.run
- true
- true
- true
-
-
- make
- -j2
- tests/testConditional.run
- true
- true
- true
-
-
- make
- -j2
- tests/testSymbolicFactorGraph.run
- true
- true
- true
-
-
- make
- -j2
- all
- true
- true
- true
-
-
- make
- -j2
- testNonlinearConstraint.run
- true
- true
- true
-
-
- make
- -j2
- testLieConfig.run
- true
- true
- true
-
-
- make
- -j2
- testConstraintOptimizer.run
- true
- true
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- all
- true
- true
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j2
- testPlanarSLAM.run
- true
- true
- true
-
-
- make
- -j2
- testPose2Config.run
- true
- true
- true
-
-
- make
- -j2
- testPose2Factor.run
- true
- true
- true
-
-
- make
- -j2
- testPose2Prior.run
- true
- true
- true
-
-
- make
- -j2
- testPose2SLAM.run
- true
- true
- true
-
-
- make
- -j2
- testPose3Config.run
- true
- true
- true
-
-
- make
- -j2
- testPose3SLAM.run
- true
- true
- true
-
-
- make
- testSimulated2DOriented.run
- true
- false
- true
-
-
- make
- -j2
- testVSLAMConfig.run
- true
- true
- true
-
-
- make
- -j2
- testVSLAMFactor.run
- true
- true
- true
-
-
- make
- -j2
- testVSLAMGraph.run
- true
- true
- true
-
-
- make
- -j2
- testPose3Factor.run
- true
- true
- true
-
-
- make
- testSimulated2D.run
- true
- false
- true
-
-
- make
- testSimulated3D.run
- true
- false
- true
-
-
- make
- -j2
- tests/testGaussianISAM2
- true
- true
- true
-
-
- make
- -j5
- testBTree.run
- true
- true
- true
-
-
- make
- -j5
- testDSF.run
- true
- true
- true
-
-
- make
- -j5
- testDSFMap.run
- true
- true
- true
-
-
- make
- -j5
- testDSFVector.run
- true
- true
- true
-
-
- make
- -j5
- testFixedVector.run
- true
- true
- true
-
-
- make
- -j5
- testEliminationTree.run
- true
- true
- true
-
-
- make
- -j5
- testInference.run
- true
- true
- true
-
-
- make
- -j5
- testKey.run
- true
- true
- true
-
-
- make
- -j1
- testSymbolicBayesTree.run
- true
- false
- true
-
-
- make
- -j1
- testSymbolicSequentialSolver.run
- true
- false
- true
-
-
- make
- -j4
- testLabeledSymbol.run
- true
- true
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- tests/testLieConfig.run
- true
- true
- true
-
-
- make
- -j3
- install
- true
- false
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j1
- check
- true
- false
- true
-
-
- make
- -j5
- all
- true
- true
- true
-
-
- cmake
- ..
- true
- false
- true
-
-
- make
- -j5
- gtsam-shared
- true
- true
- true
-
-
- make
- -j5
- gtsam-static
- true
- true
- true
-
-
- make
- -j5
- timing
- true
- true
- true
-
-
- make
- -j5
- examples
- true
- true
- true
-
-
- make
- -j5
- VERBOSE=1 all
- true
- true
- true
-
-
- make
- -j5
- VERBOSE=1 check
- true
- true
- true
-
-
- make
- -j5
- check.base
- true
- true
- true
-
-
- make
- -j5
- timing.base
- true
- true
- true
-
-
- make
- -j2 VERBOSE=1
- check.geometry
- true
- false
- true
-
-
- make
- -j5
- timing.geometry
- true
- true
- true
-
-
- make
- -j2 VERBOSE=1
- check.inference
- true
- false
- true
-
-
- make
- -j5
- timing.inference
- true
- true
- true
-
-
- make
- -j2 VERBOSE=1
- check.linear
- true
- false
- true
-
-
- make
- -j5
- timing.linear
- true
- true
- true
-
-
- make
- -j2 VERBOSE=1
- check.nonlinear
- true
- false
- true
-
-
- make
- -j5
- timing.nonlinear
- true
- true
- true
-
-
- make
- -j2 VERBOSE=1
- check.slam
- true
- false
- true
-
-
- make
- -j5
- timing.slam
- true
- true
- true
-
-
- make
- -j5
- wrap_gtsam
- true
- true
- true
-
-
- make
- VERBOSE=1
- wrap_gtsam
- true
- false
- true
-
-
- cpack
-
- -G DEB
- true
- false
- true
-
-
- cpack
-
- -G RPM
- true
- false
- true
-
-
- cpack
-
- -G TGZ
- true
- false
- true
-
-
- cpack
-
- --config CPackSourceConfig.cmake
- true
- false
- true
-
-
- make
- -j5
- check.discrete
- true
- true
- true
-
-
- make
- -j5
- check.discrete_unstable
- true
- true
- true
-
-
- make
- -j5
- check.base_unstable
- true
- true
- true
-
-
- make
- -j5
- check.dynamics_unstable
- true
- true
- true
-
-
- make
- -j5
- check.slam_unstable
- true
- true
- true
-
-
- make
- -j5
- check.unstable
- true
- true
- true
-
-
- make
- -j5
- wrap_gtsam_build
- true
- true
- true
-
-
- make
- -j5
- wrap_gtsam_unstable_build
- true
- true
- true
-
-
- make
- -j5
- wrap_gtsam_unstable
- true
- true
- true
-
-
- make
- -j5
- wrap
- true
- true
- true
-
-
- make
- -j5
- wrap_gtsam_distclean
- true
- true
- true
-
-
- make
- -j5
- wrap_gtsam_unstable_distclean
- true
- true
- true
-
-
- make
- -j5
- doc
- true
- true
- true
-
-
- make
- -j5
- doc_clean
- true
- true
- true
-
-
- make
- -j5
- check
- true
- true
- true
-
-
- make
- -j5
- check.geometry_unstable
- true
- true
- true
-
-
- make
- -j5
- check.linear_unstable
- true
- true
- true
-
-
- make
- -j6 -j8
- gtsam_unstable-shared
- true
- true
- true
-
-
- make
- -j6 -j8
- gtsam_unstable-static
- true
- true
- true
-
-
- make
- -j6 -j8
- check.nonlinear_unstable
- true
- true
- true
-
-
- make
- -j5
- check.tests
- true
- true
- true
-
-
- make
- -j2 VERBOSE=1
- check.navigation
- true
- false
- true
-
-
- make
- -j4
- check.sam
- true
- true
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j2
- install
- true
- true
- true
-
-
- make
- -j2
- all
- true
- true
- true
-
-
- cmake
- ..
- true
- false
- true
-
-
- make
- -j2
- testGaussianFactor.run
- true
- true
- true
-
-
- make
- -j5
- testCal3Bundler.run
- true
- true
- true
-
-
- make
- -j5
- testCal3DS2.run
- true
- true
- true
-
-
- make
- -j5
- testCalibratedCamera.run
- true
- true
- true
-
-
- make
- -j5
- testEssentialMatrix.run
- true
- true
- true
-
-
- make
- -j1 VERBOSE=1
- testHomography2.run
- true
- false
- true
-
-
- make
- -j5
- testPinholeCamera.run
- true
- true
- true
-
-
- make
- -j5
- testPoint2.run
- true
- true
- true
-
-
- make
- -j5
- testPoint3.run
- true
- true
- true
-
-
- make
- -j5
- testPose2.run
- true
- true
- true
-
-
- make
- -j5
- testPose3.run
- true
- true
- true
-
-
- make
- -j5
- testRot3M.run
- true
- true
- true
-
-
- make
- -j5
- testSphere2.run
- true
- true
- true
-
-
- make
- -j5
- testStereoCamera.run
- true
- true
- true
-
-
- make
- -j5
- testCal3Unified.run
- true
- true
- true
-
-
- make
- -j5
- testRot2.run
- true
- true
- true
-
-
- make
- -j5
- testRot3Q.run
- true
- true
- true
-
-
- make
- -j5
- testRot3.run
- true
- true
- true
-
-
- make
- -j4
- testSO3.run
- true
- true
- true
-
-
- make
- -j4
- testQuaternion.run
- true
- true
- true
-
-
- make
- -j4
- testOrientedPlane3.run
- true
- true
- true
-
-
- make
- -j4
- testPinholePose.run
- true
- true
- true
-
-
- make
- -j4
- testCyclic.run
- true
- true
- true
-
-
- make
- -j4
- testUnit3.run
- true
- true
- true
-
-
- make
- -j4
- testBearingRange.run
- true
- true
- true
-
-
- make
- -j2
- all
- true
- true
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j5
- all
- true
- false
- true
-
-
- make
- -j5
- check
- true
- false
- true
-
-
- make
- -j2
- all
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j2
- all
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j2
- clean all
- true
- true
- true
-
-
- make
- -j1
- testDiscreteBayesTree.run
- true
- false
- true
-
-
- make
- -j5
- testDiscreteConditional.run
- true
- true
- true
-
-
- make
- -j5
- testDiscreteFactor.run
- true
- true
- true
-
-
- make
- -j5
- testDiscreteFactorGraph.run
- true
- true
- true
-
-
- make
- -j5
- testDiscreteMarginals.run
- true
- true
- true
-
-
- make
- -j5
- testIMUSystem.run
- true
- true
- true
-
-
- make
- -j5
- testPoseRTV.run
- true
- true
- true
-
-
- make
- -j5
- testVelocityConstraint.run
- true
- true
- true
-
-
- make
- -j5
- testVelocityConstraint3.run
- true
- true
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- timeCalibratedCamera.run
- true
- true
- true
-
-
- make
- -j2
- timeRot3.run
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j5
- testWrap.run
- true
- true
- true
-
-
- make
- -j5
- testSpirit.run
- true
- true
- true
-
-
- make
- -j5
- check.wrap
- true
- true
- true
-
-
- make
- -j5
- testMethod.run
- true
- true
- true
-
-
- make
- -j5
- testClass.run
- true
- true
- true
-
-
- make
- -j4
- testType.run
- true
- true
- true
-
-
- make
- -j4
- testArgument.run
- true
- true
- true
-
-
- make
- -j4
- testReturnValue.run
- true
- true
- true
-
-
- make
- -j4
- testTemplate.run
- true
- true
- true
-
-
- make
- -j4
- testGlobalFunction.run
- true
- true
- true
-
-
- make
- -j5
- schedulingExample.run
- true
- true
- true
-
-
- make
- -j5
- schedulingQuals12.run
- true
- true
- true
-
-
- make
- -j5
- schedulingQuals13.run
- true
- true
- true
-
-
- make
- -j5
- testCSP.run
- true
- true
- true
-
-
- make
- -j5
- testScheduler.run
- true
- true
- true
-
-
- make
- -j5
- testSudoku.run
- true
- true
- true
-
-
- make
- -j2
- vSFMexample.run
- true
- true
- true
-
-
- make
- -j2
- testVSLAMGraph
- true
- true
- true
-
-
- make
- -j5
- testMatrix.run
- true
- true
- true
-
-
- make
- -j5
- testVector.run
- true
- true
- true
-
-
- make
- -j5
- testNumericalDerivative.run
- true
- true
- true
-
-
- make
- -j5
- testVerticalBlockMatrix.run
- true
- true
- true
-
-
- make
- -j4
- testOptionalJacobian.run
- true
- true
- true
-
-
- make
- -j4
- testGroup.run
- true
- true
- true
-
-
- make
- -j5
- check.tests
- true
- true
- true
-
-
- make
- -j2
- timeGaussianFactorGraph.run
- true
- true
- true
-
-
- make
- -j5
- testMarginals.run
- true
- true
- true
-
-
- make
- -j5
- testGaussianISAM2.run
- true
- true
- true
-
-
- make
- -j5
- testSymbolicFactorGraphB.run
- true
- true
- true
-
-
- make
- -j2
- timeSequentialOnDataset.run
- true
- true
- true
-
-
- make
- -j5
- testGradientDescentOptimizer.run
- true
- true
- true
-
-
- make
- -j2
- testGaussianFactor.run
- true
- true
- true
-
-
- make
- -j2
- testNonlinearOptimizer.run
- true
- true
- true
-
-
- make
- -j2
- testGaussianBayesNet.run
- true
- true
- true
-
-
- make
- -j2
- testNonlinearISAM.run
- true
- true
- true
-
-
- make
- -j2
- testNonlinearEquality.run
- true
- true
- true
-
-
- make
- -j2
- testExtendedKalmanFilter.run
- true
- true
- true
-
-
- make
- -j5
- timing.tests
- true
- true
- true
-
-
- make
- -j5
- testNonlinearFactor.run
- true
- true
- true
-
-
- make
- -j5
- clean
- true
- true
- true
-
-
- make
- -j5
- testGaussianJunctionTreeB.run
- true
- true
- true
-
-
- make
-
- testGraph.run
- true
- false
- true
-
-
- make
-
- testJunctionTree.run
- true
- false
- true
-
-
- make
-
- testSymbolicBayesNetB.run
- true
- false
- true
-
-
- make
- -j5
- testGaussianISAM.run
- true
- true
- true
-
-
- make
- -j5
- testDoglegOptimizer.run
- true
- true
- true
-
-
- make
- -j5
- testNonlinearFactorGraph.run
- true
- true
- true
-
-
- make
- -j5
- testIterative.run
- true
- true
- true
-
-
- make
- -j5
- testSubgraphSolver.run
- true
- true
- true
-
-
- make
- -j5
- testGaussianFactorGraphB.run
- true
- true
- true
-
-
- make
- -j5
- testSummarization.run
- true
- true
- true
-
-
- make
- -j5
- testManifold.run
- true
- true
- true
-
-
- make
- -j4
- testLie.run
- true
- true
- true
-
-
- make
- -j4
- testSerializationSLAM.run
- true
- true
- true
-
-
- make
- -j5
- testParticleFactor.run
- true
- true
- true
-
-
- make
- -j2
- testGaussianFactor.run
- true
- true
- true
-
-
- make
- -j2
- install
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j2
- all
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j2
- all
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j5
- testAntiFactor.run
- true
- true
- true
-
-
- make
- -j5
- testPriorFactor.run
- true
- true
- true
-
-
- make
- -j5
- testDataset.run
- true
- true
- true
-
-
- make
- -j5
- testEssentialMatrixFactor.run
- true
- true
- true
-
-
- make
- -j5
- testGeneralSFMFactor_Cal3Bundler.run
- true
- true
- true
-
-
- make
- -j5
- testGeneralSFMFactor.run
- true
- true
- true
-
-
- make
- -j5
- testProjectionFactor.run
- true
- true
- true
-
-
- make
- -j5
- testRotateFactor.run
- true
- true
- true
-
-
- make
- -j5
- testPoseRotationPrior.run
- true
- true
- true
-
-
- make
- -j5
- testImplicitSchurFactor.run
- true
- true
- true
-
-
- make
- -j4
- testOrientedPlane3Factor.run
- true
- true
- true
-
-
- make
- -j4
- testSmartProjectionPoseFactor.run
- true
- true
- true
-
-
- make
- -j4
- testInitializePose3.run
- true
- true
- true
-
-
- make
- -j2
- SimpleRotation.run
- true
- true
- true
-
-
- make
- -j5
- CameraResectioning.run
- true
- true
- true
-
-
- make
- -j5
- PlanarSLAMExample.run
- true
- true
- true
-
-
- make
- -j2
- all
- true
- true
- true
-
-
- make
- -j2
- easyPoint2KalmanFilter.run
- true
- true
- true
-
-
- make
- -j2
- elaboratePoint2KalmanFilter.run
- true
- true
- true
-
-
- make
- -j5
- Pose2SLAMExample.run
- true
- true
- true
-
-
- make
- -j2
- Pose2SLAMwSPCG_easy.run
- true
- true
- true
-
-
- make
- -j5
- UGM_small.run
- true
- true
- true
-
-
- make
- -j5
- LocalizationExample.run
- true
- true
- true
-
-
- make
- -j5
- OdometryExample.run
- true
- true
- true
-
-
- make
- -j5
- RangeISAMExample_plaza2.run
- true
- true
- true
-
-
- make
- -j5
- SelfCalibrationExample.run
- true
- true
- true
-
-
- make
- -j5
- SFMExample.run
- true
- true
- true
-
-
- make
- -j5
- VisualISAMExample.run
- true
- true
- true
-
-
- make
- -j5
- VisualISAM2Example.run
- true
- true
- true
-
-
- make
- -j5
- Pose2SLAMExample_graphviz.run
- true
- true
- true
-
-
- make
- -j5
- Pose2SLAMExample_graph.run
- true
- true
- true
-
-
- make
- -j5
- SFMExample_bal.run
- true
- true
- true
-
-
- make
- -j5
- Pose2SLAMExample_lago.run
- true
- true
- true
-
-
- make
- -j5
- Pose2SLAMExample_g2o.run
- true
- true
- true
-
-
- make
- -j5
- SFMExample_SmartFactor.run
- true
- true
- true
-
-
- make
- -j4
- Pose2SLAMExampleExpressions.run
- true
- true
- true
-
-
- make
- -j4
- SFMExampleExpressions.run
- true
- true
- true
-
-
- make
- -j4
- SFMExampleExpressions_bal.run
- true
- true
- true
-
-
- make
- -j5
- testLago.run
- true
- true
- true
-
-
- make
- -j5
- testLinearContainerFactor.run
- true
- true
- true
-
-
- make
- -j5
- testOrdering.run
- true
- true
- true
-
-
- make
- -j5
- testValues.run
- true
- true
- true
-
-
- make
- -j5
- testWhiteNoiseFactor.run
- true
- true
- true
-
-
- make
- -j4
- testExpression.run
- true
- true
- true
-
-
- make
- -j4
- testAdaptAutoDiff.run
- true
- true
- true
-
-
- make
- -j4
- testCallRecord.run
- true
- true
- true
-
-
- make
- -j4
- testExpressionFactor.run
- true
- true
- true
-
-
- make
- -j4
- testExecutionTrace.run
- true
- true
- true
-
-
- make
- -j4
- testSerializationNonlinear.run
- true
- true
- true
-
-
- make
- -j4
- testImuFactor.run
- true
- true
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- tests/testGaussianISAM2
- true
- false
- true
-
-
- make
- -j5
- timeCalibratedCamera.run
- true
- true
- true
-
-
- make
- -j5
- timePinholeCamera.run
- true
- true
- true
-
-
- make
- -j5
- timeStereoCamera.run
- true
- true
- true
-
-
- make
- -j5
- timeLago.run
- true
- true
- true
-
-
- make
- -j5
- timePose3.run
- true
- true
- true
-
-
- make
- -j4
- timeAdaptAutoDiff.run
- true
- true
- true
-
-
- make
- -j4
- timeCameraExpression.run
- true
- true
- true
-
-
- make
- -j4
- timeOneCameraExpression.run
- true
- true
- true
-
-
- make
- -j4
- timeSFMExpressions.run
- true
- true
- true
-
-
- make
- -j4
- timeIncremental.run
- true
- true
- true
-
-
- make
- -j4
- timeSchurFactors.run
- true
- true
- true
-
-
- make
- -j4
- timeRot2.run
- true
- true
- true
-
-
- make
- -j2
- testRot3.run
- true
- true
- true
-
-
- make
- -j2
- testRot2.run
- true
- true
- true
-
-
- make
- -j2
- testPose3.run
- true
- true
- true
-
-
- make
- -j2
- timeRot3.run
- true
- true
- true
-
-
- make
- -j2
- testPose2.run
- true
- true
- true
-
-
- make
- -j2
- testCal3_S2.run
- true
- true
- true
-
-
- make
- -j2
- testSimpleCamera.run
- true
- true
- true
-
-
- make
- -j2
- testHomography2.run
- true
- true
- true
-
-
- make
- -j2
- testCalibratedCamera.run
- true
- true
- true
-
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j2
- testPoint2.run
- true
- true
- true
-
-
- make
- -j4
- testBearingFactor.run
- true
- true
- true
-
-
- make
- -j4
- testRangeFactor.run
- true
- true
- true
-
-
- make
- -j4
- testBearingRangeFactor.run
- true
- true
- true
-
-
- make
- -j5
- wrap
- true
- true
- true
-
-
-
-
diff --git a/.github/scripts/python.sh b/.github/scripts/python.sh
new file mode 100644
index 000000000..a71e14c97
--- /dev/null
+++ b/.github/scripts/python.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+##########################################################
+# Build and test the GTSAM Python wrapper.
+##########################################################
+
+set -x -e
+
+# install TBB with _debug.so files
+function install_tbb()
+{
+ TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download
+ TBB_VERSION=4.4.5
+ TBB_DIR=tbb44_20160526oss
+ TBB_SAVEPATH="/tmp/tbb.tgz"
+
+ if [ "$(uname)" == "Linux" ]; then
+ OS_SHORT="lin"
+ TBB_LIB_DIR="intel64/gcc4.4"
+ SUDO="sudo"
+
+ elif [ "$(uname)" == "Darwin" ]; then
+ OS_SHORT="osx"
+ TBB_LIB_DIR=""
+ SUDO=""
+
+ fi
+
+ wget "${TBB_BASEURL}/${TBB_VERSION}/${TBB_DIR}_${OS_SHORT}.tgz" -O $TBB_SAVEPATH
+ tar -C /tmp -xf $TBB_SAVEPATH
+
+ TBBROOT=/tmp/$TBB_DIR
+ # Copy the needed files to the correct places.
+ # This works correctly for CI builds, instead of setting path variables.
+ # This is what Homebrew does to install TBB on Macs
+ $SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/
+ $SUDO cp -R $TBBROOT/include/ /usr/local/include/
+
+}
+
+if [ -z ${PYTHON_VERSION+x} ]; then
+ echo "Please provide the Python version to build against!"
+ exit 127
+fi
+
+PYTHON="python${PYTHON_VERSION}"
+
+if [[ $(uname) == "Darwin" ]]; then
+ brew install wget
+else
+ # Install a system package required by our library
+ sudo apt-get install -y wget libicu-dev python3-pip python3-setuptools
+fi
+
+PATH=$PATH:$($PYTHON -c "import site; print(site.USER_BASE)")/bin
+
+[ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
+
+
+BUILD_PYBIND="ON"
+TYPEDEF_POINTS_TO_VECTORS="ON"
+
+sudo $PYTHON -m pip install -r $GITHUB_WORKSPACE/python/requirements.txt
+
+mkdir $GITHUB_WORKSPACE/build
+cd $GITHUB_WORKSPACE/build
+
+cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=Release \
+ -DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_UNSTABLE=ON \
+ -DGTSAM_USE_QUATERNIONS=OFF \
+ -DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
+ -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
+ -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
+ -DGTSAM_BUILD_PYTHON=${BUILD_PYBIND} \
+ -DGTSAM_TYPEDEF_POINTS_TO_VECTORS=${TYPEDEF_POINTS_TO_VECTORS} \
+ -DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
+ -DPYTHON_EXECUTABLE:FILEPATH=$(which $PYTHON) \
+ -DGTSAM_ALLOW_DEPRECATED_SINCE_V41=OFF \
+ -DCMAKE_INSTALL_PREFIX=$GITHUB_WORKSPACE/gtsam_install
+
+make -j$(nproc) install
+
+
+cd $GITHUB_WORKSPACE/build/python
+$PYTHON setup.py install --user --prefix=
+cd $GITHUB_WORKSPACE/python/gtsam/tests
+$PYTHON -m unittest discover
diff --git a/.travis.sh b/.github/scripts/unix.sh
old mode 100755
new mode 100644
similarity index 69%
rename from .travis.sh
rename to .github/scripts/unix.sh
index 9fc09a3f8..55a8ac372
--- a/.travis.sh
+++ b/.github/scripts/unix.sh
@@ -1,20 +1,25 @@
#!/bin/bash
+##########################################################
+# Build and test GTSAM for *nix based systems.
+# Specifically Linux and macOS.
+##########################################################
+
# install TBB with _debug.so files
function install_tbb()
{
TBB_BASEURL=https://github.com/oneapi-src/oneTBB/releases/download
- TBB_VERSION=4.4.2
- TBB_DIR=tbb44_20151115oss
+ TBB_VERSION=4.4.5
+ TBB_DIR=tbb44_20160526oss
TBB_SAVEPATH="/tmp/tbb.tgz"
- if [ "$TRAVIS_OS_NAME" == "linux" ]; then
+ if [ "$(uname)" == "Linux" ]; then
OS_SHORT="lin"
TBB_LIB_DIR="intel64/gcc4.4"
SUDO="sudo"
- elif [ "$TRAVIS_OS_NAME" == "osx" ]; then
- OS_SHORT="lin"
+ elif [ "$(uname)" == "Darwin" ]; then
+ OS_SHORT="osx"
TBB_LIB_DIR=""
SUDO=""
@@ -25,7 +30,7 @@ function install_tbb()
TBBROOT=/tmp/$TBB_DIR
# Copy the needed files to the correct places.
- # This works correctly for travis builds, instead of setting path variables.
+ # This works correctly for CI builds, instead of setting path variables.
# This is what Homebrew does to install TBB on Macs
$SUDO cp -R $TBBROOT/lib/$TBB_LIB_DIR/* /usr/local/lib/
$SUDO cp -R $TBBROOT/include/ /usr/local/include/
@@ -38,15 +43,14 @@ function configure()
set -e # Make sure any error makes the script to return an error code
set -x # echo
- SOURCE_DIR=`pwd`
- BUILD_DIR=build
+ SOURCE_DIR=$GITHUB_WORKSPACE
+ BUILD_DIR=$GITHUB_WORKSPACE/build
#env
- git clean -fd || true
rm -fr $BUILD_DIR || true
mkdir $BUILD_DIR && cd $BUILD_DIR
- install_tbb
+ [ "${GTSAM_WITH_TBB:-OFF}" = "ON" ] && install_tbb
if [ ! -z "$GCC_VERSION" ]; then
export CC=gcc-$GCC_VERSION
@@ -59,11 +63,15 @@ function configure()
-DGTSAM_BUILD_TESTS=${GTSAM_BUILD_TESTS:-OFF} \
-DGTSAM_BUILD_UNSTABLE=${GTSAM_BUILD_UNSTABLE:-ON} \
-DGTSAM_WITH_TBB=${GTSAM_WITH_TBB:-OFF} \
- -DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \
-DGTSAM_BUILD_EXAMPLES_ALWAYS=${GTSAM_BUILD_EXAMPLES_ALWAYS:-ON} \
- -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=${GTSAM_ALLOW_DEPRECATED_SINCE_V4:-OFF} \
+ -DGTSAM_ALLOW_DEPRECATED_SINCE_V41=${GTSAM_ALLOW_DEPRECATED_SINCE_V41:-OFF} \
+ -DGTSAM_USE_QUATERNIONS=${GTSAM_USE_QUATERNIONS:-OFF} \
+ -DGTSAM_ROT3_EXPMAP=${GTSAM_ROT3_EXPMAP:-ON} \
+ -DGTSAM_POSE3_EXPMAP=${GTSAM_POSE3_EXPMAP:-ON} \
-DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
- -DCMAKE_VERBOSE_MAKEFILE=ON
+ -DBOOST_ROOT=$BOOST_ROOT \
+ -DBoost_NO_SYSTEM_PATHS=ON \
+ -DBoost_ARCHITECTURE=-x64
}
@@ -71,7 +79,7 @@ function configure()
function finish ()
{
# Print ccache stats
- ccache -s
+ [ -x "$(command -v ccache)" ] && ccache -s
cd $SOURCE_DIR
}
@@ -111,4 +119,4 @@ case $1 in
-t)
test
;;
-esac
+esac
\ No newline at end of file
diff --git a/.github/workflows/build-linux.yml b/.github/workflows/build-linux.yml
new file mode 100644
index 000000000..be1da35bb
--- /dev/null
+++ b/.github/workflows/build-linux.yml
@@ -0,0 +1,85 @@
+name: Linux CI
+
+on: [push, pull_request]
+
+jobs:
+ build:
+ name: ${{ matrix.name }} ${{ matrix.build_type }}
+ runs-on: ${{ matrix.os }}
+
+ env:
+ CTEST_OUTPUT_ON_FAILURE: ON
+ CTEST_PARALLEL_LEVEL: 2
+ CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
+ GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }}
+
+ strategy:
+ fail-fast: false
+ matrix:
+ # Github Actions requires a single row to be added to the build matrix.
+ # See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
+ name: [
+ ubuntu-18.04-gcc-5,
+ ubuntu-18.04-gcc-9,
+ ubuntu-18.04-clang-9,
+ ]
+
+ build_type: [Debug, Release]
+ build_unstable: [ON]
+ include:
+ - name: ubuntu-18.04-gcc-5
+ os: ubuntu-18.04
+ compiler: gcc
+ version: "5"
+
+ - name: ubuntu-18.04-gcc-9
+ os: ubuntu-18.04
+ compiler: gcc
+ version: "9"
+
+ - name: ubuntu-18.04-clang-9
+ os: ubuntu-18.04
+ compiler: clang
+ version: "9"
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@master
+ - name: Install (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ # LLVM (clang) 9 is not in Bionic's repositories so we add the official LLVM repository.
+ if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then
+ # (ipv4|ha).pool.sks-keyservers.net is the SKS GPG global keyserver pool
+ # ipv4 avoids potential timeouts because of crappy IPv6 infrastructure
+ # 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository
+ # This key is not in the keystore by default for Ubuntu so we need to add it.
+ LLVM_KEY=15CF4D18AF4F7421
+ gpg --keyserver ipv4.pool.sks-keyservers.net --recv-key $LLVM_KEY || gpg --keyserver ha.pool.sks-keyservers.net --recv-key $LLVM_KEY
+ gpg -a --export $LLVM_KEY | sudo apt-key add -
+ sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
+ fi
+ sudo apt-get -y update
+
+ sudo apt install cmake build-essential pkg-config libpython-dev python-numpy
+
+ echo "BOOST_ROOT=$(echo $BOOST_ROOT_1_72_0)" >> $GITHUB_ENV
+ echo "LD_LIBRARY_PATH=$(echo $BOOST_ROOT_1_72_0/lib)" >> $GITHUB_ENV
+
+ if [ "${{ matrix.compiler }}" = "gcc" ]; then
+ sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
+ echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
+ echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
+ else
+ sudo apt-get install -y clang-${{ matrix.version }} g++-multilib
+ echo "CC=clang-${{ matrix.version }}" >> $GITHUB_ENV
+ echo "CXX=clang++-${{ matrix.version }}" >> $GITHUB_ENV
+ fi
+ - name: Check Boost version
+ if: runner.os == 'Linux'
+ run: |
+ echo "BOOST_ROOT = $BOOST_ROOT"
+ - name: Build and Test (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ bash .github/scripts/unix.sh -t
diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml
new file mode 100644
index 000000000..69873980a
--- /dev/null
+++ b/.github/workflows/build-macos.yml
@@ -0,0 +1,53 @@
+name: macOS CI
+
+on: [pull_request]
+
+jobs:
+ build:
+ name: ${{ matrix.name }} ${{ matrix.build_type }}
+ runs-on: ${{ matrix.os }}
+
+ env:
+ CTEST_OUTPUT_ON_FAILURE: ON
+ CTEST_PARALLEL_LEVEL: 2
+ CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
+ GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }}
+ strategy:
+ fail-fast: false
+ matrix:
+      # GitHub Actions requires a single row to be added to the build matrix.
+ # See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
+ name: [
+ macOS-10.15-xcode-11.3.1,
+ ]
+
+ build_type: [Debug, Release]
+ build_unstable: [ON]
+ include:
+ - name: macOS-10.15-xcode-11.3.1
+ os: macOS-10.15
+ compiler: xcode
+ version: "11.3.1"
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@master
+ - name: Install (macOS)
+ if: runner.os == 'macOS'
+ run: |
+ brew tap ProfFan/robotics
+ brew install cmake ninja
+ brew install ProfFan/robotics/boost
+ if [ "${{ matrix.compiler }}" = "gcc" ]; then
+ brew install gcc@${{ matrix.version }}
+ echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
+ echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
+ else
+ sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
+ echo "CC=clang" >> $GITHUB_ENV
+ echo "CXX=clang++" >> $GITHUB_ENV
+ fi
+ - name: Build and Test (macOS)
+ if: runner.os == 'macOS'
+ run: |
+ bash .github/scripts/unix.sh -t
diff --git a/.github/workflows/build-python.yml b/.github/workflows/build-python.yml
new file mode 100644
index 000000000..3f9a2e98a
--- /dev/null
+++ b/.github/workflows/build-python.yml
@@ -0,0 +1,113 @@
+name: Python CI
+
+on: [pull_request]
+
+jobs:
+ build:
+ name: ${{ matrix.name }} ${{ matrix.build_type }} Python ${{ matrix.python_version }}
+ runs-on: ${{ matrix.os }}
+
+ env:
+ CTEST_OUTPUT_ON_FAILURE: ON
+ CTEST_PARALLEL_LEVEL: 2
+ CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
+ PYTHON_VERSION: ${{ matrix.python_version }}
+ strategy:
+ fail-fast: false
+ matrix:
+      # GitHub Actions requires a single row to be added to the build matrix.
+ # See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
+ name: [
+ ubuntu-18.04-gcc-5,
+ ubuntu-18.04-gcc-9,
+ ubuntu-18.04-clang-9,
+ macOS-10.15-xcode-11.3.1,
+ ubuntu-18.04-gcc-5-tbb,
+ ]
+
+ build_type: [Debug, Release]
+ python_version: [3]
+ include:
+ - name: ubuntu-18.04-gcc-5
+ os: ubuntu-18.04
+ compiler: gcc
+ version: "5"
+
+ - name: ubuntu-18.04-gcc-9
+ os: ubuntu-18.04
+ compiler: gcc
+ version: "9"
+
+ - name: ubuntu-18.04-clang-9
+ os: ubuntu-18.04
+ compiler: clang
+ version: "9"
+
+ - name: macOS-10.15-xcode-11.3.1
+ os: macOS-10.15
+ compiler: xcode
+ version: "11.3.1"
+
+ - name: ubuntu-18.04-gcc-5-tbb
+ os: ubuntu-18.04
+ compiler: gcc
+ version: "5"
+ flag: tbb
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@master
+ - name: Install (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then
+ # (ipv4|ha).pool.sks-keyservers.net is the SKS GPG global keyserver pool
+          # ipv4 avoids potential timeouts caused by unreliable IPv6 connectivity to the keyserver pool
+ # 15CF4D18AF4F7421 is the GPG key for the LLVM apt repository
+ # This key is not in the keystore by default for Ubuntu so we need to add it.
+ LLVM_KEY=15CF4D18AF4F7421
+ gpg --keyserver ipv4.pool.sks-keyservers.net --recv-key $LLVM_KEY || gpg --keyserver ha.pool.sks-keyservers.net --recv-key $LLVM_KEY
+ gpg -a --export $LLVM_KEY | sudo apt-key add -
+ sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
+ fi
+ sudo apt-get -y update
+
+ sudo apt install cmake build-essential pkg-config libpython-dev python-numpy libboost-all-dev
+
+ if [ "${{ matrix.compiler }}" = "gcc" ]; then
+ sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
+ echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
+ echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
+ else
+ sudo apt-get install -y clang-${{ matrix.version }} g++-multilib
+ echo "CC=clang-${{ matrix.version }}" >> $GITHUB_ENV
+ echo "CXX=clang++-${{ matrix.version }}" >> $GITHUB_ENV
+ fi
+ - name: Install (macOS)
+ if: runner.os == 'macOS'
+ run: |
+ brew tap ProfFan/robotics
+ brew install cmake ninja
+ brew install ProfFan/robotics/boost
+ if [ "${{ matrix.compiler }}" = "gcc" ]; then
+ brew install gcc@${{ matrix.version }}
+ echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
+ echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
+ else
+ sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
+ echo "CC=clang" >> $GITHUB_ENV
+ echo "CXX=clang++" >> $GITHUB_ENV
+ fi
+ - name: Set GTSAM_WITH_TBB Flag
+ if: matrix.flag == 'tbb'
+ run: |
+ echo "GTSAM_WITH_TBB=ON" >> $GITHUB_ENV
+ echo "GTSAM Uses TBB"
+ - name: Build (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ bash .github/scripts/python.sh
+ - name: Build (macOS)
+ if: runner.os == 'macOS'
+ run: |
+ bash .github/scripts/python.sh
diff --git a/.github/workflows/build-special.yml b/.github/workflows/build-special.yml
new file mode 100644
index 000000000..532f917c1
--- /dev/null
+++ b/.github/workflows/build-special.yml
@@ -0,0 +1,128 @@
+name: Special Cases CI
+
+on: [pull_request]
+
+jobs:
+ build:
+ name: ${{ matrix.name }} ${{ matrix.build_type }}
+ runs-on: ${{ matrix.os }}
+
+ env:
+ CTEST_OUTPUT_ON_FAILURE: ON
+ CTEST_PARALLEL_LEVEL: 2
+ CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
+ GTSAM_BUILD_UNSTABLE: ON
+
+ strategy:
+ fail-fast: false
+
+ matrix:
+      # GitHub Actions requires a single row to be added to the build matrix.
+ # See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
+ name:
+ [
+ ubuntu-gcc-deprecated,
+ ubuntu-gcc-quaternions,
+ ubuntu-gcc-tbb,
+ ubuntu-cayleymap,
+ ]
+
+ build_type: [Debug, Release]
+
+ include:
+ - name: ubuntu-gcc-deprecated
+ os: ubuntu-18.04
+ compiler: gcc
+ version: "9"
+ flag: deprecated
+
+ - name: ubuntu-gcc-quaternions
+ os: ubuntu-18.04
+ compiler: gcc
+ version: "9"
+ flag: quaternions
+
+ - name: ubuntu-gcc-tbb
+ os: ubuntu-18.04
+ compiler: gcc
+ version: "9"
+ flag: tbb
+
+ - name: ubuntu-cayleymap
+ os: ubuntu-18.04
+ compiler: gcc
+ version: "9"
+ flag: cayley
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@master
+
+ - name: Install (Linux)
+ if: runner.os == 'Linux'
+ run: |
+ # LLVM 9 is not in Bionic's repositories so we add the official LLVM repository.
+ if [ "${{ matrix.compiler }}" = "clang" ] && [ "${{ matrix.version }}" = "9" ]; then
+ gpg --keyserver pool.sks-keyservers.net --recv-key 15CF4D18AF4F7421
+ gpg -a --export 15CF4D18AF4F7421 | sudo apt-key add -
+ sudo add-apt-repository "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main"
+ fi
+ sudo apt-get -y update
+
+ sudo apt install cmake build-essential pkg-config libpython-dev python-numpy
+
+ echo "BOOST_ROOT=$(echo $BOOST_ROOT_1_72_0)" >> $GITHUB_ENV
+ echo "LD_LIBRARY_PATH=$(echo $BOOST_ROOT_1_72_0/lib)" >> $GITHUB_ENV
+
+ if [ "${{ matrix.compiler }}" = "gcc" ]; then
+ sudo apt-get install -y g++-${{ matrix.version }} g++-${{ matrix.version }}-multilib
+ echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
+ echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
+ else
+ sudo apt-get install -y clang-${{ matrix.version }} g++-multilib
+ echo "CC=clang-${{ matrix.version }}" >> $GITHUB_ENV
+ echo "CXX=clang++-${{ matrix.version }}" >> $GITHUB_ENV
+ fi
+
+ - name: Install (macOS)
+ if: runner.os == 'macOS'
+ run: |
+ brew install cmake ninja boost
+ if [ "${{ matrix.compiler }}" = "gcc" ]; then
+ brew install gcc@${{ matrix.version }}
+ echo "CC=gcc-${{ matrix.version }}" >> $GITHUB_ENV
+ echo "CXX=g++-${{ matrix.version }}" >> $GITHUB_ENV
+ else
+ sudo xcode-select -switch /Applications/Xcode_${{ matrix.version }}.app
+ echo "CC=clang" >> $GITHUB_ENV
+ echo "CXX=clang++" >> $GITHUB_ENV
+ fi
+
+ - name: Set Allow Deprecated Flag
+ if: matrix.flag == 'deprecated'
+ run: |
+ echo "GTSAM_ALLOW_DEPRECATED_SINCE_V41=ON" >> $GITHUB_ENV
+ echo "Allow deprecated since version 4.1"
+
+ - name: Set Use Quaternions Flag
+ if: matrix.flag == 'quaternions'
+ run: |
+ echo "GTSAM_USE_QUATERNIONS=ON" >> $GITHUB_ENV
+ echo "Use Quaternions for rotations"
+
+ - name: Set GTSAM_WITH_TBB Flag
+ if: matrix.flag == 'tbb'
+ run: |
+ echo "GTSAM_WITH_TBB=ON" >> $GITHUB_ENV
+ echo "GTSAM Uses TBB"
+
+ - name: Use Cayley Transform for Rot3
+ if: matrix.flag == 'cayley'
+ run: |
+ echo "GTSAM_POSE3_EXPMAP=OFF" >> $GITHUB_ENV
+ echo "GTSAM_ROT3_EXPMAP=OFF" >> $GITHUB_ENV
+ echo "GTSAM Uses Cayley map for Rot3"
+
+ - name: Build & Test
+ run: |
+ bash .github/scripts/unix.sh -t
diff --git a/.github/workflows/build-windows.yml b/.github/workflows/build-windows.yml
new file mode 100644
index 000000000..887d41972
--- /dev/null
+++ b/.github/workflows/build-windows.yml
@@ -0,0 +1,78 @@
+name: Windows CI
+
+on: [pull_request]
+
+jobs:
+ build:
+ name: ${{ matrix.name }} ${{ matrix.build_type }}
+ runs-on: ${{ matrix.os }}
+
+ env:
+ CTEST_OUTPUT_ON_FAILURE: ON
+ CTEST_PARALLEL_LEVEL: 2
+ CMAKE_BUILD_TYPE: ${{ matrix.build_type }}
+ GTSAM_BUILD_UNSTABLE: ${{ matrix.build_unstable }}
+ strategy:
+ fail-fast: false
+ matrix:
+      # GitHub Actions requires a single row to be added to the build matrix.
+ # See https://help.github.com/en/articles/workflow-syntax-for-github-actions.
+ name: [
+ #TODO This build keeps timing out, need to understand why.
+ # windows-2016-cl,
+ windows-2019-cl,
+ ]
+
+ build_type: [Debug, Release]
+ build_unstable: [ON]
+ include:
+
+ #TODO This build keeps timing out, need to understand why.
+ # - name: windows-2016-cl
+ # os: windows-2016
+ # compiler: cl
+
+ - name: windows-2019-cl
+ os: windows-2019
+ compiler: cl
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@master
+ - name: Install (Windows)
+ if: runner.os == 'Windows'
+ shell: powershell
+ run: |
+ Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh')
+ scoop install ninja --global
+ if ("${{ matrix.compiler }}".StartsWith("clang")) {
+ scoop install llvm --global
+ }
+ if ("${{ matrix.compiler }}" -eq "gcc") {
+ # Chocolatey GCC is broken on the windows-2019 image.
+ # See: https://github.com/DaanDeMeyer/doctest/runs/231595515
+ # See: https://github.community/t5/GitHub-Actions/Something-is-wrong-with-the-chocolatey-installed-version-of-gcc/td-p/32413
+ scoop install gcc --global
+ echo "CC=gcc" >> $GITHUB_ENV
+ echo "CXX=g++" >> $GITHUB_ENV
+ } elseif ("${{ matrix.compiler }}" -eq "clang") {
+ echo "CC=clang" >> $GITHUB_ENV
+ echo "CXX=clang++" >> $GITHUB_ENV
+ } else {
+ echo "CC=${{ matrix.compiler }}" >> $GITHUB_ENV
+ echo "CXX=${{ matrix.compiler }}" >> $GITHUB_ENV
+ }
+ # Scoop modifies the PATH so we make the modified PATH global.
+ echo "$env:PATH" >> $GITHUB_PATH
+ - name: Build (Windows)
+ if: runner.os == 'Windows'
+ run: |
+ cmake -E remove_directory build
+ echo "BOOST_ROOT_1_72_0: ${env:BOOST_ROOT_1_72_0}"
+ cmake -B build -S . -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DBOOST_ROOT="${env:BOOST_ROOT_1_72_0}" -DBOOST_INCLUDEDIR="${env:BOOST_ROOT_1_72_0}\boost\include" -DBOOST_LIBRARYDIR="${env:BOOST_ROOT_1_72_0}\lib"
+ cmake --build build --config ${{ matrix.build_type }} --target gtsam
+ cmake --build build --config ${{ matrix.build_type }} --target gtsam_unstable
+ cmake --build build --config ${{ matrix.build_type }} --target wrap
+ cmake --build build --config ${{ matrix.build_type }} --target check.base
+ cmake --build build --config ${{ matrix.build_type }} --target check.base_unstable
+ cmake --build build --config ${{ matrix.build_type }} --target check.linear
diff --git a/.github/workflows/trigger-python.yml b/.github/workflows/trigger-python.yml
index 8fad9e7ca..1e8981d99 100644
--- a/.github/workflows/trigger-python.yml
+++ b/.github/workflows/trigger-python.yml
@@ -1,8 +1,11 @@
-# This triggers Cython builds on `gtsam-manylinux-build`
+# This triggers Python builds on `gtsam-manylinux-build`
name: Trigger Python Builds
-on: push
+on:
+ push:
+ branches:
+ - develop
jobs:
- triggerCython:
+ triggerPython:
runs-on: ubuntu-latest
steps:
- name: Repository Dispatch
@@ -10,5 +13,5 @@ jobs:
with:
token: ${{ secrets.PYTHON_CI_REPO_ACCESS_TOKEN }}
repository: borglab/gtsam-manylinux-build
- event-type: cython-wrapper
+ event-type: python-wrapper
client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'
diff --git a/.gitignore b/.gitignore
index 1d89cac25..cde059767 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,15 +9,10 @@
*.txt.user
*.txt.user.6d59f0c
*.pydevproject
-cython/venv
-cython/gtsam.cpp
-cython/gtsam.cpython-35m-darwin.so
-cython/gtsam.pyx
-cython/gtsam.so
-cython/gtsam_wrapper.pxd
.vscode
.env
/.vs/
/CMakeSettings.json
# for QtCreator:
CMakeLists.txt.user*
+xcode/
diff --git a/.travis.python.sh b/.travis.python.sh
deleted file mode 100644
index 1ef5799aa..000000000
--- a/.travis.python.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-set -x -e
-
-if [ -z ${PYTHON_VERSION+x} ]; then
- echo "Please provide the Python version to build against!"
- exit 127
-fi
-
-PYTHON="python${PYTHON_VERSION}"
-
-if [[ $(uname) == "Darwin" ]]; then
- brew install wget
-else
- # Install a system package required by our library
- sudo apt-get install wget libicu-dev python3-pip python3-setuptools
-fi
-
-CURRDIR=$(pwd)
-
-sudo $PYTHON -m pip install -r ./cython/requirements.txt
-
-mkdir $CURRDIR/build
-cd $CURRDIR/build
-
-cmake $CURRDIR -DCMAKE_BUILD_TYPE=Release \
- -DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_UNSTABLE=ON \
- -DGTSAM_USE_QUATERNIONS=OFF \
- -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
- -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
- -DGTSAM_INSTALL_CYTHON_TOOLBOX=ON \
- -DGTSAM_PYTHON_VERSION=$PYTHON_VERSION \
- -DGTSAM_ALLOW_DEPRECATED_SINCE_V4=OFF \
- -DCMAKE_INSTALL_PREFIX=$CURRDIR/../gtsam_install
-
-make -j$(nproc) install
-
-cd $CURRDIR/../gtsam_install/cython
-
-sudo $PYTHON setup.py install
-
-cd $CURRDIR/cython/gtsam/tests
-
-$PYTHON -m unittest discover
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index ca6a426ea..000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,140 +0,0 @@
-language: cpp
-cache: ccache
-sudo: required
-dist: xenial
-
-addons:
- apt:
- sources:
- - ubuntu-toolchain-r-test
- - sourceline: 'deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-9 main'
- key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key'
- packages:
- - g++-9
- - clang-9
- - build-essential pkg-config
- - cmake
- - libpython-dev python-numpy
- - libboost-all-dev
-
-# before_install:
- # - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update; fi
-
-install:
- - if [ "$TRAVIS_OS_NAME" == "osx" ]; then HOMEBREW_NO_AUTO_UPDATE=1 brew install ccache ; fi
- - if [ "$TRAVIS_OS_NAME" == "osx" ]; then export PATH="/usr/local/opt/ccache/libexec:$PATH" ; fi
-
-# We first do the compile stage specified below, then the matrix expansion specified after.
-stages:
- - compile
- - test
- - special
-
-env:
- global:
- - MAKEFLAGS="-j2"
- - CCACHE_SLOPPINESS=pch_defines,time_macros
-
-# Compile stage without building examples/tests to populate the caches.
-jobs:
-# -------- STAGE 1: COMPILE -----------
- include:
-# on Mac, GCC
- - stage: compile
- os: osx
- compiler: gcc
- env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
- script: bash .travis.sh -b
- - stage: compile
- os: osx
- compiler: gcc
- env: CMAKE_BUILD_TYPE=Release
- script: bash .travis.sh -b
-# on Mac, CLANG
- - stage: compile
- os: osx
- compiler: clang
- env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
- script: bash .travis.sh -b
- - stage: compile
- os: osx
- compiler: clang
- env: CMAKE_BUILD_TYPE=Release
- script: bash .travis.sh -b
-# on Linux, GCC
- - stage: compile
- os: linux
- compiler: gcc
- env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
- script: bash .travis.sh -b
- - stage: compile
- os: linux
- compiler: gcc
- env: CMAKE_BUILD_TYPE=Release
- script: bash .travis.sh -b
-# on Linux, CLANG
- - stage: compile
- os: linux
- compiler: clang
- env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
- script: bash .travis.sh -b
- - stage: compile
- os: linux
- compiler: clang
- env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release
- script: bash .travis.sh -b
-# on Linux, with deprecated ON to make sure that path still compiles/tests
- - stage: special
- os: linux
- compiler: clang
- env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_ALLOW_DEPRECATED_SINCE_V4=ON
- script: bash .travis.sh -b
-# on Linux, with GTSAM_WITH_TBB on to make sure GTSAM still compiles/tests
- - stage: special
- os: linux
- compiler: gcc
- env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF GTSAM_WITH_TBB=ON
- script: bash .travis.sh -t
-# -------- STAGE 2: TESTS -----------
-# on Mac, GCC
- - stage: test
- os: osx
- compiler: clang
- env: CMAKE_BUILD_TYPE=Release
- script: bash .travis.sh -t
- - stage: test
- os: osx
- compiler: clang
- env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
- script: bash .travis.sh -t
- - stage: test
- os: linux
- compiler: gcc
- env: CMAKE_BUILD_TYPE=Release
- script: bash .travis.sh -t
- - stage: test
- os: linux
- compiler: gcc
- env: CMAKE_BUILD_TYPE=Debug GTSAM_BUILD_UNSTABLE=OFF
- script: bash .travis.sh -t
- - stage: test
- os: linux
- compiler: clang
- env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release
- script: bash .travis.sh -t
-# on Linux, with quaternions ON to make sure that path still compiles/tests
- - stage: special
- os: linux
- compiler: clang
- env: CC=clang-9 CXX=clang++-9 CMAKE_BUILD_TYPE=Release GTSAM_BUILD_UNSTABLE=OFF GTSAM_USE_QUATERNIONS=ON
- script: bash .travis.sh -t
- - stage: special
- os: linux
- compiler: gcc
- env: PYTHON_VERSION=3
- script: bash .travis.python.sh
- - stage: special
- os: osx
- compiler: clang
- env: PYTHON_VERSION=3
- script: bash .travis.python.sh
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a810ac9df..0c39089c1 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -9,25 +9,23 @@ endif()
# Set the version number for the library
set (GTSAM_VERSION_MAJOR 4)
-set (GTSAM_VERSION_MINOR 0)
-set (GTSAM_VERSION_PATCH 2)
+set (GTSAM_VERSION_MINOR 1)
+set (GTSAM_VERSION_PATCH 0)
math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
+set (CMAKE_PROJECT_VERSION ${GTSAM_VERSION_STRING})
+set (CMAKE_PROJECT_VERSION_MAJOR ${GTSAM_VERSION_MAJOR})
+set (CMAKE_PROJECT_VERSION_MINOR ${GTSAM_VERSION_MINOR})
+set (CMAKE_PROJECT_VERSION_PATCH ${GTSAM_VERSION_PATCH})
+
###############################################################################
# Gather information, perform checks, set defaults
-# Set the default install path to home
-#set (CMAKE_INSTALL_PREFIX ${HOME} CACHE PATH "Install prefix for library")
-
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
include(GtsamMakeConfigFile)
include(GNUInstallDirs)
-# Record the root dir for gtsam - needed during external builds, e.g., ROS
-set(GTSAM_SOURCE_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR})
-message(STATUS "GTSAM_SOURCE_ROOT_DIR: [${GTSAM_SOURCE_ROOT_DIR}]")
-
# Load build type flags and default to Debug mode
include(GtsamBuildTypes)
@@ -40,378 +38,21 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})
message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. ")
endif()
-# See whether gtsam_unstable is available (it will be present only if we're using a git checkout)
-if(EXISTS "${PROJECT_SOURCE_DIR}/gtsam_unstable" AND IS_DIRECTORY "${PROJECT_SOURCE_DIR}/gtsam_unstable")
- set(GTSAM_UNSTABLE_AVAILABLE 1)
-else()
- set(GTSAM_UNSTABLE_AVAILABLE 0)
-endif()
-
-# ----------------------------------------------------------------------------
-# Uninstall target, for "make uninstall"
-# ----------------------------------------------------------------------------
-configure_file(
- "${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
- "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake"
- IMMEDIATE @ONLY)
-
-add_custom_target(uninstall
- "${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake")
-
-
-###############################################################################
-# Set up options
-
-# Configurable Options
-if(GTSAM_UNSTABLE_AVAILABLE)
- option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" ON)
-endif()
-option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
-option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF)
-option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." OFF)
-option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." OFF)
-option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
-option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
-option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
-option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
-option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
-option(GTSAM_ALLOW_DEPRECATED_SINCE_V4 "Allow use of methods/functions deprecated in GTSAM 4" ON)
-option(GTSAM_TYPEDEF_POINTS_TO_VECTORS "Typedef Point2 and Point3 to Eigen::Vector equivalents" OFF)
-option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
-option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
-if(NOT MSVC AND NOT XCODE_VERSION)
- option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON)
-endif()
-
-if(NOT MSVC AND NOT XCODE_VERSION)
- # Set the build type to upper case for downstream use
- string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_UPPER)
-
- # Set the GTSAM_BUILD_TAG variable.
- # If build type is Release, set to blank (""), else set to the build type.
- if(${CMAKE_BUILD_TYPE_UPPER} STREQUAL "RELEASE")
- set(GTSAM_BUILD_TAG "") # Don't create release mode tag on installed directory
- else()
- set(GTSAM_BUILD_TAG "${CMAKE_BUILD_TYPE}")
- endif()
-endif()
-
-# Options relating to MATLAB wrapper
-# TODO: Check for matlab mex binary before handling building of binaries
-option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
-option(GTSAM_INSTALL_CYTHON_TOOLBOX "Enable/Disable installation of Cython toolbox" OFF)
-option(GTSAM_BUILD_WRAP "Enable/Disable building of matlab/cython wrap utility (necessary for matlab/cython interface)" ON)
-set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of python to build the cython wrapper for (or Default)")
-
-# Check / set dependent variables for MATLAB wrapper
-if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND NOT GTSAM_BUILD_WRAP)
- message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX or GTSAM_INSTALL_CYTHON_TOOLBOX is enabled, please also enable GTSAM_BUILD_WRAP")
-endif()
-if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND GTSAM_BUILD_TYPE_POSTFIXES)
- set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE_UPPER}_POSTFIX})
-endif()
-if(GTSAM_INSTALL_WRAP AND NOT GTSAM_BUILD_WRAP)
- message(FATAL_ERROR "GTSAM_INSTALL_WRAP is enabled, please also enable GTSAM_BUILD_WRAP")
-endif()
-
-if(GTSAM_INSTALL_MATLAB_TOOLBOX AND NOT BUILD_SHARED_LIBS)
- message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.")
-endif()
-
-if(GTSAM_INSTALL_MATLAB_TOOLBOX AND GTSAM_TYPEDEF_POINTS_TO_VECTORS)
- message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and GTSAM_TYPEDEF_POINTS_TO_VECTORS are both enabled. For now, the MATLAB toolbox cannot deal with this yet. Please turn one of the two options off.")
-endif()
-
-if(GTSAM_INSTALL_CYTHON_TOOLBOX AND GTSAM_TYPEDEF_POINTS_TO_VECTORS)
- message(FATAL_ERROR "GTSAM_INSTALL_CYTHON_TOOLBOX and GTSAM_TYPEDEF_POINTS_TO_VECTORS are both enabled. For now, the CYTHON toolbox cannot deal with this yet. Please turn one of the two options off.")
-endif()
-
-# Flags for choosing default packaging tools
-set(CPACK_SOURCE_GENERATOR "TGZ" CACHE STRING "CPack Default Source Generator")
-set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator")
-
-###############################################################################
-# Find boost
-
-# To change the path for boost, you will need to set:
-# BOOST_ROOT: path to install prefix for boost
-# Boost_NO_SYSTEM_PATHS: set to true to keep the find script from ignoring BOOST_ROOT
-
-if(MSVC)
- # By default, boost only builds static libraries on windows
- set(Boost_USE_STATIC_LIBS ON) # only find static libs
- # If we ever reset above on windows and, ...
- # If we use Boost shared libs, disable auto linking.
- # Some libraries, at least Boost Program Options, rely on this to export DLL symbols.
- if(NOT Boost_USE_STATIC_LIBS)
- list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK)
- endif()
- # Virtual memory range for PCH exceeded on VS2015
- if(MSVC_VERSION LESS 1910) # older than VS2017
- list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295)
- endif()
-endif()
-
-# If building DLLs in MSVC, we need to avoid EIGEN_STATIC_ASSERT()
-# or explicit instantiation will generate build errors.
-# See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017
-#
-if(MSVC AND BUILD_SHARED_LIBS)
- list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT)
-endif()
-
-# Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such.
-set(BOOST_FIND_MINIMUM_VERSION 1.43)
-set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex)
-
-find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS})
-
-# Required components
-if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR
- NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY)
- message(FATAL_ERROR "Missing required Boost components >= v1.43, please install/upgrade Boost or configure your search paths.")
-endif()
-
-option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)
-# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library)
-set(GTSAM_BOOST_LIBRARIES
- Boost::serialization
- Boost::system
- Boost::filesystem
- Boost::thread
- Boost::date_time
- Boost::regex
-)
-if (GTSAM_DISABLE_NEW_TIMERS)
- message("WARNING: GTSAM timing instrumentation manually disabled")
- list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC DGTSAM_DISABLE_NEW_TIMERS)
-else()
- if(Boost_TIMER_LIBRARY)
- list(APPEND GTSAM_BOOST_LIBRARIES Boost::timer Boost::chrono)
- else()
- list(APPEND GTSAM_BOOST_LIBRARIES rt) # When using the header-only boost timer library, need -lrt
- message("WARNING: GTSAM timing instrumentation will use the older, less accurate, Boost timer library because boost older than 1.48 was found.")
- endif()
-endif()
-
-###############################################################################
-# Find TBB
-find_package(TBB 4.4 COMPONENTS tbb tbbmalloc)
-
-# Set up variables if we're using TBB
-if(TBB_FOUND AND GTSAM_WITH_TBB)
- set(GTSAM_USE_TBB 1) # This will go into config.h
- if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
- set(TBB_GREATER_EQUAL_2020 1)
- else()
- set(TBB_GREATER_EQUAL_2020 0)
- endif()
- # all definitions and link requisites will go via imported targets:
- # tbb & tbbmalloc
- list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc)
-else()
- set(GTSAM_USE_TBB 0) # This will go into config.h
-endif()
-
-###############################################################################
-# Prohibit Timing build mode in combination with TBB
-if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE STREQUAL "Timing"))
- message(FATAL_ERROR "Timing build mode cannot be used together with TBB. Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.")
-endif()
-
-
-###############################################################################
-# Find Google perftools
-find_package(GooglePerfTools)
-
-###############################################################################
-# Support ccache, if installed
-if(NOT MSVC AND NOT XCODE_VERSION)
- find_program(CCACHE_FOUND ccache)
- if(CCACHE_FOUND)
- if(GTSAM_BUILD_WITH_CCACHE)
- set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
- set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
- else()
- set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "")
- set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "")
- endif()
- endif(CCACHE_FOUND)
-endif()
-
-###############################################################################
-# Find MKL
-find_package(MKL)
-
-if(MKL_FOUND AND GTSAM_WITH_EIGEN_MKL)
- set(GTSAM_USE_EIGEN_MKL 1) # This will go into config.h
- set(EIGEN_USE_MKL_ALL 1) # This will go into config.h - it makes Eigen use MKL
- list(APPEND GTSAM_ADDITIONAL_LIBRARIES ${MKL_LIBRARIES})
-
- # --no-as-needed is required with gcc according to the MKL link advisor
- if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
- set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-as-needed")
- endif()
-else()
- set(GTSAM_USE_EIGEN_MKL 0)
- set(EIGEN_USE_MKL_ALL 0)
-endif()
-
-###############################################################################
-# Find OpenMP (if we're also using MKL)
-find_package(OpenMP) # do this here to generate correct message if disabled
-
-if(GTSAM_WITH_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP AND GTSAM_USE_EIGEN_MKL)
- if(OPENMP_FOUND AND GTSAM_USE_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP)
- set(GTSAM_USE_EIGEN_MKL_OPENMP 1) # This will go into config.h
- list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC ${OpenMP_CXX_FLAGS})
- endif()
-endif()
-
-
-###############################################################################
-# Option for using system Eigen or GTSAM-bundled Eigen
-### These patches only affect usage of MKL. If you want to enable MKL, you *must*
-### use our patched version of Eigen
-### See: http://eigen.tuxfamily.org/bz/show_bug.cgi?id=704 (Householder QR MKL selection)
-### http://eigen.tuxfamily.org/bz/show_bug.cgi?id=705 (Fix MKL LLT return code)
-option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" OFF)
-option(GTSAM_WITH_EIGEN_UNSUPPORTED "Install Eigen's unsupported modules" OFF)
-
-# Switch for using system Eigen or GTSAM-bundled Eigen
-if(GTSAM_USE_SYSTEM_EIGEN)
- find_package(Eigen3 REQUIRED)
-
- # Use generic Eigen include paths e.g.
- set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}")
-
- # check if MKL is also enabled - can have one or the other, but not both!
- # Note: Eigen >= v3.2.5 includes our patches
- if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_LESS 3.2.5))
- message(FATAL_ERROR "MKL requires at least Eigen 3.2.5, and your system appears to have an older version. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, or disable GTSAM_WITH_EIGEN_MKL")
- endif()
-
- # Check for Eigen version which doesn't work with MKL
- # See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527 for details.
- if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4))
- message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.")
- endif()
-
- # The actual include directory (for BUILD cmake target interface):
- set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}")
-else()
- # Use bundled Eigen include path.
- # Clear any variables set by FindEigen3
- if(EIGEN3_INCLUDE_DIR)
- set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE)
- endif()
-
- # set full path to be used by external projects
- # this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in
- set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/")
-
- # The actual include directory (for BUILD cmake target interface):
- set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/")
-endif()
-
-# Detect Eigen version:
-set(EIGEN_VER_H "${GTSAM_EIGEN_INCLUDE_FOR_BUILD}/Eigen/src/Core/util/Macros.h")
-if (EXISTS ${EIGEN_VER_H})
- file(READ "${EIGEN_VER_H}" STR_EIGEN_VERSION)
-
- # Extract the Eigen version from the Macros.h file, lines "#define EIGEN_WORLD_VERSION XX", etc...
-
- string(REGEX MATCH "EIGEN_WORLD_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${STR_EIGEN_VERSION}")
- string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${GTSAM_EIGEN_VERSION_WORLD}")
-
- string(REGEX MATCH "EIGEN_MAJOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${STR_EIGEN_VERSION}")
- string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${GTSAM_EIGEN_VERSION_MAJOR}")
-
- string(REGEX MATCH "EIGEN_MINOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${STR_EIGEN_VERSION}")
- string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${GTSAM_EIGEN_VERSION_MINOR}")
-
- set(GTSAM_EIGEN_VERSION "${GTSAM_EIGEN_VERSION_WORLD}.${GTSAM_EIGEN_VERSION_MAJOR}.${GTSAM_EIGEN_VERSION_MINOR}")
-
- message(STATUS "Found Eigen version: ${GTSAM_EIGEN_VERSION}")
-else()
- message(WARNING "Cannot determine Eigen version, missing file: `${EIGEN_VER_H}`")
-endif ()
-
-if (MSVC)
- if (BUILD_SHARED_LIBS)
- # mute eigen static assert to avoid errors in shared lib
- list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT)
- endif()
- list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen
-endif()
-
-if (APPLE AND BUILD_SHARED_LIBS)
- # Set the default install directory on macOS
- set(CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib")
-endif()
-
-###############################################################################
-# Global compile options
+include(cmake/HandleBoost.cmake) # Boost
+include(cmake/HandleCCache.cmake) # ccache
+include(cmake/HandleCPack.cmake) # CPack
+include(cmake/HandleEigen.cmake) # Eigen3
+include(cmake/HandleGeneralOptions.cmake) # CMake build options
+include(cmake/HandleMKL.cmake) # MKL
+include(cmake/HandleOpenMP.cmake) # OpenMP
+include(cmake/HandlePerfTools.cmake) # Google perftools
+include(cmake/HandlePython.cmake) # Python options and commands
+include(cmake/HandleTBB.cmake) # TBB
+include(cmake/HandleUninstall.cmake) # for "make uninstall"
-# Build list of possible allocators
-set(possible_allocators "")
-if(GTSAM_USE_TBB)
- list(APPEND possible_allocators TBB)
- set(preferred_allocator TBB)
-else()
- list(APPEND possible_allocators BoostPool STL)
- set(preferred_allocator STL)
-endif()
-if(GOOGLE_PERFTOOLS_FOUND)
- list(APPEND possible_allocators tcmalloc)
-endif()
+include(cmake/HandleAllocators.cmake) # Must be after tbb, pertools
-# Check if current allocator choice is valid and set cache option
-list(FIND possible_allocators "${GTSAM_DEFAULT_ALLOCATOR}" allocator_valid)
-if(allocator_valid EQUAL -1)
- set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator" FORCE)
-else()
- set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator")
-endif()
-set_property(CACHE GTSAM_DEFAULT_ALLOCATOR PROPERTY STRINGS ${possible_allocators})
-mark_as_advanced(GTSAM_DEFAULT_ALLOCATOR)
-
-# Define compile flags depending on allocator
-if("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "BoostPool")
- set(GTSAM_ALLOCATOR_BOOSTPOOL 1)
-elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "STL")
- set(GTSAM_ALLOCATOR_STL 1)
-elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "TBB")
- set(GTSAM_ALLOCATOR_TBB 1)
-elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "tcmalloc")
- set(GTSAM_ALLOCATOR_STL 1) # tcmalloc replaces malloc, so to use it we use the STL allocator
- list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc")
-endif()
-
-if(MSVC)
- list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS)
- list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4251 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings
- list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code
-endif()
-
-# GCC 4.8+ complains about local typedefs which we use for shared_ptr etc.
-if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
- if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8)
- list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs)
- endif()
-endif()
-
-# As of XCode 7, clang also complains about this
-if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
- if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0)
- list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs)
- endif()
-endif()
-
-if(GTSAM_ENABLE_CONSISTENCY_CHECKS)
- # This should be made PUBLIC if GTSAM_EXTRA_CONSISTENCY_CHECKS is someday used in a public .h
- list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE GTSAM_EXTRA_CONSISTENCY_CHECKS)
-endif()
+include(cmake/HandleGlobalBuildFlags.cmake) # Build flags
###############################################################################
# Add components
@@ -419,14 +60,16 @@ endif()
# Build CppUnitLite
add_subdirectory(CppUnitLite)
-# Build wrap
-if (GTSAM_BUILD_WRAP)
+# This is the new wrapper
+if(GTSAM_BUILD_PYTHON)
+ # Need to set this for the wrap package so we don't use the default value.
+ set(WRAP_PYTHON_VERSION ${GTSAM_PYTHON_VERSION}
+ CACHE STRING "The Python version to use for wrapping")
+
add_subdirectory(wrap)
- # suppress warning of cython line being too long
- if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-misleading-indentation")
- endif()
-endif(GTSAM_BUILD_WRAP)
+ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/wrap/cmake")
+ add_subdirectory(python)
+endif()
# Build GTSAM library
add_subdirectory(gtsam)
@@ -447,29 +90,13 @@ endif()
# Matlab toolbox
if (GTSAM_INSTALL_MATLAB_TOOLBOX)
- add_subdirectory(matlab)
+ add_subdirectory(matlab)
endif()
-# Cython wrap
-if (GTSAM_INSTALL_CYTHON_TOOLBOX)
- set(GTSAM_INSTALL_CYTHON_TOOLBOX 1)
- # Set up cache options
- set(GTSAM_CYTHON_INSTALL_PATH "" CACHE PATH "Cython toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/cython")
- if(NOT GTSAM_CYTHON_INSTALL_PATH)
- set(GTSAM_CYTHON_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/cython")
- endif()
- set(GTSAM_EIGENCY_INSTALL_PATH ${GTSAM_CYTHON_INSTALL_PATH}/gtsam_eigency)
- add_subdirectory(cython)
-else()
- set(GTSAM_INSTALL_CYTHON_TOOLBOX 0) # This will go into config.h
-endif()
-
-
# Install config and export files
GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in")
export(TARGETS ${GTSAM_EXPORTED_TARGETS} FILE GTSAM-exports.cmake)
-
# Check for doxygen availability - optional dependency
find_package(Doxygen)
@@ -481,142 +108,11 @@ endif()
# CMake Tools
add_subdirectory(cmake)
-
-###############################################################################
-# Set up CPack
-set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "GTSAM")
-set(CPACK_PACKAGE_VENDOR "Frank Dellaert, Georgia Institute of Technology")
-set(CPACK_PACKAGE_CONTACT "Frank Dellaert, dellaert@cc.gatech.edu")
-set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.md")
-set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE")
-set(CPACK_PACKAGE_VERSION_MAJOR ${GTSAM_VERSION_MAJOR})
-set(CPACK_PACKAGE_VERSION_MINOR ${GTSAM_VERSION_MINOR})
-set(CPACK_PACKAGE_VERSION_PATCH ${GTSAM_VERSION_PATCH})
-set(CPACK_PACKAGE_INSTALL_DIRECTORY "CMake ${CMake_VERSION_MAJOR}.${CMake_VERSION_MINOR}")
-#set(CPACK_INSTALLED_DIRECTORIES "doc;.") # Include doc directory
-#set(CPACK_INSTALLED_DIRECTORIES ".") # FIXME: throws error
-set(CPACK_SOURCE_IGNORE_FILES "/build*;/\\\\.;/makestats.sh$")
-set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/gtsam_unstable/")
-set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/package_scripts/")
-set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
-#set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-aspn${GTSAM_VERSION_PATCH}") # Used for creating ASPN tarballs
-
-# Deb-package specific cpack
-set(CPACK_DEBIAN_PACKAGE_NAME "libgtsam-dev")
-set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.43)") #Example: "libc6 (>= 2.3.1-6), libgcc1 (>= 1:3.4.2-12)")
-
-
-###############################################################################
# Print configuration variables
-message(STATUS "===============================================================")
-message(STATUS "================ Configuration Options ======================")
-message(STATUS " CMAKE_CXX_COMPILER_ID type : ${CMAKE_CXX_COMPILER_ID}")
-message(STATUS " CMAKE_CXX_COMPILER_VERSION : ${CMAKE_CXX_COMPILER_VERSION}")
-message(STATUS " CMake version : ${CMAKE_VERSION}")
-message(STATUS " CMake generator : ${CMAKE_GENERATOR}")
-message(STATUS " CMake build tool : ${CMAKE_BUILD_TOOL}")
-message(STATUS "Build flags ")
-print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ")
-print_config_flag(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all' ")
-print_config_flag(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'")
-if (DOXYGEN_FOUND)
- print_config_flag(${GTSAM_BUILD_DOCS} "Build Docs ")
-endif()
-print_config_flag(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries ")
-print_config_flag(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name ")
-if(GTSAM_UNSTABLE_AVAILABLE)
- print_config_flag(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ")
-endif()
-
-if(NOT MSVC AND NOT XCODE_VERSION)
- print_config_flag(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ")
- message(STATUS " Build type : ${CMAKE_BUILD_TYPE}")
- message(STATUS " C compilation flags : ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
- message(STATUS " C++ compilation flags : ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
-endif()
-
-print_build_options_for_target(gtsam)
-
-message(STATUS " Use System Eigen : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
-
-if(GTSAM_USE_TBB)
- message(STATUS " Use Intel TBB : Yes")
-elseif(TBB_FOUND)
- message(STATUS " Use Intel TBB : TBB found but GTSAM_WITH_TBB is disabled")
-else()
- message(STATUS " Use Intel TBB : TBB not found")
-endif()
-if(GTSAM_USE_EIGEN_MKL)
- message(STATUS " Eigen will use MKL : Yes")
-elseif(MKL_FOUND)
- message(STATUS " Eigen will use MKL : MKL found but GTSAM_WITH_EIGEN_MKL is disabled")
-else()
- message(STATUS " Eigen will use MKL : MKL not found")
-endif()
-if(GTSAM_USE_EIGEN_MKL_OPENMP)
- message(STATUS " Eigen will use MKL and OpenMP : Yes")
-elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL)
- message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled")
-elseif(OPENMP_FOUND AND NOT MKL_FOUND)
- message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but MKL not found")
-elseif(OPENMP_FOUND)
- message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled")
-else()
- message(STATUS " Eigen will use MKL and OpenMP : OpenMP not found")
-endif()
-message(STATUS " Default allocator : ${GTSAM_DEFAULT_ALLOCATOR}")
-
-if(GTSAM_THROW_CHEIRALITY_EXCEPTION)
- message(STATUS " Cheirality exceptions enabled : YES")
-else()
- message(STATUS " Cheirality exceptions enabled : NO")
-endif()
-
-if(NOT MSVC AND NOT XCODE_VERSION)
- if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE)
- message(STATUS " Build with ccache : Yes")
- elseif(CCACHE_FOUND)
- message(STATUS " Build with ccache : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
- else()
- message(STATUS " Build with ccache : No")
- endif()
-endif()
-
-message(STATUS "Packaging flags ")
-message(STATUS " CPack Source Generator : ${CPACK_SOURCE_GENERATOR}")
-message(STATUS " CPack Generator : ${CPACK_GENERATOR}")
-
-message(STATUS "GTSAM flags ")
-print_config_flag(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ")
-print_config_flag(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ")
-print_config_flag(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ")
-print_config_flag(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ")
-print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V4} "Deprecated in GTSAM 4 allowed ")
-print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ")
-print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
-print_config_flag(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
-print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")
-
-message(STATUS "MATLAB toolbox flags ")
-print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install matlab toolbox ")
-
-message(STATUS "Cython toolbox flags ")
-print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ")
-if(GTSAM_INSTALL_CYTHON_TOOLBOX)
- message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}")
-endif()
-message(STATUS "===============================================================")
+include(cmake/HandlePrintConfiguration.cmake)
# Print warnings at the end
-if(GTSAM_WITH_TBB AND NOT TBB_FOUND)
- message(WARNING "TBB 4.4 or newer was not found - this is ok, but note that GTSAM parallelization will be disabled. Set GTSAM_WITH_TBB to 'Off' to avoid this warning.")
-endif()
-if(GTSAM_WITH_EIGEN_MKL AND NOT MKL_FOUND)
- message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning. See INSTALL.md for notes on performance.")
-endif()
-if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND)
- message(WARNING "Your compiler does not support OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. See INSTALL.md for notes on performance.")
-endif()
+include(cmake/HandleFinalChecks.cmake)
# Include CPack *after* all flags
include(CPack)
diff --git a/CppUnitLite/Test.h b/CppUnitLite/Test.h
index b1fb0cf08..a898c83ef 100644
--- a/CppUnitLite/Test.h
+++ b/CppUnitLite/Test.h
@@ -64,7 +64,7 @@ class Test
class testGroup##testName##Test : public Test \
{ public: testGroup##testName##Test () : Test (#testName "Test", __FILE__, __LINE__, true) {} \
virtual ~testGroup##testName##Test () {};\
- void run (TestResult& result_);} \
+ void run (TestResult& result_) override;} \
testGroup##testName##Instance; \
void testGroup##testName##Test::run (TestResult& result_)
@@ -82,7 +82,7 @@ class Test
class testGroup##testName##Test : public Test \
{ public: testGroup##testName##Test () : Test (#testName "Test", __FILE__, __LINE__, false) {} \
virtual ~testGroup##testName##Test () {};\
- void run (TestResult& result_);} \
+ void run (TestResult& result_) override;} \
testGroup##testName##Instance; \
void testGroup##testName##Test::run (TestResult& result_)
diff --git a/GTSAM-Concepts.md b/GTSAM-Concepts.md
index a6cfee984..953357ede 100644
--- a/GTSAM-Concepts.md
+++ b/GTSAM-Concepts.md
@@ -72,9 +72,9 @@ A Lie group is both a manifold *and* a group. Hence, a LIE_GROUP type should imp
However, we now also need to be able to evaluate the derivatives of compose and inverse.
Hence, we have the following extra valid static functions defined in the struct `gtsam::traits`:
-* `r = traits::Compose(p,q,Hq,Hp)`
+* `r = traits::Compose(p,q,Hp,Hq)`
* `q = traits::Inverse(p,Hp)`
-* `r = traits::Between(p,q,Hq,H2p)`
+* `r = traits::Between(p,q,Hp,Hq)`
where above the *H* arguments stand for optional Jacobian arguments.
That makes it possible to create factors implementing priors (PriorFactor) or relations between two instances of a Lie group type (BetweenFactor).
diff --git a/INSTALL.md b/INSTALL.md
index b8f73f153..1fddf4df0 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -13,7 +13,7 @@ $ make install
## Important Installation Notes
1. GTSAM requires the following libraries to be installed on your system:
- - BOOST version 1.43 or greater (install through Linux repositories or MacPorts)
+ - BOOST version 1.58 or greater (install through Linux repositories or MacPorts)
- Cmake version 3.0 or higher
- Support for XCode 4.3 command line tools on Mac requires CMake 2.8.8 or higher
@@ -70,7 +70,34 @@ execute commands as follows for an out-of-source build:
This will build the library and unit tests, run all of the unit tests,
and then install the library itself.
-## CMake Configuration Options and Details
+# Windows Installation
+
+This section details how to build a GTSAM `.sln` file using Visual Studio.
+
+### Prerequisites
+
+- Visual Studio with C++ CMake tools for Windows
+- All the other pre-requisites listed above.
+
+### Steps
+
+1. Open Visual Studio.
+2. Select `Open a local folder` and select the GTSAM source directory.
+3. Go to `Project -> CMake Settings`.
+ - (Optional) Set `Configuration name`.
+ - (Optional) Set `Configuration type`.
+ - Set the `Toolset` to `msvc_x64_x64`. If you know what toolset you require, then skip this step.
+ - Update the `Build root` to `${projectDir}\build\${name}`.
+ - You can optionally create a new configuration for a `Release` build.
+ - Set the necessary CMake variables for your use case.
+ - Click on `Show advanced settings`.
+ - For `CMake generator`, select a version which matches `Visual Studio Win64`, e.g. `Visual Studio 16 2019 Win64`.
+ - Save the settings (Ctrl + S).
+4. Click on `Project -> Generate Cache`. This will generate the CMake build files (as seen in the Output window).
+5. The last step will generate a `GTSAM.sln` file in the `build` directory. At this point, GTSAM can be used as a regular Visual Studio project.
+
+
+# CMake Configuration Options and Details
GTSAM has a number of options that can be configured, which is best done with
one of the following:
@@ -78,7 +105,7 @@ one of the following:
- ccmake the curses GUI for cmake
- cmake-gui a real GUI for cmake
-### Important Options:
+## Important Options:
#### CMAKE_BUILD_TYPE
We support several build configurations for GTSAM (case insensitive)
@@ -173,7 +200,7 @@ NOTE: If _GLIBCXX_DEBUG is used to compile gtsam, anything that links against g
Intel has a guide for installing MKL on Linux through APT repositories at .
After following the instructions, add the following to your `~/.bashrc` (and afterwards, open a new terminal before compiling GTSAM):
-`LD_PRELOAD` need only be set if you are building the cython wrapper to use GTSAM from python.
+`LD_PRELOAD` need only be set if you are building the python wrapper to use GTSAM from python.
```sh
source /opt/intel/mkl/bin/mklvars.sh intel64
export LD_PRELOAD="$LD_PRELOAD:/opt/intel/mkl/lib/intel64/libmkl_core.so:/opt/intel/mkl/lib/intel64/libmkl_sequential.so"
@@ -190,6 +217,6 @@ Failing to specify `LD_PRELOAD` may lead to errors such as:
`ImportError: /opt/intel/mkl/lib/intel64/libmkl_vml_avx2.so: undefined symbol: mkl_serv_getenv`
or
`Intel MKL FATAL ERROR: Cannot load libmkl_avx2.so or libmkl_def.so.`
-when importing GTSAM using the cython wrapper in python.
+when importing GTSAM using the python wrapper.
diff --git a/LICENSE b/LICENSE
index d828deb55..228a6b942 100644
--- a/LICENSE
+++ b/LICENSE
@@ -23,3 +23,5 @@ ordering library
- Included unmodified in gtsam/3rdparty/metis
- Licenced under Apache License v 2.0, provided in
gtsam/3rdparty/metis/LICENSE.txt
+- Spectra v0.9.0: Sparse Eigenvalue Computation Toolkit as a Redesigned ARPACK.
+ - Licenced under MPL2, provided at https://github.com/yixuan/spectra
diff --git a/README.md b/README.md
index 093e35f0f..60eff197a 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,11 @@
# README - Georgia Tech Smoothing and Mapping Library
+**Important Note**
+
+As of August 1 2020, the `develop` branch is officially in "Pre 4.1" mode, and features deprecated in 4.0 have been removed. Please use the last [4.0.3 release](https://github.com/borglab/gtsam/releases/tag/4.0.3) if you need those features.
+
+However, most are easily converted and can be tracked down (in 4.0.3) by disabling the cmake flag `GTSAM_ALLOW_DEPRECATED_SINCE_V4`.
+
## What is GTSAM?
GTSAM is a C++ library that implements smoothing and
@@ -7,13 +13,16 @@ mapping (SAM) in robotics and vision, using Factor Graphs and Bayes
Networks as the underlying computing paradigm rather than sparse
matrices.
-| Platform | Build Status |
-|:---------:|:-------------:|
-| gcc/clang | [![Build Status](https://travis-ci.com/borglab/gtsam.svg?branch=develop)](https://travis-ci.com/borglab/gtsam/) |
-| MSVC | [![Build status](https://ci.appveyor.com/api/projects/status/3enllitj52jsxwfg/branch/develop?svg=true)](https://ci.appveyor.com/project/dellaert/gtsam) |
+The current support matrix is:
+| Platform | Compiler | Build Status |
+|:------------:|:---------:|:-------------:|
+| Ubuntu 18.04 | gcc/clang | ![Linux CI](https://github.com/borglab/gtsam/workflows/Linux%20CI/badge.svg) |
+| macOS | clang | ![macOS CI](https://github.com/borglab/gtsam/workflows/macOS%20CI/badge.svg) |
+| Windows | MSVC | ![Windows CI](https://github.com/borglab/gtsam/workflows/Windows%20CI/badge.svg) |
-On top of the C++ library, GTSAM includes [wrappers for MATLAB & Python](##Wrappers).
+
+On top of the C++ library, GTSAM includes [wrappers for MATLAB & Python](#wrappers).
## Quickstart
@@ -31,7 +40,7 @@ $ make install
Prerequisites:
-- [Boost](http://www.boost.org/users/download/) >= 1.43 (Ubuntu: `sudo apt-get install libboost-all-dev`)
+- [Boost](http://www.boost.org/users/download/) >= 1.58 (Ubuntu: `sudo apt-get install libboost-all-dev`)
- [CMake](http://www.cmake.org/cmake/resources/software.html) >= 3.0 (Ubuntu: `sudo apt-get install cmake`)
- A modern compiler, i.e., at least gcc 4.7.3 on Linux.
@@ -44,13 +53,16 @@ Optional prerequisites - used automatically if findable by CMake:
## GTSAM 4 Compatibility
-GTSAM 4 introduces several new features, most notably Expressions and a Python toolbox. We also deprecate some legacy functionality and wrongly named methods, but by default the flag GTSAM_ALLOW_DEPRECATED_SINCE_V4 is enabled, allowing anyone to just pull V4 and compile. To build the python toolbox, however, you will have to explicitly disable that flag.
+GTSAM 4 introduces several new features, most notably Expressions and a Python toolbox. It also introduces traits, a C++ technique that allows optimizing with non-GTSAM types. That opens the door to retiring geometric types such as Point2 and Point3 to pure Eigen types, which we also do. A significant change which will not trigger a compile error is that zero-initializing of Point2 and Point3 is deprecated, so please be aware that this might render functions using their default constructor incorrect.
+
+GTSAM 4 also deprecated some legacy functionality and wrongly named methods. If you are on a 4.0.X release, you can define the flag GTSAM_ALLOW_DEPRECATED_SINCE_V4 to use the deprecated methods.
+
+GTSAM 4.1 added a new pybind wrapper, and **removed** the deprecated functionality. There is a flag GTSAM_ALLOW_DEPRECATED_SINCE_V41 for newly deprecated methods since the 4.1 release, which is on by default, allowing anyone to just pull version 4.1 and compile.
-Also, GTSAM 4 introduces traits, a C++ technique that allows optimizing with non-GTSAM types. That opens the door to retiring geometric types such as Point2 and Point3 to pure Eigen types, which we also do. A significant change which will not trigger a compile error is that zero-initializing of Point2 and Point3 is deprecated, so please be aware that this might render functions using their default constructor incorrect.
## Wrappers
-We provide support for [MATLAB](matlab/README.md) and [Python](cython/README.md) wrappers for GTSAM. Please refer to the linked documents for more details.
+We provide support for [MATLAB](matlab/README.md) and [Python](python/README.md) wrappers for GTSAM. Please refer to the linked documents for more details.
## The Preintegrated IMU Factor
diff --git a/appveyor.yml b/appveyor.yml
deleted file mode 100644
index 2c78ca1f2..000000000
--- a/appveyor.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-# version format
-version: 4.0.2-{branch}-build{build}
-
-os: Visual Studio 2019
-
-clone_folder: c:\projects\gtsam
-
-platform: x64
-configuration: Release
-
-environment:
- CTEST_OUTPUT_ON_FAILURE: 1
- BOOST_ROOT: C:/Libraries/boost_1_71_0
-
-build_script:
- - cd c:\projects\gtsam\build
- # As of Dec 2019, not all unit tests build cleanly for MSVC, so we'll just
- # check that parts of GTSAM build correctly:
- #- cmake --build .
- - cmake --build . --config Release --target gtsam
- - cmake --build . --config Release --target gtsam_unstable
- - cmake --build . --config Release --target wrap
- #- cmake --build . --target check
- - cmake --build . --config Release --target check.base
- - cmake --build . --config Release --target check.base_unstable
- - cmake --build . --config Release --target check.linear
-
-before_build:
- - cd c:\projects\gtsam
- - mkdir build
- - cd build
- # Disable examples to avoid AppVeyor timeout
- - cmake -G "Visual Studio 16 2019" .. -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF
diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt
index d612e2fae..451ca38a4 100644
--- a/cmake/CMakeLists.txt
+++ b/cmake/CMakeLists.txt
@@ -17,11 +17,8 @@ install(FILES
GtsamBuildTypes.cmake
GtsamMakeConfigFile.cmake
GtsamMatlabWrap.cmake
- GtsamPythonWrap.cmake
- GtsamCythonWrap.cmake
GtsamTesting.cmake
GtsamPrinting.cmake
- FindCython.cmake
FindNumPy.cmake
README.html
DESTINATION "${SCRIPT_INSTALL_DIR}/GTSAMCMakeTools")
diff --git a/cmake/FindCython.cmake b/cmake/FindCython.cmake
deleted file mode 100644
index e5a32c30d..000000000
--- a/cmake/FindCython.cmake
+++ /dev/null
@@ -1,81 +0,0 @@
-# Modifed from: https://github.com/nest/nest-simulator/blob/master/cmake/FindCython.cmake
-#
-# Find the Cython compiler.
-#
-# This code sets the following variables:
-#
-# CYTHON_FOUND
-# CYTHON_PATH
-# CYTHON_EXECUTABLE
-# CYTHON_VERSION
-#
-# See also UseCython.cmake
-
-#=============================================================================
-# Copyright 2011 Kitware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#=============================================================================
-
-# Use the Cython executable that lives next to the Python executable
-# if it is a local installation.
-if(GTSAM_PYTHON_VERSION STREQUAL "Default")
- find_package(PythonInterp)
-else()
- find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT)
-endif()
-
-if ( PYTHONINTERP_FOUND )
- execute_process( COMMAND "${PYTHON_EXECUTABLE}" "-c"
- "import Cython; print(Cython.__path__[0])"
- RESULT_VARIABLE RESULT
- OUTPUT_VARIABLE CYTHON_PATH
- OUTPUT_STRIP_TRAILING_WHITESPACE
- )
-endif ()
-
-# RESULT=0 means ok
-if ( NOT RESULT )
- get_filename_component( _python_path ${PYTHON_EXECUTABLE} PATH )
- find_program( CYTHON_EXECUTABLE
- NAMES cython cython.bat cython3
- HINTS ${_python_path}
- )
-endif ()
-
-# RESULT=0 means ok
-if ( NOT RESULT )
- execute_process( COMMAND "${PYTHON_EXECUTABLE}" "-c"
- "import Cython; print(Cython.__version__)"
- RESULT_VARIABLE RESULT
- OUTPUT_VARIABLE CYTHON_VAR_OUTPUT
- ERROR_VARIABLE CYTHON_VAR_OUTPUT
- OUTPUT_STRIP_TRAILING_WHITESPACE
- )
- if ( RESULT EQUAL 0 )
- string( REGEX REPLACE ".* ([0-9]+\\.[0-9]+(\\.[0-9]+)?).*" "\\1"
- CYTHON_VERSION "${CYTHON_VAR_OUTPUT}" )
- endif ()
-endif ()
-
-include( FindPackageHandleStandardArgs )
-find_package_handle_standard_args( Cython
- FOUND_VAR
- CYTHON_FOUND
- REQUIRED_VARS
- CYTHON_PATH
- CYTHON_EXECUTABLE
- VERSION_VAR
- CYTHON_VERSION
- )
-
diff --git a/cmake/FindNumPy.cmake b/cmake/FindNumPy.cmake
index 4f5743aa6..d55a760c6 100644
--- a/cmake/FindNumPy.cmake
+++ b/cmake/FindNumPy.cmake
@@ -40,17 +40,9 @@
# Finding NumPy involves calling the Python interpreter
if(NumPy_FIND_REQUIRED)
- if(GTSAM_PYTHON_VERSION STREQUAL "Default")
- find_package(PythonInterp REQUIRED)
- else()
- find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
- endif()
+ find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
else()
- if(GTSAM_PYTHON_VERSION STREQUAL "Default")
- find_package(PythonInterp)
- else()
- find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT)
- endif()
+ find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT)
endif()
if(NOT PYTHONINTERP_FOUND)
diff --git a/cmake/GtsamBuildTypes.cmake b/cmake/GtsamBuildTypes.cmake
index 15a02b6e8..3155161be 100644
--- a/cmake/GtsamBuildTypes.cmake
+++ b/cmake/GtsamBuildTypes.cmake
@@ -1,3 +1,10 @@
+include(CheckCXXCompilerFlag) # for check_cxx_compiler_flag()
+
+# Set cmake policy to recognize the AppleClang compiler
+# independently from the Clang compiler.
+if(POLICY CMP0025)
+ cmake_policy(SET CMP0025 NEW)
+endif()
# function: list_append_cache(var [new_values ...])
# Like "list(APPEND ...)" but working for CACHE variables.
@@ -99,8 +106,27 @@ if(MSVC)
set(GTSAM_COMPILE_OPTIONS_PRIVATE_TIMING /MD /O2 CACHE STRING "(User editable) Private compiler flags for Timing configuration.")
else()
# Common to all configurations, next for each configuration:
- # "-fPIC" is to ensure proper code generation for shared libraries
- set(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON -Wall -fPIC CACHE STRING "(User editable) Private compiler flags for all configurations.")
+
+ if (NOT MSVC)
+ check_cxx_compiler_flag(-Wsuggest-override COMPILER_HAS_WSUGGEST_OVERRIDE)
+ check_cxx_compiler_flag(-Wmissing COMPILER_HAS_WMISSING_OVERRIDE)
+ if (COMPILER_HAS_WSUGGEST_OVERRIDE)
+ set(flag_override_ -Wsuggest-override) # -Werror=suggest-override: Add again someday
+ elseif(COMPILER_HAS_WMISSING_OVERRIDE)
+ set(flag_override_ -Wmissing-override) # -Werror=missing-override: Add again someday
+ endif()
+ endif()
+
+ set(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON
+ -Wall # Enable common warnings
+ -fPIC # ensure proper code generation for shared libraries
+ $<$:-Wreturn-local-addr -Werror=return-local-addr> # Error: return local address
+ $<$:-Wreturn-stack-address -Werror=return-stack-address> # Error: return local address
+ -Wreturn-type -Werror=return-type # Error on missing return()
+ -Wformat -Werror=format-security # Error on wrong printf() arguments
+ $<$:${flag_override_}> # Enforce the use of the override keyword
+ #
+ CACHE STRING "(User editable) Private compiler flags for all configurations.")
set(GTSAM_COMPILE_OPTIONS_PRIVATE_DEBUG -g -fno-inline CACHE STRING "(User editable) Private compiler flags for Debug configuration.")
set(GTSAM_COMPILE_OPTIONS_PRIVATE_RELWITHDEBINFO -g -O3 CACHE STRING "(User editable) Private compiler flags for RelWithDebInfo configuration.")
set(GTSAM_COMPILE_OPTIONS_PRIVATE_RELEASE -O3 CACHE STRING "(User editable) Private compiler flags for Release configuration.")
@@ -242,3 +268,17 @@ function(gtsam_apply_build_flags target_name_)
target_compile_options(${target_name_} PRIVATE ${GTSAM_COMPILE_OPTIONS_PRIVATE})
endfunction(gtsam_apply_build_flags)
+
+
+if(NOT MSVC AND NOT XCODE_VERSION)
+ # Set the build type to upper case for downstream use
+ string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_UPPER)
+
+ # Set the GTSAM_BUILD_TAG variable.
+ # If build type is Release, set to blank (""), else set to the build type.
+ if(${CMAKE_BUILD_TYPE_UPPER} STREQUAL "RELEASE")
+ set(GTSAM_BUILD_TAG "") # Don't create release mode tag on installed directory
+ else()
+ set(GTSAM_BUILD_TAG "${CMAKE_BUILD_TYPE}")
+ endif()
+endif()
diff --git a/cmake/GtsamCythonWrap.cmake b/cmake/GtsamCythonWrap.cmake
deleted file mode 100644
index f1382729f..000000000
--- a/cmake/GtsamCythonWrap.cmake
+++ /dev/null
@@ -1,281 +0,0 @@
-# Check Cython version, need to be >=0.25.2
-# Unset these cached variables to avoid surprises when the python/cython
-# in the current environment are different from the cached!
-unset(PYTHON_EXECUTABLE CACHE)
-unset(CYTHON_EXECUTABLE CACHE)
-unset(PYTHON_INCLUDE_DIR CACHE)
-unset(PYTHON_MAJOR_VERSION CACHE)
-unset(PYTHON_LIBRARY CACHE)
-
-# Allow override from command line
-if(NOT DEFINED GTSAM_USE_CUSTOM_PYTHON_LIBRARY)
- if(GTSAM_PYTHON_VERSION STREQUAL "Default")
- find_package(PythonInterp REQUIRED)
- find_package(PythonLibs REQUIRED)
- else()
- find_package(PythonInterp ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
- find_package(PythonLibs ${GTSAM_PYTHON_VERSION} EXACT REQUIRED)
- endif()
-endif()
-find_package(Cython 0.25.2 REQUIRED)
-
-execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
- "from __future__ import print_function;import sys;print(sys.version[0], end='')"
- OUTPUT_VARIABLE PYTHON_MAJOR_VERSION
-)
-
-# User-friendly Cython wrapping and installing function.
-# Builds a Cython module from the provided interface_header.
-# For example, for the interface header gtsam.h,
-# this will build the wrap module 'gtsam'.
-#
-# Arguments:
-#
-# interface_header: The relative path to the wrapper interface definition file.
-# extra_imports: extra header to import in the Cython pxd file.
-# For example, to use Cython gtsam.pxd in your own module,
-# use "from gtsam cimport *"
-# install_path: destination to install the library
-# libs: libraries to link with
-# dependencies: Dependencies which need to be built before the wrapper
-function(wrap_and_install_library_cython interface_header extra_imports install_path libs dependencies)
- # Paths for generated files
- get_filename_component(module_name "${interface_header}" NAME_WE)
- set(generated_files_path "${PROJECT_BINARY_DIR}/cython/${module_name}")
- wrap_library_cython("${interface_header}" "${generated_files_path}" "${extra_imports}" "${libs}" "${dependencies}")
- install_cython_wrapped_library("${interface_header}" "${generated_files_path}" "${install_path}")
-endfunction()
-
-function(set_up_required_cython_packages)
- # Set up building of cython module
- include_directories(${PYTHON_INCLUDE_DIRS})
- find_package(NumPy REQUIRED)
- include_directories(${NUMPY_INCLUDE_DIRS})
-endfunction()
-
-
-# Convert pyx to cpp by executing cython
-# This is the first step to compile cython from the command line
-# as described at: http://cython.readthedocs.io/en/latest/src/reference/compilation.html
-#
-# Arguments:
-# - target: The specified target for this step
-# - pyx_file: The input pyx_file in full *absolute* path
-# - generated_cpp: The output cpp file in full absolute path
-# - include_dirs: Directories to include when executing cython
-function(pyx_to_cpp target pyx_file generated_cpp include_dirs)
- foreach(dir ${include_dirs})
- set(includes_for_cython ${includes_for_cython} -I ${dir})
- endforeach()
-
- add_custom_command(
- OUTPUT ${generated_cpp}
- COMMAND
- ${CYTHON_EXECUTABLE} -X boundscheck=False -v --fast-fail --cplus -${PYTHON_MAJOR_VERSION} ${includes_for_cython} ${pyx_file} -o ${generated_cpp}
- VERBATIM)
- add_custom_target(${target} ALL DEPENDS ${generated_cpp})
-endfunction()
-
-# Build the cpp file generated by converting pyx using cython
-# This is the second step to compile cython from the command line
-# as described at: http://cython.readthedocs.io/en/latest/src/reference/compilation.html
-#
-# Arguments:
-# - target: The specified target for this step
-# - cpp_file: The input cpp_file in full *absolute* path
-# - output_lib_we: The output lib filename only (without extension)
-# - output_dir: The output directory
-function(build_cythonized_cpp target cpp_file output_lib_we output_dir)
- add_library(${target} MODULE ${cpp_file})
-
- if(WIN32)
- # Use .pyd extension instead of .dll on Windows
- set_target_properties(${target} PROPERTIES SUFFIX ".pyd")
-
- # Add full path to the Python library
- target_link_libraries(${target} ${PYTHON_LIBRARIES})
- endif()
-
- if(APPLE)
- set(link_flags "-undefined dynamic_lookup")
- endif()
- set_target_properties(${target}
- PROPERTIES COMPILE_FLAGS "-w"
- LINK_FLAGS "${link_flags}"
- OUTPUT_NAME ${output_lib_we}
- PREFIX ""
- ${CMAKE_BUILD_TYPE_UPPER}_POSTFIX ""
- LIBRARY_OUTPUT_DIRECTORY ${output_dir})
-endfunction()
-
-# Cythonize a pyx from the command line as described at
-# http://cython.readthedocs.io/en/latest/src/reference/compilation.html
-# Arguments:
-# - target: The specified target
-# - pyx_file: The input pyx_file in full *absolute* path
-# - output_lib_we: The output lib filename only (without extension)
-# - output_dir: The output directory
-# - include_dirs: Directories to include when executing cython
-# - libs: Libraries to link with
-# - interface_header: For dependency. Any update in interface header will re-trigger cythonize
-function(cythonize target pyx_file output_lib_we output_dir include_dirs libs interface_header dependencies)
- get_filename_component(pyx_path "${pyx_file}" DIRECTORY)
- get_filename_component(pyx_name "${pyx_file}" NAME_WE)
- set(generated_cpp "${output_dir}/${pyx_name}.cpp")
-
- set_up_required_cython_packages()
- pyx_to_cpp(${target}_pyx2cpp ${pyx_file} ${generated_cpp} "${include_dirs}")
-
- # Late dependency injection, to make sure this gets called whenever the interface header is updated
- # See: https://stackoverflow.com/questions/40032593/cmake-does-not-rebuild-dependent-after-prerequisite-changes
- add_custom_command(OUTPUT ${generated_cpp} DEPENDS ${interface_header} ${pyx_file} APPEND)
- if (NOT "${dependencies}" STREQUAL "")
- add_dependencies(${target}_pyx2cpp "${dependencies}")
- endif()
-
- build_cythonized_cpp(${target} ${generated_cpp} ${output_lib_we} ${output_dir})
- if (NOT "${libs}" STREQUAL "")
- target_link_libraries(${target} "${libs}")
- endif()
- add_dependencies(${target} ${target}_pyx2cpp)
-endfunction()
-
-# Internal function that wraps a library and compiles the wrapper
-function(wrap_library_cython interface_header generated_files_path extra_imports libs dependencies)
- # Wrap codegen interface
- # Extract module path and name from interface header file name
- # wrap requires interfacePath to be *absolute*
- get_filename_component(interface_header "${interface_header}" ABSOLUTE)
- get_filename_component(module_path "${interface_header}" PATH)
- get_filename_component(module_name "${interface_header}" NAME_WE)
-
- # Wrap module to Cython pyx
- message(STATUS "Cython wrapper generating ${module_name}.pyx")
- set(generated_pyx "${generated_files_path}/${module_name}.pyx")
- file(MAKE_DIRECTORY "${generated_files_path}")
- add_custom_command(
- OUTPUT ${generated_pyx}
- DEPENDS ${interface_header} wrap
- COMMAND
- wrap --cython ${module_path} ${module_name} ${generated_files_path} "${extra_imports}"
- VERBATIM
- WORKING_DIRECTORY ${generated_files_path}/../)
- add_custom_target(cython_wrap_${module_name}_pyx ALL DEPENDS ${generated_pyx})
- if(NOT "${dependencies}" STREQUAL "")
- add_dependencies(cython_wrap_${module_name}_pyx ${dependencies})
- endif()
-
- message(STATUS "Cythonize and build ${module_name}.pyx")
- get_property(include_dirs DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY INCLUDE_DIRECTORIES)
- cythonize(cythonize_${module_name} ${generated_pyx} ${module_name}
- ${generated_files_path} "${include_dirs}" "${libs}" ${interface_header} cython_wrap_${module_name}_pyx)
-
- # distclean
- add_custom_target(wrap_${module_name}_cython_distclean
- COMMAND cmake -E remove_directory ${generated_files_path})
-endfunction()
-
-# Internal function that installs a wrap toolbox
-function(install_cython_wrapped_library interface_header generated_files_path install_path)
- get_filename_component(module_name "${interface_header}" NAME_WE)
-
- # NOTE: only installs .pxd and .pyx and binary files (not .cpp) - the trailing slash on the directory name
- # here prevents creating the top-level module name directory in the destination.
- # Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH/subdirectory if there is one
- get_filename_component(location "${install_path}" PATH)
- get_filename_component(name "${install_path}" NAME)
- message(STATUS "Installing Cython Toolbox to ${location}${GTSAM_BUILD_TAG}/${name}") #${GTSAM_CYTHON_INSTALL_PATH}"
-
- if(GTSAM_BUILD_TYPE_POSTFIXES)
- foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
- string(TOUPPER "${build_type}" build_type_upper)
- if(${build_type_upper} STREQUAL "RELEASE")
- set(build_type_tag "") # Don't create release mode tag on installed directory
- else()
- set(build_type_tag "${build_type}")
- endif()
-
- install(DIRECTORY "${generated_files_path}/" DESTINATION "${location}${build_type_tag}/${name}"
- CONFIGURATIONS "${build_type}"
- PATTERN "build" EXCLUDE
- PATTERN "CMakeFiles" EXCLUDE
- PATTERN "Makefile" EXCLUDE
- PATTERN "*.cmake" EXCLUDE
- PATTERN "*.cpp" EXCLUDE
- PATTERN "*.py" EXCLUDE)
- endforeach()
- else()
- install(DIRECTORY "${generated_files_path}/" DESTINATION ${install_path}
- PATTERN "build" EXCLUDE
- PATTERN "CMakeFiles" EXCLUDE
- PATTERN "Makefile" EXCLUDE
- PATTERN "*.cmake" EXCLUDE
- PATTERN "*.cpp" EXCLUDE
- PATTERN "*.py" EXCLUDE)
- endif()
-endfunction()
-
-# Helper function to install Cython scripts and handle multiple build types where the scripts
-# should be installed to all build type toolboxes
-#
-# Arguments:
-# source_directory: The source directory to be installed. "The last component of each directory
-# name is appended to the destination directory but a trailing slash may be
-# used to avoid this because it leaves the last component empty."
-# (https://cmake.org/cmake/help/v3.3/command/install.html?highlight=install#installing-directories)
-# dest_directory: The destination directory to install to.
-# patterns: list of file patterns to install
-function(install_cython_scripts source_directory dest_directory patterns)
- set(patterns_args "")
- set(exclude_patterns "")
-
- foreach(pattern ${patterns})
- list(APPEND patterns_args PATTERN "${pattern}")
- endforeach()
- if(GTSAM_BUILD_TYPE_POSTFIXES)
- foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
- string(TOUPPER "${build_type}" build_type_upper)
- if(${build_type_upper} STREQUAL "RELEASE")
- set(build_type_tag "") # Don't create release mode tag on installed directory
- else()
- set(build_type_tag "${build_type}")
- endif()
- # Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one
- get_filename_component(location "${dest_directory}" PATH)
- get_filename_component(name "${dest_directory}" NAME)
- install(DIRECTORY "${source_directory}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}"
- FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE)
- endforeach()
- else()
- install(DIRECTORY "${source_directory}" DESTINATION "${dest_directory}" FILES_MATCHING ${patterns_args} PATTERN "${exclude_patterns}" EXCLUDE)
- endif()
-
-endfunction()
-
-# Helper function to install specific files and handle multiple build types where the scripts
-# should be installed to all build type toolboxes
-#
-# Arguments:
-# source_files: The source files to be installed.
-# dest_directory: The destination directory to install to.
-function(install_cython_files source_files dest_directory)
-
- if(GTSAM_BUILD_TYPE_POSTFIXES)
- foreach(build_type ${CMAKE_CONFIGURATION_TYPES})
- string(TOUPPER "${build_type}" build_type_upper)
- if(${build_type_upper} STREQUAL "RELEASE")
- set(build_type_tag "") # Don't create release mode tag on installed directory
- else()
- set(build_type_tag "${build_type}")
- endif()
- # Split up filename to strip trailing '/' in GTSAM_CYTHON_INSTALL_PATH if there is one
- get_filename_component(location "${dest_directory}" PATH)
- get_filename_component(name "${dest_directory}" NAME)
- install(FILES "${source_files}" DESTINATION "${location}/${name}${build_type_tag}" CONFIGURATIONS "${build_type}")
- endforeach()
- else()
- install(FILES "${source_files}" DESTINATION "${dest_directory}")
- endif()
-
-endfunction()
-
diff --git a/cmake/GtsamMatlabWrap.cmake b/cmake/GtsamMatlabWrap.cmake
index 5fc829bf2..b76f96a4e 100644
--- a/cmake/GtsamMatlabWrap.cmake
+++ b/cmake/GtsamMatlabWrap.cmake
@@ -1,46 +1,64 @@
+# Check / set dependent variables for MATLAB wrapper
+if(GTSAM_INSTALL_MATLAB_TOOLBOX)
+ find_package(Matlab COMPONENTS MEX_COMPILER REQUIRED)
+ if(NOT Matlab_MEX_COMPILER)
+ message(FATAL_ERROR "Cannot find MEX compiler binary. Please check your Matlab installation and ensure MEX is installed as well.")
+ endif()
+
+ if(GTSAM_BUILD_TYPE_POSTFIXES)
+ set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE_UPPER}_POSTFIX})
+ endif()
+
+ if(NOT BUILD_SHARED_LIBS)
+ message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX and BUILD_SHARED_LIBS=OFF. The MATLAB wrapper cannot be compiled with a static GTSAM library because mex modules are themselves shared libraries. If you want a self-contained mex module, enable GTSAM_MEX_BUILD_STATIC_MODULE instead of BUILD_SHARED_LIBS=OFF.")
+ endif()
+endif()
+
# Set up cache options
option(GTSAM_MEX_BUILD_STATIC_MODULE "Build MATLAB wrapper statically (increases build time)" OFF)
set(GTSAM_BUILD_MEX_BINARY_FLAGS "" CACHE STRING "Extra flags for running Matlab MEX compilation")
set(GTSAM_TOOLBOX_INSTALL_PATH "" CACHE PATH "Matlab toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/gtsam_toolbox")
if(NOT GTSAM_TOOLBOX_INSTALL_PATH)
- set(GTSAM_TOOLBOX_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/gtsam_toolbox")
+ set(GTSAM_TOOLBOX_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/gtsam_toolbox")
endif()
# GTSAM_MEX_BUILD_STATIC_MODULE is not for Windows - on Windows any static
# are already compiled into the library by the linker
if(GTSAM_MEX_BUILD_STATIC_MODULE AND WIN32)
- message(FATAL_ERROR "GTSAM_MEX_BUILD_STATIC_MODULE should not be set on Windows - the linker already automatically compiles in any dependent static libraries. To create a standalone toolbox pacakge, simply ensure that CMake finds the static versions of all dependent libraries (Boost, etc).")
+ message(FATAL_ERROR "GTSAM_MEX_BUILD_STATIC_MODULE should not be set on Windows - the linker already automatically compiles in any dependent static libraries. To create a standalone toolbox package, simply ensure that CMake finds the static versions of all dependent libraries (Boost, etc).")
endif()
-# Try to automatically configure mex path
-if(APPLE)
- file(GLOB matlab_bin_directories "/Applications/MATLAB*/bin")
- set(mex_program_name "mex")
-elseif(WIN32)
- file(GLOB matlab_bin_directories "C:/Program Files*/MATLAB/*/bin")
- set(mex_program_name "mex.bat")
-else()
- file(GLOB matlab_bin_directories "/usr/local/MATLAB/*/bin")
- set(mex_program_name "mex")
-endif()
-# Run find_program explicitly putting $PATH after our predefined program
-# directories using 'ENV PATH' and 'NO_SYSTEM_ENVIRONMENT_PATH' - this prevents
-# finding the LaTeX mex program (totally unrelated to MATLAB Mex) when LaTeX is
-# on the system path.
-list(REVERSE matlab_bin_directories) # Reverse list so the highest version (sorted alphabetically) is preferred
-find_program(MEX_COMMAND ${mex_program_name}
- PATHS ${matlab_bin_directories} ENV PATH
- NO_DEFAULT_PATH)
-mark_as_advanced(FORCE MEX_COMMAND)
-# Now that we have mex, trace back to find the Matlab installation root
-get_filename_component(MEX_COMMAND "${MEX_COMMAND}" REALPATH)
-get_filename_component(mex_path "${MEX_COMMAND}" PATH)
-if(mex_path MATCHES ".*/win64$")
- get_filename_component(MATLAB_ROOT "${mex_path}/../.." ABSOLUTE)
-else()
- get_filename_component(MATLAB_ROOT "${mex_path}/.." ABSOLUTE)
+set(MEX_COMMAND ${Matlab_MEX_COMPILER} CACHE PATH "Path to MATLAB MEX compiler")
+set(MATLAB_ROOT ${Matlab_ROOT_DIR} CACHE PATH "Path to MATLAB installation root (e.g. /usr/local/MATLAB/R2012a)")
+
+# Try to automatically configure mex path from provided custom `bin` path.
+if(GTSAM_CUSTOM_MATLAB_PATH)
+ set(matlab_bin_directory ${GTSAM_CUSTOM_MATLAB_PATH})
+
+ if(WIN32)
+ set(mex_program_name "mex.bat")
+ else()
+ set(mex_program_name "mex")
+ endif()
+
+ # Run find_program explicitly putting $PATH after our predefined program
+ # directories using 'ENV PATH' and 'NO_SYSTEM_ENVIRONMENT_PATH' - this prevents
+ # finding the LaTeX mex program (totally unrelated to MATLAB Mex) when LaTeX is
+ # on the system path.
+ find_program(MEX_COMMAND ${mex_program_name}
+ PATHS ${matlab_bin_directory} ENV PATH
+ NO_DEFAULT_PATH)
+
+ mark_as_advanced(FORCE MEX_COMMAND)
+ # Now that we have mex, trace back to find the Matlab installation root
+ get_filename_component(MEX_COMMAND "${MEX_COMMAND}" REALPATH)
+ get_filename_component(mex_path "${MEX_COMMAND}" PATH)
+ if(mex_path MATCHES ".*/win64$")
+ get_filename_component(MATLAB_ROOT "${mex_path}/../.." ABSOLUTE)
+ else()
+ get_filename_component(MATLAB_ROOT "${mex_path}/.." ABSOLUTE)
+ endif()
endif()
-set(MATLAB_ROOT "${MATLAB_ROOT}" CACHE PATH "Path to MATLAB installation root (e.g. /usr/local/MATLAB/R2012a)")
# User-friendly wrapping function. Builds a mex module from the provided
@@ -209,15 +227,34 @@ function(wrap_library_internal interfaceHeader linkLibraries extraIncludeDirs ex
# Set up generation of module source file
file(MAKE_DIRECTORY "${generated_files_path}")
- add_custom_command(
+
+ find_package(PythonInterp
+ ${GTSAM_PYTHON_VERSION}
+ EXACT
+ REQUIRED)
+ find_package(PythonLibs
+ ${GTSAM_PYTHON_VERSION}
+ EXACT
+ REQUIRED)
+
+
+ set(_ignore gtsam::Point2
+ gtsam::Point3)
+
+ # set the matlab wrapping script variable
+ set(MATLAB_WRAP_SCRIPT "${GTSAM_SOURCE_DIR}/wrap/scripts/matlab_wrap.py")
+
+ add_custom_command(
OUTPUT ${generated_cpp_file}
- DEPENDS ${interfaceHeader} wrap ${module_library_target} ${otherLibraryTargets} ${otherSourcesAndObjects}
- COMMAND
- wrap --matlab
- ${modulePath}
- ${moduleName}
- ${generated_files_path}
- ${matlab_h_path}
+ DEPENDS ${interfaceHeader} ${module_library_target} ${otherLibraryTargets} ${otherSourcesAndObjects}
+ COMMAND
+ ${PYTHON_EXECUTABLE}
+ ${MATLAB_WRAP_SCRIPT}
+ --src ${interfaceHeader}
+ --module_name ${moduleName}
+ --out ${generated_files_path}
+ --top_module_namespaces ${moduleName}
+ --ignore ${_ignore}
VERBATIM
WORKING_DIRECTORY ${generated_files_path})
diff --git a/cmake/GtsamPrinting.cmake b/cmake/GtsamPrinting.cmake
index e53f9c54f..c68679667 100644
--- a/cmake/GtsamPrinting.cmake
+++ b/cmake/GtsamPrinting.cmake
@@ -1,14 +1,3 @@
-# print configuration variables
-# Usage:
-#print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ")
-function(print_config_flag flag msg)
- if (flag)
- message(STATUS " ${msg}: Enabled")
- else ()
- message(STATUS " ${msg}: Disabled")
- endif ()
-endfunction()
-
# Based on https://github.com/jimbraun/XCDF/blob/master/cmake/CMakePadString.cmake
function(string_pad RESULT_NAME DESIRED_LENGTH VALUE)
string(LENGTH "${VALUE}" VALUE_LENGTH)
@@ -26,6 +15,27 @@ endfunction()
set(GTSAM_PRINT_SUMMARY_PADDING_LENGTH 50 CACHE STRING "Padding of cmake summary report lines after configuring.")
mark_as_advanced(GTSAM_PRINT_SUMMARY_PADDING_LENGTH)
+# print configuration variables with automatic padding
+# Usage:
+# print_config(${GTSAM_BUILD_TESTS} "Build Tests")
+function(print_config config msg)
+ string_pad(padded_config ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${config}")
+ message(STATUS "${padded_config}: ${msg}")
+endfunction()
+
+# print configuration variable with enabled/disabled value
+# Usage:
+# print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests ")
+function(print_enabled_config config msg)
+ string_pad(padded_msg ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${msg}")
+ if (config)
+ message(STATUS "${padded_msg}: Enabled")
+ else ()
+ message(STATUS "${padded_msg}: Disabled")
+ endif ()
+endfunction()
+
+
# Print " var: ${var}" padding with spaces as needed
function(print_padded variable_name)
string_pad(padded_prop ${GTSAM_PRINT_SUMMARY_PADDING_LENGTH} " ${variable_name}")
@@ -36,16 +46,16 @@ endfunction()
# Prints all the relevant CMake build options for a given target:
function(print_build_options_for_target target_name_)
print_padded(GTSAM_COMPILE_FEATURES_PUBLIC)
- print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE)
+ # print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE)
print_padded(GTSAM_COMPILE_OPTIONS_PUBLIC)
- print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE)
+ # print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE)
print_padded(GTSAM_COMPILE_DEFINITIONS_PUBLIC)
foreach(build_type ${GTSAM_CMAKE_CONFIGURATION_TYPES})
string(TOUPPER "${build_type}" build_type_toupper)
- print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE_${build_type_toupper})
+ # print_padded(GTSAM_COMPILE_OPTIONS_PRIVATE_${build_type_toupper})
print_padded(GTSAM_COMPILE_OPTIONS_PUBLIC_${build_type_toupper})
- print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE_${build_type_toupper})
+ # print_padded(GTSAM_COMPILE_DEFINITIONS_PRIVATE_${build_type_toupper})
print_padded(GTSAM_COMPILE_DEFINITIONS_PUBLIC_${build_type_toupper})
endforeach()
endfunction()
diff --git a/cmake/GtsamPythonWrap.cmake b/cmake/GtsamPythonWrap.cmake
deleted file mode 100644
index 714e37488..000000000
--- a/cmake/GtsamPythonWrap.cmake
+++ /dev/null
@@ -1,102 +0,0 @@
-#Setup cache options
-set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "Target python version for GTSAM python module. Use 'Default' to chose the default version")
-set(GTSAM_BUILD_PYTHON_FLAGS "" CACHE STRING "Extra flags for running Matlab PYTHON compilation")
-set(GTSAM_PYTHON_INSTALL_PATH "" CACHE PATH "Python toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/borg/python")
-if(NOT GTSAM_PYTHON_INSTALL_PATH)
- set(GTSAM_PYTHON_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/borg/python")
-endif()
-
-#Author: Paul Furgale Modified by Andrew Melim
-function(wrap_python TARGET_NAME PYTHON_MODULE_DIRECTORY)
- # # Boost
- # find_package(Boost COMPONENTS python filesystem system REQUIRED)
- # include_directories(${Boost_INCLUDE_DIRS})
-
- # # Find Python
- # FIND_PACKAGE(PythonLibs 2.7 REQUIRED)
- # INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIRS})
-
- IF(APPLE)
- # The apple framework headers don't include the numpy headers for some reason.
- GET_FILENAME_COMPONENT(REAL_PYTHON_INCLUDE ${PYTHON_INCLUDE_DIRS} REALPATH)
- IF( ${REAL_PYTHON_INCLUDE} MATCHES Python.framework)
- message("Trying to find extra headers for numpy from ${REAL_PYTHON_INCLUDE}.")
- message("Looking in ${REAL_PYTHON_INCLUDE}/../../Extras/lib/python/numpy/core/include/numpy")
- FIND_PATH(NUMPY_INCLUDE_DIR arrayobject.h
- ${REAL_PYTHON_INCLUDE}/../../Extras/lib/python/numpy/core/include/numpy
- ${REAL_PYTHON_INCLUDE}/numpy
- )
- IF(${NUMPY_INCLUDE_DIR} MATCHES NOTFOUND)
- message("Unable to find numpy include directories: ${NUMPY_INCLUDE_DIR}")
- ELSE()
- message("Found headers at ${NUMPY_INCLUDE_DIR}")
- INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR})
- INCLUDE_DIRECTORIES(${NUMPY_INCLUDE_DIR}/..)
- ENDIF()
- ENDIF()
- ENDIF(APPLE)
-
- if(MSVC)
- add_library(${moduleName}_python MODULE ${ARGN})
- set_target_properties(${moduleName}_python PROPERTIES
- OUTPUT_NAME ${moduleName}_python
- CLEAN_DIRECT_OUTPUT 1
- VERSION 1
- SOVERSION 0
- SUFFIX ".pyd")
- target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp
-
- set(PYLIB_OUTPUT_FILE $)
- message(${PYLIB_OUTPUT_FILE})
- get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE)
- set(PYLIB_SO_NAME ${PYLIB_OUTPUT_NAME}.pyd)
-
- ELSE()
- # Create a shared library
- add_library(${moduleName}_python SHARED ${generated_cpp_file})
-
- set_target_properties(${moduleName}_python PROPERTIES
- OUTPUT_NAME ${moduleName}_python
- CLEAN_DIRECT_OUTPUT 1)
- target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp
- # On OSX and Linux, the python library must end in the extension .so. Build this
- # filename here.
- get_property(PYLIB_OUTPUT_FILE TARGET ${moduleName}_python PROPERTY LOCATION)
- set(PYLIB_OUTPUT_FILE $)
- message(${PYLIB_OUTPUT_FILE})
- get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE)
- set(PYLIB_SO_NAME lib${moduleName}_python.so)
- ENDIF(MSVC)
-
- # Installs the library in the gtsam folder, which is used by setup.py to create the gtsam package
- set(PYTHON_MODULE_DIRECTORY ${CMAKE_SOURCE_DIR}/python/gtsam)
- # Cause the library to be output in the correct directory.
- add_custom_command(TARGET ${moduleName}_python
- POST_BUILD
- COMMAND cp -v ${PYLIB_OUTPUT_FILE} ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME}
- WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
- COMMENT "Copying library files to python directory" )
-
- # Cause the library to be output in the correct directory.
- add_custom_command(TARGET ${TARGET_NAME}
- POST_BUILD
- COMMAND cp -v ${PYLIB_OUTPUT_FILE} ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME}
- WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
- COMMENT "Copying library files to python directory" )
-
- get_directory_property(AMCF ADDITIONAL_MAKE_CLEAN_FILES)
- list(APPEND AMCF ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME})
- set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "${AMCF}")
-endfunction(wrap_python)
-
-# Macro to get list of subdirectories
-macro(SUBDIRLIST result curdir)
- file(GLOB children RELATIVE ${curdir} ${curdir}/*)
- set(dirlist "")
- foreach(child ${children})
- if(IS_DIRECTORY ${curdir}/${child})
- list(APPEND dirlist ${child})
- endif()
- endforeach()
- set(${result} ${dirlist})
-endmacro()
diff --git a/cmake/GtsamTesting.cmake b/cmake/GtsamTesting.cmake
index 3b42ffa21..573fb696a 100644
--- a/cmake/GtsamTesting.cmake
+++ b/cmake/GtsamTesting.cmake
@@ -88,36 +88,36 @@ enable_testing()
option(GTSAM_BUILD_TESTS "Enable/Disable building of tests" ON)
option(GTSAM_BUILD_EXAMPLES_ALWAYS "Build examples with 'make all' (build with 'make examples' if not)" ON)
- option(GTSAM_BUILD_TIMING_ALWAYS "Build timing scripts with 'make all' (build with 'make timing' if not" OFF)
-
- # Add option for combining unit tests
- if(MSVC OR XCODE_VERSION)
- option(GTSAM_SINGLE_TEST_EXE "Combine unit tests into single executable (faster compile)" ON)
- else()
- option(GTSAM_SINGLE_TEST_EXE "Combine unit tests into single executable (faster compile)" OFF)
- endif()
- mark_as_advanced(GTSAM_SINGLE_TEST_EXE)
-
- # Enable make check (http://www.cmake.org/Wiki/CMakeEmulateMakeCheck)
- if(GTSAM_BUILD_TESTS)
- add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} -C $ --output-on-failure)
- # Also add alternative checks using valgrind.
- # We don't look for valgrind being installed in the system, since these
- # targets are not invoked unless directly instructed by the user.
- if (UNIX)
- # Run all tests using valgrind:
- add_custom_target(check_valgrind)
- endif()
+option(GTSAM_BUILD_TIMING_ALWAYS "Build timing scripts with 'make all' (build with 'make timing' if not)" OFF)
+
+# Add option for combining unit tests
+if(MSVC OR XCODE_VERSION)
+ option(GTSAM_SINGLE_TEST_EXE "Combine unit tests into single executable (faster compile)" ON)
+else()
+ option(GTSAM_SINGLE_TEST_EXE "Combine unit tests into single executable (faster compile)" OFF)
+endif()
+mark_as_advanced(GTSAM_SINGLE_TEST_EXE)
+
+# Enable make check (http://www.cmake.org/Wiki/CMakeEmulateMakeCheck)
+if(GTSAM_BUILD_TESTS)
+ add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} -C $<CONFIG> --output-on-failure)
+ # Also add alternative checks using valgrind.
+ # We don't look for valgrind being installed in the system, since these
+ # targets are not invoked unless directly instructed by the user.
+ if (UNIX)
+ # Run all tests using valgrind:
+ add_custom_target(check_valgrind)
+ endif()
- # Add target to build tests without running
- add_custom_target(all.tests)
- endif()
+ # Add target to build tests without running
+ add_custom_target(all.tests)
+endif()
- # Add examples target
- add_custom_target(examples)
+# Add examples target
+add_custom_target(examples)
- # Add timing target
- add_custom_target(timing)
+# Add timing target
+add_custom_target(timing)
# Implementations of this file's macros:
diff --git a/cmake/HandleAllocators.cmake b/cmake/HandleAllocators.cmake
new file mode 100644
index 000000000..63411b17b
--- /dev/null
+++ b/cmake/HandleAllocators.cmake
@@ -0,0 +1,34 @@
+# Build list of possible allocators
+set(possible_allocators "")
+if(GTSAM_USE_TBB)
+ list(APPEND possible_allocators TBB)
+ set(preferred_allocator TBB)
+else()
+ list(APPEND possible_allocators BoostPool STL)
+ set(preferred_allocator STL)
+endif()
+if(GOOGLE_PERFTOOLS_FOUND)
+ list(APPEND possible_allocators tcmalloc)
+endif()
+
+# Check if current allocator choice is valid and set cache option
+list(FIND possible_allocators "${GTSAM_DEFAULT_ALLOCATOR}" allocator_valid)
+if(allocator_valid EQUAL -1)
+ set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator" FORCE)
+else()
+ set(GTSAM_DEFAULT_ALLOCATOR ${preferred_allocator} CACHE STRING "Default allocator")
+endif()
+set_property(CACHE GTSAM_DEFAULT_ALLOCATOR PROPERTY STRINGS ${possible_allocators})
+mark_as_advanced(GTSAM_DEFAULT_ALLOCATOR)
+
+# Define compile flags depending on allocator
+if("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "BoostPool")
+ set(GTSAM_ALLOCATOR_BOOSTPOOL 1)
+elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "STL")
+ set(GTSAM_ALLOCATOR_STL 1)
+elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "TBB")
+ set(GTSAM_ALLOCATOR_TBB 1)
+elseif("${GTSAM_DEFAULT_ALLOCATOR}" STREQUAL "tcmalloc")
+ set(GTSAM_ALLOCATOR_STL 1) # tcmalloc replaces malloc, so to use it we use the STL allocator
+ list(APPEND GTSAM_ADDITIONAL_LIBRARIES "tcmalloc")
+endif()
diff --git a/cmake/HandleBoost.cmake b/cmake/HandleBoost.cmake
new file mode 100644
index 000000000..e73c2237d
--- /dev/null
+++ b/cmake/HandleBoost.cmake
@@ -0,0 +1,56 @@
+###############################################################################
+# Find boost
+
+# To change the path for boost, you will need to set:
+# BOOST_ROOT: path to install prefix for boost
+# Boost_NO_SYSTEM_PATHS: set to true to keep the find script from ignoring BOOST_ROOT
+
+if(MSVC)
+ # By default, boost only builds static libraries on windows
+ set(Boost_USE_STATIC_LIBS ON) # only find static libs
+ # If we ever reset above on windows and, ...
+ # If we use Boost shared libs, disable auto linking.
+ # Some libraries, at least Boost Program Options, rely on this to export DLL symbols.
+ if(NOT Boost_USE_STATIC_LIBS)
+ list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC BOOST_ALL_NO_LIB BOOST_ALL_DYN_LINK)
+ endif()
+ # Virtual memory range for PCH exceeded on VS2015
+ if(MSVC_VERSION LESS 1910) # older than VS2017
+ list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Zm295)
+ endif()
+endif()
+
+
+# Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such.
+set(BOOST_FIND_MINIMUM_VERSION 1.58)
+set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex)
+
+find_package(Boost ${BOOST_FIND_MINIMUM_VERSION} COMPONENTS ${BOOST_FIND_MINIMUM_COMPONENTS})
+
+# Required components
+if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILESYSTEM_LIBRARY OR
+ NOT Boost_THREAD_LIBRARY OR NOT Boost_DATE_TIME_LIBRARY)
+ message(FATAL_ERROR "Missing required Boost components >= v1.58, please install/upgrade Boost or configure your search paths.")
+endif()
+
+option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)
+# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library)
+set(GTSAM_BOOST_LIBRARIES
+ Boost::serialization
+ Boost::system
+ Boost::filesystem
+ Boost::thread
+ Boost::date_time
+ Boost::regex
+)
+if (GTSAM_DISABLE_NEW_TIMERS)
+ message("WARNING: GTSAM timing instrumentation manually disabled")
+ list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC GTSAM_DISABLE_NEW_TIMERS)
+else()
+ if(Boost_TIMER_LIBRARY)
+ list(APPEND GTSAM_BOOST_LIBRARIES Boost::timer Boost::chrono)
+ else()
+ list(APPEND GTSAM_BOOST_LIBRARIES rt) # When using the header-only boost timer library, need -lrt
+ message("WARNING: GTSAM timing instrumentation will use the older, less accurate, Boost timer library because boost older than 1.48 was found.")
+ endif()
+endif()
diff --git a/cmake/HandleCCache.cmake b/cmake/HandleCCache.cmake
new file mode 100644
index 000000000..9eabb1905
--- /dev/null
+++ b/cmake/HandleCCache.cmake
@@ -0,0 +1,14 @@
+###############################################################################
+# Support ccache, if installed
+if(NOT MSVC AND NOT XCODE_VERSION)
+ find_program(CCACHE_FOUND ccache)
+ if(CCACHE_FOUND)
+ if(GTSAM_BUILD_WITH_CCACHE)
+ set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
+ set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
+ else()
+ set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "")
+ set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "")
+ endif()
+ endif(CCACHE_FOUND)
+endif()
diff --git a/cmake/HandleCPack.cmake b/cmake/HandleCPack.cmake
new file mode 100644
index 000000000..1c32433a4
--- /dev/null
+++ b/cmake/HandleCPack.cmake
@@ -0,0 +1,28 @@
+#JLBC: is all this actually used by someone? could it be removed?
+
+# Flags for choosing default packaging tools
+set(CPACK_SOURCE_GENERATOR "TGZ" CACHE STRING "CPack Default Source Generator")
+set(CPACK_GENERATOR "TGZ" CACHE STRING "CPack Default Binary Generator")
+
+###############################################################################
+# Set up CPack
+set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "GTSAM")
+set(CPACK_PACKAGE_VENDOR "Frank Dellaert, Georgia Institute of Technology")
+set(CPACK_PACKAGE_CONTACT "Frank Dellaert, dellaert@cc.gatech.edu")
+set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README.md")
+set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE")
+set(CPACK_PACKAGE_VERSION_MAJOR ${GTSAM_VERSION_MAJOR})
+set(CPACK_PACKAGE_VERSION_MINOR ${GTSAM_VERSION_MINOR})
+set(CPACK_PACKAGE_VERSION_PATCH ${GTSAM_VERSION_PATCH})
+set(CPACK_PACKAGE_INSTALL_DIRECTORY "CMake ${CMake_VERSION_MAJOR}.${CMake_VERSION_MINOR}")
+#set(CPACK_INSTALLED_DIRECTORIES "doc;.") # Include doc directory
+#set(CPACK_INSTALLED_DIRECTORIES ".") # FIXME: throws error
+set(CPACK_SOURCE_IGNORE_FILES "/build*;/\\\\.;/makestats.sh$")
+set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/gtsam_unstable/")
+set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}" "/package_scripts/")
+set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
+#set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-aspn${GTSAM_VERSION_PATCH}") # Used for creating ASPN tarballs
+
+# Deb-package specific cpack
+set(CPACK_DEBIAN_PACKAGE_NAME "libgtsam-dev")
+set(CPACK_DEBIAN_PACKAGE_DEPENDS "libboost-dev (>= 1.58)") #Example: "libc6 (>= 2.3.1-6), libgcc1 (>= 1:3.4.2-12)")
diff --git a/cmake/HandleEigen.cmake b/cmake/HandleEigen.cmake
new file mode 100644
index 000000000..fda441907
--- /dev/null
+++ b/cmake/HandleEigen.cmake
@@ -0,0 +1,77 @@
+###############################################################################
+# Option for using system Eigen or GTSAM-bundled Eigen
+
+option(GTSAM_USE_SYSTEM_EIGEN "Find and use system-installed Eigen. If 'off', use the one bundled with GTSAM" OFF)
+
+if(NOT GTSAM_USE_SYSTEM_EIGEN)
+ # This option only makes sense if using the embedded copy of Eigen, it is
+ # used to decide whether to *install* the "unsupported" module:
+ option(GTSAM_WITH_EIGEN_UNSUPPORTED "Install Eigen's unsupported modules" OFF)
+endif()
+
+# Switch for using system Eigen or GTSAM-bundled Eigen
+if(GTSAM_USE_SYSTEM_EIGEN)
+ find_package(Eigen3 REQUIRED)
+
+ # Use generic Eigen include paths e.g.
+ set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "${EIGEN3_INCLUDE_DIR}")
+
+ # check if MKL is also enabled - can have one or the other, but not both!
+ # Note: Eigen >= v3.2.5 includes our patches
+ if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_LESS 3.2.5))
+ message(FATAL_ERROR "MKL requires at least Eigen 3.2.5, and your system appears to have an older version. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, or disable GTSAM_WITH_EIGEN_MKL")
+ endif()
+
+ # Check for Eigen version which doesn't work with MKL
+ # See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527 for details.
+ if(EIGEN_USE_MKL_ALL AND (EIGEN3_VERSION VERSION_EQUAL 3.3.4))
+ message(FATAL_ERROR "MKL does not work with Eigen 3.3.4 because of a bug in Eigen. See http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1527. Disable GTSAM_USE_SYSTEM_EIGEN to use GTSAM's copy of Eigen, disable GTSAM_WITH_EIGEN_MKL, or upgrade/patch your installation of Eigen.")
+ endif()
+
+ # The actual include directory (for BUILD cmake target interface):
+ set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${EIGEN3_INCLUDE_DIR}")
+else()
+ # Use bundled Eigen include path.
+ # Clear any variables set by FindEigen3
+ if(EIGEN3_INCLUDE_DIR)
+ set(EIGEN3_INCLUDE_DIR NOTFOUND CACHE STRING "" FORCE)
+ endif()
+
+ # set full path to be used by external projects
+ # this will be added to GTSAM_INCLUDE_DIR by gtsam_extra.cmake.in
+ set(GTSAM_EIGEN_INCLUDE_FOR_INSTALL "include/gtsam/3rdparty/Eigen/")
+
+ # The actual include directory (for BUILD cmake target interface):
+ set(GTSAM_EIGEN_INCLUDE_FOR_BUILD "${CMAKE_SOURCE_DIR}/gtsam/3rdparty/Eigen/")
+endif()
+
+# Detect Eigen version:
+set(EIGEN_VER_H "${GTSAM_EIGEN_INCLUDE_FOR_BUILD}/Eigen/src/Core/util/Macros.h")
+if (EXISTS ${EIGEN_VER_H})
+ file(READ "${EIGEN_VER_H}" STR_EIGEN_VERSION)
+
+ # Extract the Eigen version from the Macros.h file, lines "#define EIGEN_WORLD_VERSION XX", etc...
+
+ string(REGEX MATCH "EIGEN_WORLD_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${STR_EIGEN_VERSION}")
+ string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_WORLD "${GTSAM_EIGEN_VERSION_WORLD}")
+
+ string(REGEX MATCH "EIGEN_MAJOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${STR_EIGEN_VERSION}")
+ string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MAJOR "${GTSAM_EIGEN_VERSION_MAJOR}")
+
+ string(REGEX MATCH "EIGEN_MINOR_VERSION[ ]+[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${STR_EIGEN_VERSION}")
+ string(REGEX MATCH "[0-9]+" GTSAM_EIGEN_VERSION_MINOR "${GTSAM_EIGEN_VERSION_MINOR}")
+
+ set(GTSAM_EIGEN_VERSION "${GTSAM_EIGEN_VERSION_WORLD}.${GTSAM_EIGEN_VERSION_MAJOR}.${GTSAM_EIGEN_VERSION_MINOR}")
+
+ message(STATUS "Found Eigen version: ${GTSAM_EIGEN_VERSION}")
+else()
+ message(WARNING "Cannot determine Eigen version, missing file: `${EIGEN_VER_H}`")
+endif ()
+
+if (MSVC)
+ if (BUILD_SHARED_LIBS)
+ # mute eigen static assert to avoid errors in shared lib
+ list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT)
+ endif()
+ list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE "/wd4244") # Disable loss of precision which is thrown all over our Eigen
+endif()
diff --git a/cmake/HandleFinalChecks.cmake b/cmake/HandleFinalChecks.cmake
new file mode 100644
index 000000000..f91fc7fdb
--- /dev/null
+++ b/cmake/HandleFinalChecks.cmake
@@ -0,0 +1,10 @@
+# Print warnings at the end
+if(GTSAM_WITH_TBB AND NOT TBB_FOUND)
+ message(WARNING "TBB 4.4 or newer was not found - this is ok, but note that GTSAM parallelization will be disabled. Set GTSAM_WITH_TBB to 'Off' to avoid this warning.")
+endif()
+if(GTSAM_WITH_EIGEN_MKL AND NOT MKL_FOUND)
+ message(WARNING "MKL was not found - this is ok, but note that MKL will be disabled. Set GTSAM_WITH_EIGEN_MKL to 'Off' to disable this warning. See INSTALL.md for notes on performance.")
+endif()
+if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND)
+ message(WARNING "Your compiler does not support OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning. See INSTALL.md for notes on performance.")
+endif()
diff --git a/cmake/HandleGeneralOptions.cmake b/cmake/HandleGeneralOptions.cmake
new file mode 100644
index 000000000..ee86066a2
--- /dev/null
+++ b/cmake/HandleGeneralOptions.cmake
@@ -0,0 +1,45 @@
+###############################################################################
+# Set up options
+
+# See whether gtsam_unstable is available (it will be present only if we're using a git checkout)
+if(EXISTS "${PROJECT_SOURCE_DIR}/gtsam_unstable" AND IS_DIRECTORY "${PROJECT_SOURCE_DIR}/gtsam_unstable")
+ set(GTSAM_UNSTABLE_AVAILABLE 1)
+else()
+ set(GTSAM_UNSTABLE_AVAILABLE 0)
+endif()
+
+# Configurable Options
+if(GTSAM_UNSTABLE_AVAILABLE)
+ option(GTSAM_BUILD_UNSTABLE "Enable/Disable libgtsam_unstable" ON)
+ option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON)
+ option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF)
+endif()
+option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
+option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF)
+option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON)
+option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON)
+option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
+option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
+option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
+option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
+option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
+option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF)
+option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
+option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON)
+option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
+option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
+if(NOT MSVC AND NOT XCODE_VERSION)
+ option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON)
+endif()
+
+# Enable GTSAM_ROT3_EXPMAP if GTSAM_POSE3_EXPMAP is enabled, and vice versa.
+if(GTSAM_POSE3_EXPMAP)
+ message(STATUS "GTSAM_POSE3_EXPMAP=ON, enabling GTSAM_ROT3_EXPMAP as well")
+ set(GTSAM_ROT3_EXPMAP 1 CACHE BOOL "" FORCE)
+elseif(GTSAM_ROT3_EXPMAP)
+ message(STATUS "GTSAM_ROT3_EXPMAP=ON, enabling GTSAM_POSE3_EXPMAP as well")
+ set(GTSAM_POSE3_EXPMAP 1 CACHE BOOL "" FORCE)
+endif()
+
+# Set the default Python version. This is later updated in HandlePython.cmake.
+set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of Python to build the wrappers against.")
diff --git a/cmake/HandleGlobalBuildFlags.cmake b/cmake/HandleGlobalBuildFlags.cmake
new file mode 100644
index 000000000..f33e12b94
--- /dev/null
+++ b/cmake/HandleGlobalBuildFlags.cmake
@@ -0,0 +1,52 @@
+# JLBC: These should ideally be ported to "modern cmake" via target properties.
+#
+
+if (CMAKE_GENERATOR STREQUAL "Ninja" AND
+ ((CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) OR
+ (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5)))
+ # Force colored warnings in Ninja's output, if the compiler has -fdiagnostics-color support.
+ # Rationale in https://github.com/ninja-build/ninja/issues/814
+ add_compile_options(-fdiagnostics-color=always)
+endif()
+
+
+# If building DLLs in MSVC, we need to avoid EIGEN_STATIC_ASSERT()
+# or explicit instantiation will generate build errors.
+# See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017
+#
+if(MSVC AND BUILD_SHARED_LIBS)
+ list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT)
+endif()
+
+if (APPLE AND BUILD_SHARED_LIBS)
+ # Set the default install directory on macOS
+ set(CMAKE_INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib")
+endif()
+
+###############################################################################
+# Global compile options
+
+if(MSVC)
+  list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE _CRT_SECURE_NO_WARNINGS _SCL_SECURE_NO_WARNINGS)
+  list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /wd4251 /wd4275 /wd4414 /wd4661 /wd4344 /wd4503) # Disable non-DLL-exported base class and other warnings
+  list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE /bigobj) # Allow large object files for template-based code
+endif()
+
+# GCC 4.8+ complains about local typedefs which we use for shared_ptr etc.
+if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+ if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.8)
+ list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs)
+ endif()
+endif()
+
+# As of XCode 7, clang also complains about this
+if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+ if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0)
+ list_append_cache(GTSAM_COMPILE_OPTIONS_PRIVATE -Wno-unused-local-typedefs)
+ endif()
+endif()
+
+if(GTSAM_ENABLE_CONSISTENCY_CHECKS)
+ # This should be made PUBLIC if GTSAM_EXTRA_CONSISTENCY_CHECKS is someday used in a public .h
+ list_append_cache(GTSAM_COMPILE_DEFINITIONS_PRIVATE GTSAM_EXTRA_CONSISTENCY_CHECKS)
+endif()
diff --git a/cmake/HandleMKL.cmake b/cmake/HandleMKL.cmake
new file mode 100644
index 000000000..5d7ec365b
--- /dev/null
+++ b/cmake/HandleMKL.cmake
@@ -0,0 +1,17 @@
+###############################################################################
+# Find MKL
+find_package(MKL)
+
+if(MKL_FOUND AND GTSAM_WITH_EIGEN_MKL)
+ set(GTSAM_USE_EIGEN_MKL 1) # This will go into config.h
+ set(EIGEN_USE_MKL_ALL 1) # This will go into config.h - it makes Eigen use MKL
+ list(APPEND GTSAM_ADDITIONAL_LIBRARIES ${MKL_LIBRARIES})
+
+ # --no-as-needed is required with gcc according to the MKL link advisor
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+ set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-as-needed")
+ endif()
+else()
+ set(GTSAM_USE_EIGEN_MKL 0)
+ set(EIGEN_USE_MKL_ALL 0)
+endif()
diff --git a/cmake/HandleOpenMP.cmake b/cmake/HandleOpenMP.cmake
new file mode 100644
index 000000000..4f27aa633
--- /dev/null
+++ b/cmake/HandleOpenMP.cmake
@@ -0,0 +1,11 @@
+
+###############################################################################
+# Find OpenMP (if we're also using MKL)
+find_package(OpenMP) # do this here to generate correct message if disabled
+
+if(GTSAM_WITH_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP AND GTSAM_USE_EIGEN_MKL)
+ if(OPENMP_FOUND AND GTSAM_USE_EIGEN_MKL AND GTSAM_WITH_EIGEN_MKL_OPENMP)
+ set(GTSAM_USE_EIGEN_MKL_OPENMP 1) # This will go into config.h
+ list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC ${OpenMP_CXX_FLAGS})
+ endif()
+endif()
diff --git a/cmake/HandlePerfTools.cmake b/cmake/HandlePerfTools.cmake
new file mode 100644
index 000000000..499caf80a
--- /dev/null
+++ b/cmake/HandlePerfTools.cmake
@@ -0,0 +1,4 @@
+
+###############################################################################
+# Find Google perftools
+find_package(GooglePerfTools)
diff --git a/cmake/HandlePrintConfiguration.cmake b/cmake/HandlePrintConfiguration.cmake
new file mode 100644
index 000000000..4ffd00e54
--- /dev/null
+++ b/cmake/HandlePrintConfiguration.cmake
@@ -0,0 +1,104 @@
+###############################################################################
+# Print configuration variables
+message(STATUS "===============================================================")
+message(STATUS "================ Configuration Options ======================")
+print_config("CMAKE_CXX_COMPILER_ID type" "${CMAKE_CXX_COMPILER_ID}")
+print_config("CMAKE_CXX_COMPILER_VERSION" "${CMAKE_CXX_COMPILER_VERSION}")
+print_config("CMake version" "${CMAKE_VERSION}")
+print_config("CMake generator" "${CMAKE_GENERATOR}")
+print_config("CMake build tool" "${CMAKE_BUILD_TOOL}")
+message(STATUS "Build flags ")
+print_enabled_config(${GTSAM_BUILD_TESTS} "Build Tests")
+print_enabled_config(${GTSAM_BUILD_EXAMPLES_ALWAYS} "Build examples with 'make all'")
+print_enabled_config(${GTSAM_BUILD_TIMING_ALWAYS} "Build timing scripts with 'make all'")
+if (DOXYGEN_FOUND)
+ print_enabled_config(${GTSAM_BUILD_DOCS} "Build Docs")
+endif()
+print_enabled_config(${BUILD_SHARED_LIBS} "Build shared GTSAM libraries")
+print_enabled_config(${GTSAM_BUILD_TYPE_POSTFIXES} "Put build type in library name")
+if(GTSAM_UNSTABLE_AVAILABLE)
+ print_enabled_config(${GTSAM_BUILD_UNSTABLE} "Build libgtsam_unstable ")
+ print_enabled_config(${GTSAM_UNSTABLE_BUILD_PYTHON} "Build GTSAM unstable Python ")
+ print_enabled_config(${GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX} "Build MATLAB Toolbox for unstable")
+endif()
+
+if(NOT MSVC AND NOT XCODE_VERSION)
+ print_enabled_config(${GTSAM_BUILD_WITH_MARCH_NATIVE} "Build for native architecture ")
+ print_config("Build type" "${CMAKE_BUILD_TYPE}")
+ print_config("C compilation flags" "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
+ print_config("C++ compilation flags" "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}")
+endif()
+
+print_build_options_for_target(gtsam)
+
+print_config("Use System Eigen" "${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")
+
+if(GTSAM_USE_TBB)
+ print_config("Use Intel TBB" "Yes (Version: ${TBB_VERSION})")
+elseif(TBB_FOUND)
+ print_config("Use Intel TBB" "TBB (Version: ${TBB_VERSION}) found but GTSAM_WITH_TBB is disabled")
+else()
+ print_config("Use Intel TBB" "TBB not found")
+endif()
+if(GTSAM_USE_EIGEN_MKL)
+ print_config("Eigen will use MKL" "Yes")
+elseif(MKL_FOUND)
+ print_config("Eigen will use MKL" "MKL found but GTSAM_WITH_EIGEN_MKL is disabled")
+else()
+ print_config("Eigen will use MKL" "MKL not found")
+endif()
+if(GTSAM_USE_EIGEN_MKL_OPENMP)
+ print_config("Eigen will use MKL and OpenMP" "Yes")
+elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL)
+ print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled")
+elseif(OPENMP_FOUND AND NOT MKL_FOUND)
+ print_config("Eigen will use MKL and OpenMP" "OpenMP found but MKL not found")
+elseif(OPENMP_FOUND)
+ print_config("Eigen will use MKL and OpenMP" "OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled")
+else()
+ print_config("Eigen will use MKL and OpenMP" "OpenMP not found")
+endif()
+print_config("Default allocator" "${GTSAM_DEFAULT_ALLOCATOR}")
+
+if(GTSAM_THROW_CHEIRALITY_EXCEPTION)
+ print_config("Cheirality exceptions enabled" "YES")
+else()
+ print_config("Cheirality exceptions enabled" "NO")
+endif()
+
+if(NOT MSVC AND NOT XCODE_VERSION)
+ if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE)
+ print_config("Build with ccache" "Yes")
+ elseif(CCACHE_FOUND)
+ print_config("Build with ccache" "ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
+ else()
+ print_config("Build with ccache" "No")
+ endif()
+endif()
+
+message(STATUS "Packaging flags")
+print_config("CPack Source Generator" "${CPACK_SOURCE_GENERATOR}")
+print_config("CPack Generator" "${CPACK_GENERATOR}")
+
+message(STATUS "GTSAM flags ")
+print_enabled_config(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ")
+print_enabled_config(${GTSAM_ENABLE_CONSISTENCY_CHECKS} "Runtime consistency checking ")
+print_enabled_config(${GTSAM_ROT3_EXPMAP} "Rot3 retract is full ExpMap ")
+print_enabled_config(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full ExpMap ")
+print_enabled_config(${GTSAM_ALLOW_DEPRECATED_SINCE_V41} "Allow features deprecated in GTSAM 4.1")
+print_enabled_config(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
+print_enabled_config(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
+
+message(STATUS "MATLAB toolbox flags")
+print_enabled_config(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ")
+if (${GTSAM_INSTALL_MATLAB_TOOLBOX})
+ print_config("MATLAB root" "${MATLAB_ROOT}")
+ print_config("MEX binary" "${MEX_COMMAND}")
+endif()
+message(STATUS "Python toolbox flags ")
+print_enabled_config(${GTSAM_BUILD_PYTHON} "Build Python module with pybind ")
+if(GTSAM_BUILD_PYTHON)
+ print_config("Python version" ${GTSAM_PYTHON_VERSION})
+endif()
+
+message(STATUS "===============================================================")
diff --git a/cmake/HandlePython.cmake b/cmake/HandlePython.cmake
new file mode 100644
index 000000000..0c24824bc
--- /dev/null
+++ b/cmake/HandlePython.cmake
@@ -0,0 +1,55 @@
+# Set Python version if either Python or MATLAB wrapper is requested.
+if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX)
+ if(${GTSAM_PYTHON_VERSION} STREQUAL "Default")
+
+ if(${CMAKE_VERSION} VERSION_LESS "3.12.0")
+ # Use older version of cmake's find_python
+ find_package(PythonInterp)
+
+ if(NOT ${PYTHONINTERP_FOUND})
+ message(
+ FATAL_ERROR
+ "Cannot find Python interpreter. Please install Python >= 3.6.")
+ endif()
+
+ find_package(PythonLibs ${PYTHON_VERSION_STRING})
+
+ set(Python_VERSION_MAJOR ${PYTHON_VERSION_MAJOR})
+ set(Python_VERSION_MINOR ${PYTHON_VERSION_MINOR})
+ set(Python_EXECUTABLE ${PYTHON_EXECUTABLE})
+
+ else()
+ # Get info about the Python3 interpreter
+ # https://cmake.org/cmake/help/latest/module/FindPython3.html#module:FindPython3
+ find_package(Python3 COMPONENTS Interpreter Development)
+
+ if(NOT ${Python3_FOUND})
+ message(
+ FATAL_ERROR
+ "Cannot find Python3 interpreter. Please install Python >= 3.6.")
+ endif()
+
+ set(Python_VERSION_MAJOR ${Python3_VERSION_MAJOR})
+ set(Python_VERSION_MINOR ${Python3_VERSION_MINOR})
+
+ endif()
+
+ set(GTSAM_PYTHON_VERSION
+ "${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}"
+ CACHE STRING "The version of Python to build the wrappers against."
+ FORCE)
+
+ endif()
+endif()
+
+# Check for build of Unstable modules
+if(GTSAM_BUILD_PYTHON)
+ if(GTSAM_UNSTABLE_BUILD_PYTHON)
+ if (NOT GTSAM_BUILD_UNSTABLE)
+ message(WARNING "GTSAM_UNSTABLE_BUILD_PYTHON requires the unstable module to be enabled.")
+ set(GTSAM_UNSTABLE_BUILD_PYTHON OFF)
+ endif()
+ endif()
+
+ set(GTSAM_PY_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/python")
+endif()
diff --git a/cmake/HandleTBB.cmake b/cmake/HandleTBB.cmake
new file mode 100644
index 000000000..cedee55ea
--- /dev/null
+++ b/cmake/HandleTBB.cmake
@@ -0,0 +1,24 @@
+###############################################################################
+# Find TBB
+find_package(TBB 4.4 COMPONENTS tbb tbbmalloc)
+
+# Set up variables if we're using TBB
+if(TBB_FOUND AND GTSAM_WITH_TBB)
+ set(GTSAM_USE_TBB 1) # This will go into config.h
+ if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
+ set(TBB_GREATER_EQUAL_2020 1)
+ else()
+ set(TBB_GREATER_EQUAL_2020 0)
+ endif()
+ # all definitions and link requisites will go via imported targets:
+ # tbb & tbbmalloc
+ list(APPEND GTSAM_ADDITIONAL_LIBRARIES tbb tbbmalloc)
+else()
+ set(GTSAM_USE_TBB 0) # This will go into config.h
+endif()
+
+###############################################################################
+# Prohibit Timing build mode in combination with TBB
+if(GTSAM_USE_TBB AND (CMAKE_BUILD_TYPE STREQUAL "Timing"))
+ message(FATAL_ERROR "Timing build mode cannot be used together with TBB. Use a sampling profiler such as Instruments or Intel VTune Amplifier instead.")
+endif()
diff --git a/cmake/HandleUninstall.cmake b/cmake/HandleUninstall.cmake
new file mode 100644
index 000000000..1859b0273
--- /dev/null
+++ b/cmake/HandleUninstall.cmake
@@ -0,0 +1,10 @@
+# ----------------------------------------------------------------------------
+# Uninstall target, for "make uninstall"
+# ----------------------------------------------------------------------------
+configure_file(
+ "${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
+ "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake"
+ IMMEDIATE @ONLY)
+
+add_custom_target(uninstall
+ "${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake")
diff --git a/cmake/dllexport.h.in b/cmake/dllexport.h.in
index 9a0a344b7..7d757edea 100644
--- a/cmake/dllexport.h.in
+++ b/cmake/dllexport.h.in
@@ -47,9 +47,14 @@
# endif
# endif
#else
+#ifdef __APPLE__
+# define @library_name@_EXPORT __attribute__((visibility("default")))
+# define @library_name@_EXTERN_EXPORT extern
+#else
# define @library_name@_EXPORT
# define @library_name@_EXTERN_EXPORT extern
#endif
+#endif
#undef BUILD_SHARED_LIBS
diff --git a/cython/CMakeLists.txt b/cython/CMakeLists.txt
deleted file mode 100644
index 4cc9d2f5d..000000000
--- a/cython/CMakeLists.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-# Install cython components
-include(GtsamCythonWrap)
-
-# Create the cython toolbox for the gtsam library
-if (GTSAM_INSTALL_CYTHON_TOOLBOX)
- # build and include the eigency version of eigency
- add_subdirectory(gtsam_eigency)
- include_directories(${PROJECT_BINARY_DIR}/cython/gtsam_eigency)
-
- # Fix for error "C1128: number of sections exceeded object file format limit"
- if(MSVC)
- add_compile_options(/bigobj)
- endif()
-
- # wrap gtsam
- add_custom_target(gtsam_header DEPENDS "../gtsam.h")
- wrap_and_install_library_cython("../gtsam.h" # interface_header
- "" # extra imports
- "${GTSAM_CYTHON_INSTALL_PATH}/gtsam" # install path
- gtsam # library to link with
- "wrap;cythonize_eigency;gtsam;gtsam_header" # dependencies which need to be built before wrapping
- )
-
- # wrap gtsam_unstable
- if(GTSAM_BUILD_UNSTABLE)
- add_custom_target(gtsam_unstable_header DEPENDS "../gtsam_unstable/gtsam_unstable.h")
- wrap_and_install_library_cython("../gtsam_unstable/gtsam_unstable.h" # interface_header
- "from gtsam.gtsam cimport *" # extra imports
- "${GTSAM_CYTHON_INSTALL_PATH}/gtsam_unstable" # install path
- gtsam_unstable # library to link with
- "gtsam_unstable;gtsam_unstable_header;cythonize_gtsam" # dependencies to be built before wrapping
- )
- endif()
-
- file(READ "${PROJECT_SOURCE_DIR}/cython/requirements.txt" CYTHON_INSTALL_REQUIREMENTS)
- file(READ "${PROJECT_SOURCE_DIR}/README.md" README_CONTENTS)
-
- # Install the custom-generated __init__.py
- # This is to make the build/cython/gtsam folder a python package, so gtsam can be found while wrapping gtsam_unstable
- configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam/__init__.py COPYONLY)
- configure_file(${PROJECT_SOURCE_DIR}/cython/gtsam_unstable/__init__.py ${PROJECT_BINARY_DIR}/cython/gtsam_unstable/__init__.py COPYONLY)
- configure_file(${PROJECT_SOURCE_DIR}/cython/setup.py.in ${PROJECT_BINARY_DIR}/cython/setup.py)
- install_cython_files("${PROJECT_BINARY_DIR}/cython/setup.py" "${GTSAM_CYTHON_INSTALL_PATH}")
- # install scripts and tests
- install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
- install_cython_scripts("${PROJECT_SOURCE_DIR}/cython/gtsam_unstable" "${GTSAM_CYTHON_INSTALL_PATH}" "*.py")
-
-endif ()
diff --git a/cython/README.md b/cython/README.md
deleted file mode 100644
index bc6e346d9..000000000
--- a/cython/README.md
+++ /dev/null
@@ -1,155 +0,0 @@
-# Python Wrapper
-
-This is the Cython/Python wrapper around the GTSAM C++ library.
-
-## Install
-
-- if you want to build the gtsam python library for a specific python version (eg 2.7), use the `-DGTSAM_PYTHON_VERSION=2.7` option when running `cmake` otherwise the default interpreter will be used.
- - If the interpreter is inside an environment (such as an anaconda environment or virtualenv environment) then the environment should be active while building gtsam.
-- This wrapper needs Cython(>=0.25.2), backports_abc>=0.5, and numpy. These can be installed as follows:
-
-```bash
- pip install -r /cython/requirements.txt
-```
-
-- For compatibility with gtsam's Eigen version, it contains its own cloned version of [Eigency](https://github.com/wouterboomsma/eigency.git),
-named **gtsam_eigency**, to interface between C++'s Eigen and Python's numpy.
-
-- Build and install gtsam using cmake with `GTSAM_INSTALL_CYTHON_TOOLBOX` enabled.
-The wrapped module will be installed to `GTSAM_CYTHON_INSTALL_PATH`, which is
-by default: `/cython`
-
-- To use the library without installing system-wide: modify your `PYTHONPATH` to include the `GTSAM_CYTHON_INSTALL_PATH`:
-```bash
-export PYTHONPATH=$PYTHONPATH:
-```
-- To install system-wide: run `make install` then navigate to `GTSAM_CYTHON_INSTALL_PATH` and run `python setup.py install`
- - (the same command can be used to install into a virtual environment if it is active)
- - note: if you don't want gtsam to install to a system directory such as `/usr/local`, pass `-DCMAKE_INSTALL_PREFIX="./install"` to cmake to install gtsam to a subdirectory of the build directory.
- - if you run `setup.py` from the build directory rather than the installation directory, the script will warn you with the message: `setup.py is being run from an unexpected location`.
- Before `make install` is run, not all the components of the package have been copied across, so running `setup.py` from the build directory would result in an incomplete package.
-
-## Unit Tests
-
-The Cython toolbox also has a small set of unit tests located in the
-test directory. To run them:
-
-```bash
- cd
- python -m unittest discover
-```
-
-## Writing Your Own Scripts
-
-See the tests for examples.
-
-### Some Important Notes:
-
-- Vector/Matrix:
- + GTSAM expects double-precision floating point vectors and matrices.
- Hence, you should pass numpy matrices with dtype=float, or 'float64'.
- + Also, GTSAM expects *column-major* matrices, unlike the default storage
- scheme in numpy. Hence, you should pass column-major matrices to gtsam using
- the flag order='F'. And you always get column-major matrices back.
- For more details, see: https://github.com/wouterboomsma/eigency#storage-layout---why-arrays-are-sometimes-transposed
- + Passing row-major matrices of different dtype, e.g. 'int', will also work
- as the wrapper converts them to column-major and dtype float for you,
- using numpy.array.astype(float, order='F', copy=False).
- However, this will result a copy if your matrix is not in the expected type
- and storage order.
-
-- Inner namespace: Classes in inner namespace will be prefixed by _ in Python.
-Examples: noiseModel_Gaussian, noiseModel_mEstimator_Tukey
-
-- Casting from a base class to a derive class must be done explicitly.
-Examples:
-```Python
- noiseBase = factor.noiseModel()
- noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase)
-```
-
-## Wrapping Your Own Project That Uses GTSAM
-
-- Set PYTHONPATH to include ${GTSAM_CYTHON_INSTALL_PATH}
- + so that it can find gtsam Cython header: gtsam/gtsam.pxd
-
-- In your CMakeList.txt
-```cmake
-find_package(GTSAM REQUIRED) # Make sure gtsam's install folder is in your PATH
-set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${GTSAM_DIR}/../GTSAMCMakeTools")
-
-# Wrap
-include(GtsamCythonWrap)
-include_directories(${GTSAM_EIGENCY_INSTALL_PATH})
-wrap_and_install_library_cython("your_project_interface.h"
- "from gtsam.gtsam cimport *" # extra import of gtsam/gtsam.pxd Cython header
- "your_install_path"
- "libraries_to_link_with_the_cython_module"
- "dependencies_which_need_to_be_built_before_the_wrapper"
- )
-#Optional: install_cython_scripts and install_cython_files. See GtsamCythonWrap.cmake.
-```
-
-## KNOWN ISSUES
-
- - Doesn't work with python3 installed from homebrew
- - size-related issue: can only wrap up to a certain number of classes: up to mEstimator!
- - Guess: 64 vs 32b? disutils Compiler flags?
- - Bug with Cython 0.24: instantiated factor classes return FastVector for keys(), which can't be casted to FastVector
- - Upgrading to 0.25 solves the problem
- - Need default constructor and default copy constructor for almost every classes... :(
- - support these constructors by default and declare "delete" for special classes?
-
-
-### TODO
-
-- [ ] allow duplication of parent' functions in child classes. Not allowed for now due to conflicts in Cython.
-- [ ] a common header for boost shared_ptr? (Or wait until everything is switched to std::shared_ptr in gtsam?)
-- [ ] inner namespaces ==> inner packages?
-- [ ] Wrap fixed-size Matrices/Vectors?
-
-
-### Completed/Cancelled:
-
-- [x] Fix Python tests: don't use " import * ": Bad style!!! (18-03-17 19:50)
-- [x] Unit tests for cython wrappers @done (18-03-17 18:45) -- simply compare generated files
-- [x] Wrap unstable @done (18-03-17 15:30)
-- [x] Unify cython/gtsam.h and the original gtsam.h @done (18-03-17 15:30)
-- [x] 18-03-17: manage to unify the two versions by removing std container stubs from the matlab version,and keeping KeyList/KeyVector/KeySet as in the matlab version. Probably Cython 0.25 fixes the casting problem.
-- [x] 06-03-17: manage to remove the requirements for default and copy constructors
-- [ ] 25-11-16: Try to unify but failed. Main reasons are: Key/size_t, std containers, KeyVector/KeyList/KeySet. Matlab doesn't need to know about Key, but I can't make Cython to ignore Key as it couldn't cast KeyVector, i.e. FastVector, to FastVector.
-- [ ] Marginal and JointMarginal: revert changes @failed (17-03-17 11:00) -- Cython does need a default constructor! It produces cpp code like this: ```gtsam::JointMarginal __pyx_t_1;``` Users don't have to wrap this constructor, however.
-- [x] Convert input numpy Matrix/Vector to float dtype and storage order 'F' automatically, cannot crash! @done (15-03-17 13:00)
-- [x] Remove requirements.txt - Frank: don't bother with only 2 packages and a special case for eigency! @done (08-03-17 10:30)
-- [x] CMake install script @done (25-11-16 02:30)
-- [ ] [REFACTOR] better name for uninstantiateClass: very vague!! @cancelled (25-11-16 02:30) -- lazy
-- [ ] forward declaration? @cancelled (23-11-16 13:00) - nothing to do, seem to work?
-- [x] wrap VariableIndex: why is it in inference? If need to, shouldn't have constructors to specific FactorGraphs @done (23-11-16 13:00)
-- [x] Global functions @done (22-11-16 21:00)
-- [x] [REFACTOR] typesEqual --> isSameSignature @done (22-11-16 21:00)
-- [x] Proper overloads (constructors, static methods, methods) @done (20-11-16 21:00)
-- [x] Allow overloading methods. The current solution is annoying!!! @done (20-11-16 21:00)
-- [x] Casting from parent and grandparents @done (16-11-16 17:00)
-- [x] Allow overloading constructors. The current solution is annoying!!! @done (16-11-16 17:00)
-- [x] Support "print obj" @done (16-11-16 17:00)
-- [x] methods for FastVector: at, [], ... @done (16-11-16 17:00)
-- [x] Cython: Key and size_t: traits doesn't exist @done (16-09-12 18:34)
-- [x] KeyVector, KeyList, KeySet... @done (16-09-13 17:19)
-- [x] [Nice to have] parse typedef @done (16-09-13 17:19)
-- [x] ctypedef at correct places @done (16-09-12 18:34)
-- [x] expand template variable type in constructor/static methods? @done (16-09-12 18:34)
-- [x] NonlinearOptimizer: copy constructor deleted!!! @done (16-09-13 17:20)
-- [x] Value: no default constructor @done (16-09-13 17:20)
-- [x] ctypedef PriorFactor[Vector] PriorFactorVector @done (16-09-19 12:25)
-- [x] Delete duplicate methods in derived class @done (16-09-12 13:38)
-- [x] Fix return properly @done (16-09-11 17:14)
-- [x] handle pair @done (16-09-11 17:14)
-- [x] Eigency: ambiguous call: A(const T&) A(const Vector& v) and Eigency A(Map[Vector]& v) @done (16-09-11 07:59)
-- [x] Eigency: Constructor: ambiguous construct from Vector/Matrix @done (16-09-11 07:59)
-- [x] Eigency: Fix method template of Vector/Matrix: template argument is [Vector] while arugment is Map[Vector] @done (16-09-11 08:22)
-- [x] Robust noise: copy assignment operator is deleted because of shared_ptr of the abstract Base class @done (16-09-10 09:05)
-- [ ] Cython: Constructor: generate default constructor? (hack: if it's serializable?) @cancelled (16-09-13 17:20)
-- [ ] Eigency: Map[] to Block @created(16-09-10 07:59) @cancelled (16-09-11 08:28)
-
-- inference before symbolic/linear
-- what's the purpose of "virtual" ??
diff --git a/cython/gtsam/__init__.py b/cython/gtsam/__init__.py
deleted file mode 100644
index d40ee4502..000000000
--- a/cython/gtsam/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from .gtsam import *
-
-try:
- import gtsam_unstable
-
-
- def _deprecated_wrapper(item, name):
- def wrapper(*args, **kwargs):
- from warnings import warn
- message = ('importing the unstable item "{}" directly from gtsam is deprecated. '.format(name) +
- 'Please import it from gtsam_unstable.')
- warn(message)
- return item(*args, **kwargs)
- return wrapper
-
-
- for name in dir(gtsam_unstable):
- if not name.startswith('__'):
- item = getattr(gtsam_unstable, name)
- if callable(item):
- item = _deprecated_wrapper(item, name)
- globals()[name] = item
-
-except ImportError:
- pass
-
diff --git a/cython/gtsam/examples/ImuFactorExample.py b/cython/gtsam/examples/ImuFactorExample.py
deleted file mode 100644
index 0e01766e7..000000000
--- a/cython/gtsam/examples/ImuFactorExample.py
+++ /dev/null
@@ -1,153 +0,0 @@
-"""
-GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
-Atlanta, Georgia 30332-0415
-All Rights Reserved
-
-See LICENSE for the license information
-
-A script validating and demonstrating the ImuFactor inference.
-
-Author: Frank Dellaert, Varun Agrawal
-"""
-
-from __future__ import print_function
-
-import math
-
-import gtsam
-import matplotlib.pyplot as plt
-import numpy as np
-from gtsam import symbol_shorthand_B as B
-from gtsam import symbol_shorthand_V as V
-from gtsam import symbol_shorthand_X as X
-from gtsam.utils.plot import plot_pose3
-from mpl_toolkits.mplot3d import Axes3D
-
-from PreintegrationExample import POSES_FIG, PreintegrationExample
-
-BIAS_KEY = B(0)
-
-
-np.set_printoptions(precision=3, suppress=True)
-
-
-class ImuFactorExample(PreintegrationExample):
-
- def __init__(self):
- self.velocity = np.array([2, 0, 0])
- self.priorNoise = gtsam.noiseModel_Isotropic.Sigma(6, 0.1)
- self.velNoise = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
-
- # Choose one of these twists to change scenario:
- zero_twist = (np.zeros(3), np.zeros(3))
- forward_twist = (np.zeros(3), self.velocity)
- loop_twist = (np.array([0, -math.radians(30), 0]), self.velocity)
- sick_twist = (
- np.array([math.radians(30), -math.radians(30), 0]), self.velocity)
-
- accBias = np.array([-0.3, 0.1, 0.2])
- gyroBias = np.array([0.1, 0.3, -0.1])
- bias = gtsam.imuBias_ConstantBias(accBias, gyroBias)
-
- dt = 1e-2
- super(ImuFactorExample, self).__init__(sick_twist, bias, dt)
-
- def addPrior(self, i, graph):
- state = self.scenario.navState(i)
- graph.push_back(gtsam.PriorFactorPose3(
- X(i), state.pose(), self.priorNoise))
- graph.push_back(gtsam.PriorFactorVector(
- V(i), state.velocity(), self.velNoise))
-
- def run(self):
- graph = gtsam.NonlinearFactorGraph()
-
- # initialize data structure for pre-integrated IMU measurements
- pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias)
-
- T = 12
- num_poses = T + 1 # assumes 1 factor per second
- initial = gtsam.Values()
- initial.insert(BIAS_KEY, self.actualBias)
- for i in range(num_poses):
- state_i = self.scenario.navState(float(i))
-
- poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1)
- pose = state_i.pose().compose(poseNoise)
-
- velocity = state_i.velocity() + np.random.randn(3)*0.1
-
- initial.insert(X(i), pose)
- initial.insert(V(i), velocity)
-
- # simulate the loop
- i = 0 # state index
- actual_state_i = self.scenario.navState(0)
- for k, t in enumerate(np.arange(0, T, self.dt)):
- # get measurements and add them to PIM
- measuredOmega = self.runner.measuredAngularVelocity(t)
- measuredAcc = self.runner.measuredSpecificForce(t)
- pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt)
-
- poseNoise = gtsam.Pose3.Expmap(np.random.randn(3)*0.1)
-
- actual_state_i = gtsam.NavState(
- actual_state_i.pose().compose(poseNoise),
- actual_state_i.velocity() + np.random.randn(3)*0.1)
-
- # Plot IMU many times
- if k % 10 == 0:
- self.plotImu(t, measuredOmega, measuredAcc)
-
- # Plot every second
- if k % int(1 / self.dt) == 0:
- self.plotGroundTruthPose(t)
-
- # create IMU factor every second
- if (k + 1) % int(1 / self.dt) == 0:
- factor = gtsam.ImuFactor(X(i), V(i), X(
- i + 1), V(i + 1), BIAS_KEY, pim)
- graph.push_back(factor)
- if True:
- print(factor)
- print(pim.predict(actual_state_i, self.actualBias))
- pim.resetIntegration()
- actual_state_i = self.scenario.navState(t + self.dt)
- i += 1
-
- # add priors on beginning and end
- self.addPrior(0, graph)
- self.addPrior(num_poses - 1, graph)
-
- # optimize using Levenberg-Marquardt optimization
- params = gtsam.LevenbergMarquardtParams()
- params.setVerbosityLM("SUMMARY")
- optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
- result = optimizer.optimize()
-
- # Calculate and print marginal covariances
- marginals = gtsam.Marginals(graph, result)
- print("Covariance on bias:\n", marginals.marginalCovariance(BIAS_KEY))
- for i in range(num_poses):
- print("Covariance on pose {}:\n{}\n".format(
- i, marginals.marginalCovariance(X(i))))
- print("Covariance on vel {}:\n{}\n".format(
- i, marginals.marginalCovariance(V(i))))
-
- # Plot resulting poses
- i = 0
- while result.exists(X(i)):
- pose_i = result.atPose3(X(i))
- plot_pose3(POSES_FIG, pose_i, 0.1)
- i += 1
-
- gtsam.utils.plot.set_axes_equal(POSES_FIG)
-
- print(result.atimuBias_ConstantBias(BIAS_KEY))
-
- plt.ioff()
- plt.show()
-
-
-if __name__ == '__main__':
- ImuFactorExample().run()
diff --git a/cython/gtsam_eigency/CMakeLists.txt b/cython/gtsam_eigency/CMakeLists.txt
deleted file mode 100644
index 77bead834..000000000
--- a/cython/gtsam_eigency/CMakeLists.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-include(GtsamCythonWrap)
-
-# Copy eigency's sources to the build folder
-# so that the cython-generated header "conversions_api.h" can be found when cythonizing eigency's core
-# and eigency's cython pxd headers can be found when cythonizing gtsam
-file(COPY "." DESTINATION ".")
-set(OUTPUT_DIR "${PROJECT_BINARY_DIR}/cython/gtsam_eigency")
-set(EIGENCY_INCLUDE_DIR ${OUTPUT_DIR})
-
-# This is to make the build/cython/gtsam_eigency folder a python package
-configure_file(__init__.py.in ${PROJECT_BINARY_DIR}/cython/gtsam_eigency/__init__.py)
-
-# include eigency headers
-include_directories(${EIGENCY_INCLUDE_DIR})
-
-# Cythonize and build eigency
-message(STATUS "Cythonize and build eigency")
-# Important trick: use "../gtsam_eigency/conversions.pyx" to let cython know that the conversions module is
-# a part of the gtsam_eigency package and generate the function call import_gtsam_igency__conversions()
-# in conversions_api.h correctly!!!
-cythonize(cythonize_eigency_conversions "../gtsam_eigency/conversions.pyx" "conversions"
- "${OUTPUT_DIR}" "${EIGENCY_INCLUDE_DIR}" "" "" "")
-cythonize(cythonize_eigency_core "../gtsam_eigency/core.pyx" "core"
- ${OUTPUT_DIR} "${EIGENCY_INCLUDE_DIR}" "" "" "")
-
-# Include Eigen headers:
-target_include_directories(cythonize_eigency_conversions PUBLIC
- $
- $
-)
-target_include_directories(cythonize_eigency_core PUBLIC
- $
- $
-)
-
-add_dependencies(cythonize_eigency_core cythonize_eigency_conversions)
-add_custom_target(cythonize_eigency)
-add_dependencies(cythonize_eigency cythonize_eigency_conversions cythonize_eigency_core)
-
-# install
-install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
- DESTINATION "${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}"
- PATTERN "CMakeLists.txt" EXCLUDE
- PATTERN "__init__.py.in" EXCLUDE)
-install(TARGETS cythonize_eigency_core cythonize_eigency_conversions
- DESTINATION "${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency")
-install(FILES ${OUTPUT_DIR}/conversions_api.h DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency)
-configure_file(__init__.py.in ${OUTPUT_DIR}/__init__.py)
-install(FILES ${OUTPUT_DIR}/__init__.py DESTINATION ${GTSAM_CYTHON_INSTALL_PATH}${GTSAM_BUILD_TAG}/gtsam_eigency)
diff --git a/cython/gtsam_eigency/LICENSE.txt b/cython/gtsam_eigency/LICENSE.txt
deleted file mode 100644
index 71743c864..000000000
--- a/cython/gtsam_eigency/LICENSE.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2016 Wouter Boomsma
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/cython/gtsam_eigency/__init__.py.in b/cython/gtsam_eigency/__init__.py.in
deleted file mode 100644
index a59d51eab..000000000
--- a/cython/gtsam_eigency/__init__.py.in
+++ /dev/null
@@ -1,13 +0,0 @@
-import os
-import numpy as np
-
-__eigen_dir__ = "${GTSAM_EIGEN_INCLUDE_FOR_INSTALL}"
-
-def get_includes(include_eigen=True):
- root = os.path.dirname(__file__)
- parent = os.path.join(root, "..")
- path = [root, parent, np.get_include()]
- if include_eigen:
- path.append(os.path.join(root, __eigen_dir__))
- return path
-
diff --git a/cython/gtsam_eigency/conversions.pxd b/cython/gtsam_eigency/conversions.pxd
deleted file mode 100644
index f4445e585..000000000
--- a/cython/gtsam_eigency/conversions.pxd
+++ /dev/null
@@ -1,62 +0,0 @@
-cimport numpy as np
-
-cdef api np.ndarray[double, ndim=2] ndarray_double_C(double *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[double, ndim=2] ndarray_double_F(double *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[double, ndim=2] ndarray_copy_double_C(const double *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[double, ndim=2] ndarray_copy_double_F(const double *data, long rows, long cols, long outer_stride, long inner_stride)
-
-cdef api np.ndarray[float, ndim=2] ndarray_float_C(float *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[float, ndim=2] ndarray_float_F(float *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[float, ndim=2] ndarray_copy_float_C(const float *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[float, ndim=2] ndarray_copy_float_F(const float *data, long rows, long cols, long outer_stride, long inner_stride)
-
-cdef api np.ndarray[long, ndim=2] ndarray_long_C(long *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[long, ndim=2] ndarray_long_F(long *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[long, ndim=2] ndarray_copy_long_C(const long *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[long, ndim=2] ndarray_copy_long_F(const long *data, long rows, long cols, long outer_stride, long inner_stride)
-
-cdef api np.ndarray[unsigned long, ndim=2] ndarray_ulong_C(unsigned long *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[unsigned long, ndim=2] ndarray_ulong_F(unsigned long *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_C(const unsigned long *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_F(const unsigned long *data, long rows, long cols, long outer_stride, long inner_stride)
-
-cdef api np.ndarray[int, ndim=2] ndarray_int_C(int *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[int, ndim=2] ndarray_int_F(int *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[int, ndim=2] ndarray_copy_int_C(const int *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[int, ndim=2] ndarray_copy_int_F(const int *data, long rows, long cols, long outer_stride, long inner_stride)
-
-cdef api np.ndarray[unsigned int, ndim=2] ndarray_uint_C(unsigned int *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[unsigned int, ndim=2] ndarray_uint_F(unsigned int *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_C(const unsigned int *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_F(const unsigned int *data, long rows, long cols, long outer_stride, long inner_stride)
-
-cdef api np.ndarray[short, ndim=2] ndarray_short_C(short *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[short, ndim=2] ndarray_short_F(short *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[short, ndim=2] ndarray_copy_short_C(const short *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[short, ndim=2] ndarray_copy_short_F(const short *data, long rows, long cols, long outer_stride, long inner_stride)
-
-cdef api np.ndarray[unsigned short, ndim=2] ndarray_ushort_C(unsigned short *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[unsigned short, ndim=2] ndarray_ushort_F(unsigned short *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_C(const unsigned short *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_F(const unsigned short *data, long rows, long cols, long outer_stride, long inner_stride)
-
-cdef api np.ndarray[signed char, ndim=2] ndarray_schar_C(signed char *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[signed char, ndim=2] ndarray_schar_F(signed char *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[signed char, ndim=2] ndarray_copy_schar_C(const signed char *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[signed char, ndim=2] ndarray_copy_schar_F(const signed char *data, long rows, long cols, long outer_stride, long inner_stride)
-
-cdef api np.ndarray[unsigned char, ndim=2] ndarray_uchar_C(unsigned char *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[unsigned char, ndim=2] ndarray_uchar_F(unsigned char *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_C(const unsigned char *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_F(const unsigned char *data, long rows, long cols, long outer_stride, long inner_stride)
-
-cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_C(np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_F(np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_C(const np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_F(const np.complex128_t *data, long rows, long cols, long outer_stride, long inner_stride)
-
-cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_C(np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_F(np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_C(const np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride)
-cdef api np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_F(const np.complex64_t *data, long rows, long cols, long outer_stride, long inner_stride)
-
diff --git a/cython/gtsam_eigency/conversions.pyx b/cython/gtsam_eigency/conversions.pyx
deleted file mode 100644
index 55c9ae0cd..000000000
--- a/cython/gtsam_eigency/conversions.pyx
+++ /dev/null
@@ -1,327 +0,0 @@
-cimport cython
-import numpy as np
-from numpy.lib.stride_tricks import as_strided
-
-@cython.boundscheck(False)
-cdef np.ndarray[double, ndim=2] ndarray_double_C(double *data, long rows, long cols, long row_stride, long col_stride):
- cdef double[:,:] mem_view = data
- dtype = 'double'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
-@cython.boundscheck(False)
-cdef np.ndarray[double, ndim=2] ndarray_double_F(double *data, long rows, long cols, long row_stride, long col_stride):
- cdef double[::1,:] mem_view = data
- dtype = 'double'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
-
-@cython.boundscheck(False)
-cdef np.ndarray[double, ndim=2] ndarray_copy_double_C(const double *data, long rows, long cols, long row_stride, long col_stride):
- cdef double[:,:] mem_view = data
- dtype = 'double'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
-@cython.boundscheck(False)
-cdef np.ndarray[double, ndim=2] ndarray_copy_double_F(const double *data, long rows, long cols, long row_stride, long col_stride):
- cdef double[::1,:] mem_view = data
- dtype = 'double'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
-
-
-@cython.boundscheck(False)
-cdef np.ndarray[float, ndim=2] ndarray_float_C(float *data, long rows, long cols, long row_stride, long col_stride):
- cdef float[:,:] mem_view = data
- dtype = 'float'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
-@cython.boundscheck(False)
-cdef np.ndarray[float, ndim=2] ndarray_float_F(float *data, long rows, long cols, long row_stride, long col_stride):
- cdef float[::1,:] mem_view = data
- dtype = 'float'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
-
-@cython.boundscheck(False)
-cdef np.ndarray[float, ndim=2] ndarray_copy_float_C(const float *data, long rows, long cols, long row_stride, long col_stride):
- cdef float[:,:] mem_view = data
- dtype = 'float'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
-@cython.boundscheck(False)
-cdef np.ndarray[float, ndim=2] ndarray_copy_float_F(const float *data, long rows, long cols, long row_stride, long col_stride):
- cdef float[::1,:] mem_view = data
- dtype = 'float'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
-
-
-@cython.boundscheck(False)
-cdef np.ndarray[long, ndim=2] ndarray_long_C(long *data, long rows, long cols, long row_stride, long col_stride):
- cdef long[:,:] mem_view = data
- dtype = 'int_'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
-@cython.boundscheck(False)
-cdef np.ndarray[long, ndim=2] ndarray_long_F(long *data, long rows, long cols, long row_stride, long col_stride):
- cdef long[::1,:] mem_view = data
- dtype = 'int_'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
-
-@cython.boundscheck(False)
-cdef np.ndarray[long, ndim=2] ndarray_copy_long_C(const long *data, long rows, long cols, long row_stride, long col_stride):
- cdef long[:,:] mem_view = data
- dtype = 'int_'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
-@cython.boundscheck(False)
-cdef np.ndarray[long, ndim=2] ndarray_copy_long_F(const long *data, long rows, long cols, long row_stride, long col_stride):
- cdef long[::1,:] mem_view = data
- dtype = 'int_'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
-
-
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned long, ndim=2] ndarray_ulong_C(unsigned long *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned long[:,:] mem_view = data
- dtype = 'uint'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned long, ndim=2] ndarray_ulong_F(unsigned long *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned long[::1,:] mem_view = data
- dtype = 'uint'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
-
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_C(const unsigned long *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned long[:,:] mem_view = data
- dtype = 'uint'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned long, ndim=2] ndarray_copy_ulong_F(const unsigned long *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned long[::1,:] mem_view = data
- dtype = 'uint'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
-
-
-@cython.boundscheck(False)
-cdef np.ndarray[int, ndim=2] ndarray_int_C(int *data, long rows, long cols, long row_stride, long col_stride):
- cdef int[:,:] mem_view = data
- dtype = 'int'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
-@cython.boundscheck(False)
-cdef np.ndarray[int, ndim=2] ndarray_int_F(int *data, long rows, long cols, long row_stride, long col_stride):
- cdef int[::1,:] mem_view = data
- dtype = 'int'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
-
-@cython.boundscheck(False)
-cdef np.ndarray[int, ndim=2] ndarray_copy_int_C(const int *data, long rows, long cols, long row_stride, long col_stride):
- cdef int[:,:] mem_view = data
- dtype = 'int'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
-@cython.boundscheck(False)
-cdef np.ndarray[int, ndim=2] ndarray_copy_int_F(const int *data, long rows, long cols, long row_stride, long col_stride):
- cdef int[::1,:] mem_view = data
- dtype = 'int'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
-
-
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned int, ndim=2] ndarray_uint_C(unsigned int *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned int[:,:] mem_view = data
- dtype = 'uint'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned int, ndim=2] ndarray_uint_F(unsigned int *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned int[::1,:] mem_view = data
- dtype = 'uint'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
-
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_C(const unsigned int *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned int[:,:] mem_view = data
- dtype = 'uint'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned int, ndim=2] ndarray_copy_uint_F(const unsigned int *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned int[::1,:] mem_view = data
- dtype = 'uint'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
-
-
-@cython.boundscheck(False)
-cdef np.ndarray[short, ndim=2] ndarray_short_C(short *data, long rows, long cols, long row_stride, long col_stride):
- cdef short[:,:] mem_view = data
- dtype = 'short'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
-@cython.boundscheck(False)
-cdef np.ndarray[short, ndim=2] ndarray_short_F(short *data, long rows, long cols, long row_stride, long col_stride):
- cdef short[::1,:] mem_view = data
- dtype = 'short'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
-
-@cython.boundscheck(False)
-cdef np.ndarray[short, ndim=2] ndarray_copy_short_C(const short *data, long rows, long cols, long row_stride, long col_stride):
- cdef short[:,:] mem_view = data
- dtype = 'short'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
-@cython.boundscheck(False)
-cdef np.ndarray[short, ndim=2] ndarray_copy_short_F(const short *data, long rows, long cols, long row_stride, long col_stride):
- cdef short[::1,:] mem_view = data
- dtype = 'short'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
-
-
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned short, ndim=2] ndarray_ushort_C(unsigned short *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned short[:,:] mem_view = data
- dtype = 'ushort'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned short, ndim=2] ndarray_ushort_F(unsigned short *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned short[::1,:] mem_view = data
- dtype = 'ushort'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
-
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_C(const unsigned short *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned short[:,:] mem_view = data
- dtype = 'ushort'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned short, ndim=2] ndarray_copy_ushort_F(const unsigned short *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned short[::1,:] mem_view = data
- dtype = 'ushort'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
-
-
-@cython.boundscheck(False)
-cdef np.ndarray[signed char, ndim=2] ndarray_schar_C(signed char *data, long rows, long cols, long row_stride, long col_stride):
- cdef signed char[:,:] mem_view = data
- dtype = 'int8'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
-@cython.boundscheck(False)
-cdef np.ndarray[signed char, ndim=2] ndarray_schar_F(signed char *data, long rows, long cols, long row_stride, long col_stride):
- cdef signed char[::1,:] mem_view = data
- dtype = 'int8'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
-
-@cython.boundscheck(False)
-cdef np.ndarray[signed char, ndim=2] ndarray_copy_schar_C(const signed char *data, long rows, long cols, long row_stride, long col_stride):
- cdef signed char[:,:] mem_view = data
- dtype = 'int8'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
-@cython.boundscheck(False)
-cdef np.ndarray[signed char, ndim=2] ndarray_copy_schar_F(const signed char *data, long rows, long cols, long row_stride, long col_stride):
- cdef signed char[::1,:] mem_view = data
- dtype = 'int8'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
-
-
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned char, ndim=2] ndarray_uchar_C(unsigned char *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned char[:,:] mem_view = data
- dtype = 'uint8'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned char, ndim=2] ndarray_uchar_F(unsigned char *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned char[::1,:] mem_view = data
- dtype = 'uint8'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
-
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_C(const unsigned char *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned char[:,:] mem_view = data
- dtype = 'uint8'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
-@cython.boundscheck(False)
-cdef np.ndarray[unsigned char, ndim=2] ndarray_copy_uchar_F(const unsigned char *data, long rows, long cols, long row_stride, long col_stride):
- cdef unsigned char[::1,:] mem_view = data
- dtype = 'uint8'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
-
-
-@cython.boundscheck(False)
-cdef np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_C(np.complex128_t *data, long rows, long cols, long row_stride, long col_stride):
- cdef np.complex128_t[:,:] mem_view = data
- dtype = 'complex128'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
-@cython.boundscheck(False)
-cdef np.ndarray[np.complex128_t, ndim=2] ndarray_complex_double_F(np.complex128_t *data, long rows, long cols, long row_stride, long col_stride):
- cdef np.complex128_t[::1,:] mem_view = data
- dtype = 'complex128'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
-
-@cython.boundscheck(False)
-cdef np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_C(const np.complex128_t *data, long rows, long cols, long row_stride, long col_stride):
- cdef np.complex128_t[:,:] mem_view = data
- dtype = 'complex128'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
-@cython.boundscheck(False)
-cdef np.ndarray[np.complex128_t, ndim=2] ndarray_copy_complex_double_F(const np.complex128_t *data, long rows, long cols, long row_stride, long col_stride):
- cdef np.complex128_t[::1,:] mem_view = data
- dtype = 'complex128'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
-
-
-@cython.boundscheck(False)
-cdef np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_C(np.complex64_t *data, long rows, long cols, long row_stride, long col_stride):
- cdef np.complex64_t[:,:] mem_view = data
- dtype = 'complex64'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize])
-@cython.boundscheck(False)
-cdef np.ndarray[np.complex64_t, ndim=2] ndarray_complex_float_F(np.complex64_t *data, long rows, long cols, long row_stride, long col_stride):
- cdef np.complex64_t[::1,:] mem_view = data
- dtype = 'complex64'
- cdef int itemsize = np.dtype(dtype).itemsize
- return as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize])
-
-@cython.boundscheck(False)
-cdef np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_C(const np.complex64_t *data, long rows, long cols, long row_stride, long col_stride):
- cdef np.complex64_t[:,:] mem_view = data
- dtype = 'complex64'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="C"), strides=[row_stride*itemsize, col_stride*itemsize]))
-@cython.boundscheck(False)
-cdef np.ndarray[np.complex64_t, ndim=2] ndarray_copy_complex_float_F(const np.complex64_t *data, long rows, long cols, long row_stride, long col_stride):
- cdef np.complex64_t[::1,:] mem_view = data
- dtype = 'complex64'
- cdef int itemsize = np.dtype(dtype).itemsize
- return np.copy(as_strided(np.asarray(mem_view, dtype=dtype, order="F"), strides=[row_stride*itemsize, col_stride*itemsize]))
-
diff --git a/cython/gtsam_eigency/core.pxd b/cython/gtsam_eigency/core.pxd
deleted file mode 100644
index 9a84c3c16..000000000
--- a/cython/gtsam_eigency/core.pxd
+++ /dev/null
@@ -1,917 +0,0 @@
-cimport cython
-cimport numpy as np
-
-ctypedef signed char schar;
-ctypedef unsigned char uchar;
-
-ctypedef fused dtype:
- uchar
- schar
- short
- int
- long
- float
- double
-
-ctypedef fused DenseType:
- Matrix
- Array
-
-ctypedef fused Rows:
- _1
- _2
- _3
- _4
- _5
- _6
- _7
- _8
- _9
- _10
- _11
- _12
- _13
- _14
- _15
- _16
- _17
- _18
- _19
- _20
- _21
- _22
- _23
- _24
- _25
- _26
- _27
- _28
- _29
- _30
- _31
- _32
- Dynamic
-
-ctypedef Rows Cols
-ctypedef Rows StrideOuter
-ctypedef Rows StrideInner
-
-ctypedef fused DenseTypeShort:
- Vector1i
- Vector2i
- Vector3i
- Vector4i
- VectorXi
- RowVector1i
- RowVector2i
- RowVector3i
- RowVector4i
- RowVectorXi
- Matrix1i
- Matrix2i
- Matrix3i
- Matrix4i
- MatrixXi
- Vector1f
- Vector2f
- Vector3f
- Vector4f
- VectorXf
- RowVector1f
- RowVector2f
- RowVector3f
- RowVector4f
- RowVectorXf
- Matrix1f
- Matrix2f
- Matrix3f
- Matrix4f
- MatrixXf
- Vector1d
- Vector2d
- Vector3d
- Vector4d
- VectorXd
- RowVector1d
- RowVector2d
- RowVector3d
- RowVector4d
- RowVectorXd
- Matrix1d
- Matrix2d
- Matrix3d
- Matrix4d
- MatrixXd
- Vector1cf
- Vector2cf
- Vector3cf
- Vector4cf
- VectorXcf
- RowVector1cf
- RowVector2cf
- RowVector3cf
- RowVector4cf
- RowVectorXcf
- Matrix1cf
- Matrix2cf
- Matrix3cf
- Matrix4cf
- MatrixXcf
- Vector1cd
- Vector2cd
- Vector3cd
- Vector4cd
- VectorXcd
- RowVector1cd
- RowVector2cd
- RowVector3cd
- RowVector4cd
- RowVectorXcd
- Matrix1cd
- Matrix2cd
- Matrix3cd
- Matrix4cd
- MatrixXcd
- Array22i
- Array23i
- Array24i
- Array2Xi
- Array32i
- Array33i
- Array34i
- Array3Xi
- Array42i
- Array43i
- Array44i
- Array4Xi
- ArrayX2i
- ArrayX3i
- ArrayX4i
- ArrayXXi
- Array2i
- Array3i
- Array4i
- ArrayXi
- Array22f
- Array23f
- Array24f
- Array2Xf
- Array32f
- Array33f
- Array34f
- Array3Xf
- Array42f
- Array43f
- Array44f
- Array4Xf
- ArrayX2f
- ArrayX3f
- ArrayX4f
- ArrayXXf
- Array2f
- Array3f
- Array4f
- ArrayXf
- Array22d
- Array23d
- Array24d
- Array2Xd
- Array32d
- Array33d
- Array34d
- Array3Xd
- Array42d
- Array43d
- Array44d
- Array4Xd
- ArrayX2d
- ArrayX3d
- ArrayX4d
- ArrayXXd
- Array2d
- Array3d
- Array4d
- ArrayXd
- Array22cf
- Array23cf
- Array24cf
- Array2Xcf
- Array32cf
- Array33cf
- Array34cf
- Array3Xcf
- Array42cf
- Array43cf
- Array44cf
- Array4Xcf
- ArrayX2cf
- ArrayX3cf
- ArrayX4cf
- ArrayXXcf
- Array2cf
- Array3cf
- Array4cf
- ArrayXcf
- Array22cd
- Array23cd
- Array24cd
- Array2Xcd
- Array32cd
- Array33cd
- Array34cd
- Array3Xcd
- Array42cd
- Array43cd
- Array44cd
- Array4Xcd
- ArrayX2cd
- ArrayX3cd
- ArrayX4cd
- ArrayXXcd
- Array2cd
- Array3cd
- Array4cd
- ArrayXcd
-
-ctypedef fused StorageOrder:
- RowMajor
- ColMajor
-
-ctypedef fused MapOptions:
- Aligned
- Unaligned
-
-cdef extern from "eigency_cpp.h" namespace "eigency":
-
- cdef cppclass _1 "1":
- pass
-
- cdef cppclass _2 "2":
- pass
-
- cdef cppclass _3 "3":
- pass
-
- cdef cppclass _4 "4":
- pass
-
- cdef cppclass _5 "5":
- pass
-
- cdef cppclass _6 "6":
- pass
-
- cdef cppclass _7 "7":
- pass
-
- cdef cppclass _8 "8":
- pass
-
- cdef cppclass _9 "9":
- pass
-
- cdef cppclass _10 "10":
- pass
-
- cdef cppclass _11 "11":
- pass
-
- cdef cppclass _12 "12":
- pass
-
- cdef cppclass _13 "13":
- pass
-
- cdef cppclass _14 "14":
- pass
-
- cdef cppclass _15 "15":
- pass
-
- cdef cppclass _16 "16":
- pass
-
- cdef cppclass _17 "17":
- pass
-
- cdef cppclass _18 "18":
- pass
-
- cdef cppclass _19 "19":
- pass
-
- cdef cppclass _20 "20":
- pass
-
- cdef cppclass _21 "21":
- pass
-
- cdef cppclass _22 "22":
- pass
-
- cdef cppclass _23 "23":
- pass
-
- cdef cppclass _24 "24":
- pass
-
- cdef cppclass _25 "25":
- pass
-
- cdef cppclass _26 "26":
- pass
-
- cdef cppclass _27 "27":
- pass
-
- cdef cppclass _28 "28":
- pass
-
- cdef cppclass _29 "29":
- pass
-
- cdef cppclass _30 "30":
- pass
-
- cdef cppclass _31 "31":
- pass
-
- cdef cppclass _32 "32":
- pass
-
- cdef cppclass PlainObjectBase:
- pass
-
- cdef cppclass Map[DenseTypeShort](PlainObjectBase):
- Map() except +
- Map(np.ndarray array) except +
-
- cdef cppclass FlattenedMap[DenseType, dtype, Rows, Cols]:
- FlattenedMap() except +
- FlattenedMap(np.ndarray array) except +
-
- cdef cppclass FlattenedMapWithOrder "eigency::FlattenedMap" [DenseType, dtype, Rows, Cols, StorageOrder]:
- FlattenedMapWithOrder() except +
- FlattenedMapWithOrder(np.ndarray array) except +
-
- cdef cppclass FlattenedMapWithStride "eigency::FlattenedMap" [DenseType, dtype, Rows, Cols, StorageOrder, MapOptions, StrideOuter, StrideInner]:
- FlattenedMapWithStride() except +
- FlattenedMapWithStride(np.ndarray array) except +
-
- cdef np.ndarray ndarray_view(PlainObjectBase &)
- cdef np.ndarray ndarray_copy(PlainObjectBase &)
- cdef np.ndarray ndarray(PlainObjectBase &)
-
-
-cdef extern from "eigency_cpp.h" namespace "Eigen":
-
- cdef cppclass Dynamic:
- pass
-
- cdef cppclass RowMajor:
- pass
-
- cdef cppclass ColMajor:
- pass
-
- cdef cppclass Aligned:
- pass
-
- cdef cppclass Unaligned:
- pass
-
- cdef cppclass Matrix(PlainObjectBase):
- pass
-
- cdef cppclass Array(PlainObjectBase):
- pass
-
- cdef cppclass VectorXd(PlainObjectBase):
- pass
-
- cdef cppclass Vector1i(PlainObjectBase):
- pass
-
- cdef cppclass Vector2i(PlainObjectBase):
- pass
-
- cdef cppclass Vector3i(PlainObjectBase):
- pass
-
- cdef cppclass Vector4i(PlainObjectBase):
- pass
-
- cdef cppclass VectorXi(PlainObjectBase):
- pass
-
- cdef cppclass RowVector1i(PlainObjectBase):
- pass
-
- cdef cppclass RowVector2i(PlainObjectBase):
- pass
-
- cdef cppclass RowVector3i(PlainObjectBase):
- pass
-
- cdef cppclass RowVector4i(PlainObjectBase):
- pass
-
- cdef cppclass RowVectorXi(PlainObjectBase):
- pass
-
- cdef cppclass Matrix1i(PlainObjectBase):
- pass
-
- cdef cppclass Matrix2i(PlainObjectBase):
- pass
-
- cdef cppclass Matrix3i(PlainObjectBase):
- pass
-
- cdef cppclass Matrix4i(PlainObjectBase):
- pass
-
- cdef cppclass MatrixXi(PlainObjectBase):
- pass
-
- cdef cppclass Vector1f(PlainObjectBase):
- pass
-
- cdef cppclass Vector2f(PlainObjectBase):
- pass
-
- cdef cppclass Vector3f(PlainObjectBase):
- pass
-
- cdef cppclass Vector4f(PlainObjectBase):
- pass
-
- cdef cppclass VectorXf(PlainObjectBase):
- pass
-
- cdef cppclass RowVector1f(PlainObjectBase):
- pass
-
- cdef cppclass RowVector2f(PlainObjectBase):
- pass
-
- cdef cppclass RowVector3f(PlainObjectBase):
- pass
-
- cdef cppclass RowVector4f(PlainObjectBase):
- pass
-
- cdef cppclass RowVectorXf(PlainObjectBase):
- pass
-
- cdef cppclass Matrix1f(PlainObjectBase):
- pass
-
- cdef cppclass Matrix2f(PlainObjectBase):
- pass
-
- cdef cppclass Matrix3f(PlainObjectBase):
- pass
-
- cdef cppclass Matrix4f(PlainObjectBase):
- pass
-
- cdef cppclass MatrixXf(PlainObjectBase):
- pass
-
- cdef cppclass Vector1d(PlainObjectBase):
- pass
-
- cdef cppclass Vector2d(PlainObjectBase):
- pass
-
- cdef cppclass Vector3d(PlainObjectBase):
- pass
-
- cdef cppclass Vector4d(PlainObjectBase):
- pass
-
- cdef cppclass VectorXd(PlainObjectBase):
- pass
-
- cdef cppclass RowVector1d(PlainObjectBase):
- pass
-
- cdef cppclass RowVector2d(PlainObjectBase):
- pass
-
- cdef cppclass RowVector3d(PlainObjectBase):
- pass
-
- cdef cppclass RowVector4d(PlainObjectBase):
- pass
-
- cdef cppclass RowVectorXd(PlainObjectBase):
- pass
-
- cdef cppclass Matrix1d(PlainObjectBase):
- pass
-
- cdef cppclass Matrix2d(PlainObjectBase):
- pass
-
- cdef cppclass Matrix3d(PlainObjectBase):
- pass
-
- cdef cppclass Matrix4d(PlainObjectBase):
- pass
-
- cdef cppclass MatrixXd(PlainObjectBase):
- pass
-
- cdef cppclass Vector1cf(PlainObjectBase):
- pass
-
- cdef cppclass Vector2cf(PlainObjectBase):
- pass
-
- cdef cppclass Vector3cf(PlainObjectBase):
- pass
-
- cdef cppclass Vector4cf(PlainObjectBase):
- pass
-
- cdef cppclass VectorXcf(PlainObjectBase):
- pass
-
- cdef cppclass RowVector1cf(PlainObjectBase):
- pass
-
- cdef cppclass RowVector2cf(PlainObjectBase):
- pass
-
- cdef cppclass RowVector3cf(PlainObjectBase):
- pass
-
- cdef cppclass RowVector4cf(PlainObjectBase):
- pass
-
- cdef cppclass RowVectorXcf(PlainObjectBase):
- pass
-
- cdef cppclass Matrix1cf(PlainObjectBase):
- pass
-
- cdef cppclass Matrix2cf(PlainObjectBase):
- pass
-
- cdef cppclass Matrix3cf(PlainObjectBase):
- pass
-
- cdef cppclass Matrix4cf(PlainObjectBase):
- pass
-
- cdef cppclass MatrixXcf(PlainObjectBase):
- pass
-
- cdef cppclass Vector1cd(PlainObjectBase):
- pass
-
- cdef cppclass Vector2cd(PlainObjectBase):
- pass
-
- cdef cppclass Vector3cd(PlainObjectBase):
- pass
-
- cdef cppclass Vector4cd(PlainObjectBase):
- pass
-
- cdef cppclass VectorXcd(PlainObjectBase):
- pass
-
- cdef cppclass RowVector1cd(PlainObjectBase):
- pass
-
- cdef cppclass RowVector2cd(PlainObjectBase):
- pass
-
- cdef cppclass RowVector3cd(PlainObjectBase):
- pass
-
- cdef cppclass RowVector4cd(PlainObjectBase):
- pass
-
- cdef cppclass RowVectorXcd(PlainObjectBase):
- pass
-
- cdef cppclass Matrix1cd(PlainObjectBase):
- pass
-
- cdef cppclass Matrix2cd(PlainObjectBase):
- pass
-
- cdef cppclass Matrix3cd(PlainObjectBase):
- pass
-
- cdef cppclass Matrix4cd(PlainObjectBase):
- pass
-
- cdef cppclass MatrixXcd(PlainObjectBase):
- pass
-
- cdef cppclass Array22i(PlainObjectBase):
- pass
-
- cdef cppclass Array23i(PlainObjectBase):
- pass
-
- cdef cppclass Array24i(PlainObjectBase):
- pass
-
- cdef cppclass Array2Xi(PlainObjectBase):
- pass
-
- cdef cppclass Array32i(PlainObjectBase):
- pass
-
- cdef cppclass Array33i(PlainObjectBase):
- pass
-
- cdef cppclass Array34i(PlainObjectBase):
- pass
-
- cdef cppclass Array3Xi(PlainObjectBase):
- pass
-
- cdef cppclass Array42i(PlainObjectBase):
- pass
-
- cdef cppclass Array43i(PlainObjectBase):
- pass
-
- cdef cppclass Array44i(PlainObjectBase):
- pass
-
- cdef cppclass Array4Xi(PlainObjectBase):
- pass
-
- cdef cppclass ArrayX2i(PlainObjectBase):
- pass
-
- cdef cppclass ArrayX3i(PlainObjectBase):
- pass
-
- cdef cppclass ArrayX4i(PlainObjectBase):
- pass
-
- cdef cppclass ArrayXXi(PlainObjectBase):
- pass
-
- cdef cppclass Array2i(PlainObjectBase):
- pass
-
- cdef cppclass Array3i(PlainObjectBase):
- pass
-
- cdef cppclass Array4i(PlainObjectBase):
- pass
-
- cdef cppclass ArrayXi(PlainObjectBase):
- pass
-
- cdef cppclass Array22f(PlainObjectBase):
- pass
-
- cdef cppclass Array23f(PlainObjectBase):
- pass
-
- cdef cppclass Array24f(PlainObjectBase):
- pass
-
- cdef cppclass Array2Xf(PlainObjectBase):
- pass
-
- cdef cppclass Array32f(PlainObjectBase):
- pass
-
- cdef cppclass Array33f(PlainObjectBase):
- pass
-
- cdef cppclass Array34f(PlainObjectBase):
- pass
-
- cdef cppclass Array3Xf(PlainObjectBase):
- pass
-
- cdef cppclass Array42f(PlainObjectBase):
- pass
-
- cdef cppclass Array43f(PlainObjectBase):
- pass
-
- cdef cppclass Array44f(PlainObjectBase):
- pass
-
- cdef cppclass Array4Xf(PlainObjectBase):
- pass
-
- cdef cppclass ArrayX2f(PlainObjectBase):
- pass
-
- cdef cppclass ArrayX3f(PlainObjectBase):
- pass
-
- cdef cppclass ArrayX4f(PlainObjectBase):
- pass
-
- cdef cppclass ArrayXXf(PlainObjectBase):
- pass
-
- cdef cppclass Array2f(PlainObjectBase):
- pass
-
- cdef cppclass Array3f(PlainObjectBase):
- pass
-
- cdef cppclass Array4f(PlainObjectBase):
- pass
-
- cdef cppclass ArrayXf(PlainObjectBase):
- pass
-
- cdef cppclass Array22d(PlainObjectBase):
- pass
-
- cdef cppclass Array23d(PlainObjectBase):
- pass
-
- cdef cppclass Array24d(PlainObjectBase):
- pass
-
- cdef cppclass Array2Xd(PlainObjectBase):
- pass
-
- cdef cppclass Array32d(PlainObjectBase):
- pass
-
- cdef cppclass Array33d(PlainObjectBase):
- pass
-
- cdef cppclass Array34d(PlainObjectBase):
- pass
-
- cdef cppclass Array3Xd(PlainObjectBase):
- pass
-
- cdef cppclass Array42d(PlainObjectBase):
- pass
-
- cdef cppclass Array43d(PlainObjectBase):
- pass
-
- cdef cppclass Array44d(PlainObjectBase):
- pass
-
- cdef cppclass Array4Xd(PlainObjectBase):
- pass
-
- cdef cppclass ArrayX2d(PlainObjectBase):
- pass
-
- cdef cppclass ArrayX3d(PlainObjectBase):
- pass
-
- cdef cppclass ArrayX4d(PlainObjectBase):
- pass
-
- cdef cppclass ArrayXXd(PlainObjectBase):
- pass
-
- cdef cppclass Array2d(PlainObjectBase):
- pass
-
- cdef cppclass Array3d(PlainObjectBase):
- pass
-
- cdef cppclass Array4d(PlainObjectBase):
- pass
-
- cdef cppclass ArrayXd(PlainObjectBase):
- pass
-
- cdef cppclass Array22cf(PlainObjectBase):
- pass
-
- cdef cppclass Array23cf(PlainObjectBase):
- pass
-
- cdef cppclass Array24cf(PlainObjectBase):
- pass
-
- cdef cppclass Array2Xcf(PlainObjectBase):
- pass
-
- cdef cppclass Array32cf(PlainObjectBase):
- pass
-
- cdef cppclass Array33cf(PlainObjectBase):
- pass
-
- cdef cppclass Array34cf(PlainObjectBase):
- pass
-
- cdef cppclass Array3Xcf(PlainObjectBase):
- pass
-
- cdef cppclass Array42cf(PlainObjectBase):
- pass
-
- cdef cppclass Array43cf(PlainObjectBase):
- pass
-
- cdef cppclass Array44cf(PlainObjectBase):
- pass
-
- cdef cppclass Array4Xcf(PlainObjectBase):
- pass
-
- cdef cppclass ArrayX2cf(PlainObjectBase):
- pass
-
- cdef cppclass ArrayX3cf(PlainObjectBase):
- pass
-
- cdef cppclass ArrayX4cf(PlainObjectBase):
- pass
-
- cdef cppclass ArrayXXcf(PlainObjectBase):
- pass
-
- cdef cppclass Array2cf(PlainObjectBase):
- pass
-
- cdef cppclass Array3cf(PlainObjectBase):
- pass
-
- cdef cppclass Array4cf(PlainObjectBase):
- pass
-
- cdef cppclass ArrayXcf(PlainObjectBase):
- pass
-
- cdef cppclass Array22cd(PlainObjectBase):
- pass
-
- cdef cppclass Array23cd(PlainObjectBase):
- pass
-
- cdef cppclass Array24cd(PlainObjectBase):
- pass
-
- cdef cppclass Array2Xcd(PlainObjectBase):
- pass
-
- cdef cppclass Array32cd(PlainObjectBase):
- pass
-
- cdef cppclass Array33cd(PlainObjectBase):
- pass
-
- cdef cppclass Array34cd(PlainObjectBase):
- pass
-
- cdef cppclass Array3Xcd(PlainObjectBase):
- pass
-
- cdef cppclass Array42cd(PlainObjectBase):
- pass
-
- cdef cppclass Array43cd(PlainObjectBase):
- pass
-
- cdef cppclass Array44cd(PlainObjectBase):
- pass
-
- cdef cppclass Array4Xcd(PlainObjectBase):
- pass
-
- cdef cppclass ArrayX2cd(PlainObjectBase):
- pass
-
- cdef cppclass ArrayX3cd(PlainObjectBase):
- pass
-
- cdef cppclass ArrayX4cd(PlainObjectBase):
- pass
-
- cdef cppclass ArrayXXcd(PlainObjectBase):
- pass
-
- cdef cppclass Array2cd(PlainObjectBase):
- pass
-
- cdef cppclass Array3cd(PlainObjectBase):
- pass
-
- cdef cppclass Array4cd(PlainObjectBase):
- pass
-
- cdef cppclass ArrayXcd(PlainObjectBase):
- pass
-
-
diff --git a/cython/gtsam_eigency/core.pyx b/cython/gtsam_eigency/core.pyx
deleted file mode 100644
index 8b1378917..000000000
--- a/cython/gtsam_eigency/core.pyx
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/cython/gtsam_eigency/eigency_cpp.h b/cython/gtsam_eigency/eigency_cpp.h
deleted file mode 100644
index ce303182e..000000000
--- a/cython/gtsam_eigency/eigency_cpp.h
+++ /dev/null
@@ -1,504 +0,0 @@
-#include
-
-#include
-#include
-#include
-
-typedef ::std::complex< double > __pyx_t_double_complex;
-typedef ::std::complex< float > __pyx_t_float_complex;
-
-#include "conversions_api.h"
-
-#ifndef EIGENCY_CPP
-#define EIGENCY_CPP
-
-namespace eigency {
-
-template
-inline PyArrayObject *_ndarray_view(Scalar *, long rows, long cols, bool is_row_major, long outer_stride=0, long inner_stride=0);
-template
-inline PyArrayObject *_ndarray_copy(const Scalar *, long rows, long cols, bool is_row_major, long outer_stride=0, long inner_stride=0);
-
-// Strides:
-// Eigen and numpy differ in their way of dealing with strides. Eigen has the concept of outer and
-// inner strides, which are dependent on whether the array/matrix is row-major of column-major:
-// Inner stride: denotes the offset between succeeding elements in each row (row-major) or column (column-major).
-// Outer stride: denotes the offset between succeeding rows (row-major) or succeeding columns (column-major).
-// In contrast, numpy's stride is simply a measure of how fast each dimension should be incremented.
-// Consequently, a switch in numpy storage order from row-major to column-major involves a switch
-// in strides, while it does not affect the stride in Eigen.
-template<>
-inline PyArrayObject *_ndarray_view(double *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major) {
- // Eigen row-major mode: row_stride=outer_stride, and col_stride=inner_stride
- // If no stride is given, the row_stride is set to the number of columns.
- return ndarray_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- } else {
- // Eigen column-major mode: row_stride=outer_stride, and col_stride=inner_stride
- // If no stride is given, the cow_stride is set to the number of rows.
- return ndarray_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
- }
-}
-template<>
-inline PyArrayObject *_ndarray_copy(const double *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_copy_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_copy_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-
-template<>
-inline PyArrayObject *_ndarray_view(float *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-template<>
-inline PyArrayObject *_ndarray_copy(const float *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_copy_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_copy_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-
-template<>
-inline PyArrayObject *_ndarray_view(long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_long_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_long_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-template<>
-inline PyArrayObject *_ndarray_copy(const long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_copy_long_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_copy_long_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-
-template<>
-inline PyArrayObject *_ndarray_view(unsigned long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_ulong_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_ulong_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-template<>
-inline PyArrayObject *_ndarray_copy(const unsigned long *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_copy_ulong_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_copy_ulong_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-
-template<>
-inline PyArrayObject *_ndarray_view(int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_int_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_int_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-template<>
-inline PyArrayObject *_ndarray_copy(const int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_copy_int_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_copy_int_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-
-template<>
-inline PyArrayObject *_ndarray_view(unsigned int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_uint_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_uint_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-template<>
-inline PyArrayObject *_ndarray_copy(const unsigned int *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_copy_uint_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_copy_uint_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-
-template<>
-inline PyArrayObject *_ndarray_view(short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_short_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_short_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-template<>
-inline PyArrayObject *_ndarray_copy(const short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_copy_short_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_copy_short_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-
-template<>
-inline PyArrayObject *_ndarray_view(unsigned short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_ushort_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_ushort_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-template<>
-inline PyArrayObject *_ndarray_copy(const unsigned short *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_copy_ushort_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_copy_ushort_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-
-template<>
-inline PyArrayObject *_ndarray_view(signed char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_schar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_schar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-template<>
-inline PyArrayObject *_ndarray_copy(const signed char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_copy_schar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_copy_schar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-
-template<>
-inline PyArrayObject *_ndarray_view(unsigned char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_uchar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_uchar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-template<>
-inline PyArrayObject *_ndarray_copy(const unsigned char *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_copy_uchar_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_copy_uchar_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-
-template<>
-inline PyArrayObject *_ndarray_view >(std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_complex_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_complex_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-template<>
-inline PyArrayObject *_ndarray_copy >(const std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_copy_complex_double_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_copy_complex_double_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-
-template<>
-inline PyArrayObject *_ndarray_view >(std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_complex_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_complex_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-template<>
-inline PyArrayObject *_ndarray_copy >(const std::complex *data, long rows, long cols, bool is_row_major, long outer_stride, long inner_stride) {
- if (is_row_major)
- return ndarray_copy_complex_float_C(data, rows, cols, outer_stride>0?outer_stride:cols, inner_stride>0?inner_stride:1);
- else
- return ndarray_copy_complex_float_F(data, rows, cols, inner_stride>0?inner_stride:1, outer_stride>0?outer_stride:rows);
-}
-
-
-template
-inline PyArrayObject *ndarray(Eigen::PlainObjectBase &m) {
- import_gtsam_eigency__conversions();
- return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor);
-}
-// If C++11 is available, check if m is an r-value reference, in
-// which case a copy should always be made
-#if __cplusplus >= 201103L
-template
-inline PyArrayObject *ndarray(Eigen::PlainObjectBase &&m) {
- import_gtsam_eigency__conversions();
- return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor);
-}
-#endif
-template
-inline PyArrayObject *ndarray(const Eigen::PlainObjectBase &m) {
- import_gtsam_eigency__conversions();
- return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor);
-}
-template
-inline PyArrayObject *ndarray_view(Eigen::PlainObjectBase &m) {
- import_gtsam_eigency__conversions();
- return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor);
-}
-template
-inline PyArrayObject *ndarray_view(const Eigen::PlainObjectBase &m) {
- import_gtsam_eigency__conversions();
- return _ndarray_view(const_cast(m.data()), m.rows(), m.cols(), m.IsRowMajor);
-}
-template
-inline PyArrayObject *ndarray_copy(const Eigen::PlainObjectBase &m) {
- import_gtsam_eigency__conversions();
- return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor);
-}
-
-template
-inline PyArrayObject *ndarray(Eigen::Map &m) {
- import_gtsam_eigency__conversions();
- return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride());
-}
-template
-inline PyArrayObject *ndarray(const Eigen::Map &m) {
- import_gtsam_eigency__conversions();
- // Since this is a map, we assume that ownership is correctly taken care
- // of, and we avoid taking a copy
- return _ndarray_view(const_cast(m.data()), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride());
-}
-template
-inline PyArrayObject *ndarray_view(Eigen::Map &m) {
- import_gtsam_eigency__conversions();
- return _ndarray_view(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride());
-}
-template
-inline PyArrayObject *ndarray_view(const Eigen::Map &m) {
- import_gtsam_eigency__conversions();
- return _ndarray_view(const_cast(m.data()), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride());
-}
-template
-inline PyArrayObject *ndarray_copy(const Eigen::Map &m) {
- import_gtsam_eigency__conversions();
- return _ndarray_copy(m.data(), m.rows(), m.cols(), m.IsRowMajor, m.outerStride(), m.innerStride());
-}
-
-
-template >
-class MapBase: public Eigen::Map {
-public:
- typedef Eigen::Map Base;
- typedef typename Base::Scalar Scalar;
-
- MapBase(Scalar* data,
- long rows,
- long cols,
- _StrideType stride=_StrideType())
- : Base(data,
- // If both dimensions are dynamic or dimensions match, accept dimensions as they are
- ((Base::RowsAtCompileTime==Eigen::Dynamic && Base::ColsAtCompileTime==Eigen::Dynamic) ||
- (Base::RowsAtCompileTime==rows && Base::ColsAtCompileTime==cols))
- ? rows
- // otherwise, test if swapping them makes them fit
- : ((Base::RowsAtCompileTime==cols || Base::ColsAtCompileTime==rows)
- ? cols
- : rows),
- ((Base::RowsAtCompileTime==Eigen::Dynamic && Base::ColsAtCompileTime==Eigen::Dynamic) ||
- (Base::RowsAtCompileTime==rows && Base::ColsAtCompileTime==cols))
- ? cols
- : ((Base::RowsAtCompileTime==cols || Base::ColsAtCompileTime==rows)
- ? rows
- : cols),
- stride
- ) {}
-
- MapBase &operator=(const MatrixType &other) {
- Base::operator=(other);
- return *this;
- }
-
- virtual ~MapBase() { }
-};
-
-
-template class EigencyDenseBase,
- typename Scalar,
- int _Rows, int _Cols,
- int _Options = Eigen::AutoAlign |
-#if defined(__GNUC__) && __GNUC__==3 && __GNUC_MINOR__==4
- // workaround a bug in at least gcc 3.4.6
- // the innermost ?: ternary operator is misparsed. We write it slightly
- // differently and this makes gcc 3.4.6 happy, but it's ugly.
- // The error would only show up with EIGEN_DEFAULT_TO_ROW_MAJOR is defined
- // (when EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION is RowMajor)
- ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor
-// EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION contains explicit namespace since Eigen 3.1.19
-#if EIGEN_VERSION_AT_LEAST(3,2,90)
- : !(_Cols==1 && _Rows!=1) ? EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
-#else
- : !(_Cols==1 && _Rows!=1) ? Eigen::EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION
-#endif
- : ColMajor ),
-#else
- ( (_Rows==1 && _Cols!=1) ? Eigen::RowMajor
- : (_Cols==1 && _Rows!=1) ? Eigen::ColMajor
-// EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION contains explicit namespace since Eigen 3.1.19
-#if EIGEN_VERSION_AT_LEAST(3,2,90)
- : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
-#else
- : Eigen::EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
-#endif
-#endif
- int _MapOptions = Eigen::Unaligned,
- int _StrideOuter=0, int _StrideInner=0,
- int _MaxRows = _Rows,
- int _MaxCols = _Cols>
-class FlattenedMap: public MapBase, _MapOptions, Eigen::Stride<_StrideOuter, _StrideInner> > {
-public:
- typedef MapBase, _MapOptions, Eigen::Stride<_StrideOuter, _StrideInner> > Base;
-
- FlattenedMap()
- : Base(NULL, 0, 0),
- object_(NULL) {}
-
- FlattenedMap(Scalar *data, long rows, long cols, long outer_stride=0, long inner_stride=0)
- : Base(data, rows, cols,
- Eigen::Stride<_StrideOuter, _StrideInner>(outer_stride, inner_stride)),
- object_(NULL) {
- }
-
- FlattenedMap(PyArrayObject *object)
- : Base((Scalar *)((PyArrayObject*)object)->data,
- // : Base(_from_numpy((PyArrayObject*)object),
- (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[0] : 1,
- (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[1] : ((PyArrayObject*)object)->dimensions[0],
- Eigen::Stride<_StrideOuter, _StrideInner>(_StrideOuter != Eigen::Dynamic ? _StrideOuter : (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[0] : 1,
- _StrideInner != Eigen::Dynamic ? _StrideInner : (((PyArrayObject*)object)->nd == 2) ? ((PyArrayObject*)object)->dimensions[1] : ((PyArrayObject*)object)->dimensions[0])),
- object_(object) {
-
- if (((PyObject*)object != Py_None) && !PyArray_ISONESEGMENT(object))
- throw std::invalid_argument("Numpy array must be a in one contiguous segment to be able to be transferred to a Eigen Map.");
-
- Py_XINCREF(object_);
- }
- FlattenedMap &operator=(const FlattenedMap &other) {
- if (other.object_) {
- new (this) FlattenedMap(other.object_);
- } else {
- // Replace the memory that we point to (not a memory allocation)
- new (this) FlattenedMap(const_cast(other.data()),
- other.rows(),
- other.cols(),
- other.outerStride(),
- other.innerStride());
- }
-
- return *this;
- }
-
- operator Base() const {
- return static_cast(*this);
- }
-
- operator Base&() const {
- return static_cast(*this);
- }
-
- operator EigencyDenseBase() const {
- return EigencyDenseBase(static_cast(*this));
- }
-
- virtual ~FlattenedMap() {
- Py_XDECREF(object_);
- }
-
-private:
- PyArrayObject * const object_;
-};
-
-
-template
-class Map: public MapBase {
-public:
- typedef MapBase Base;
- typedef typename MatrixType::Scalar Scalar;
-
- enum {
- RowsAtCompileTime = Base::Base::RowsAtCompileTime,
- ColsAtCompileTime = Base::Base::ColsAtCompileTime
- };
-
- Map()
- : Base(NULL,
- (RowsAtCompileTime == Eigen::Dynamic) ? 0 : RowsAtCompileTime,
- (ColsAtCompileTime == Eigen::Dynamic) ? 0 : ColsAtCompileTime),
- object_(NULL) {
- }
-
- Map(Scalar *data, long rows, long cols)
- : Base(data, rows, cols),
- object_(NULL) {}
-
- Map(PyArrayObject *object)
- : Base((PyObject*)object == Py_None? NULL: (Scalar *)object->data,
- // ROW: If array is in row-major order, transpose (see README)
- (PyObject*)object == Py_None? 0 :
- (!PyArray_IS_F_CONTIGUOUS(object)
- ? ((object->nd == 1)
- ? 1 // ROW: If 1D row-major numpy array, set to 1 (row vector)
- : object->dimensions[1])
- : object->dimensions[0]),
- // COLUMN: If array is in row-major order: transpose (see README)
- (PyObject*)object == Py_None? 0 :
- (!PyArray_IS_F_CONTIGUOUS(object)
- ? object->dimensions[0]
- : ((object->nd == 1)
- ? 1 // COLUMN: If 1D col-major numpy array, set to length (column vector)
- : object->dimensions[1]))),
- object_(object) {
-
- if (((PyObject*)object != Py_None) && !PyArray_ISONESEGMENT(object))
- throw std::invalid_argument("Numpy array must be a in one contiguous segment to be able to be transferred to a Eigen Map.");
- Py_XINCREF(object_);
- }
-
- Map &operator=(const Map &other) {
- if (other.object_) {
- new (this) Map(other.object_);
- } else {
- // Replace the memory that we point to (not a memory allocation)
- new (this) Map(const_cast(other.data()),
- other.rows(),
- other.cols());
- }
-
- return *this;
- }
-
- Map &operator=(const MatrixType &other) {
- MapBase::operator=(other);
- return *this;
- }
-
- operator Base() const {
- return static_cast(*this);
- }
-
- operator Base&() const {
- return static_cast(*this);
- }
-
- operator MatrixType() const {
- return MatrixType(static_cast(*this));
- }
-
- virtual ~Map() {
- Py_XDECREF(object_);
- }
-
-private:
- PyArrayObject * const object_;
-};
-
-
-}
-
-#endif
-
-
-
diff --git a/cython/requirements.txt b/cython/requirements.txt
deleted file mode 100644
index cd77b097d..000000000
--- a/cython/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-Cython>=0.25.2
-backports_abc>=0.5
-numpy>=1.12.0
diff --git a/debian/README.md b/debian/README.md
deleted file mode 100644
index 74eb351cd..000000000
--- a/debian/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# How to build a GTSAM debian package
-
-To use the ``debuild`` command, install the ``devscripts`` package
-
- sudo apt install devscripts
-
-Change into the gtsam directory, then run:
-
- debuild -us -uc -j4
-
-Adjust the ``-j4`` depending on how many CPUs you want to build on in
-parallel.
diff --git a/debian/changelog b/debian/changelog
deleted file mode 100644
index ef5d5ab97..000000000
--- a/debian/changelog
+++ /dev/null
@@ -1,5 +0,0 @@
-gtsam (4.0.0-1berndpfrommer) bionic; urgency=medium
-
- * initial release
-
- -- Bernd Pfrommer Wed, 18 Jul 2018 20:36:44 -0400
diff --git a/debian/compat b/debian/compat
deleted file mode 100644
index ec635144f..000000000
--- a/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-9
diff --git a/debian/control b/debian/control
deleted file mode 100644
index 9b3ae5308..000000000
--- a/debian/control
+++ /dev/null
@@ -1,15 +0,0 @@
-Source: gtsam
-Section: libs
-Priority: optional
-Maintainer: Frank Dellaert
-Uploaders: Jose Luis Blanco Claraco , Bernd Pfrommer
-Build-Depends: cmake, libboost-all-dev (>= 1.58), libeigen3-dev, libtbb-dev, debhelper (>=9)
-Standards-Version: 3.9.7
-Homepage: https://github.com/borglab/gtsam
-Vcs-Browser: https://github.com/borglab/gtsam
-
-Package: libgtsam-dev
-Architecture: any
-Depends: ${shlibs:Depends}, ${misc:Depends}, libboost-serialization-dev, libboost-system-dev, libboost-filesystem-dev, libboost-thread-dev, libboost-program-options-dev, libboost-date-time-dev, libboost-timer-dev, libboost-chrono-dev, libboost-regex-dev
-Description: Georgia Tech Smoothing and Mapping Library
- gtsam: Georgia Tech Smoothing and Mapping library for SLAM type applications
diff --git a/debian/copyright b/debian/copyright
deleted file mode 100644
index c2f41d83d..000000000
--- a/debian/copyright
+++ /dev/null
@@ -1,15 +0,0 @@
-Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-Upstream-Name: gtsam
-Source: https://bitbucket.org/gtborg/gtsam.git
-
-Files: *
-Copyright: 2017, Frank Dellaert
-License: BSD
-
-Files: gtsam/3rdparty/CCOLAMD/*
-Copyright: 2005-2011, Univ. of Florida. Authors: Timothy A. Davis, Sivasankaran Rajamanickam, and Stefan Larimore. Closely based on COLAMD by Davis, Stefan Larimore, in collaboration with Esmond Ng, and John Gilbert. http://www.cise.ufl.edu/research/sparse
-License: GNU LESSER GENERAL PUBLIC LICENSE
-
-Files: gtsam/3rdparty/Eigen/*
-Copyright: 2017, Multiple Authors
-License: MPL2
diff --git a/debian/rules b/debian/rules
deleted file mode 100755
index fab798f6e..000000000
--- a/debian/rules
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/make -f
-# See debhelper(7) (uncomment to enable)
-# output every command that modifies files on the build system.
-export DH_VERBOSE = 1
-
-# Makefile target name for running unit tests:
-GTSAM_TEST_TARGET = check
-
-# see FEATURE AREAS in dpkg-buildflags(1)
-#export DEB_BUILD_MAINT_OPTIONS = hardening=+all
-
-# see ENVIRONMENT in dpkg-buildflags(1)
-# package maintainers to append CFLAGS
-#export DEB_CFLAGS_MAINT_APPEND = -Wall -pedantic
-# package maintainers to append LDFLAGS
-#export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed
-
-%:
- dh $@ --parallel
-
-# dh_make generated override targets
-# This is example for Cmake (See https://bugs.debian.org/641051 )
-override_dh_auto_configure:
- dh_auto_configure -- -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DGTSAM_BUILD_TESTS=ON -DGTSAM_BUILD_WRAP=OFF -DGTSAM_BUILD_DOCS=OFF -DGTSAM_INSTALL_CPPUNITLITE=OFF -DGTSAM_INSTALL_GEOGRAPHICLIB=OFF -DGTSAM_BUILD_TYPE_POSTFIXES=OFF -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF
-
-override_dh_auto_test-arch:
- # Tests for arch-dependent :
- echo "[override_dh_auto_test-arch]"
- dh_auto_build -O--buildsystem=cmake -- $(GTSAM_TEST_TARGET)
diff --git a/debian/source/format b/debian/source/format
deleted file mode 100644
index 163aaf8d8..000000000
--- a/debian/source/format
+++ /dev/null
@@ -1 +0,0 @@
-3.0 (quilt)
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index d5c969a8a..fd7f4e5f6 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -1188,7 +1188,7 @@ USE_MATHJAX = YES
# MathJax, but it is strongly recommended to install a local copy of MathJax
# before deployment.
-MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+MATHJAX_RELPATH = https://cdn.mathjax.org/mathjax/latest
# The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension
# names that should be enabled during MathJax rendering.
diff --git a/doc/gtsam-coordinate-frames.lyx b/doc/gtsam-coordinate-frames.lyx
index 33d0dd977..cfb44696b 100644
--- a/doc/gtsam-coordinate-frames.lyx
+++ b/doc/gtsam-coordinate-frames.lyx
@@ -2291,15 +2291,11 @@ uncalibration
used in the residual).
\end_layout
-\begin_layout Standard
-\begin_inset Note Note
-status collapsed
-
\begin_layout Section
Noise models of prior factors
\end_layout
-\begin_layout Plain Layout
+\begin_layout Standard
The simplest way to describe noise models is by an example.
Let's take a prior factor on a 3D pose
\begin_inset Formula $x\in\SE 3$
@@ -2353,7 +2349,7 @@ e\left(x\right)=\norm{h\left(x\right)}_{\Sigma}^{2}=h\left(x\right)^{\t}\Sigma^{
useful answer out quickly ]
\end_layout
-\begin_layout Plain Layout
+\begin_layout Standard
The density induced by a noise model on the prior factor is Gaussian in
the tangent space about the linearization point.
Suppose that the pose is linearized at
@@ -2431,7 +2427,7 @@ Here we see that the update
.
\end_layout
-\begin_layout Plain Layout
+\begin_layout Standard
This means that to draw random pose samples, we actually draw random samples
of
\begin_inset Formula $\delta x$
@@ -2456,7 +2452,7 @@ This means that to draw random pose samples, we actually draw random samples
Noise models of between factors
\end_layout
-\begin_layout Plain Layout
+\begin_layout Standard
The noise model of a BetweenFactor is a bit more complicated.
The unwhitened error is
\begin_inset Formula
@@ -2516,11 +2512,6 @@ e\left(\delta x_{1}\right) & \approx\norm{\log\left(z^{-1}\left(x_{1}\exp\delta
\end_inset
-\end_layout
-
-\end_inset
-
-
\end_layout
\end_body
diff --git a/doc/gtsam-coordinate-frames.pdf b/doc/gtsam-coordinate-frames.pdf
index 3613ef0ac..77910b4cf 100644
Binary files a/doc/gtsam-coordinate-frames.pdf and b/doc/gtsam-coordinate-frames.pdf differ
diff --git a/doc/robust.pdf b/doc/robust.pdf
new file mode 100644
index 000000000..67b853f44
Binary files /dev/null and b/doc/robust.pdf differ
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 000000000..0c136f94c
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,21 @@
+# Instructions
+
+Build all docker images, in order:
+
+```bash
+(cd ubuntu-boost-tbb && ./build.sh)
+(cd ubuntu-gtsam && ./build.sh)
+(cd ubuntu-gtsam-python && ./build.sh)
+(cd ubuntu-gtsam-python-vnc && ./build.sh)
+```
+
+Then launch with:
+
+ docker run -p 5900:5900 dellaert/ubuntu-gtsam-python-vnc:bionic
+
+Then open a remote VNC X client, for example:
+
+ sudo apt-get install tigervnc-viewer
+ xtigervncviewer :5900
+
+
diff --git a/docker/ubuntu-boost-tbb-eigen3/Dockerfile b/docker/ubuntu-boost-tbb-eigen3/Dockerfile
deleted file mode 100644
index 33aa1ab96..000000000
--- a/docker/ubuntu-boost-tbb-eigen3/Dockerfile
+++ /dev/null
@@ -1,18 +0,0 @@
-# Get the base Ubuntu image from Docker Hub
-FROM ubuntu:bionic
-
-# Update apps on the base image
-RUN apt-get -y update && apt-get install -y
-
-# Install C++
-RUN apt-get -y install build-essential
-
-# Install boost and cmake
-RUN apt-get -y install libboost-all-dev cmake
-
-# Install TBB
-RUN apt-get -y install libtbb-dev
-
-# Install latest Eigen
-RUN apt-get install -y libeigen3-dev
-
diff --git a/docker/ubuntu-boost-tbb/Dockerfile b/docker/ubuntu-boost-tbb/Dockerfile
new file mode 100644
index 000000000..9f6eea3b8
--- /dev/null
+++ b/docker/ubuntu-boost-tbb/Dockerfile
@@ -0,0 +1,19 @@
+# Basic Ubuntu 18.04 image with Boost and TBB installed. To be used for building further downstream packages.
+
+# Get the base Ubuntu image from Docker Hub
+FROM ubuntu:bionic
+
+# Disable GUI prompts
+ENV DEBIAN_FRONTEND noninteractive
+
+# Update apps on the base image
+RUN apt-get -y update && apt-get -y install
+
+# Install C++
+RUN apt-get -y install build-essential apt-utils
+
+# Install boost and cmake
+RUN apt-get -y install libboost-all-dev cmake
+
+# Install TBB
+RUN apt-get -y install libtbb-dev
diff --git a/docker/ubuntu-boost-tbb/build.sh b/docker/ubuntu-boost-tbb/build.sh
new file mode 100755
index 000000000..2dac4c3db
--- /dev/null
+++ b/docker/ubuntu-boost-tbb/build.sh
@@ -0,0 +1,3 @@
+# Build command for Docker image
+# TODO(dellaert): use docker compose and/or cmake
+docker build --no-cache -t dellaert/ubuntu-boost-tbb:bionic .
diff --git a/docker/ubuntu-gtsam-python-vnc/Dockerfile b/docker/ubuntu-gtsam-python-vnc/Dockerfile
new file mode 100644
index 000000000..61ecd9b9a
--- /dev/null
+++ b/docker/ubuntu-gtsam-python-vnc/Dockerfile
@@ -0,0 +1,20 @@
+# This GTSAM image connects to the host X-server via VNC to provide a Graphical User Interface for interaction.
+
+# Get the base Ubuntu/GTSAM image from Docker Hub
+FROM dellaert/ubuntu-gtsam-python:bionic
+
+# Things needed to get a python GUI
+ENV DEBIAN_FRONTEND noninteractive
+RUN apt install -y python3-tk
+RUN python3 -m pip install matplotlib
+
+# Install a VNC X-server, Frame buffer, and windows manager
+RUN apt install -y x11vnc xvfb fluxbox
+
+# Finally, install wmctrl needed for bootstrap script
+RUN apt install -y wmctrl
+
+# Copy bootstrap script and make sure it runs
+COPY bootstrap.sh /
+
+CMD '/bootstrap.sh'
diff --git a/docker/ubuntu-gtsam-python-vnc/bootstrap.sh b/docker/ubuntu-gtsam-python-vnc/bootstrap.sh
new file mode 100755
index 000000000..21356138f
--- /dev/null
+++ b/docker/ubuntu-gtsam-python-vnc/bootstrap.sh
@@ -0,0 +1,111 @@
+#!/bin/bash
+
+# Based on: http://www.richud.com/wiki/Ubuntu_Fluxbox_GUI_with_x11vnc_and_Xvfb
+
+main() {
+ log_i "Starting xvfb virtual display..."
+ launch_xvfb
+ log_i "Starting window manager..."
+ launch_window_manager
+ log_i "Starting VNC server..."
+ run_vnc_server
+}
+
+launch_xvfb() {
+ local xvfbLockFilePath="/tmp/.X1-lock"
+ if [ -f "${xvfbLockFilePath}" ]
+ then
+ log_i "Removing xvfb lock file '${xvfbLockFilePath}'..."
+ if ! rm -v "${xvfbLockFilePath}"
+ then
+ log_e "Failed to remove xvfb lock file"
+ exit 1
+ fi
+ fi
+
+ # Set defaults if the user did not specify envs.
+ export DISPLAY=${XVFB_DISPLAY:-:1}
+ local screen=${XVFB_SCREEN:-0}
+ local resolution=${XVFB_RESOLUTION:-1280x960x24}
+ local timeout=${XVFB_TIMEOUT:-5}
+
+ # Start and wait for either Xvfb to be fully up or we hit the timeout.
+ Xvfb ${DISPLAY} -screen ${screen} ${resolution} &
+ local loopCount=0
+ until xdpyinfo -display ${DISPLAY} > /dev/null 2>&1
+ do
+ loopCount=$((loopCount+1))
+ sleep 1
+ if [ ${loopCount} -gt ${timeout} ]
+ then
+ log_e "xvfb failed to start"
+ exit 1
+ fi
+ done
+}
+
+launch_window_manager() {
+ local timeout=${XVFB_TIMEOUT:-5}
+
+ # Start and wait for either fluxbox to be fully up or we hit the timeout.
+ fluxbox &
+ local loopCount=0
+ until wmctrl -m > /dev/null 2>&1
+ do
+ loopCount=$((loopCount+1))
+ sleep 1
+ if [ ${loopCount} -gt ${timeout} ]
+ then
+ log_e "fluxbox failed to start"
+ exit 1
+ fi
+ done
+}
+
+run_vnc_server() {
+ local passwordArgument='-nopw'
+
+ if [ -n "${VNC_SERVER_PASSWORD}" ]
+ then
+ local passwordFilePath="${HOME}/.x11vnc.pass"
+ if ! x11vnc -storepasswd "${VNC_SERVER_PASSWORD}" "${passwordFilePath}"
+ then
+ log_e "Failed to store x11vnc password"
+ exit 1
+ fi
+        passwordArgument="-rfbauth ${passwordFilePath}"
+ log_i "The VNC server will ask for a password"
+ else
+ log_w "The VNC server will NOT ask for a password"
+ fi
+
+ x11vnc -ncache 10 -ncache_cr -display ${DISPLAY} -forever ${passwordArgument} &
+ wait $!
+}
+
+log_i() {
+ log "[INFO] ${@}"
+}
+
+log_w() {
+ log "[WARN] ${@}"
+}
+
+log_e() {
+ log "[ERROR] ${@}"
+}
+
+log() {
+ echo "[$(date '+%Y-%m-%d %H:%M:%S')] ${@}"
+}
+
+control_c() {
+ echo ""
+ exit
+}
+
+trap control_c SIGINT SIGTERM SIGHUP
+
+main
+
+exit
diff --git a/docker/ubuntu-gtsam-python-vnc/build.sh b/docker/ubuntu-gtsam-python-vnc/build.sh
new file mode 100755
index 000000000..8d280252f
--- /dev/null
+++ b/docker/ubuntu-gtsam-python-vnc/build.sh
@@ -0,0 +1,4 @@
+# Build command for Docker image
+# TODO(dellaert): use docker compose and/or cmake
+# Needs to be run in docker/ubuntu-gtsam-python-vnc directory
+docker build -t dellaert/ubuntu-gtsam-python-vnc:bionic .
diff --git a/docker/ubuntu-gtsam-python-vnc/vnc.sh b/docker/ubuntu-gtsam-python-vnc/vnc.sh
new file mode 100755
index 000000000..c0ab692c6
--- /dev/null
+++ b/docker/ubuntu-gtsam-python-vnc/vnc.sh
@@ -0,0 +1,5 @@
+# After running this script, connect VNC client to 0.0.0.0:5900
+docker run -it \
+ --workdir="/usr/src/gtsam" \
+ -p 5900:5900 \
+ dellaert/ubuntu-gtsam-python-vnc:bionic
\ No newline at end of file
diff --git a/docker/ubuntu-gtsam-python/Dockerfile b/docker/ubuntu-gtsam-python/Dockerfile
new file mode 100644
index 000000000..ce5d8fdca
--- /dev/null
+++ b/docker/ubuntu-gtsam-python/Dockerfile
@@ -0,0 +1,31 @@
+# GTSAM Ubuntu image with Python wrapper support.
+
+# Get the base Ubuntu/GTSAM image from Docker Hub
+FROM dellaert/ubuntu-gtsam:bionic
+
+# Install pip
+RUN apt-get install -y python3-pip python3-dev
+
+# Install python wrapper requirements
+RUN python3 -m pip install -U -r /usr/src/gtsam/python/requirements.txt
+
+# Run cmake again, now with python toolbox on
+WORKDIR /usr/src/gtsam/build
+RUN cmake \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DGTSAM_WITH_EIGEN_MKL=OFF \
+ -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
+ -DGTSAM_BUILD_TIMING_ALWAYS=OFF \
+ -DGTSAM_BUILD_TESTS=OFF \
+ -DGTSAM_BUILD_PYTHON=ON \
+    -DGTSAM_PYTHON_VERSION=3 \
+ ..
+
+# Build again, as ubuntu-gtsam image cleaned
+RUN make -j4 install && make clean
+
+# Needed to run python wrapper:
+RUN echo 'export PYTHONPATH=/usr/local/python/:$PYTHONPATH' >> /root/.bashrc
+
+# Run bash
+CMD ["bash"]
diff --git a/docker/ubuntu-gtsam-python/build.sh b/docker/ubuntu-gtsam-python/build.sh
new file mode 100755
index 000000000..1696f6c61
--- /dev/null
+++ b/docker/ubuntu-gtsam-python/build.sh
@@ -0,0 +1,3 @@
+# Build command for Docker image
+# TODO(dellaert): use docker compose and/or cmake
+docker build --no-cache -t dellaert/ubuntu-gtsam-python:bionic .
diff --git a/docker/ubuntu-gtsam/Dockerfile b/docker/ubuntu-gtsam/Dockerfile
new file mode 100644
index 000000000..f2b476f15
--- /dev/null
+++ b/docker/ubuntu-gtsam/Dockerfile
@@ -0,0 +1,35 @@
+# Ubuntu image with GTSAM installed. Configured with Boost and TBB support.
+
+# Get the base Ubuntu image from Docker Hub
+FROM dellaert/ubuntu-boost-tbb:bionic
+
+# Install git
+RUN apt-get update && \
+ apt-get install -y git
+
+# Install compiler
+RUN apt-get install -y build-essential
+
+# Clone GTSAM (develop branch)
+WORKDIR /usr/src/
+RUN git clone --single-branch --branch develop https://github.com/borglab/gtsam.git
+
+# Change to build directory. Will be created automatically.
+WORKDIR /usr/src/gtsam/build
+# Run cmake
+RUN cmake \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DGTSAM_WITH_EIGEN_MKL=OFF \
+ -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF \
+ -DGTSAM_BUILD_TIMING_ALWAYS=OFF \
+ -DGTSAM_BUILD_TESTS=OFF \
+ ..
+
+# Build
+RUN make -j4 install && make clean
+
+# Needed to link with GTSAM
+RUN echo 'export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH' >> /root/.bashrc
+
+# Run bash
+CMD ["bash"]
diff --git a/docker/ubuntu-gtsam/build.sh b/docker/ubuntu-gtsam/build.sh
new file mode 100755
index 000000000..bf545e9c2
--- /dev/null
+++ b/docker/ubuntu-gtsam/build.sh
@@ -0,0 +1,3 @@
+# Build command for Docker image
+# TODO(dellaert): use docker compose and/or cmake
+docker build --no-cache -t dellaert/ubuntu-gtsam:bionic .
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index 7251c2b6f..476f4ae21 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -1,7 +1,4 @@
set (excluded_examples
- DiscreteBayesNet_FG.cpp
- UGM_chain.cpp
- UGM_small.cpp
elaboratePoint2KalmanFilter.cpp
)
diff --git a/examples/CameraResectioning.cpp b/examples/CameraResectioning.cpp
index b12418098..7ac2de8b1 100644
--- a/examples/CameraResectioning.cpp
+++ b/examples/CameraResectioning.cpp
@@ -46,8 +46,8 @@ class ResectioningFactor: public NoiseModelFactor1 {
}
/// evaluate the error
- virtual Vector evaluateError(const Pose3& pose, boost::optional H =
- boost::none) const {
+ Vector evaluateError(const Pose3& pose, boost::optional H =
+ boost::none) const override {
PinholeCamera camera(pose, *K_);
return camera.project(P_, H, boost::none, boost::none) - p_;
}
diff --git a/examples/CombinedImuFactorsExample.cpp b/examples/CombinedImuFactorsExample.cpp
new file mode 100644
index 000000000..c9646e64d
--- /dev/null
+++ b/examples/CombinedImuFactorsExample.cpp
@@ -0,0 +1,303 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file CombinedImuFactorsExample
+ * @brief Test example for using GTSAM ImuCombinedFactor
+ * navigation code.
+ * @author Varun Agrawal
+ */
+
+/**
+ * Example of use of the CombinedImuFactor in
+ * conjunction with GPS
+ * - we read IMU and GPS data from a CSV file, with the following format:
+ * A row starting with "i" is the first initial position formatted with
+ * N, E, D, qX, qY, qZ, qW, velN, velE, velD
+ * A row starting with "0" is an imu measurement
+ * linAccN, linAccE, linAccD, angVelN, angVelE, angVelD
+ * A row starting with "1" is a gps correction formatted with
+ * N, E, D, qX, qY, qZ, qW
+ * Note that for GPS correction, we're only using the position not the
+ * rotation. The rotation is provided in the file for ground truth comparison.
+ *
+ * See usage: ./CombinedImuFactorsExample --help
+ */
+
+#include
+
+// GTSAM related includes.
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+using namespace gtsam;
+using namespace std;
+
+using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz)
+using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
+using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
+
+namespace po = boost::program_options;
+
+po::variables_map parseOptions(int argc, char* argv[]) {
+ po::options_description desc;
+ desc.add_options()("help,h", "produce help message")(
+ "data_csv_path", po::value()->default_value("imuAndGPSdata.csv"),
+ "path to the CSV file with the IMU data")(
+ "output_filename",
+ po::value()->default_value("imuFactorExampleResults.csv"),
+ "path to the result file to use")("use_isam", po::bool_switch(),
+ "use ISAM as the optimizer");
+
+ po::variables_map vm;
+ po::store(po::parse_command_line(argc, argv, desc), vm);
+
+ if (vm.count("help")) {
+ cout << desc << "\n";
+ exit(1);
+ }
+
+ return vm;
+}
+
+Vector10 readInitialState(ifstream& file) {
+ string value;
+ // Format is (N,E,D,qX,qY,qZ,qW,velN,velE,velD)
+ Vector10 initial_state;
+ getline(file, value, ','); // i
+ for (int i = 0; i < 9; i++) {
+ getline(file, value, ',');
+ initial_state(i) = stof(value.c_str());
+ }
+ getline(file, value, '\n');
+ initial_state(9) = stof(value.c_str());
+
+ return initial_state;
+}
+
+boost::shared_ptr imuParams() {
+ // We use the sensor specs to build the noise model for the IMU factor.
+ double accel_noise_sigma = 0.0003924;
+ double gyro_noise_sigma = 0.000205689024915;
+ double accel_bias_rw_sigma = 0.004905;
+ double gyro_bias_rw_sigma = 0.000001454441043;
+ Matrix33 measured_acc_cov = I_3x3 * pow(accel_noise_sigma, 2);
+ Matrix33 measured_omega_cov = I_3x3 * pow(gyro_noise_sigma, 2);
+ Matrix33 integration_error_cov =
+ I_3x3 * 1e-8; // error committed in integrating position from velocities
+ Matrix33 bias_acc_cov = I_3x3 * pow(accel_bias_rw_sigma, 2);
+ Matrix33 bias_omega_cov = I_3x3 * pow(gyro_bias_rw_sigma, 2);
+ Matrix66 bias_acc_omega_int =
+ I_6x6 * 1e-5; // error in the bias used for preintegration
+
+ auto p = PreintegratedCombinedMeasurements::Params::MakeSharedD(0.0);
+ // PreintegrationBase params:
+ p->accelerometerCovariance =
+ measured_acc_cov; // acc white noise in continuous
+ p->integrationCovariance =
+ integration_error_cov; // integration uncertainty continuous
+ // should be using 2nd order integration
+ // PreintegratedRotation params:
+ p->gyroscopeCovariance =
+ measured_omega_cov; // gyro white noise in continuous
+ // PreintegrationCombinedMeasurements params:
+ p->biasAccCovariance = bias_acc_cov; // acc bias in continuous
+ p->biasOmegaCovariance = bias_omega_cov; // gyro bias in continuous
+ p->biasAccOmegaInt = bias_acc_omega_int;
+
+ return p;
+}
+
+int main(int argc, char* argv[]) {
+ string data_filename, output_filename;
+ po::variables_map var_map = parseOptions(argc, argv);
+
+ data_filename = findExampleDataFile(var_map["data_csv_path"].as());
+ output_filename = var_map["output_filename"].as();
+
+ // Set up output file for plotting errors
+ FILE* fp_out = fopen(output_filename.c_str(), "w+");
+ fprintf(fp_out,
+ "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m),gt_qx,"
+ "gt_qy,gt_qz,gt_qw\n");
+
+ // Begin parsing the CSV file. Input the first line for initialization.
+ // From there, we'll iterate through the file and we'll preintegrate the IMU
+ // or add in the GPS given the input.
+ ifstream file(data_filename.c_str());
+
+ Vector10 initial_state = readInitialState(file);
+ cout << "initial state:\n" << initial_state.transpose() << "\n\n";
+
+ // Assemble initial quaternion through GTSAM constructor
+ // ::Quaternion(w,x,y,z);
+ Rot3 prior_rotation = Rot3::Quaternion(initial_state(6), initial_state(3),
+ initial_state(4), initial_state(5));
+ Point3 prior_point(initial_state.head<3>());
+ Pose3 prior_pose(prior_rotation, prior_point);
+ Vector3 prior_velocity(initial_state.tail<3>());
+
+ imuBias::ConstantBias prior_imu_bias; // assume zero initial bias
+
+ int index = 0;
+
+ Values initial_values;
+
+ // insert pose at initialization
+ initial_values.insert(X(index), prior_pose);
+ initial_values.insert(V(index), prior_velocity);
+ initial_values.insert(B(index), prior_imu_bias);
+
+  // Assemble prior noise model and add it to the graph.
+ auto pose_noise_model = noiseModel::Diagonal::Sigmas(
+ (Vector(6) << 0.01, 0.01, 0.01, 0.5, 0.5, 0.5)
+ .finished()); // rad,rad,rad,m, m, m
+ auto velocity_noise_model = noiseModel::Isotropic::Sigma(3, 0.1); // m/s
+ auto bias_noise_model = noiseModel::Isotropic::Sigma(6, 1e-3);
+
+ // Add all prior factors (pose, velocity, bias) to the graph.
+ NonlinearFactorGraph graph;
+ graph.addPrior(X(index), prior_pose, pose_noise_model);
+ graph.addPrior(V(index), prior_velocity, velocity_noise_model);
+ graph.addPrior(B(index), prior_imu_bias,
+ bias_noise_model);
+
+ auto p = imuParams();
+
+ std::shared_ptr preintegrated =
+ std::make_shared(p, prior_imu_bias);
+
+ assert(preintegrated);
+
+ // Store previous state for imu integration and latest predicted outcome.
+ NavState prev_state(prior_pose, prior_velocity);
+ NavState prop_state = prev_state;
+ imuBias::ConstantBias prev_bias = prior_imu_bias;
+
+ // Keep track of total error over the entire run as simple performance metric.
+ double current_position_error = 0.0, current_orientation_error = 0.0;
+
+ double output_time = 0.0;
+ double dt = 0.005; // The real system has noise, but here, results are nearly
+ // exactly the same, so keeping this for simplicity.
+
+ // All priors have been set up, now iterate through the data file.
+ while (file.good()) {
+ // Parse out first value
+ string value;
+ getline(file, value, ',');
+ int type = stoi(value.c_str());
+
+ if (type == 0) { // IMU measurement
+ Vector6 imu;
+ for (int i = 0; i < 5; ++i) {
+ getline(file, value, ',');
+ imu(i) = stof(value.c_str());
+ }
+ getline(file, value, '\n');
+ imu(5) = stof(value.c_str());
+
+ // Adding the IMU preintegration.
+ preintegrated->integrateMeasurement(imu.head<3>(), imu.tail<3>(), dt);
+
+ } else if (type == 1) { // GPS measurement
+ Vector7 gps;
+ for (int i = 0; i < 6; ++i) {
+ getline(file, value, ',');
+ gps(i) = stof(value.c_str());
+ }
+ getline(file, value, '\n');
+ gps(6) = stof(value.c_str());
+
+ index++;
+
+ // Adding IMU factor and GPS factor and optimizing.
+ auto preint_imu_combined =
+ dynamic_cast(
+ *preintegrated);
+ CombinedImuFactor imu_factor(X(index - 1), V(index - 1), X(index),
+ V(index), B(index - 1), B(index),
+ preint_imu_combined);
+ graph.add(imu_factor);
+
+ auto correction_noise = noiseModel::Isotropic::Sigma(3, 1.0);
+ GPSFactor gps_factor(X(index),
+ Point3(gps(0), // N,
+ gps(1), // E,
+ gps(2)), // D,
+ correction_noise);
+ graph.add(gps_factor);
+
+ // Now optimize and compare results.
+ prop_state = preintegrated->predict(prev_state, prev_bias);
+ initial_values.insert(X(index), prop_state.pose());
+ initial_values.insert(V(index), prop_state.v());
+ initial_values.insert(B(index), prev_bias);
+
+ LevenbergMarquardtParams params;
+ params.setVerbosityLM("SUMMARY");
+ LevenbergMarquardtOptimizer optimizer(graph, initial_values, params);
+ Values result = optimizer.optimize();
+
+ // Overwrite the beginning of the preintegration for the next step.
+ prev_state =
+ NavState(result.at(X(index)), result.at(V(index)));
+ prev_bias = result.at(B(index));
+
+ // Reset the preintegration object.
+ preintegrated->resetIntegrationAndSetBias(prev_bias);
+
+ // Print out the position and orientation error for comparison.
+ Vector3 result_position = prev_state.pose().translation();
+ Vector3 position_error = result_position - gps.head<3>();
+ current_position_error = position_error.norm();
+
+ Quaternion result_quat = prev_state.pose().rotation().toQuaternion();
+ Quaternion gps_quat(gps(6), gps(3), gps(4), gps(5));
+ Quaternion quat_error = result_quat * gps_quat.inverse();
+ quat_error.normalize();
+ Vector3 euler_angle_error(quat_error.x() * 2, quat_error.y() * 2,
+ quat_error.z() * 2);
+ current_orientation_error = euler_angle_error.norm();
+
+ // display statistics
+ cout << "Position error:" << current_position_error << "\t "
+ << "Angular error:" << current_orientation_error << "\n"
+ << endl;
+
+ fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
+ output_time, result_position(0), result_position(1),
+ result_position(2), result_quat.x(), result_quat.y(),
+ result_quat.z(), result_quat.w(), gps(0), gps(1), gps(2),
+ gps_quat.x(), gps_quat.y(), gps_quat.z(), gps_quat.w());
+
+ output_time += 1.0;
+
+ } else {
+ cerr << "ERROR parsing file\n";
+ return 1;
+ }
+ }
+ fclose(fp_out);
+ cout << "Complete, results written to " << output_filename << "\n\n";
+
+ return 0;
+}
diff --git a/examples/Data/Klaus3.g2o b/examples/Data/Klaus3.g2o
new file mode 100644
index 000000000..83a6e6fd2
--- /dev/null
+++ b/examples/Data/Klaus3.g2o
@@ -0,0 +1,6 @@
+VERTEX_SE3:QUAT 0 -1.6618596980158338 -0.5736497760548741 -3.3319774096611026 -0.02676080288219576 -0.024497002638379624 -0.015064701622500615 0.9992281076190063
+VERTEX_SE3:QUAT 1 -1.431820463019384 -0.549139761976065 -3.160677992237872 -0.049543805396343954 -0.03232420352077356 -0.004386230477751116 0.998239108728862
+VERTEX_SE3:QUAT 2 -1.0394840214436651 -0.5268841046291037 -2.972143862665523 -0.07993768981394891 0.0825062894866454 -0.04088089479075661 0.9925378735259738
+EDGE_SE3:QUAT 0 1 0.23003923499644974 0.02451001407880915 0.17129941742323052 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
+EDGE_SE3:QUAT 0 2 0.6223756765721686 0.04676567142577037 0.35983354699557957 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
+EDGE_SE3:QUAT 1 2 0.3923364415757189 0.022255657346961222 0.18853412957234905 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
diff --git a/examples/Data/example_with_vertices.g2o b/examples/Data/example_with_vertices.g2o
new file mode 100644
index 000000000..ca7cd86df
--- /dev/null
+++ b/examples/Data/example_with_vertices.g2o
@@ -0,0 +1,16 @@
+VERTEX_SE3:QUAT 0 40 -1.15443e-13 10 0.557345 0.557345 -0.435162 -0.435162
+VERTEX_SE3:QUAT 1 28.2843 28.2843 10 0.301633 0.728207 -0.568567 -0.235508
+VERTEX_SE3:QUAT 2 -1.6986e-08 40 10 -3.89609e-10 0.788205 -0.615412 -2.07622e-10
+VERTEX_SE3:QUAT 3 -28.2843 28.2843 10 -0.301633 0.728207 -0.568567 0.235508
+VERTEX_SE3:QUAT 4 -40 -2.32554e-10 10 -0.557345 0.557345 -0.435162 0.435162
+VERTEX_SE3:QUAT 5 -28.2843 -28.2843 10 -0.728207 0.301633 -0.235508 0.568567
+VERTEX_SE3:QUAT 6 -2.53531e-09 -40 10 -0.788205 -1.25891e-11 -3.82742e-13 0.615412
+VERTEX_SE3:QUAT 7 28.2843 -28.2843 10 -0.728207 -0.301633 0.235508 0.568567
+VERTEX_TRACKXYZ 0 10 10 10
+VERTEX_TRACKXYZ 1 -10 10 10
+VERTEX_TRACKXYZ 2 -10 -10 10
+VERTEX_TRACKXYZ 3 10 -10 10
+VERTEX_TRACKXYZ 4 10 10 -10
+VERTEX_TRACKXYZ 5 -10 10 -10
+VERTEX_TRACKXYZ 6 -10 -10 -10
+VERTEX_TRACKXYZ 7 10 -10 -10
diff --git a/examples/Data/toyExample.g2o b/examples/Data/toyExample.g2o
new file mode 100755
index 000000000..5ff1ba74a
--- /dev/null
+++ b/examples/Data/toyExample.g2o
@@ -0,0 +1,11 @@
+VERTEX_SE3:QUAT 0 0 0 0 0 0 0 1
+VERTEX_SE3:QUAT 1 0 0 0 0 0 0 1
+VERTEX_SE3:QUAT 2 0 0 0 0.00499994 0.00499994 0.00499994 0.999963
+VERTEX_SE3:QUAT 3 0 0 0 -0.00499994 -0.00499994 -0.00499994 0.999963
+VERTEX_SE3:QUAT 4 0 0 0 0.00499994 0.00499994 0.00499994 0.999963
+EDGE_SE3:QUAT 1 2 1 2 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
+EDGE_SE3:QUAT 2 3 -3.26795e-07 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
+EDGE_SE3:QUAT 3 4 1 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
+EDGE_SE3:QUAT 3 1 6.9282e-07 2 0 0 0 1 1.73205e-07 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
+EDGE_SE3:QUAT 1 4 -1 1 0 0 0 -0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
+EDGE_SE3:QUAT 0 1 0 0 0 0 0 0 1 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
diff --git a/examples/DiscreteBayesNetExample.cpp b/examples/DiscreteBayesNetExample.cpp
new file mode 100644
index 000000000..5dca116c3
--- /dev/null
+++ b/examples/DiscreteBayesNetExample.cpp
@@ -0,0 +1,83 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file DiscreteBayesNetExample.cpp
+ * @brief Discrete Bayes Net example with famous Asia Bayes Network
+ * @author Frank Dellaert
+ * @date July 10, 2020
+ */
+
+#include
+#include
+#include
+
+#include
+
+using namespace std;
+using namespace gtsam;
+
+int main(int argc, char **argv) {
+ DiscreteBayesNet asia;
+ DiscreteKey Asia(0, 2), Smoking(4, 2), Tuberculosis(3, 2), LungCancer(6, 2),
+ Bronchitis(7, 2), Either(5, 2), XRay(2, 2), Dyspnea(1, 2);
+ asia.add(Asia % "99/1");
+ asia.add(Smoking % "50/50");
+
+ asia.add(Tuberculosis | Asia = "99/1 95/5");
+ asia.add(LungCancer | Smoking = "99/1 90/10");
+ asia.add(Bronchitis | Smoking = "70/30 40/60");
+
+ asia.add((Either | Tuberculosis, LungCancer) = "F T T T");
+
+ asia.add(XRay | Either = "95/5 2/98");
+ asia.add((Dyspnea | Either, Bronchitis) = "9/1 2/8 3/7 1/9");
+
+ // print
+ vector pretty = {"Asia", "Dyspnea", "XRay", "Tuberculosis",
+ "Smoking", "Either", "LungCancer", "Bronchitis"};
+ auto formatter = [pretty](Key key) { return pretty[key]; };
+ asia.print("Asia", formatter);
+
+ // Convert to factor graph
+ DiscreteFactorGraph fg(asia);
+
+ // Create solver and eliminate
+ Ordering ordering;
+ ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7);
+ DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);
+
+ // solve
+ DiscreteFactor::sharedValues mpe = chordal->optimize();
+ GTSAM_PRINT(*mpe);
+
+ // We can also build a Bayes tree (directed junction tree).
+ // The elimination order above will do fine:
+ auto bayesTree = fg.eliminateMultifrontal(ordering);
+ bayesTree->print("bayesTree", formatter);
+
+ // add evidence, we were in Asia and we have dyspnea
+ fg.add(Asia, "0 1");
+ fg.add(Dyspnea, "0 1");
+
+ // solve again, now with evidence
+ DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
+ DiscreteFactor::sharedValues mpe2 = chordal2->optimize();
+ GTSAM_PRINT(*mpe2);
+
+ // We can also sample from it
+ cout << "\n10 samples:" << endl;
+ for (size_t i = 0; i < 10; i++) {
+ DiscreteFactor::sharedValues sample = chordal2->sample();
+ GTSAM_PRINT(*sample);
+ }
+ return 0;
+}
diff --git a/examples/DiscreteBayesNet_FG.cpp b/examples/DiscreteBayesNet_FG.cpp
index 6eb08c12e..121df4bef 100644
--- a/examples/DiscreteBayesNet_FG.cpp
+++ b/examples/DiscreteBayesNet_FG.cpp
@@ -15,105 +15,106 @@
* @author Abhijit
* @date Jun 4, 2012
*
- * We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009, p529]
- * You may be familiar with other graphical model packages like BNT (available
- * at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this is used as an
- * example. The following demo is same as that in the above link, except that
- * everything is using GTSAM.
+ * We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009,
+ * p529] You may be familiar with other graphical model packages like BNT
+ * (available at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this
+ * is used as an example. The following demo is same as that in the above link,
+ * except that everything is using GTSAM.
*/
#include
-#include
+#include
+
#include
using namespace std;
using namespace gtsam;
int main(int argc, char **argv) {
+ // Define keys and a print function
+ Key C(1), S(2), R(3), W(4);
+ auto print = [=](DiscreteFactor::sharedValues values) {
+ cout << boolalpha << "Cloudy = " << static_cast((*values)[C])
+ << " Sprinkler = " << static_cast((*values)[S])
+ << " Rain = " << boolalpha << static_cast((*values)[R])
+ << " WetGrass = " << static_cast((*values)[W]) << endl;
+ };
// We assume binary state variables
// we have 0 == "False" and 1 == "True"
const size_t nrStates = 2;
// define variables
- DiscreteKey Cloudy(1, nrStates), Sprinkler(2, nrStates), Rain(3, nrStates),
- WetGrass(4, nrStates);
+ DiscreteKey Cloudy(C, nrStates), Sprinkler(S, nrStates), Rain(R, nrStates),
+ WetGrass(W, nrStates);
// create Factor Graph of the bayes net
DiscreteFactorGraph graph;
// add factors
- graph.add(Cloudy, "0.5 0.5"); //P(Cloudy)
- graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1"); //P(Sprinkler | Cloudy)
- graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8"); //P(Rain | Cloudy)
+ graph.add(Cloudy, "0.5 0.5"); // P(Cloudy)
+ graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1"); // P(Sprinkler | Cloudy)
+ graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8"); // P(Rain | Cloudy)
graph.add(Sprinkler & Rain & WetGrass,
- "1 0 0.1 0.9 0.1 0.9 0.001 0.99"); //P(WetGrass | Sprinkler, Rain)
+ "1 0 0.1 0.9 0.1 0.9 0.001 0.99"); // P(WetGrass | Sprinkler, Rain)
- // Alternatively we can also create a DiscreteBayesNet, add DiscreteConditional
- // factors and create a FactorGraph from it. (See testDiscreteBayesNet.cpp)
+ // Alternatively we can also create a DiscreteBayesNet, add
+ // DiscreteConditional factors and create a FactorGraph from it. (See
+ // testDiscreteBayesNet.cpp)
// Since this is a relatively small distribution, we can as well print
// the whole distribution..
cout << "Distribution of Example: " << endl;
cout << setw(11) << "Cloudy(C)" << setw(14) << "Sprinkler(S)" << setw(10)
- << "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)"
- << endl;
+ << "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)"
+ << endl;
for (size_t a = 0; a < nrStates; a++)
for (size_t m = 0; m < nrStates; m++)
for (size_t h = 0; h < nrStates; h++)
for (size_t c = 0; c < nrStates; c++) {
DiscreteFactor::Values values;
- values[Cloudy.first] = c;
- values[Sprinkler.first] = h;
- values[Rain.first] = m;
- values[WetGrass.first] = a;
+ values[C] = c;
+ values[S] = h;
+ values[R] = m;
+ values[W] = a;
double prodPot = graph(values);
- cout << boolalpha << setw(8) << (bool) c << setw(14)
- << (bool) h << setw(12) << (bool) m << setw(13)
- << (bool) a << setw(16) << prodPot << endl;
+ cout << setw(8) << static_cast(c) << setw(14)
+ << static_cast(h) << setw(12) << static_cast(m)
+ << setw(13) << static_cast(a) << setw(16) << prodPot
+ << endl;
}
-
// "Most Probable Explanation", i.e., configuration with largest value
- DiscreteSequentialSolver solver(graph);
- DiscreteFactor::sharedValues optimalDecoding = solver.optimize();
- cout <<"\nMost Probable Explanation (MPE):" << endl;
- cout << boolalpha << "Cloudy = " << (bool)(*optimalDecoding)[Cloudy.first]
- << " Sprinkler = " << (bool)(*optimalDecoding)[Sprinkler.first]
- << " Rain = " << boolalpha << (bool)(*optimalDecoding)[Rain.first]
- << " WetGrass = " << (bool)(*optimalDecoding)[WetGrass.first]<< endl;
-
-
- // "Inference" We show an inference query like: probability that the Sprinkler was on;
- // given that the grass is wet i.e. P( S | W=1) =?
- cout << "\nInference Query: Probability of Sprinkler being on given Grass is Wet" << endl;
-
- // Method 1: we can compute the joint marginal P(S,W) and from that we can compute
- // P(S | W=1) = P(S,W=1)/P(W=1) We do this in following three steps..
-
- //Step1: Compute P(S,W)
- DiscreteFactorGraph jointFG;
- jointFG = *solver.jointFactorGraph(DiscreteKeys(Sprinkler & WetGrass).indices());
- DecisionTreeFactor probSW = jointFG.product();
-
- //Step2: Compute P(W)
- DiscreteFactor::shared_ptr probW = solver.marginalFactor(WetGrass.first);
-
- //Step3: Computer P(S | W=1) = P(S,W=1)/P(W=1)
- DiscreteFactor::Values values;
- values[WetGrass.first] = 1;
-
- //print P(S=0|W=1)
- values[Sprinkler.first] = 0;
- cout << "P(S=0|W=1) = " << probSW(values)/(*probW)(values) << endl;
-
- //print P(S=1|W=1)
- values[Sprinkler.first] = 1;
- cout << "P(S=1|W=1) = " << probSW(values)/(*probW)(values) << endl;
-
- // TODO: Method 2 : One way is to modify the factor graph to
- // incorporate the evidence node and compute the marginal
- // TODO: graph.addEvidence(Cloudy,0);
-
+ DiscreteFactor::sharedValues mpe = graph.eliminateSequential()->optimize();
+ cout << "\nMost Probable Explanation (MPE):" << endl;
+ print(mpe);
+
+ // "Inference" We show an inference query like: probability that the Sprinkler
+ // was on, given that it is not cloudy, i.e. P( S | C=0) = ?
+
+ // add evidence that it is not Cloudy
+ graph.add(Cloudy, "1 0");
+
+ // solve again, now with evidence
+ DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
+ DiscreteFactor::sharedValues mpe_with_evidence = chordal->optimize();
+
+ cout << "\nMPE given C=0:" << endl;
+ print(mpe_with_evidence);
+
+ // we can also calculate arbitrary marginals:
+ DiscreteMarginals marginals(graph);
+ cout << "\nP(S=1|C=0):" << marginals.marginalProbabilities(Sprinkler)[1]
+ << endl;
+ cout << "\nP(R=0|C=0):" << marginals.marginalProbabilities(Rain)[0] << endl;
+ cout << "\nP(W=1|C=0):" << marginals.marginalProbabilities(WetGrass)[1]
+ << endl;
+
+ // We can also sample from it
+ cout << "\n10 samples:" << endl;
+ for (size_t i = 0; i < 10; i++) {
+ DiscreteFactor::sharedValues sample = chordal->sample();
+ print(sample);
+ }
return 0;
}
diff --git a/examples/HMMExample.cpp b/examples/HMMExample.cpp
new file mode 100644
index 000000000..ee861e381
--- /dev/null
+++ b/examples/HMMExample.cpp
@@ -0,0 +1,94 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file HMMExample.cpp
+ * @brief Hidden Markov Model example, discrete.
+ * @author Frank Dellaert
+ * @date July 12, 2020
+ */
+
+#include
+#include
+#include
+
+#include
+#include
+
+using namespace std;
+using namespace gtsam;
+
+int main(int argc, char **argv) {
+ const int nrNodes = 4;
+ const size_t nrStates = 3;
+
+ // Define variables as well as ordering
+ Ordering ordering;
+ vector keys;
+ for (int k = 0; k < nrNodes; k++) {
+ DiscreteKey key_i(k, nrStates);
+ keys.push_back(key_i);
+ ordering.emplace_back(k);
+ }
+
+ // Create HMM as a DiscreteBayesNet
+ DiscreteBayesNet hmm;
+
+ // Define backbone
+ const string transition = "8/1/1 1/8/1 1/1/8";
+ for (int k = 1; k < nrNodes; k++) {
+ hmm.add(keys[k] | keys[k - 1] = transition);
+ }
+
+ // Add some measurements, not needed for all time steps!
+ hmm.add(keys[0] % "7/2/1");
+ hmm.add(keys[1] % "1/9/0");
+ hmm.add(keys.back() % "5/4/1");
+
+ // print
+ hmm.print("HMM");
+
+ // Convert to factor graph
+ DiscreteFactorGraph factorGraph(hmm);
+
+ // Create solver and eliminate
+ // This will create a DAG ordered with arrow of time reversed
+ DiscreteBayesNet::shared_ptr chordal =
+ factorGraph.eliminateSequential(ordering);
+ chordal->print("Eliminated");
+
+ // solve
+ DiscreteFactor::sharedValues mpe = chordal->optimize();
+ GTSAM_PRINT(*mpe);
+
+ // We can also sample from it
+ cout << "\n10 samples:" << endl;
+ for (size_t k = 0; k < 10; k++) {
+ DiscreteFactor::sharedValues sample = chordal->sample();
+ GTSAM_PRINT(*sample);
+ }
+
+ // Or compute the marginals. This re-eliminates the FG into a Bayes tree
+ cout << "\nComputing Node Marginals .." << endl;
+ DiscreteMarginals marginals(factorGraph);
+ for (int k = 0; k < nrNodes; k++) {
+ Vector margProbs = marginals.marginalProbabilities(keys[k]);
+ stringstream ss;
+ ss << "marginal " << k;
+ print(margProbs, ss.str());
+ }
+
+ // TODO(frank): put in the glue to have DiscreteMarginals produce *arbitrary*
+ // joints efficiently, by the Bayes tree shortcut magic. All the code is there
+ // but it's not yet connected.
+
+ return 0;
+}
diff --git a/examples/IMUKittiExampleGPS.cpp b/examples/IMUKittiExampleGPS.cpp
new file mode 100644
index 000000000..e2ca49647
--- /dev/null
+++ b/examples/IMUKittiExampleGPS.cpp
@@ -0,0 +1,359 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file IMUKittiExampleGPS
+ * @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE
+ * @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ Electronics
+ */
+
+// GTSAM related includes.
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+using namespace std;
+using namespace gtsam;
+
+using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
+using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
+using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz)
+
+struct KittiCalibration {
+ double body_ptx;
+ double body_pty;
+ double body_ptz;
+ double body_prx;
+ double body_pry;
+ double body_prz;
+ double accelerometer_sigma;
+ double gyroscope_sigma;
+ double integration_sigma;
+ double accelerometer_bias_sigma;
+ double gyroscope_bias_sigma;
+ double average_delta_t;
+};
+
+struct ImuMeasurement {
+ double time;
+ double dt;
+ Vector3 accelerometer;
+ Vector3 gyroscope; // omega
+};
+
+struct GpsMeasurement {
+ double time;
+ Vector3 position; // x,y,z
+};
+
+const string output_filename = "IMUKittiExampleGPSResults.csv";
+
+void loadKittiData(KittiCalibration& kitti_calibration,
+ vector& imu_measurements,
+ vector& gps_measurements) {
+ string line;
+
+ // Read IMU metadata and compute relative sensor pose transforms
+ // BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma GyroscopeSigma IntegrationSigma
+ // AccelerometerBiasSigma GyroscopeBiasSigma AverageDeltaT
+ string imu_metadata_file = findExampleDataFile("KittiEquivBiasedImu_metadata.txt");
+ ifstream imu_metadata(imu_metadata_file.c_str());
+
+ printf("-- Reading sensor metadata\n");
+
+ getline(imu_metadata, line, '\n'); // ignore the first line
+
+ // Load Kitti calibration
+ getline(imu_metadata, line, '\n');
+ sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
+ &kitti_calibration.body_ptx,
+ &kitti_calibration.body_pty,
+ &kitti_calibration.body_ptz,
+ &kitti_calibration.body_prx,
+ &kitti_calibration.body_pry,
+ &kitti_calibration.body_prz,
+ &kitti_calibration.accelerometer_sigma,
+ &kitti_calibration.gyroscope_sigma,
+ &kitti_calibration.integration_sigma,
+ &kitti_calibration.accelerometer_bias_sigma,
+ &kitti_calibration.gyroscope_bias_sigma,
+ &kitti_calibration.average_delta_t);
+ printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
+ kitti_calibration.body_ptx,
+ kitti_calibration.body_pty,
+ kitti_calibration.body_ptz,
+ kitti_calibration.body_prx,
+ kitti_calibration.body_pry,
+ kitti_calibration.body_prz,
+ kitti_calibration.accelerometer_sigma,
+ kitti_calibration.gyroscope_sigma,
+ kitti_calibration.integration_sigma,
+ kitti_calibration.accelerometer_bias_sigma,
+ kitti_calibration.gyroscope_bias_sigma,
+ kitti_calibration.average_delta_t);
+
+ // Read IMU data
+ // Time dt accelX accelY accelZ omegaX omegaY omegaZ
+ string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt");
+ printf("-- Reading IMU measurements from file\n");
+ {
+ ifstream imu_data(imu_data_file.c_str());
+ getline(imu_data, line, '\n'); // ignore the first line
+
+ double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0, gyro_y = 0, gyro_z = 0;
+ while (!imu_data.eof()) {
+ getline(imu_data, line, '\n');
+ sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf",
+ &time, &dt,
+ &acc_x, &acc_y, &acc_z,
+ &gyro_x, &gyro_y, &gyro_z);
+
+ ImuMeasurement measurement;
+ measurement.time = time;
+ measurement.dt = dt;
+ measurement.accelerometer = Vector3(acc_x, acc_y, acc_z);
+ measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z);
+ imu_measurements.push_back(measurement);
+ }
+ }
+
+ // Read GPS data
+ // Time,X,Y,Z
+ string gps_data_file = findExampleDataFile("KittiGps_converted.txt");
+ printf("-- Reading GPS measurements from file\n");
+ {
+ ifstream gps_data(gps_data_file.c_str());
+ getline(gps_data, line, '\n'); // ignore the first line
+
+ double time = 0, gps_x = 0, gps_y = 0, gps_z = 0;
+ while (!gps_data.eof()) {
+ getline(gps_data, line, '\n');
+ sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z);
+
+ GpsMeasurement measurement;
+ measurement.time = time;
+ measurement.position = Vector3(gps_x, gps_y, gps_z);
+ gps_measurements.push_back(measurement);
+ }
+ }
+}
+
+int main(int argc, char* argv[]) {
+ KittiCalibration kitti_calibration;
+ vector imu_measurements;
+ vector gps_measurements;
+ loadKittiData(kitti_calibration, imu_measurements, gps_measurements);
+
+ Vector6 BodyP = (Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty, kitti_calibration.body_ptz,
+ kitti_calibration.body_prx, kitti_calibration.body_pry, kitti_calibration.body_prz)
+ .finished();
+ auto body_T_imu = Pose3::Expmap(BodyP);
+ if (!body_T_imu.equals(Pose3(), 1e-5)) {
+ printf("Currently only support IMUinBody is identity, i.e. IMU and body frame are the same");
+ exit(-1);
+ }
+
+ // Configure different variables
+ // double t_offset = gps_measurements[0].time;
+ size_t first_gps_pose = 1;
+ size_t gps_skip = 10; // Skip this many GPS measurements each time
+ double g = 9.8;
+ auto w_coriolis = Vector3::Zero(); // zero vector
+
+ // Configure noise models
+ auto noise_model_gps = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
+ Vector3::Constant(1.0/0.07))
+ .finished());
+
+ // Set initial conditions for the estimated trajectory
+ // initial pose is the reference frame (navigation frame)
+ auto current_pose_global = Pose3(Rot3(), gps_measurements[first_gps_pose].position);
+ // the vehicle is stationary at the beginning at position 0,0,0
+ Vector3 current_velocity_global = Vector3::Zero();
+ auto current_bias = imuBias::ConstantBias(); // init with zero bias
+
+ auto sigma_init_x = noiseModel::Diagonal::Precisions((Vector6() << Vector3::Constant(0),
+ Vector3::Constant(1.0))
+ .finished());
+ auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0));
+ auto sigma_init_b = noiseModel::Diagonal::Sigmas((Vector6() << Vector3::Constant(0.100),
+ Vector3::Constant(5.00e-05))
+ .finished());
+
+ // Set IMU preintegration parameters
+ Matrix33 measured_acc_cov = I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2);
+ Matrix33 measured_omega_cov = I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2);
+ // error committed in integrating position from velocities
+ Matrix33 integration_error_cov = I_3x3 * pow(kitti_calibration.integration_sigma, 2);
+
+ auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g);
+ imu_params->accelerometerCovariance = measured_acc_cov; // acc white noise in continuous
+ imu_params->integrationCovariance = integration_error_cov; // integration uncertainty continuous
+ imu_params->gyroscopeCovariance = measured_omega_cov; // gyro white noise in continuous
+ imu_params->omegaCoriolis = w_coriolis;
+
+ std::shared_ptr current_summarized_measurement = nullptr;
+
+ // Set ISAM2 parameters and create ISAM2 solver object
+ ISAM2Params isam_params;
+ isam_params.factorization = ISAM2Params::CHOLESKY;
+ isam_params.relinearizeSkip = 10;
+
+ ISAM2 isam(isam_params);
+
+ // Create the factor graph and values object that will store new factors and values to add to the incremental graph
+ NonlinearFactorGraph new_factors;
+ Values new_values; // values storing the initial estimates of new nodes in the factor graph
+
+ /// Main loop:
+ /// (1) we read the measurements
+ /// (2) we create the corresponding factors in the graph
+ /// (3) we solve the graph to obtain an optimal estimate of the robot trajectory
+ printf("-- Starting main loop: inference is performed at each time step, but we plot trajectory every 10 steps\n");
+ size_t j = 0;
+ for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
+ // At each non-IMU measurement we initialize a new node in the graph
+ auto current_pose_key = X(i);
+ auto current_vel_key = V(i);
+ auto current_bias_key = B(i);
+ double t = gps_measurements[i].time;
+
+ if (i == first_gps_pose) {
+ // Create initial estimate and prior on initial pose, velocity, and biases
+ new_values.insert(current_pose_key, current_pose_global);
+ new_values.insert(current_vel_key, current_velocity_global);
+ new_values.insert(current_bias_key, current_bias);
+ new_factors.emplace_shared>(current_pose_key, current_pose_global, sigma_init_x);
+ new_factors.emplace_shared>(current_vel_key, current_velocity_global, sigma_init_v);
+ new_factors.emplace_shared>(current_bias_key, current_bias, sigma_init_b);
+ } else {
+ double t_previous = gps_measurements[i-1].time;
+
+ // Summarize IMU data between the previous GPS measurement and now
+ current_summarized_measurement = std::make_shared(imu_params, current_bias);
+ static size_t included_imu_measurement_count = 0;
+ while (j < imu_measurements.size() && imu_measurements[j].time <= t) {
+ if (imu_measurements[j].time >= t_previous) {
+ current_summarized_measurement->integrateMeasurement(imu_measurements[j].accelerometer,
+ imu_measurements[j].gyroscope,
+ imu_measurements[j].dt);
+ included_imu_measurement_count++;
+ }
+ j++;
+ }
+
+ // Create IMU factor
+ auto previous_pose_key = X(i-1);
+ auto previous_vel_key = V(i-1);
+ auto previous_bias_key = B(i-1);
+
+ new_factors.emplace_shared(previous_pose_key, previous_vel_key,
+ current_pose_key, current_vel_key,
+ previous_bias_key, *current_summarized_measurement);
+
+ // Bias evolution as given in the IMU metadata
+ auto sigma_between_b = noiseModel::Diagonal::Sigmas((Vector6() <<
+ Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.accelerometer_bias_sigma),
+ Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.gyroscope_bias_sigma))
+ .finished());
+ new_factors.emplace_shared>(previous_bias_key,
+ current_bias_key,
+ imuBias::ConstantBias(),
+ sigma_between_b);
+
+ // Create GPS factor
+ auto gps_pose = Pose3(current_pose_global.rotation(), gps_measurements[i].position);
+ if ((i % gps_skip) == 0) {
+ new_factors.emplace_shared>(current_pose_key, gps_pose, noise_model_gps);
+ new_values.insert(current_pose_key, gps_pose);
+
+ printf("################ POSE INCLUDED AT TIME %lf ################\n", t);
+ cout << gps_pose.translation();
+ printf("\n\n");
+ } else {
+ new_values.insert(current_pose_key, current_pose_global);
+ }
+
+ // Add initial values for velocity and bias based on the previous estimates
+ new_values.insert(current_vel_key, current_velocity_global);
+ new_values.insert(current_bias_key, current_bias);
+
+ // Update solver
+ // =======================================================================
+ // We accumulate 2*GPSskip GPS measurements before updating the solver at
+ // first so that the heading becomes observable.
+ if (i > (first_gps_pose + 2*gps_skip)) {
+ printf("################ NEW FACTORS AT TIME %lf ################\n", t);
+ new_factors.print();
+
+ isam.update(new_factors, new_values);
+
+ // Reset the newFactors and newValues list
+ new_factors.resize(0);
+ new_values.clear();
+
+ // Extract the result/current estimates
+ Values result = isam.calculateEstimate();
+
+ current_pose_global = result.at(current_pose_key);
+ current_velocity_global = result.at(current_vel_key);
+ current_bias = result.at(current_bias_key);
+
+ printf("\n################ POSE AT TIME %lf ################\n", t);
+ current_pose_global.print();
+ printf("\n\n");
+ }
+ }
+ }
+
+ // Save results to file
+ printf("\nWriting results to file...\n");
+ FILE* fp_out = fopen(output_filename.c_str(), "w+");
+ fprintf(fp_out, "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n");
+
+ Values result = isam.calculateEstimate();
+ for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
+ auto pose_key = X(i);
+ auto vel_key = V(i);
+ auto bias_key = B(i);
+
+ auto pose = result.at(pose_key);
+ auto velocity = result.at(vel_key);
+ auto bias = result.at(bias_key);
+
+ auto pose_quat = pose.rotation().toQuaternion();
+ auto gps = gps_measurements[i].position;
+
+ cout << "State at #" << i << endl;
+ cout << "Pose:" << endl << pose << endl;
+ cout << "Velocity:" << endl << velocity << endl;
+ cout << "Bias:" << endl << bias << endl;
+
+ fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
+ gps_measurements[i].time,
+ pose.x(), pose.y(), pose.z(),
+ pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(),
+ gps(0), gps(1), gps(2));
+ }
+
+ fclose(fp_out);
+}
diff --git a/examples/ImuFactorsExample.cpp b/examples/ImuFactorsExample.cpp
index a4707ea46..793927d7e 100644
--- a/examples/ImuFactorsExample.cpp
+++ b/examples/ImuFactorsExample.cpp
@@ -10,7 +10,7 @@
* -------------------------------------------------------------------------- */
/**
- * @file imuFactorsExample
+ * @file ImuFactorsExample
* @brief Test example for using GTSAM ImuFactor and ImuCombinedFactor
* navigation code.
* @author Garrett (ghemann@gmail.com), Luca Carlone
@@ -31,17 +31,17 @@
* Note that for GPS correction, we're only using the position not the
* rotation. The rotation is provided in the file for ground truth comparison.
*
- * Usage: ./ImuFactorsExample [data_csv_path] [-c]
- * optional arguments:
- * data_csv_path path to the CSV file with the IMU data.
- * -c use CombinedImuFactor
+ * See usage: ./ImuFactorsExample --help
*/
+#include
+
// GTSAM related includes.
#include
#include
#include
#include
+#include
#include
#include
#include
@@ -58,34 +58,87 @@ using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz)
using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
-static const char output_filename[] = "imuFactorExampleResults.csv";
-static const char use_combined_imu_flag[3] = "-c";
+namespace po = boost::program_options;
+
+po::variables_map parseOptions(int argc, char* argv[]) {
+ po::options_description desc;
+ desc.add_options()("help,h", "produce help message")(
+ "data_csv_path", po::value()->default_value("imuAndGPSdata.csv"),
+ "path to the CSV file with the IMU data")(
+ "output_filename",
+ po::value()->default_value("imuFactorExampleResults.csv"),
+ "path to the result file to use")("use_isam", po::bool_switch(),
+ "use ISAM as the optimizer");
+
+ po::variables_map vm;
+ po::store(po::parse_command_line(argc, argv, desc), vm);
+
+ if (vm.count("help")) {
+ cout << desc << "\n";
+ exit(1);
+ }
+
+ return vm;
+}
+
+boost::shared_ptr