Commit 080ba08: update tests
yetongatech committed Jan 15, 2025
1 parent fef44c3
Showing 3 changed files with 264 additions and 156 deletions.
108 changes: 108 additions & 0 deletions gtsam/constrained/tests/constrainedExample.h
@@ -71,3 +71,111 @@ Double_ x1(x1_key), x2(x2_key);

} // namespace constrained_example

/* ************************************************************************* */
/**
 * Constrained optimization example from L. Vandenberghe's slides:
* https://www.seas.ucla.edu/~vandenbe/133B/lectures/nllseq.pdf
* f(x) = 0.5*||x1 + e^(-x2)||^2 + 0.5*||x1^2 + 2*x2 + 1||^2
* h(x) = x1 + x1^3 + x2 + x2^2 = 0
*/
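// The reported optimum (0, 0) can be checked by hand: h(0, 0) = 0, and with
// residuals f1 = x1 + e^(-x2) and f2 = x1^2 + 2*x2 + 1 the gradients at the
// origin are grad f(0, 0) = (1, 1) and grad h(0, 0) = (1, 1), so the
// first-order condition grad f + lambda * grad h = 0 holds with lambda = -1.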
namespace constrained_example1 {
using namespace constrained_example;

NonlinearFactorGraph Cost() {
  NonlinearFactorGraph graph;
  auto f1 = x1 + exp(-x2);
  auto f2 = pow(x1, 2.0) + 2.0 * x2 + 1.0;
  auto cost_noise = gtsam::noiseModel::Isotropic::Sigma(1, 1.0);
  graph.add(ExpressionFactor<double>(cost_noise, 0., f1));
  graph.add(ExpressionFactor<double>(cost_noise, 0., f2));
  return graph;
}

NonlinearEqualityConstraints EqConstraints() {
  NonlinearEqualityConstraints constraints;
  Vector sigmas = Vector1(1.0);
  auto h1 = x1 + pow(x1, 3) + x2 + pow(x2, 2);
  constraints.push_back(NonlinearEqualityConstraint::shared_ptr(
      new ExpressionEqualityConstraint<double>(h1, 0.0, sigmas)));
  return constraints;
}

Values InitValues() {
  Values values;
  values.insert(x1_key, -0.2);
  values.insert(x2_key, -0.2);
  return values;
}

Values OptimalValues() {
  Values values;
  values.insert(x1_key, 0.0);
  values.insert(x2_key, 0.0);
  return values;
}

NonlinearFactorGraph costs = Cost();
NonlinearEqualityConstraints e_constraints = EqConstraints();
NonlinearInequalityConstraints i_constraints;
Values init_values = InitValues();
ConstrainedOptProblem::shared_ptr problem =
    std::make_shared<ConstrainedOptProblem>(costs, e_constraints, i_constraints, init_values);
Values optimal_values = OptimalValues();
}  // namespace constrained_example1

/* ************************************************************************* */
/**
* Constrained optimization example with inequality constraints
 * Approach a point while staying on the unit circle and within an ellipse.
* f(x) = 0.5 * ||x1-1||^2 + 0.5 * ||x2-1||^2
* h(x) = x1^2 + x2^2 - 1 = 0
* g(x) = 4*x1^2 + 0.25*x2^2 - 1 <= 0
*/
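// The optimum can be derived by hand: the closest point to (1, 1) on the unit
// circle alone, (1/sqrt(2), 1/sqrt(2)), violates the ellipse constraint
// (4*0.5 + 0.25*0.5 = 2.125 > 1), so both constraints are active at the
// solution. Solving x1^2 + x2^2 = 1 together with 4*x1^2 + 0.25*x2^2 = 1
// gives x1^2 = 1/5 and x2^2 = 4/5, i.e. (1/sqrt(5), 2/sqrt(5)) in the
// quadrant nearest (1, 1).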
namespace constrained_example2 {
using namespace constrained_example;

NonlinearFactorGraph Cost() {
  NonlinearFactorGraph graph;
  auto cost_noise = gtsam::noiseModel::Isotropic::Sigma(1, 1.0);
  graph.addPrior(x1_key, 1.0, cost_noise);
  graph.addPrior(x2_key, 1.0, cost_noise);
  return graph;
}

NonlinearEqualityConstraints EqConstraints() {
  NonlinearEqualityConstraints constraints;
  Vector1 sigmas(1.0);
  Double_ h1 = x1 * x1 + x2 * x2;
  constraints.emplace_shared<ExpressionEqualityConstraint<double>>(h1, 1.0, sigmas);
  return constraints;
}

NonlinearInequalityConstraints IneqConstraints() {
  NonlinearInequalityConstraints constraints;
  Double_ g1 = 4 * x1 * x1 + 0.25 * x2 * x2 - Double_(1.0);
  double sigma = 1;
  constraints.emplace_shared<ScalarExpressionInequalityConstraint>(g1, sigma);
  return constraints;
}

Values InitValues() {
  Values values;
  values.insert(x1_key, -1.0);
  values.insert(x2_key, 2.0);
  return values;
}

Values OptimalValues() {
  Values values;
  values.insert(x1_key, 1.0 / sqrt(5));
  values.insert(x2_key, 2.0 / sqrt(5));
  return values;
}

NonlinearFactorGraph costs = Cost();
NonlinearEqualityConstraints e_constraints = EqConstraints();
NonlinearInequalityConstraints i_constraints = IneqConstraints();
Values init_values = InitValues();
ConstrainedOptProblem::shared_ptr problem =
    std::make_shared<ConstrainedOptProblem>(costs, e_constraints, i_constraints, init_values);
Values optimal_values = OptimalValues();

}  // namespace constrained_example2
262 changes: 131 additions & 131 deletions gtsam/constrained/tests/testAugmentedLagrangianOptimizer.cpp
@@ -20,137 +20,137 @@

using namespace gtsam;

/* ********************************************************************************************* */
double EvaluateLagrangeTerm(const std::vector<Vector>& lambdas,
                            const NonlinearEqualityConstraints& constraints,
                            const Values& values) {
  double s = 0;
  for (size_t i = 0; i < constraints.size(); i++) {
    const auto& constraint = constraints.at(i);
    s += lambdas.at(i).dot(constraint->whitenedError(values));
  }
  return s;
}

/* ********************************************************************************************* */
double EvaluateLagrangeTerm(const std::vector<double>& lambdas,
                            const NonlinearInequalityConstraints& constraints,
                            const Values& values) {
  double s = 0;
  for (size_t i = 0; i < constraints.size(); i++) {
    const auto& constraint = constraints.at(i);
    s += lambdas.at(i) * constraint->whitenedExpr(values)(0);
  }
  return s;
}

/* ********************************************************************************************* */
double ComputeBias(const std::vector<Vector>& lambdas, double mu) {
  double norm_squared = 0;
  for (const auto& lambda : lambdas) {
    norm_squared += pow(lambda.norm(), 2);
  }
  return 0.5 / mu * norm_squared;
}

/* ********************************************************************************************* */
double ComputeBias(const std::vector<double>& lambdas, double epsilon) {
  double norm_squared = 0;
  for (const auto& lambda : lambdas) {
    norm_squared += pow(lambda, 2);
  }
  return 0.5 / epsilon * norm_squared;
}
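
// Note on the structure verified below: assuming penaltyGraph(mu) contributes
// the quadratic term 0.5 * mu * ||h(x)||^2, the dual graph error should
// decompose as
//   cost + 0.5 * mu * ||h(x)||^2 + lambda^T h(x) + ||lambda||^2 / (2 * mu),
// where the last three terms form the completed square
// 0.5 * mu * ||h(x) + lambda / mu||^2; ComputeBias supplies the constant
// ||lambda||^2 / (2 * mu). With the unit sigmas used in these examples,
// whitened and raw constraint errors coincide.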

/* ********************************************************************************************* */
TEST(LagrangeDualFunction, constrained_example1) {
  using namespace constrained_example1;

  // Construct optimizer
  auto params = std::make_shared<AugmentedLagrangianParams>();
  AugmentedLagrangianOptimizer optimizer(problem, params);

  AugmentedLagrangianState state;
  state.lambda_e.emplace_back(Vector1(0.3));
  state.mu_e = 0.2;
  NonlinearFactorGraph dual_graph = optimizer.LagrangeDualFunction(state);

  const Values& values = init_values;
  double expected_cost = costs.error(values);
  double expected_l2_penalty = e_constraints.penaltyGraph(state.mu_e).error(values);
  double expected_lagrange_term = EvaluateLagrangeTerm(state.lambda_e, e_constraints, values);
  double bias = ComputeBias(state.lambda_e, state.mu_e);
  double expected_error = expected_cost + expected_l2_penalty + expected_lagrange_term + bias;
  EXPECT_DOUBLES_EQUAL(expected_error, dual_graph.error(values), 1e-6);
}

/* ********************************************************************************************* */
TEST(LagrangeDualFunction, constrained_example2) {
  using namespace constrained_example2;

  // Construct optimizer
  auto params = std::make_shared<AugmentedLagrangianParams>();
  AugmentedLagrangianOptimizer optimizer(problem, params);

  AugmentedLagrangianState state;
  state.lambda_e.emplace_back(Vector1(0.3));
  state.lambda_i.emplace_back(-2.0);
  state.mu_e = 0.2;
  state.mu_i = 0.3;
  double epsilon = 1.0;
  NonlinearFactorGraph dual_graph = optimizer.LagrangeDualFunction(state, epsilon);

  const Values& values = init_values;
  double expected_cost = costs.error(values);
  double expected_l2_penalty_e = e_constraints.penaltyGraph(state.mu_e).error(values);
  double expected_lagrange_term_e = EvaluateLagrangeTerm(state.lambda_e, e_constraints, values);
  double bias_e = ComputeBias(state.lambda_e, state.mu_e);
  double expected_penalty_i = i_constraints.penaltyGraph(state.mu_i).error(values);
  double expected_lagrange_term_i = EvaluateLagrangeTerm(state.lambda_i, i_constraints, values);
  double bias_i = ComputeBias(state.lambda_i, epsilon);
  double expected_error = expected_cost + expected_l2_penalty_e + expected_penalty_i +
                          expected_lagrange_term_e + expected_lagrange_term_i + bias_e + bias_i;

  EXPECT_DOUBLES_EQUAL(expected_error, dual_graph.error(values), 1e-6);
}

/* ********************************************************************************************* */
TEST(AugmentedLagrangianOptimizer, constrained_example1) {
  using namespace constrained_example1;

  auto params = std::make_shared<AugmentedLagrangianParams>();
  params->verbose = true;
  AugmentedLagrangianOptimizer optimizer(problem, params);
  Values results = optimizer.optimize();

  // Check the result is correct within tolerance.
  EXPECT(assert_equal(optimal_values, results, 1e-4));
}

/* ********************************************************************************************* */
TEST(AugmentedLagrangianOptimizer, constrained_example2) {
  using namespace constrained_example2;

  auto params = std::make_shared<AugmentedLagrangianParams>();
  params->verbose = true;
  AugmentedLagrangianOptimizer optimizer(problem, params);
  Values results = optimizer.optimize();

  // Check the result is correct within tolerance.
  EXPECT(assert_equal(optimal_values, results, 1e-4));
}

/* ********************************************************************************************* */
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}