// Copyright 2025, https://github.com/PatWie/CppNumericalSolvers

#include <iostream>
#include <limits>

#include "Eigen/Core"
#include "cppoptlib/function.h"
#include "cppoptlib/solver/augmented_lagrangian.h"
#include "cppoptlib/solver/lbfgs.h"
#include "cppoptlib/solver/lbfgsb.h"

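// This example fits a tiny two-observation linear regression under box
// constraints: first directly with L-BFGS-B, then by rephrasing the bounds
// as inequality constraints for an augmented Lagrangian solver.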
using namespace cppoptlib::function;

class LinearRegression : public FunctionXf<LinearRegression> {
 public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

  ScalarType operator()(const VectorType &x,
                        VectorType *gradient = nullptr) const {
    // Compute residuals for the two observations:
    //   Observation 1: beta1 + 2*beta2 - 4
    //   Observation 2: 3*beta1 + beta2 - 5
    ScalarType r1 = x[0] + 2 * x[1] - 4;
    ScalarType r2 = 3 * x[0] + x[1] - 5;

    // Compute the objective function: sum of squared errors.
    ScalarType f = r1 * r1 + r2 * r2;

    // Compute the gradient if requested. By the chain rule,
    //   df/dx_j = 2*r1*dr1/dx_j + 2*r2*dr2/dx_j,
    // which gives grad f = 2 * [ r1 + 3*r2, 2*r1 + r2 ].
    if (gradient) {
      gradient->resize(x.size());
      (*gradient)[0] = 2 * (r1 + 3 * r2);
      (*gradient)[1] = 2 * (2 * r1 + r2);
    }

    return f;
  }
};
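
// Note: solving r1 = r2 = 0 gives the unconstrained least-squares optimum
// (beta1, beta2) = (1.2, 1.4). Since beta1 = 1.2 violates the upper bound
// beta1 <= 1 imposed below, the constrained optimum sits on that boundary;
// minimizing f(1, beta2) over beta2 yields the expected [1, 1.6].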

// One-sided linear constraint g(x) = x[index] - lower_bound, interpreted
// as g(x) >= 0 when used as an inequality constraint.
class BoundConstraint : public FunctionXf<BoundConstraint> {
 public:
  int index;  // which coordinate is bounded (0 or 1)
  ScalarType lower_bound;

  BoundConstraint(int i, ScalarType bound) : index(i), lower_bound(bound) {}

  ScalarType operator()(const VectorType &x, VectorType *grad = nullptr) const {
    if (grad) {
      grad->setZero(x.size());  // resize if necessary, then zero
      (*grad)[index] = 1.0f;    // gradient of (x[index] - lower_bound)
    }
    return x[index] - lower_bound;
  }
};
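
// An upper bound x[i] <= b can be expressed with the same class by negating
// the expression: -(x[i] - b) = b - x[i] >= 0, which is what the
// -1 * BoundConstraint(...) terms in main() do.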

int main() {
  cppoptlib::solver::Lbfgsb<FunctionExprXf> solver;

  // The optimal solution is expected to be [1, 1.6] under the box constraints.
  FunctionExpr f = LinearRegression();

  // Either use L-BFGS-B
  // ---------------------------
  Eigen::VectorXf x(2);
  x << -1, 2;
  Eigen::VectorXf lb(2);
  lb << 0, 1;
  Eigen::VectorXf ub(2);
  ub << 1, 2;
  solver.SetBounds(lb, ub);

  const auto initial_state = cppoptlib::function::FunctionState(x);
  auto [solution, solver_state] = solver.Minimize(f, initial_state);
  std::cout << "argmin " << solution.x.transpose() << std::endl;

  // Or model it as an augmented Lagrangian
  // ------------------------------------------
  FunctionExpr lb0 = BoundConstraint(0, 0.0f);
  FunctionExpr lb1 = BoundConstraint(1, 1.0f);
  FunctionExpr ub0 = -1 * BoundConstraint(0, 1.0f);
  FunctionExpr ub1 = -1 * BoundConstraint(1, 2.0f);

  ConstrainedOptimizationProblem prob(f,
                                      /* equality constraints */ {},
                                      /* inequality constraints */
                                      {
                                          lb0, /* x[0] - 0 >= 0 */
                                          lb1, /* x[1] - 1 >= 0 */
                                          ub0, /* 1 - x[0] >= 0 */
                                          ub1, /* 2 - x[1] >= 0 */
                                      });
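
  // The augmented Lagrangian method turns the constrained problem into a
  // sequence of unconstrained subproblems (solved here with L-BFGS),
  // updating multipliers and the penalty weight between outer iterations.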
  cppoptlib::solver::Lbfgs<FunctionExprXf> unconstrained_solver;
  cppoptlib::solver::AugmentedLagrangian aug_solver(prob, unconstrained_solver);
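  // Initial state: iterate x, multiplier storage for the 0 equality and
  // 4 inequality constraints defined above, and an initial penalty of 1.0
  // (this reading of the constructor arguments matches the problem setup).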
  cppoptlib::solver::AugmentedLagrangeState l_state(x, 0, 4, 1.0f);

  // Run the augmented Lagrangian solver.
  auto [aug_solution, aug_solver_state] = aug_solver.Minimize(prob, l_state);
  std::cout << "argmin " << aug_solution.x.transpose() << std::endl;
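
  // Both formulations should agree on the constrained minimizer,
  // approximately [1, 1.6].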

  return 0;
}