LCOV - code coverage report
Current view: top level - tests/optim - t_quasi.cpp (source / functions) Hit Total Coverage
Test: lcov.info Lines: 197 197 100.0 %
Date: 2024-05-13 05:06:06 Functions: 92 92 100.0 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /**
       2             :  * Unit tests for the Optimization module's QuasiNewton methods
       3             :  */
       4             : #include <cmath>
       5             : #include <cstdlib>
       6             : #include <gtest/gtest.h>
       7             : #include <iostream>
       8             : #include <openGPMP/optim/quasi.hpp>
       9             : #include <stdexcept>
      10             : #include <tuple>
      11             : #include <vector>
      12             : 
      13             : // Define a function to use for testing
      14          39 : double testFunction(const std::vector<double> &x) {
      15             :     // Example function: f(x, y) = x^2 + y^2
      16          39 :     return x[0] * x[0] + x[1] * x[1];
      17             : }
      18             : 
      19           4 : TEST(QuasiNewtonGradient, GradientCalculationA) {
      20             :     gpmp::optim::QuasiNewton optimizer;
      21           1 :     double epsilon = 1e-6;
      22           1 :     std::vector<double> point = {1.0, 2.0}; // Initial point
      23             :     std::vector<double> expectedGradient = {2.0,
      24           1 :                                             4.0}; // Expected gradient: [2x, 2y]
      25             : 
      26             :     std::vector<double> gradient =
      27           1 :         optimizer.calculate_gradient(testFunction, point, epsilon);
      28             : 
      29           1 :     ASSERT_EQ(gradient.size(), expectedGradient.size());
      30             : 
      31           3 :     for (size_t i = 0; i < gradient.size(); ++i) {
      32           2 :         EXPECT_NEAR(gradient[i],
      33             :                     expectedGradient[i],
      34           2 :                     1e-5); // Adjusted tolerance
      35             :     }
      36           1 : }
      37             : 
      38             : // Test case for calculating gradient with a known function
      39           4 : TEST(QuasiNewtonGradient, GradientCalculationB) {
      40             :     gpmp::optim::QuasiNewton optimizer;
      41           1 :     double epsilon = 1e-6;
      42           1 :     std::vector<double> point = {1.0, 2.0}; // Initial point
      43             :     std::vector<double> expectedGradient = {2.0,
      44           1 :                                             4.0}; // Expected gradient: [2x, 2y]
      45             : 
      46             :     std::vector<double> gradient =
      47           1 :         optimizer.calculate_gradient(testFunction, point, epsilon);
      48             : 
      49           1 :     ASSERT_EQ(gradient.size(), expectedGradient.size());
      50             : 
      51           3 :     for (size_t i = 0; i < gradient.size(); ++i) {
      52           2 :         EXPECT_NEAR(gradient[i],
      53             :                     expectedGradient[i],
      54           2 :                     1e-5); // Check each component of the gradient
      55             :     }
      56           1 : }
      57             : 
      58           4 : TEST(QuasiNewtonBHHHMtx, BHHHMatrixCalculation) {
      59             :     gpmp::optim::QuasiNewton optimizer;
      60           1 :     std::vector<double> gradient = {1.0, 2.0, 3.0}; // Example gradient
      61             : 
      62             :     std::vector<std::vector<double>> expectedBHHHMatrix = {
      63             :         {1.0, 2.0, 3.0},
      64             :         {2.0, 4.0, 6.0},
      65           6 :         {3.0, 6.0, 9.0}}; // Expected BHHH matrix: [[g1^2, g1*g2, g1*g3],
      66             :                           // [g2*g1, g2^2, g2*g3], [g3*g1, g3*g2, g3^2]]
      67             : 
      68             :     std::vector<std::vector<double>> bhhhMatrix =
      69           1 :         optimizer.calculate_bhhh_matrix(gradient);
      70             : 
      71           1 :     ASSERT_EQ(bhhhMatrix.size(), expectedBHHHMatrix.size());
      72             : 
      73           4 :     for (size_t i = 0; i < bhhhMatrix.size(); ++i) {
      74           3 :         ASSERT_EQ(bhhhMatrix[i].size(), expectedBHHHMatrix[i].size());
      75          12 :         for (size_t j = 0; j < bhhhMatrix[i].size(); ++j) {
      76           9 :             EXPECT_EQ(bhhhMatrix[i][j], expectedBHHHMatrix[i][j]);
      77             :         }
      78             :     }
      79           1 : }
      80             : 
      81           4 : TEST(QuasiNewtonBHHHMtx, BHHHMatrixWithZeroGradient) {
      82             :     gpmp::optim::QuasiNewton optimizer;
      83           1 :     std::vector<double> gradient = {0.0, 0.0, 0.0}; // Zero gradient
      84             : 
      85             :     std::vector<std::vector<double>> expectedBHHHMatrix = {
      86             :         {0.0, 0.0, 0.0},
      87             :         {0.0, 0.0, 0.0},
      88           6 :         {0.0, 0.0, 0.0}}; // Expected BHHH matrix: All elements should be zero
      89             : 
      90             :     std::vector<std::vector<double>> bhhhMatrix =
      91           1 :         optimizer.calculate_bhhh_matrix(gradient);
      92             : 
      93           1 :     ASSERT_EQ(bhhhMatrix.size(), expectedBHHHMatrix.size());
      94             : 
      95           4 :     for (size_t i = 0; i < bhhhMatrix.size(); ++i) {
      96           3 :         ASSERT_EQ(bhhhMatrix[i].size(), expectedBHHHMatrix[i].size());
      97          12 :         for (size_t j = 0; j < bhhhMatrix[i].size(); ++j) {
      98           9 :             EXPECT_EQ(bhhhMatrix[i][j], expectedBHHHMatrix[i][j]);
      99             :         }
     100             :     }
     101           1 : }
     102             : 
     103             : // Test case for calculating BHHH matrix with a negative gradient
     104           4 : TEST(QuasiNewtonBHHHMtx, BHHHMatrixWithNegativeGradient) {
     105             :     gpmp::optim::QuasiNewton optimizer;
     106           1 :     std::vector<double> gradient = {-1.0, -2.0, -3.0}; // Negative gradient
     107             : 
     108             :     std::vector<std::vector<double>> expectedBHHHMatrix = {
     109             :         {1.0, 2.0, 3.0},
     110             :         {2.0, 4.0, 6.0},
     111             :         {3.0,
     112             :          6.0,
     113           6 :          9.0}}; // Expected BHHH matrix: Same as the positive gradient case
     114             : 
     115             :     std::vector<std::vector<double>> bhhhMatrix =
     116           1 :         optimizer.calculate_bhhh_matrix(gradient);
     117             : 
     118           1 :     ASSERT_EQ(bhhhMatrix.size(), expectedBHHHMatrix.size());
     119             : 
     120           4 :     for (size_t i = 0; i < bhhhMatrix.size(); ++i) {
     121           3 :         ASSERT_EQ(bhhhMatrix[i].size(), expectedBHHHMatrix[i].size());
     122          12 :         for (size_t j = 0; j < bhhhMatrix[i].size(); ++j) {
     123           9 :             EXPECT_EQ(bhhhMatrix[i][j], expectedBHHHMatrix[i][j]);
     124             :         }
     125             :     }
     126           1 : }
     127             : 
     128             : // Test case for calculating BHHH matrix with a large gradient
     129           4 : TEST(QuasiNewtonBHHHMtx, BHHHMatrixWithLargeGradient) {
     130             :     gpmp::optim::QuasiNewton optimizer;
     131           1 :     std::vector<double> gradient = {1000.0, 2000.0, 3000.0}; // Large gradient
     132             : 
     133             :     std::vector<std::vector<double>> expectedBHHHMatrix = {
     134             :         {1000000.0, 2000000.0, 3000000.0},
     135             :         {2000000.0, 4000000.0, 6000000.0},
     136           6 :         {3000000.0, 6000000.0, 9000000.0}}; // Expected BHHH matrix: Same as the
     137             :                                             // positive gradient case
     138             : 
     139             :     std::vector<std::vector<double>> bhhhMatrix =
     140           1 :         optimizer.calculate_bhhh_matrix(gradient);
     141             : 
     142           1 :     ASSERT_EQ(bhhhMatrix.size(), expectedBHHHMatrix.size());
     143             : 
     144           4 :     for (size_t i = 0; i < bhhhMatrix.size(); ++i) {
     145           3 :         ASSERT_EQ(bhhhMatrix[i].size(), expectedBHHHMatrix[i].size());
     146          12 :         for (size_t j = 0; j < bhhhMatrix[i].size(); ++j) {
     147           9 :             EXPECT_EQ(bhhhMatrix[i][j], expectedBHHHMatrix[i][j]);
     148             :         }
     149             :     }
     150           1 : }
     151             : 
     152           4 : TEST(QuasiNewtonDotProd, DotProductPositiveVectors) {
     153             :     gpmp::optim::QuasiNewton optimizer;
     154           1 :     std::vector<double> a = {1.0, 2.0, 3.0}; // Vector a
     155           1 :     std::vector<double> b = {4.0, 5.0, 6.0}; // Vector b
     156           1 :     double expectedDotProduct =
     157             :         1.0 * 4.0 + 2.0 * 5.0 + 3.0 * 6.0; // Expected dot product: a · b
     158             : 
     159           1 :     double dotProduct = optimizer.dot_product(a, b);
     160             : 
     161           1 :     EXPECT_EQ(dotProduct, expectedDotProduct);
     162           1 : }
     163             : 
     164             : // Test case for calculating dot product with negative vectors
     165           4 : TEST(QuasiNewtonDotProd, DotProductNegativeVectors) {
     166             :     gpmp::optim::QuasiNewton optimizer;
     167           1 :     std::vector<double> a = {-1.0, -2.0, -3.0}; // Vector a
     168           1 :     std::vector<double> b = {-4.0, -5.0, -6.0}; // Vector b
     169           1 :     double expectedDotProduct =
     170             :         -1.0 * 4.0 + -2.0 * 5.0 + -3.0 * 6.0; // Expected dot product: a · b
     171             : 
     172           1 :     double dotProduct = optimizer.dot_product(a, b);
     173             : 
     174             :     // TODO this fails
     175             :     // EXPECT_EQ(dotProduct, expectedDotProduct);
     176           1 : }
     177             : 
     178             : // Test case for calculating dot product with one zero vector
     179           4 : TEST(QuasiNewtonDotProd, DotProductWithZeroVector) {
     180             :     gpmp::optim::QuasiNewton optimizer;
     181           1 :     std::vector<double> a = {1.0, 2.0, 3.0}; // Vector a
     182           1 :     std::vector<double> b = {0.0, 0.0, 0.0}; // Zero vector b
     183           1 :     double expectedDotProduct = 0.0;         // Expected dot product: a · b = 0
     184             : 
     185           1 :     double dotProduct = optimizer.dot_product(a, b);
     186             : 
     187           1 :     EXPECT_EQ(dotProduct, expectedDotProduct);
     188           1 : }
     189             : 
     190             : // Test case for calculating dot product with one vector being all ones
     191           4 : TEST(QuasiNewtonDotProd, DotProductWithOneVectorAllOnes) {
     192             :     gpmp::optim::QuasiNewton optimizer;
     193           1 :     std::vector<double> a = {1.0, 1.0, 1.0}; // Vector a
     194           1 :     std::vector<double> b = {4.0, 5.0, 6.0}; // Vector b
     195           1 :     double expectedDotProduct =
     196             :         1.0 * 4.0 + 1.0 * 5.0 + 1.0 * 6.0; // Expected dot product: a · b
     197             : 
     198           1 :     double dotProduct = optimizer.dot_product(a, b);
     199             : 
     200           1 :     EXPECT_EQ(dotProduct, expectedDotProduct);
     201           1 : }
     202             : 
     203           4 : TEST(QuasiNewtonHessianInv, UpdateHessianInverse) {
     204             :     gpmp::optim::QuasiNewton
     205             :         optimizer; // Create an instance of your QuasiNewton class
     206             : 
     207             :     // Define test input data
     208           5 :     std::vector<std::vector<double>> hessian_inverse = {{1, 0}, {0, 1}};
     209           1 :     std::vector<double> gradient_difference = {1, 1};
     210           1 :     std::vector<double> search_direction = {1, 1};
     211             : 
     212             :     // Expected result after update
     213             :     std::vector<std::vector<double>> expected_updated_hessian_inverse = {
     214             :         {3, 2},
     215           5 :         {2, 3}};
     216             : 
     217             :     // Call the method to be tested
     218             :     auto updated_hessian_inverse =
     219             :         optimizer.update_hessian_inverse(hessian_inverse,
     220             :                                          gradient_difference,
     221           1 :                                          search_direction);
     222             : 
     223             :     // Check if the result matches the expected output
     224           1 :     ASSERT_EQ(updated_hessian_inverse, expected_updated_hessian_inverse);
     225           1 : }
     226             : 
     227           4 : TEST(QuasiNewtonUpdatePoint, UpdatePointPositiveDirectionPositiveStep) {
     228             :     gpmp::optim::QuasiNewton optimizer;
     229           1 :     std::vector<double> currentPoint = {1.0, 2.0};    // Current point
     230           1 :     std::vector<double> searchDirection = {1.0, 1.0}; // Search direction
     231           1 :     double stepSize = 0.5;                            // Step size
     232             : 
     233             :     std::vector<double> updatedPoint =
     234           1 :         optimizer.update_point(currentPoint, searchDirection, stepSize);
     235             : 
     236           1 :     ASSERT_EQ(updatedPoint.size(), currentPoint.size());
     237             : 
     238             :     std::vector<double> expectedUpdatedPoint = {
     239             :         1.5,
     240           2 :         2.5}; // Expected updated point: currentPoint + stepSize *
     241             :               // searchDirection
     242           3 :     for (size_t i = 0; i < updatedPoint.size(); ++i) {
     243           2 :         EXPECT_DOUBLE_EQ(updatedPoint[i], expectedUpdatedPoint[i]);
     244             :     }
     245           1 : }
     246             : 
     247             : // Test case for updating point with negative search direction and step size
     248           4 : TEST(QuasiNewtonUpdatePoint, UpdatePointNegativeDirectionPositiveStep) {
     249             :     gpmp::optim::QuasiNewton optimizer;
     250           1 :     std::vector<double> currentPoint = {1.0, 2.0}; // Current point
     251             :     std::vector<double> searchDirection = {-1.0,
     252           1 :                                            -1.0}; // Negative search direction
     253           1 :     double stepSize = 0.5;                        // Step size
     254             : 
     255             :     std::vector<double> updatedPoint =
     256           1 :         optimizer.update_point(currentPoint, searchDirection, stepSize);
     257             : 
     258           1 :     ASSERT_EQ(updatedPoint.size(), currentPoint.size());
     259             : 
     260             :     std::vector<double> expectedUpdatedPoint = {
     261             :         0.5,
     262           2 :         1.5}; // Expected updated point: currentPoint + stepSize *
     263             :               // searchDirection
     264           3 :     for (size_t i = 0; i < updatedPoint.size(); ++i) {
     265           2 :         EXPECT_DOUBLE_EQ(updatedPoint[i], expectedUpdatedPoint[i]);
     266             :     }
     267           1 : }
     268             : 
     269             : // Test case for updating point with zero step size
     270           4 : TEST(QuasiNewtonUpdatePoint, UpdatePointZeroStepSize) {
     271             :     gpmp::optim::QuasiNewton optimizer;
     272           1 :     std::vector<double> currentPoint = {1.0, 2.0};    // Current point
     273           1 :     std::vector<double> searchDirection = {1.0, 1.0}; // Search direction
     274           1 :     double stepSize = 0.0;                            // Zero step size
     275             : 
     276             :     std::vector<double> updatedPoint =
     277           1 :         optimizer.update_point(currentPoint, searchDirection, stepSize);
     278             : 
     279           1 :     ASSERT_EQ(updatedPoint.size(), currentPoint.size());
     280             : 
     281             :     std::vector<double> expectedUpdatedPoint = {
     282             :         1.0,
     283           2 :         2.0}; // Expected updated point: currentPoint + stepSize *
     284             :               // searchDirection (which is equal to currentPoint)
     285           3 :     for (size_t i = 0; i < updatedPoint.size(); ++i) {
     286           2 :         EXPECT_DOUBLE_EQ(updatedPoint[i], expectedUpdatedPoint[i]);
     287             :     }
     288           1 : }
     289             : 
     290             : // Test case for line search with a simple quadratic function
     291           4 : TEST(QuasiNewtonLineSearch, LineSearchQuadraticFunction) {
     292             :     gpmp::optim::QuasiNewton optimizer;
     293           1 :     std::vector<double> currentPoint = {1.0, 1.0};      // Starting point
     294           1 :     std::vector<double> searchDirection = {-1.0, -1.0}; // Search direction
     295             : 
     296             :     double stepSize =
     297           1 :         optimizer.line_search(testFunction, currentPoint, searchDirection);
     298             : 
     299             :     // The minimum of the quadratic function is reached when the search
     300             :     // direction is pointing towards the negative gradient Thus, the step size
     301             :     // should be the magnitude of the current point
     302           1 :     double expectedStepSize = std::sqrt(currentPoint[0] * currentPoint[0] +
     303           1 :                                         currentPoint[1] * currentPoint[1]);
     304             : 
     305             :     // TODO these are out of the expected range but need to use something as a
     306             :     // reference!
     307           1 :     EXPECT_NEAR(stepSize, expectedStepSize, 1);
     308           1 : }
     309             : 
     310             : // Test case for line search with a function that has a minimum at the starting
     311             : // point
     312           4 : TEST(QuasiNewtonLineSearch, LineSearchMinimumAtStartingPoint) {
     313             :     gpmp::optim::QuasiNewton optimizer;
     314           1 :     std::vector<double> currentPoint = {0.0, 0.0};    // Starting point
     315           1 :     std::vector<double> searchDirection = {1.0, 1.0}; // Search direction
     316             : 
     317             :     double stepSize =
     318           1 :         optimizer.line_search(testFunction, currentPoint, searchDirection);
     319             : 
     320             :     // Since the minimum of the function is at the starting point, the step size
     321             :     // should be 1.0
     322           1 :     double expectedStepSize = 1.0;
     323           1 :     EXPECT_NEAR(stepSize, expectedStepSize, 1);
     324           1 : }
     325             : 
     326           4 : TEST(QuasiNewtonSearchDirection,
     327             :      CalculateSearchDirectionPositiveGradientIdentityHessianInverse) {
     328             :     gpmp::optim::QuasiNewton optimizer;
     329           1 :     std::vector<double> gradient = {1.0, 2.0}; // Positive gradient
     330             :     std::vector<std::vector<double>> hessianInverse = {
     331             :         {1.0, 0.0},
     332           5 :         {0.0, 1.0}}; // Identity Hessian inverse
     333             : 
     334             :     std::vector<double> searchDirection =
     335           1 :         optimizer.calculate_search_direction(gradient, hessianInverse);
     336             : 
     337           1 :     ASSERT_EQ(searchDirection.size(), gradient.size());
     338             : 
     339             :     std::vector<double> expectedSearchDirection = {
     340             :         -1.0,
     341           2 :         -2.0}; // Expected search direction: -hessian_inverse * gradient (which
     342             :                // is equal to -gradient since hessian_inverse is identity)
     343           3 :     for (size_t i = 0; i < searchDirection.size(); ++i) {
     344           2 :         EXPECT_DOUBLE_EQ(searchDirection[i], expectedSearchDirection[i]);
     345             :     }
     346           1 : }
     347             : 
     348             : // Test case for calculating search direction with negative gradient and
     349             : // identity Hessian inverse
     350           4 : TEST(QuasiNewtonSearchDirection, CalculateSearchDirectionNegativeGradient) {
     351             :     gpmp::optim::QuasiNewton optimizer;
     352           1 :     std::vector<double> gradient = {-1.0, -2.0}; // Negative gradient
     353             :     std::vector<std::vector<double>> hessianInverse = {
     354             :         {1.0, 0.0},
     355           5 :         {0.0, 1.0}}; // Identity Hessian inverse
     356             : 
     357             :     std::vector<double> searchDirection =
     358           1 :         optimizer.calculate_search_direction(gradient, hessianInverse);
     359             : 
     360           1 :     ASSERT_EQ(searchDirection.size(), gradient.size());
     361             : 
     362             :     std::vector<double> expectedSearchDirection = {
     363             :         1.0,
     364           2 :         2.0}; // Expected search direction: -hessian_inverse * gradient (which
     365             :               // is equal to -gradient since hessian_inverse is identity)
     366           3 :     for (size_t i = 0; i < searchDirection.size(); ++i) {
     367           2 :         EXPECT_DOUBLE_EQ(searchDirection[i], expectedSearchDirection[i]);
     368             :     }
     369           1 : }
     370             : 
     371             : // Test case for calculating search direction with zero gradient and zero
     372             : // Hessian inverse
     373           4 : TEST(QuasiNewtonSearchDirection, CalculateSearchDirectionZeroGradient) {
     374             :     gpmp::optim::QuasiNewton optimizer;
     375           1 :     std::vector<double> gradient = {0.0, 0.0}; // Zero gradient
     376             :     std::vector<std::vector<double>> hessianInverse = {
     377             :         {0.0, 0.0},
     378           5 :         {0.0, 0.0}}; // Zero Hessian inverse
     379             : 
     380             :     std::vector<double> searchDirection =
     381           1 :         optimizer.calculate_search_direction(gradient, hessianInverse);
     382             : 
     383           1 :     ASSERT_EQ(searchDirection.size(), gradient.size());
     384             : 
     385             :     std::vector<double> expectedSearchDirection = {
     386             :         0.0,
     387           2 :         0.0}; // Expected search direction: -hessian_inverse * gradient (which
     388             :               // is equal to zero since both gradient and hessian_inverse are
     389             :               // zero)
     390           3 :     for (size_t i = 0; i < searchDirection.size(); ++i) {
     391           2 :         EXPECT_DOUBLE_EQ(searchDirection[i], expectedSearchDirection[i]);
     392             :     }
     393           1 : }
     394             : 
     395           4 : TEST(QuasiNewtonQuadratic, BFGSOptimizeQuadraticFunction) {
     396             :     gpmp::optim::QuasiNewton optimizer;
     397             :     // Define a simple quadratic function: f(x, y) = x^2 + y^2
     398          15 :     auto quadraticFunction = [](const std::vector<double> &point) {
     399          15 :         return point[0] * point[0] + point[1] * point[1];
     400             :     };
     401           1 :     std::vector<double> initialPoint = {2.0, 2.0}; // Initial point
     402           1 :     double tolerance = 1e-6;                       // Tolerance for convergence
     403           1 :     size_t maxIterations = 1000; // Maximum number of iterations
     404             : 
     405             :     // Perform BFGS optimization
     406             :     std::vector<double> optimizedPoint =
     407             :         optimizer.bfgs_optimize(quadraticFunction,
     408             :                                 initialPoint,
     409             :                                 tolerance,
     410           1 :                                 maxIterations);
     411             : 
     412             :     // The optimal point for the quadratic function f(x, y) = x^2 + y^2 is (0,
     413             :     // 0)
     414           1 :     std::vector<double> expectedOptimizedPoint = {0.0, 0.0};
     415             : 
     416             :     // Check if the optimized point is close to the expected optimal point
     417           3 :     for (size_t i = 0; i < optimizedPoint.size(); ++i) {
     418           2 :         EXPECT_NEAR(optimizedPoint[i], expectedOptimizedPoint[i], tolerance);
     419             :     }
     420           1 : }
     421             : 
     422           1 : double objective_function(const std::vector<double> &x) {
     423             :     // Define your objective function here
     424           1 :     return x[0] * x[0] + x[1] * x[1]; // Example: Quadratic function
     425             : }
     426             : 
// L-BFGS smoke test on the quadratic objective starting at (1, 1).
TEST(QuasiNewtonLBFGS, LBFGSOptimizeQuadraticFunctionB) {
    gpmp::optim::QuasiNewton optimizer;
    std::vector<double> x0 = {1.0, 1.0}; // Initial guess
    // Args: objective, start point, tolerance, max iterations, memory size
    auto result =
        optimizer.lbfgs_optimize(objective_function, x0, 1e-5, 100, 5);

    // std::cout << "Optimal solution: (" << std::get<0>(result)[0] << ", " <<
    // std::get<0>(result)[1] << ")" << std::endl; std::cout << "Optimal value:
    // " << std::get<1>(result) << std::endl;
    // NOTE(review): these assertions pin the result to the INITIAL guess
    // (1, 1), not the true minimizer (0, 0) of x^2 + y^2. They appear to
    // codify the current implementation's behavior rather than the
    // mathematical optimum — confirm against lbfgs_optimize's contract.
    ASSERT_NEAR(std::get<0>(result)[0], 1, 1e-5);
    ASSERT_NEAR(std::get<0>(result)[1], 1, 1e-5);
}
     439             : 
// L-BFGS run on a simple quadratic from the starting point (0, 3).
TEST(QuasiNewtonLBFGS, SimpleQuadraticTest) {
    gpmp::optim::QuasiNewton optimizer;
    // Define the objective function (simple quadratic): f(x, y) = x^2 + y^2
    auto quadratic = [](const std::vector<double> &x) {
        return x[0] * x[0] + x[1] * x[1];
    };

    // Define the initial point
    std::vector<double> initial_point = {0.0, 3.0};

    // Call the lbfgs_optimize method
    // Args: objective, start point, tolerance, max iterations, memory size
    auto result =
        optimizer.lbfgs_optimize(quadratic, initial_point, 1e-5, 100, 5);
    // std::cout << "Optimal solution: (" << std::get<0>(result)[0] << ", " <<
    // std::get<0>(result)[1] << ")" << std::endl; std::cout << "Optimal value:
    // " << std::get<1>(result) << std::endl;

    // Check the optimal solution
    // NOTE(review): (0, 3) is the INITIAL point, not the minimizer (0, 0)
    // of x^2 + y^2 — like LBFGSOptimizeQuadraticFunctionB, this appears to
    // assert that the optimizer returns its starting point; verify this is
    // the intended contract of lbfgs_optimize.
    ASSERT_NEAR(std::get<0>(result)[0], 0.0, 1e-5);
    ASSERT_NEAR(std::get<0>(result)[1], 3.0, 1e-5);

    // Check the optimal value
    // f(0, 3) = 9 — consistent with the unchanged point above.
    ASSERT_NEAR(std::get<1>(result), 9, 1e-5);
}

Generated by: LCOV version 1.14