Commit

Grid accepts optimization lattice as Eigen::Matrix (previous was std::vector<DVector<double>>), other small adjustments
AlePalu committed Mar 10, 2024
1 parent f8132c8 commit e1058c0
Showing 5 changed files with 25 additions and 40 deletions.
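For orientation, here is a minimal before/after sketch of what this commit changes at the call site, using the same field and optimizer types as the updated test below. The snippet is illustrative, not part of the commit, and the umbrella header path is an assumption.

// #include <fdaPDE/optimization.h>   // assumed header path, for illustration only
ScalarField<2> f;
f = [](SVector<2> x) -> double { return x[0] * x[0] + x[1] * x[1]; };
// before: candidate points supplied as std::vector<SVector<2>>
// std::vector<SVector<2>> grid = {SVector<2>(0.0, 0.0), SVector<2>(0.2, 0.2)};
// after: candidate points supplied as an Eigen matrix, one point per row
Eigen::Matrix<double, Eigen::Dynamic, 2> grid(2, 2);
grid << 0.0, 0.0,
        0.2, 0.2;
Grid<2> opt;
opt.optimize(f, grid);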
14 changes: 4 additions & 10 deletions fdaPDE/optimization/bfgs.h
@@ -27,8 +27,8 @@ namespace core {
// implementation of the Broyden–Fletcher–Goldfarb–Shanno algorithm for unconstrained nonlinear optimization
template <int N, typename... Args> class BFGS {
private:
- typedef typename std::conditional<N == Dynamic, DVector<double>, SVector<N>>::type VectorType;
- typedef typename std::conditional<N == Dynamic, DMatrix<double>, SMatrix<N>>::type MatrixType;
+ using VectorType = typename std::conditional<N == Dynamic, DVector<double>, SVector<N>>::type;
+ using MatrixType = typename std::conditional<N == Dynamic, DMatrix<double>, SMatrix<N>>::type;
std::size_t max_iter_; // maximum number of iterations before forced stop
double tol_; // tolerance on error before forced stop
double step_; // update step
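The typedef-to-using swap above (repeated identically in gradient_descent.h and newton.h below) changes nothing semantically; the alias picks a dynamically or statically sized Eigen type from the template parameter N. A self-contained sketch of the pattern, with DVector/SVector redefined here as stand-ins for the fdaPDE aliases:

#include <Eigen/Dense>
#include <type_traits>

inline constexpr int Dynamic = Eigen::Dynamic;
template <typename T> using DVector = Eigen::Matrix<T, Eigen::Dynamic, 1>;   // stand-in for the fdaPDE alias
template <int N> using SVector = Eigen::Matrix<double, N, 1>;                // stand-in for the fdaPDE alias

template <int N> struct Opt {
    // resolves at compile time: DVector<double> if N is Dynamic, SVector<N> otherwise
    using VectorType = std::conditional_t<N == Dynamic, DVector<double>, SVector<N>>;
};

static_assert(std::is_same_v<Opt<2>::VectorType, SVector<2>>);
static_assert(std::is_same_v<Opt<Dynamic>::VectorType, DVector<double>>);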
@@ -62,13 +62,11 @@ template <int N, typename... Args> class BFGS {
static_assert(
std::is_same<decltype(std::declval<F>().operator()(VectorType())), double>::value,
"F_IS_NOT_A_FUNCTOR_ACCEPTING_A_VECTORTYPE");

bool stop = false; // asserted true in case of forced stop
VectorType zero; // either statically or dynamically allocated depending on N
std::size_t n_iter = 0;
double error = 0;
- h = step_; // restore optimizer step

+ h = step_;
x_old = x0;
x_new = x0;
if constexpr (N == Dynamic) { // inv_hessian approximated with identity matrix
@@ -78,16 +76,14 @@ template <int N, typename... Args> class BFGS {
inv_hessian = MatrixType::Identity();
zero = VectorType::Zero();
}

grad_old = objective.derive()(x_old);
if (grad_old.isApprox(zero)) stop = true; // already at stationary point
error = grad_old.norm();

while (n_iter < max_iter_ && error > tol_ && !stop) {
// compute update direction
update = -inv_hessian * grad_old;
stop |= execute_pre_update_step(*this, objective, callbacks_);

// update along descent direction
x_new = x_old + h * update;
grad_new = objective.derive()(x_new);
@@ -96,7 +92,6 @@ template <int N, typename... Args> class BFGS {
value_ = objective(optimum_);
return optimum_;
}

// update inverse hessian approximation
VectorType delta_x = x_new - x_old;
VectorType delta_grad = grad_new - grad_old;
@@ -106,7 +101,6 @@ template <int N, typename... Args> class BFGS {
MatrixType U = (1 + (delta_grad.dot(hx)) / xg) * ((delta_x * delta_x.transpose()) / xg);
MatrixType V = ((hx * delta_x.transpose() + delta_x * hx.transpose())) / xg;
inv_hessian += (U - V);

// prepare next iteration
error = grad_new.norm();
stop |= execute_post_update_step(*this, objective, callbacks_);
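The U and V terms in the last hunk implement the standard rank-two BFGS update of the inverse Hessian approximation. Writing s = delta_x and y = delta_grad, and assuming (from the elided lines) that xg = s.dot(y) and hx = inv_hessian * y, the code computes

\[
H_{k+1}^{-1} \;=\; H_k^{-1} \;+\; \Bigl(1 + \frac{y^\top H_k^{-1} y}{s^\top y}\Bigr)\,\frac{s\,s^\top}{s^\top y} \;-\; \frac{H_k^{-1} y\, s^\top + s\,(H_k^{-1} y)^\top}{s^\top y},
\]

where U is the middle rank-one correction built from s s^T and V is the symmetric cross term built from hx and s.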
10 changes: 3 additions & 7 deletions fdaPDE/optimization/gradient_descent.h
@@ -27,8 +27,8 @@ namespace core {
// implementation of the gradient descent method for unconstrained nonlinear optimization
template <int N, typename... Args> class GradientDescent {
private:
- typedef typename std::conditional<N == Dynamic, DVector<double>, SVector<N>>::type VectorType;
- typedef typename std::conditional<N == Dynamic, DMatrix<double>, SMatrix<N>>::type MatrixType;
+ using VectorType = typename std::conditional<N == Dynamic, DVector<double>, SVector<N>>::type;
+ using MatrixType = typename std::conditional<N == Dynamic, DMatrix<double>, SMatrix<N>>::type;
std::size_t max_iter_; // maximum number of iterations before forced stop
double tol_; // tolerance on error before forced stop
double step_; // update step
@@ -62,24 +62,20 @@ template <int N, typename... Args> class GradientDescent {
static_assert(
std::is_same<decltype(std::declval<F>().operator()(VectorType())), double>::value,
"F_IS_NOT_A_FUNCTOR_ACCEPTING_A_VECTORTYPE");

bool stop = false; // asserted true in case of forced stop
std::size_t n_iter = 0;
double error = std::numeric_limits<double>::max();
- h = step_; // restore optimizer step

+ h = step_;
x_old = x0;
x_new = x0;
grad_old = objective.derive()(x_old);

while (n_iter < max_iter_ && error > tol_ && !stop) {
update = -grad_old;
stop |= execute_pre_update_step(*this, objective, callbacks_);

// update along descent direction
x_new = x_old + h * update;
grad_new = objective.derive()(x_new);

// prepare next iteration
error = grad_new.norm();
stop |= execute_post_update_step(*this, objective, callbacks_);
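The loop above is a plain fixed-step descent: the direction is the negative gradient and the step h stays at step_ unless a callback adapts it. A minimal standalone version of the same iteration on a quadratic objective (illustrative only, no fdaPDE types):

#include <Eigen/Dense>
#include <iostream>

int main() {
    // minimize f(x) = x[0]^2 + x[1]^2, whose gradient is 2x
    Eigen::Vector2d x(1.0, -1.0);
    const double h = 0.1, tol = 1e-6;
    for (int n_iter = 0; n_iter < 1000; ++n_iter) {
        Eigen::Vector2d grad = 2 * x;
        if (grad.norm() < tol) break;        // stationary point reached
        x += h * (-grad);                    // update along the descent direction
    }
    std::cout << x.transpose() << std::endl; // converges to (0, 0)
}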
19 changes: 7 additions & 12 deletions fdaPDE/optimization/grid.h
@@ -27,40 +27,36 @@ namespace core {
// searches for the point in a given grid minimizing a given nonlinear objective
template <int N, typename... Args> class Grid {
private:
- typedef typename std::conditional<N == Dynamic, DVector<double>, SVector<N>>::type VectorType;
+ using VectorType = typename std::conditional_t<N == Dynamic, DVector<double>, SVector<N>>;
+ using GridType = Eigen::Matrix<double, Eigen::Dynamic, N>; // equivalent to DMatrix<double> for N == Dynamic
std::tuple<Args...> callbacks_ {};
VectorType optimum_;
double value_; // objective value at optimum
public:
VectorType x_current;

// constructor
- template <int N_ = sizeof...(Args), typename std::enable_if<N_ != 0, int>::type = 0> Grid() { }
+ Grid() requires(sizeof...(Args) != 0) { }
Grid(Args&&... callbacks) : callbacks_(std::make_tuple(std::forward<Args>(callbacks)...)) { }
// copy semantic
Grid(const Grid& other) : callbacks_(other.callbacks_) { }
Grid& operator=(const Grid& other) {
callbacks_ = other.callbacks_;
return *this;
}

- template <typename F>
- VectorType optimize(F& objective, const std::vector<VectorType>& grid) {
+ template <typename F> VectorType optimize(F& objective, const GridType& grid) {
static_assert(
std::is_same<decltype(std::declval<F>().operator()(VectorType())), double>::value,
"F_IS_NOT_A_FUNCTOR_ACCEPTING_A_VECTORTYPE");

bool stop = false; // asserted true in case of forced stop
// algorithm initialization
- x_current = grid[0];
+ x_current = grid.row(0);
value_ = objective(x_current);
optimum_ = x_current;
// optimize field over supplied grid
- for (std::size_t i = 1; i < grid.size() && !stop; ++i) {
-     x_current = grid[i];
+ for (int i = 1; i < grid.rows() && !stop; ++i) {
+     x_current = grid.row(i);
double x = objective(x_current);
stop |= execute_post_update_step(*this, objective, callbacks_);

// update minimum if better optimum found
if (x < value_) {
value_ = x;
@@ -69,7 +65,6 @@ template <int N, typename... Args> class Grid {
}
return optimum_;
}

// getters
VectorType optimum() const { return optimum_; }
double value() const { return value_; }
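Two smaller changes ride along in this file: the explicit default constructor is now gated by a C++20 requires clause instead of enable_if-based SFINAE (same effect, presumably to avoid a clash with the variadic constructor when the callback pack is empty), and iteration switches from grid[i] to grid.row(i). Since the lattice is now a plain Eigen matrix with one candidate point per row, it can also be assembled with Eigen primitives; a sketch reproducing the 10x10 grid of the updated test (an illustration, not code from this commit):

#include <Eigen/Dense>

// same [-1, 0.8] x [-1, 0.8] lattice as in optimization_test.cpp, one point per row
Eigen::Matrix<double, Eigen::Dynamic, 2> make_lattice() {
    Eigen::VectorXd ticks = Eigen::VectorXd::LinSpaced(10, -1.0, 0.8);   // -1.0, -0.8, ..., 0.8
    Eigen::Matrix<double, Eigen::Dynamic, 2> grid(100, 2);
    for (int i = 0; i < 10; ++i) {
        grid.block(i * 10, 0, 10, 1).setConstant(ticks[i]);   // first coordinate, constant per block
        grid.block(i * 10, 1, 10, 1) = ticks;                 // second coordinate sweeps the ticks
    }
    return grid;
}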
13 changes: 5 additions & 8 deletions fdaPDE/optimization/newton.h
@@ -27,8 +27,8 @@ namespace core {
// implementation of the newton method for unconstrained nonlinear optimization
template <int N, typename... Args> class Newton {
private:
- typedef typename std::conditional<N == Dynamic, DVector<double>, SVector<N>>::type VectorType;
- typedef typename std::conditional<N == Dynamic, DMatrix<double>, SMatrix<N>>::type MatrixType;
+ using VectorType = typename std::conditional<N == Dynamic, DVector<double>, SVector<N>>::type;
+ using MatrixType = typename std::conditional<N == Dynamic, DMatrix<double>, SMatrix<N>>::type;
std::size_t max_iter_; // maximum number of iterations before forced stop
double tol_; // tolerance on error before forced stop
double step_; // update step
@@ -63,27 +63,24 @@ template <int N, typename... Args> class Newton {
static_assert(
std::is_same<decltype(std::declval<F>().operator()(VectorType())), double>::value,
"F_IS_NOT_A_FUNCTOR_ACCEPTING_A_VECTORTYPE");

bool stop = false; // asserted true in case of forced stop
std::size_t n_iter = 0;
double error = std::numeric_limits<double>::max();
- h = step_; // restore optimizer step

+ h = step_;
x_old = x0;
x_new = x0;

while (n_iter < max_iter_ && error > tol_ && !stop) {
// compute update direction
grad_old = objective.derive()(x_old);
hessian = objective.derive_twice()(x_old);

inv_hessian.compute(hessian);
update = -inv_hessian.solve(grad_old);
stop |= execute_pre_update_step(*this, objective, callbacks_);

// update along descent direction
x_new = x_old + h * update;
grad_new = objective.derive()(x_new);

// prepare next iteration
error = grad_new.norm();
stop |= execute_post_update_step(*this, objective, callbacks_);
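The Newton hunk obtains the update by factorizing the Hessian and solving a linear system rather than forming an explicit inverse. The declared type of inv_hessian is not visible in this diff, so the sketch below assumes an Eigen::LDLT factorization object with the same compute/solve interface:

#include <Eigen/Dense>
#include <iostream>

int main() {
    // one Newton step on f(x) = x[0]^2 + 2 * x[1]^2 from x = (1, 1)
    Eigen::Vector2d x(1.0, 1.0);
    Eigen::Vector2d grad(2 * x[0], 4 * x[1]);    // gradient of f at x
    Eigen::Matrix2d hessian;
    hessian << 2, 0,
               0, 4;                             // constant Hessian of f
    Eigen::LDLT<Eigen::Matrix2d> inv_hessian;    // factorization, assumed type (see note above)
    inv_hessian.compute(hessian);
    Eigen::Vector2d update = -inv_hessian.solve(grad);
    x += update;                                 // a quadratic converges in one full step: x = (0, 0)
    std::cout << x.transpose() << std::endl;
}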
9 changes: 6 additions & 3 deletions test/src/optimization_test.cpp
@@ -40,9 +40,12 @@ TEST(optimization_test, grid_search) {
ScalarField<2> f;
f = [](SVector<2> x) -> double { return x[0] * x[0] + x[1] * x[1]; };
// define grid of points
- std::vector<SVector<2>> grid;
- for (double x = -1; x < 1; x += 0.2) {
-     for (double y = -1; y < 1; y += 0.2) { grid.push_back(SVector<2>(x, y)); }
+ Eigen::Matrix<double, Eigen::Dynamic, 2> grid(100, 2);
+ for (int i = 0; i < 10; ++i) {
+     for (int j = 0; j < 10; ++j) {
+         grid(i * 10 + j, 0) = -1 + 0.2 * i;
+         grid(i * 10 + j, 1) = -1 + 0.2 * j;
+     }
}
// define optimizer
Grid<2> opt;
