Commit 3662b8ce authored by Erik Strand

Write a basic algorithm comparison app

parent d04cb601
CMakeLists.txt

@@ -21,6 +21,7 @@ include(cmake/make_plot_target.cmake)
 add_subdirectory(external)
 add_subdirectory(optimization)
 add_subdirectory(test)
+add_subdirectory(apps)
 if (VISUALIZE)
     make_meta_plot_target()
...
apps/…/CMakeLists.txt (new file)

add_executable(compare_convergence
    compare_convergence.cpp
)
target_link_libraries(compare_convergence optimization_lib cma-es)
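Since the root CMakeLists.txt now calls add_subdirectory(apps), the commit presumably also adds a CMakeLists.txt directly under apps/ that forwards to this target. A minimal sketch, assuming the app lives in its own compare_convergence subdirectory (the actual layout is not visible on this page):

# apps/CMakeLists.txt (hypothetical): forward to each app's directory
add_subdirectory(compare_convergence)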
#include "optimizers/cma_es/cma_es.h"
#include "optimizers/conjugate_gradient_descent/conjugate_gradient_descent.h"
#include "optimizers/gradient_descent/gradient_descent.h"
#include "optimizers/nelder_mead/nelder_mead.h"
#include "objectives/rosenbrock.h"
#include <iostream>
#include <fstream>
#include <vector>
using namespace optimization;
//--------------------------------------------------------------------------------------------------
// TODO:
// - verify convergence by checking against known values
// - choose random sample of start points
// - compute time based exits
int main() {
using Objective = Rosenbrock<-1>;
Objective objective;
uint32_t seed = 0xdefceed9;
uint32_t max_iterations = 1000000000;
uint32_t max_evaluations = 1000000;
Scalar gradient_threshold = 1e-8;
Scalar value_threshold = 1e-8;
std::vector<uint32_t> dims = {2, 4, 8, 16, 32, 64, 128, 256, 512, 1024};
NelderMead<Objective, -1> nm(max_evaluations, value_threshold);
// We'll set the learning rate in the loop.
GradientDescent<-1> gd(0, max_evaluations, gradient_threshold);
std::vector<Scalar> learning_rates = {0.0015, 0.001, 0.0008, 0.0003, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001};
ConjugateGradientDescent<-1> cgd(
gradient_threshold,
value_threshold,
max_evaluations,
max_iterations
);
// We'll set dim and pop size in the loop.
CmaEs cma(0, 0, seed, max_evaluations, max_iterations, value_threshold);
VectorXs initial_point;
VectorXs result;
std::vector<uint32_t> nm_evals;
std::vector<uint32_t> gd_evals;
std::vector<uint32_t> cgd_evals;
std::vector<uint32_t> cma_evals;
for (uint32_t i = 0; i < dims.size(); ++i) {
uint32_t dim = dims[i];
objective.dim() = dim;
initial_point.resize(dim);
initial_point.fill(-1);
/*
nm.optimize(objective, initial_point);
nm_evals.push_back(nm.n_evaluations());
gd.learning_rate() = learning_rates[i];
gd.optimize(objective, initial_point);
gd_evals.push_back(gd.n_evaluations());
*/
cgd.optimize(objective, initial_point);
cgd_evals.push_back(cgd.n_evaluations());
if (dim < 200) {
uint32_t const pop_size = 4 + static_cast<uint32_t>(3 * std::log(dim));
cma.dim() = dim;
cma.pop_size() = pop_size;
std::cout << "pop size: " << pop_size << '\n';
cma.optimize(objective, initial_point);
cma_evals.push_back(cma.n_evaluations());
} else {
cma_evals.push_back(max_evaluations);
}
}
std::cout << '\n';
for (uint32_t i = 0; i < dims.size(); ++i) {
std::cout << "Dim " << dims[i] << '\n';
//std::cout << "nm: " << nm_evals[i] << '\n';
//std::cout << "gd: " << gd_evals[i] << '\n';
std::cout << "cgd: " << cgd_evals[i] << '\n';
std::cout << "cma: " << cma_evals[i] << '\n';
std::cout << '\n';
}
return 0;
}
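For reference, the objective being minimized is presumably the standard n-dimensional Rosenbrock function (assuming Rosenbrock<-1> follows the usual definition); its known minimum is what the first TODO ("verify convergence by checking against known values") would check against:

f(x) = \sum_{i=1}^{n-1} \left[ 100 \, (x_{i+1} - x_i^2)^2 + (1 - x_i)^2 \right],
\qquad f(x^*) = 0 \ \text{at}\ x^* = (1, 1, \dots, 1).

The population size used for CMA-ES is its standard default, \lambda = 4 + \lfloor 3 \ln n \rfloor; for example, n = 2 gives \lambda = 6 and n = 128 gives \lambda = 18.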