From 3662b8ce64d2dbbe6a91d2433e3eb95539d8d382 Mon Sep 17 00:00:00 2001
From: Erik Strand <erik.strand@cba.mit.edu>
Date: Thu, 16 Apr 2020 16:37:18 -0400
Subject: [PATCH] Write a basic algorithm comparison app

---
 CMakeLists.txt               |  1 +
 apps/CMakeLists.txt          |  4 ++
 apps/compare_convergence.cpp | 93 ++++++++++++++++++++++++++++++++++++
 3 files changed, 98 insertions(+)
 create mode 100644 apps/CMakeLists.txt
 create mode 100644 apps/compare_convergence.cpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5027b37..8f51bdc 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -21,6 +21,7 @@ include(cmake/make_plot_target.cmake)
 add_subdirectory(external)
 add_subdirectory(optimization)
 add_subdirectory(test)
+add_subdirectory(apps)
 
 if (VISUALIZE)
     make_meta_plot_target()
diff --git a/apps/CMakeLists.txt b/apps/CMakeLists.txt
new file mode 100644
index 0000000..3aa653e
--- /dev/null
+++ b/apps/CMakeLists.txt
@@ -0,0 +1,4 @@
+add_executable(compare_convergence
+    compare_convergence.cpp
+)
+target_link_libraries(compare_convergence optimization_lib cma-es)
diff --git a/apps/compare_convergence.cpp b/apps/compare_convergence.cpp
new file mode 100644
index 0000000..ef25945
--- /dev/null
+++ b/apps/compare_convergence.cpp
@@ -0,0 +1,93 @@
+#include "optimizers/cma_es/cma_es.h"
+#include "optimizers/conjugate_gradient_descent/conjugate_gradient_descent.h"
+#include "optimizers/gradient_descent/gradient_descent.h"
+#include "optimizers/nelder_mead/nelder_mead.h"
+#include "objectives/rosenbrock.h"
+#include <iostream>
+#include <cmath>
+#include <vector>
+
+using namespace optimization;
+
+//--------------------------------------------------------------------------------------------------
+// TODO:
+//  - verify convergence by checking against known values
+//  - choose random sample of start points
+//  - compute time based exits
+int main() {
+    using Objective = Rosenbrock<-1>;
+    Objective objective;
+
+    uint32_t seed = 0xdefceed9;
+    uint32_t max_iterations = 1000000000;
+    uint32_t max_evaluations = 1000000;
+    Scalar gradient_threshold = 1e-8;
+    Scalar value_threshold = 1e-8;
+    std::vector<uint32_t> dims = {2, 4, 8, 16, 32, 64, 128, 256, 512, 1024};
+
+    NelderMead<Objective, -1> nm(max_evaluations, value_threshold);
+
+    // We'll set the learning rate in the loop.
+    GradientDescent<-1> gd(0, max_evaluations, gradient_threshold);
+    std::vector<Scalar> learning_rates = {0.0015, 0.001, 0.0008, 0.0003, 0.0002, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001};
+
+    ConjugateGradientDescent<-1> cgd(
+        gradient_threshold,
+        value_threshold,
+        max_evaluations,
+        max_iterations
+    );
+
+    // We'll set dim and pop size in the loop.
+    CmaEs cma(0, 0, seed, max_evaluations, max_iterations, value_threshold);
+
+    VectorXs initial_point;
+    VectorXs result;
+
+    std::vector<uint32_t> nm_evals;
+    std::vector<uint32_t> gd_evals;
+    std::vector<uint32_t> cgd_evals;
+    std::vector<uint32_t> cma_evals;
+
+    for (uint32_t i = 0; i < dims.size(); ++i) {
+        uint32_t dim = dims[i];
+        objective.dim() = dim;
+        initial_point.resize(dim);
+        initial_point.fill(-1);
+
+        /*
+        nm.optimize(objective, initial_point);
+        nm_evals.push_back(nm.n_evaluations());
+
+        gd.learning_rate() = learning_rates[i];
+        gd.optimize(objective, initial_point);
+        gd_evals.push_back(gd.n_evaluations());
+        */
+
+        cgd.optimize(objective, initial_point);
+        cgd_evals.push_back(cgd.n_evaluations());
+
+        if (dim < 200) {
+            uint32_t const pop_size = 4 + static_cast<uint32_t>(3 * std::log(dim));
+            cma.dim() = dim;
+            cma.pop_size() = pop_size;
+            std::cout << "pop size: " << pop_size << '\n';
+            cma.optimize(objective, initial_point);
+            cma_evals.push_back(cma.n_evaluations());
+        } else {
+            cma_evals.push_back(max_evaluations);
+        }
+    }
+
+    std::cout << '\n';
+    for (uint32_t i = 0; i < dims.size(); ++i) {
+        std::cout << "Dim " << dims[i] << '\n';
+        //std::cout << "nm:  " << nm_evals[i] << '\n';
+        //std::cout << "gd:  " << gd_evals[i] << '\n';
+        std::cout << "cgd: " << cgd_evals[i] << '\n';
+        std::cout << "cma: " << cma_evals[i] << '\n';
+        std::cout << '\n';
+    }
+
+    return 0;
+}
-- 
GitLab