diff --git a/CMakeLists.txt b/CMakeLists.txt
index ec636dfca541fea1fd6906810dc9b7ff6be09399..0a9a749a98f653526cb4b3d6f7535b04f0948530 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -21,3 +21,5 @@ include(cmake/make_plot_target.cmake)
 add_subdirectory(external)
 add_subdirectory(optimization)
 add_subdirectory(test)
+
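+# Aggregate "all_plots" target that builds every plot (defined in cmake/make_plot_target.cmake).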
+make_meta_plot_target()
diff --git a/apps/plots.py b/apps/plots.py
index 777ffd2566f1dd822a89ce90d6d1a22823dd15fd..388525f34745b16ed9c875e3f66bf07213f27e13 100644
--- a/apps/plots.py
+++ b/apps/plots.py
@@ -48,11 +48,13 @@ def add_points(fig, ax, points):
     ax.plot(x, y, 'k.')
     return fig, ax
 
-# points is a list of numpy arrays of dim n x 2
+# polygons is a list of numpy arrays in which each row is a vertex
 def add_polygons(fig, ax, polygons):
     for polygon in polygons:
-        p = mpl.patches.Polygon(polygon, True, fill=False, color="black", zorder=2)
+        # only plot the first two dims
+        p = mpl.patches.Polygon(np.array(polygon)[:, 0:2], closed=True, fill=False, color="black", zorder=2)
         ax.add_patch(p)
+        # a PatchCollection could be more efficient for many polygons:
         #p = PatchCollection(patches, alpha=0.4)
         #ax.add_collection(p)
     return fig, ax
diff --git a/cmake/make_plot_target.cmake b/cmake/make_plot_target.cmake
index 3c58735a50af00c7395c77f0aa7d27613de44667..64753514780c37c24846c74346f667151141abc5 100644
--- a/cmake/make_plot_target.cmake
+++ b/cmake/make_plot_target.cmake
@@ -1,22 +1,41 @@
+#set(PLOT_TARGETS "")
+
-# TODO Enable arguments to be passed to the C++ executable
-function(make_plot_target TARGET)
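+# Defines a target <TARGET>_plot_<ID> that runs <TARGET> to produce
+# <TARGET>_log_<ID>.json and <TARGET>_vis_<ID>.json, then plots the latter to
+# <TARGET>_plot_<ID>.pdf. Arguments after ARGS are forwarded to the executable.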
+function(make_plot_target TARGET ID)
+    cmake_parse_arguments(PLOT "" "" "ARGS" ${ARGN})
     if (VISUALIZE)
         add_custom_command(
-            OUTPUT ${TARGET}_log.json ${TARGET}_vis.json
-            COMMAND ${TARGET} ${TARGET}_log.json ${TARGET}_vis.json
+            OUTPUT ${TARGET}_log_${ID}.json ${TARGET}_vis_${ID}.json
+            COMMAND ${TARGET} ${TARGET}_log_${ID}.json ${TARGET}_vis_${ID}.json ${PLOT_ARGS}
             WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
             DEPENDS ${TARGET}
         )
 
         add_custom_command(
-            OUTPUT ${TARGET}_plot.pdf
-            COMMAND python3 ${CMAKE_SOURCE_DIR}/apps/plots.py ${TARGET}_vis.json ${TARGET}_plot.pdf
+            OUTPUT ${TARGET}_plot_${ID}.pdf
+            COMMAND python3 ${CMAKE_SOURCE_DIR}/apps/plots.py ${TARGET}_vis_${ID}.json ${TARGET}_plot_${ID}.pdf
             WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
-            DEPENDS ${CMAKE_SOURCE_DIR}/apps/plots.py ${TARGET}_vis.json 
+            DEPENDS ${CMAKE_SOURCE_DIR}/apps/plots.py ${TARGET}_vis_${ID}.json
         )
 
-        add_custom_target(${TARGET}_plot
-            DEPENDS ${TARGET}_plot.pdf
+        add_custom_target(${TARGET}_plot_${ID}
+            DEPENDS ${TARGET}_plot_${ID}.pdf
         )
+
+        # Collecting the plot targets in a list here doesn't work: a variable
+        # set with PARENT_SCOPE only propagates one directory up, so the meta
+        # target below hard codes its dependencies instead.
+        #list(APPEND PLOT_TARGETS ${TARGET}_plot_${ID})
+        #set(PLOT_TARGETS ${PLOT_TARGETS} PARENT_SCOPE)
     endif()
 endfunction()
+
+function(make_meta_plot_target)
+    # The individual plot targets only exist when VISUALIZE is set.
+    if (NOT VISUALIZE)
+        return()
+    endif()
+    # TODO Don't hard code this.
+    add_custom_target(all_plots)
+    add_dependencies(all_plots gradient_descent_plot_2d)
+    add_dependencies(all_plots gradient_descent_plot_10d)
+    add_dependencies(all_plots conjugate_gradient_descent_plot_2d)
+    add_dependencies(all_plots conjugate_gradient_descent_plot_10d)
+    add_dependencies(all_plots nelder_mead_plot_2d)
+    add_dependencies(all_plots nelder_mead_plot_10d)
+endfunction()
diff --git a/optimization/objectives/rosenbrock.h b/optimization/objectives/rosenbrock.h
index 318c2bba8aff34473e7113730393c723b1a83a94..7c5aaae15f56a162ccc7045065a5db7a93e3f675 100644
--- a/optimization/objectives/rosenbrock.h
+++ b/optimization/objectives/rosenbrock.h
@@ -1,38 +1,48 @@
 #ifndef OPTIMIZATION_OBJECTIVES_ROSENBROCK_H
 #define OPTIMIZATION_OBJECTIVES_ROSENBROCK_H
 
-#include "utils/scalar.h"
 #include "utils/vector.h"
 
 namespace optimization {
 
 //--------------------------------------------------------------------------------------------------
-template <typename Vector>
+template <int32_t N>
 class Rosenbrock {
 public:
-    using Input = Vector;
-    using Gradient = Vector;
     static constexpr char const* name = "rosenbrock";
 
-    Rosenbrock() {}
+    Rosenbrock(): dim_(2) {}
 
-    uint32_t dim() const { return 2; }
+    uint32_t dim() const { return dim_; }
+    uint32_t& dim() { return dim_; }
 
-    void eval(Input const& x, Scalar& value) const {
-        Scalar const x_squared = x[0] * x[0];
-        Scalar const tmp1 = (1 - x[0]);
-        Scalar const tmp2 = (x[1] - x_squared);
-        value = tmp1 * tmp1 + 100 * tmp2 * tmp2;
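+    // Generalized Rosenbrock: f(x) = sum_{i=1}^{n-1} (1 - x_{i-1})^2 + 100 (x_i - x_{i-1}^2)^2,
+    // with global minimum f = 0 at x = (1, ..., 1).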
+    void eval(VectorNs<N> const& x, Scalar& value) {
+        value = Scalar(0);
+        for (uint32_t i = 1; i < dim_; ++i) {
+            Scalar const x_squared = x[i - 1] * x[i - 1];
+            Scalar const tmp_1 = (1 - x[i - 1]);
+            Scalar const tmp_2 = (x[i] - x_squared);
+            value += tmp_1 * tmp_1 + 100 * tmp_2 * tmp_2;
+        }
     }
 
-    void eval(Input const& x, Scalar& value, Gradient& gradient) const {
-        Scalar const x_squared = x[0] * x[0];
-        Scalar const tmp1 = (1 - x[0]);
-        Scalar const tmp2 = (x[1] - x_squared);
-        value = tmp1 * tmp1 + 100 * tmp2 * tmp2;
-        gradient[0] = -2 * tmp1 - 400 * tmp2 * x[0];
-        gradient[1] = 200 * tmp2;
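+    // Each term contributes to two partials:
+    // d/dx_{i-1} = -2 (1 - x_{i-1}) - 400 x_{i-1} (x_i - x_{i-1}^2) and d/dx_i = 200 (x_i - x_{i-1}^2).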
+    void eval(VectorNs<N> const& x, Scalar& value, VectorNs<N>& gradient) {
+        value = Scalar(0);
+        gradient.resize(dim_);
+        gradient.setZero();
+        for (uint32_t i = 1; i < dim_; ++i) {
+            Scalar const x_squared = x[i - 1] * x[i - 1];
+            Scalar const tmp_1 = (1 - x[i - 1]);
+            Scalar const tmp_2 = (x[i] - x_squared);
+            value += tmp_1 * tmp_1 + 100 * tmp_2 * tmp_2;
+            gradient[i - 1] += -2 * tmp_1 - 400 * tmp_2 * x[i - 1];
+            gradient[i] += 200 * tmp_2;
+        }
     }
+
+private:
+    // Redundant when N != -1 (the size is then fixed at compile time);
+    // needed for N == -1 (Eigen::Dynamic).
+    uint32_t dim_;
 };
 
 }
diff --git a/optimization/objectives/samples.h b/optimization/objectives/samples.h
index f5359a75be910c22d444ee1861cc5c9d33f941f5..b998c6efb55543782b671efa63976082831bea89 100644
--- a/optimization/objectives/samples.h
+++ b/optimization/objectives/samples.h
@@ -18,20 +18,16 @@ struct Sample {
 };
 
 //--------------------------------------------------------------------------------------------------
-// TODO: Templatize on the underlying data types, not the objective function.
-template <typename Objective>
+template <typename Vector>
 struct GradientSample {
-    using Input = typename Objective::Input;
-    using Gradient = typename Objective::Gradient;
-
     GradientSample() {}
-    GradientSample(Input const& p, Scalar v, Gradient const& g)
+    GradientSample(Vector const& p, Scalar v, Vector const& g)
         : point(p), value(v), gradient(g)
     {}
 
-    Input point;
+    Vector point;
     Scalar value;
-    Gradient gradient;
+    Vector gradient;
 };
 
 }
diff --git a/optimization/objectives/samples_vis.h b/optimization/objectives/samples_vis.h
new file mode 100644
index 0000000000000000000000000000000000000000..2753760a43584d7ed4f86d286d5434a46dd5cce3
--- /dev/null
+++ b/optimization/objectives/samples_vis.h
@@ -0,0 +1,23 @@
+#ifndef OPTIMIZATION_OBJECTIVES_SAMPLES_VIS_H
+#define OPTIMIZATION_OBJECTIVES_SAMPLES_VIS_H
+
+#include "utils/vis_only.h"
+#ifdef VISUALIZE
+#include "samples.h"
+#include "json.hpp"
+
+namespace optimization {
+
+//..................................................................................................
+template <typename Vector>
+void to_json(nlohmann::json& j, GradientSample<Vector> const& sample) {
+    j = nlohmann::json{
+        {"point", sample.point},
+        {"value", sample.value},
+        {"gradient", sample.gradient}
+    };
+}
+
+}
+
+#endif
+#endif
diff --git a/optimization/optimizers/conjugate_gradient_descent/CMakeLists.txt b/optimization/optimizers/conjugate_gradient_descent/CMakeLists.txt
index 85359f49630c66a8bfada7eefcf23a3d7bed2196..273708d2097d919733a9f461dfd034e2c1ec856c 100644
--- a/optimization/optimizers/conjugate_gradient_descent/CMakeLists.txt
+++ b/optimization/optimizers/conjugate_gradient_descent/CMakeLists.txt
@@ -3,5 +3,6 @@ if (VISUALIZE)
         main.cpp
     )
     target_link_libraries(conjugate_gradient_descent optimization_lib clara)
-    make_plot_target(conjugate_gradient_descent)
+    make_plot_target(conjugate_gradient_descent 2d ARGS -d 2)
+    make_plot_target(conjugate_gradient_descent 10d ARGS -d 10)
 endif()
diff --git a/optimization/optimizers/conjugate_gradient_descent/conjugate_gradient_descent.h b/optimization/optimizers/conjugate_gradient_descent/conjugate_gradient_descent.h
index 9f390d9e4fb03a6e2f7b26dcbe06b66670fad420..9ea5939b38304d77835900c0d37597a43c26f095 100644
--- a/optimization/optimizers/conjugate_gradient_descent/conjugate_gradient_descent.h
+++ b/optimization/optimizers/conjugate_gradient_descent/conjugate_gradient_descent.h
@@ -57,8 +57,8 @@ VectorNs<N> ConjugateGradientDescent<N>::optimize(
     Scalar alpha = -1;
     Scalar value, last_value;
     Vector gradient, last_gradient;
-    gradient.resize(objective.dim());
-    last_gradient.resize(objective.dim());
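+    // Size from the initial point so optimize doesn't depend on the objective exposing dim().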
+    gradient.resize(initial_point.size());
+    last_gradient.resize(initial_point.size());
 
     objective.eval(line_objective.x0(), value, gradient);
     ++n_evaluations_;
diff --git a/optimization/optimizers/conjugate_gradient_descent/conjugate_gradient_descent_log.h b/optimization/optimizers/conjugate_gradient_descent/conjugate_gradient_descent_log.h
index 4bfcb0bbb84446782b90874f303defa7381c750d..f2cb61b0c5eb3e35d1a4d7530b7bb603c49aa817 100644
--- a/optimization/optimizers/conjugate_gradient_descent/conjugate_gradient_descent_log.h
+++ b/optimization/optimizers/conjugate_gradient_descent/conjugate_gradient_descent_log.h
@@ -67,8 +67,8 @@ void ConjugateGradientDescentLog<N>::initialize(
 ) {
     objective_name = Objective::name;
     states.emplace_back();
-    states.back().direction.resize(N);
-    states.back().direction.setZero(N);
+    states.back().direction.resize(point.size());
+    states.back().direction.setZero();
     states.back().point = point;
     states.back().gradient = gradient;
     states.back().value = value;
@@ -85,7 +85,7 @@ void ConjugateGradientDescentLog<N>::push_back(
     states.push_back({direction, point, gradient, value});
 }
 
-//..................................................................................................
+//--------------------------------------------------------------------------------------------------
 template <int32_t N>
 void to_json(nlohmann::json& j, ConjugateGradientDescentState<N> const& state) {
     j = nlohmann::json{
@@ -96,7 +96,7 @@ void to_json(nlohmann::json& j, ConjugateGradientDescentState<N> const& state) {
     };
 }
 
-//..................................................................................................
+//--------------------------------------------------------------------------------------------------
 template <int32_t N>
 void to_json(nlohmann::json& j, ConjugateGradientDescentLog<N> const& log) {
     j = nlohmann::json{
diff --git a/optimization/optimizers/conjugate_gradient_descent/main.cpp b/optimization/optimizers/conjugate_gradient_descent/main.cpp
index 80074694af97e827cd0df6c3f92bbd11ddd48a15..6ef15440ff9fa82c4368bc4937564d34c721c806 100644
--- a/optimization/optimizers/conjugate_gradient_descent/main.cpp
+++ b/optimization/optimizers/conjugate_gradient_descent/main.cpp
@@ -14,6 +14,7 @@ using json = nlohmann::json;
 int main(int const argc, char const** argv) {
     std::string log_file_path;
     std::string vis_file_path;
+    uint32_t dim = 2;
     Scalar gradient_threshold = 1e-8;
     Scalar progress_threshold = 1e-8;
     uint32_t max_iterations = 1000;
@@ -23,6 +24,7 @@ int main(int const argc, char const** argv) {
     auto const cli =
         clara::Arg(log_file_path, "log_file_path")("Location of the optimization log") |
         clara::Arg(vis_file_path, "vis_file_path")("Location of the visualization file") |
+        clara::Opt(dim, "dim")["-d"]["--dim"]("Dimension of the search space") |
         clara::Opt(gradient_threshold, "gradient_threshold")["-g"]["--gradient"]("Return if the gradient norm is this small") |
         clara::Opt(progress_threshold, "progress_threshold")["-p"]["--progress"]("Return if the difference between two consecutive values is this small") |
         clara::Opt(max_iterations, "max_iterations")["-n"]["--max-iterations"]("Maximum number of line minimization cycles") |
@@ -34,14 +36,22 @@ int main(int const argc, char const** argv) {
         exit(1);
     }
 
-    Vector2<Scalar> initial_point = Vector2<Scalar>(x0, y0);
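+    // Start at (x0, y0, -1, ..., -1); assumes dim >= 2.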
+    VectorXs initial_point;
+    initial_point.resize(dim);
+    initial_point[0] = x0;
+    initial_point[1] = y0;
+    for (uint32_t i = 2; i < dim; ++i) {
+        initial_point[i] = -1;
+    }
 
     //using Objective = Paraboloid<Vector2<Scalar>>;
     //Objective objective(dim);
-    using Objective = Rosenbrock<Vector2<Scalar>>;
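+    // N = -1 (Eigen::Dynamic): the dimension is set at run time via dim().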
+    using Objective = Rosenbrock<-1>;
     Objective objective;
-    ConjugateGradientDescent<2> optimizer(gradient_threshold, progress_threshold, max_iterations);
-    VectorNs<2> minimum = optimizer.optimize(objective, initial_point);
+    objective.dim() = dim;
+
+    ConjugateGradientDescent<-1> optimizer(gradient_threshold, progress_threshold, max_iterations);
+    VectorXs minimum = optimizer.optimize(objective, initial_point);
     std::cout << "n iterations: " << optimizer.n_iterations() << '\n';
     std::cout << "n evaluations: " << optimizer.n_evaluations() << '\n';
     std::cout << "final point: " << minimum << '\n';
@@ -53,7 +63,7 @@ int main(int const argc, char const** argv) {
     }
 
     if (!vis_file_path.empty()) {
-        json data = ConjugateGradientDescentVis<2>{optimizer};
+        json data = ConjugateGradientDescentVis<-1>{optimizer};
         std::ofstream vis_file(vis_file_path);
         vis_file << data.dump(4) << '\n';
     }
diff --git a/optimization/optimizers/gradient_descent/CMakeLists.txt b/optimization/optimizers/gradient_descent/CMakeLists.txt
index cf64b2f10cb82025678a533a90d08ea6b50a269d..c2c77038db0eb22eb7dfc9a9960d2b1921eaba04 100644
--- a/optimization/optimizers/gradient_descent/CMakeLists.txt
+++ b/optimization/optimizers/gradient_descent/CMakeLists.txt
@@ -3,5 +3,6 @@ if (VISUALIZE)
         main.cpp
     )
     target_link_libraries(gradient_descent optimization_lib clara)
-    make_plot_target(gradient_descent)
+    make_plot_target(gradient_descent 2d ARGS -d 2 -l 0.0015)
+    make_plot_target(gradient_descent 10d ARGS -d 10 -l 0.0005 -n 10000)
 endif()
diff --git a/optimization/optimizers/gradient_descent/gradient_descent.h b/optimization/optimizers/gradient_descent/gradient_descent.h
index 42c3215436580995226b924f964720bdd45d6bb2..bcd5bf20abd88a4b7549b5e71d95a47b5840231c 100644
--- a/optimization/optimizers/gradient_descent/gradient_descent.h
+++ b/optimization/optimizers/gradient_descent/gradient_descent.h
@@ -6,12 +6,9 @@
 namespace optimization {
 
 //--------------------------------------------------------------------------------------------------
-template <typename Objective>
-class GradientDescent : public GradientDescentLog<Objective> {
+template <int32_t N>
+class GradientDescent : public GradientDescentLog<N> {
 public:
-    using Input = typename Objective::Input;
-    using Gradient = typename Objective::Gradient;
-
     GradientDescent() {}
     GradientDescent(Scalar learning_rate, uint32_t n_steps)
         : learning_rate_(learning_rate), n_steps_(n_steps)
@@ -20,18 +17,22 @@ public:
     Scalar& learning_rate() { return learning_rate_; }
     Scalar learning_rate() const { return learning_rate_; }
 
-    Input optimize(Objective const& objective, Input const& initial_point) {
-        Input point = initial_point;
+    template <typename Objective>
+    VectorNs<N> optimize(Objective& objective, VectorNs<N> const& initial_point) {
+        VectorNs<N> point = initial_point;
         Scalar value;
-        Gradient gradient;
+        VectorNs<N> gradient;
+        gradient.resize(point.size());
         objective.eval(point, value, gradient);
-        GradientDescentLog<Objective>::push_back(point, value, gradient);
+        GradientDescentLog<N>::initialize(objective);
+        GradientDescentLog<N>::push_back(point, value, gradient);
 
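+        // Fixed-step gradient descent: x <- x - learning_rate * grad f(x).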
         for (uint32_t i = 0; i < n_steps_; ++i) {
             point -= learning_rate_ * gradient;
             objective.eval(point, value, gradient);
-            GradientDescentLog<Objective>::push_back(point, value, gradient);
+            GradientDescentLog<N>::push_back(point, value, gradient);
         }
+
         return point;
     }
 
diff --git a/optimization/optimizers/gradient_descent/gradient_descent_log.h b/optimization/optimizers/gradient_descent/gradient_descent_log.h
index cb16ff415481536933fdc25ab3ae7fcabab8a488..1d2a8a21ee3d7dd231ba252effda1ada63c54773 100644
--- a/optimization/optimizers/gradient_descent/gradient_descent_log.h
+++ b/optimization/optimizers/gradient_descent/gradient_descent_log.h
@@ -1,13 +1,14 @@
 #ifndef OPTIMIZATION_GRADIENT_DESCENT_LOG_H
 #define OPTIMIZATION_GRADIENT_DESCENT_LOG_H
 
-#include "utils/scalar.h"
+#include "utils/vector.h"
 #include "utils/vis_only.h"
 #include "objectives/samples.h"
 
 #ifdef VISUALIZE
 #include <vector>
 #include "json.hpp"
+#include "objectives/samples_vis.h"
 #endif
 
 namespace optimization {
@@ -15,54 +16,55 @@ namespace optimization {
 //--------------------------------------------------------------------------------------------------
 // This is used as a base class rather than a member so that the empty base class optimization can
 // be applied (the member would take up space even if it is an empty class).
-template <typename Objective>
+template <int32_t N>
 struct GradientDescentLog {
-    using Input = typename Objective::Input;
-    using Gradient = typename Objective::Gradient;
-
     void reserve(uint32_t n) VIS_ONLY_METHOD;
-    void push_back(Input const& point, Scalar value, Gradient const& gradient) VIS_ONLY_METHOD;
+    template <typename Objective>
+    void initialize(Objective const&) VIS_ONLY_METHOD;
+    void push_back(
+        VectorNs<N> const& point,
+        Scalar value,
+        VectorNs<N> const& gradient
+    ) VIS_ONLY_METHOD;
     void clear() VIS_ONLY_METHOD;
 
     #ifdef VISUALIZE
-    std::vector<GradientSample<Objective>> samples;
+    std::string objective_name;
+    std::vector<GradientSample<VectorNs<N>>> samples;
     #endif
 };
 
 #ifdef VISUALIZE
 
 //..................................................................................................
-template <typename Objective>
-void GradientDescentLog<Objective>::reserve(uint32_t n) {
+template <int32_t N>
+void GradientDescentLog<N>::reserve(uint32_t n) {
     samples.reserve(n);
 }
 
 //..................................................................................................
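+// Recording the objective's name at run time lets to_json work without knowing the Objective type.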
+template <int32_t N>
 template <typename Objective>
-void GradientDescentLog<Objective>::push_back(
-    Input const& point,
-    Scalar value,
-    Gradient const& gradient
-) {
-    samples.emplace_back(point, value, gradient);
+void GradientDescentLog<N>::initialize(Objective const&) {
+    objective_name = Objective::name;
 }
 
 //..................................................................................................
-template <typename Objective>
-void to_json(nlohmann::json& j, GradientSample<Objective> const& sample) {
-    j = nlohmann::json{
-        {"point", sample.point},
-        {"value", sample.value},
-        {"gradient", sample.gradient}
-    };
+template <int32_t N>
+void GradientDescentLog<N>::push_back(
+    VectorNs<N> const& point,
+    Scalar value,
+    VectorNs<N> const& gradient
+) {
+    samples.emplace_back(point, value, gradient);
 }
 
-//..................................................................................................
-template <typename Objective>
-void to_json(nlohmann::json& j, GradientDescentLog<Objective> const& log) {
+//--------------------------------------------------------------------------------------------------
+template <int32_t N>
+void to_json(nlohmann::json& j, GradientDescentLog<N> const& log) {
     j = nlohmann::json{
         {"algorithm", "gradient descent"},
-        {"objective", Objective::name},
+        {"objective", log.objective_name},
         {"data", log.samples}
     };
 }
diff --git a/optimization/optimizers/gradient_descent/gradient_descent_vis.h b/optimization/optimizers/gradient_descent/gradient_descent_vis.h
index 57551c387b0b20ab4802c0ede8f8177f7947a2b9..966d63a3e53076786779446042e785b80fa9a325 100644
--- a/optimization/optimizers/gradient_descent/gradient_descent_vis.h
+++ b/optimization/optimizers/gradient_descent/gradient_descent_vis.h
@@ -7,19 +7,20 @@
 namespace optimization {
 
 //--------------------------------------------------------------------------------------------------
-template <typename Objective>
+template <int32_t N>
 struct GradientDescentVis {
-    GradientDescentLog<Objective> const& log;
+    GradientDescentLog<N> const& log;
 };
 
 //..................................................................................................
-template <typename Objective>
-void to_json(nlohmann::json& j, GradientDescentVis<Objective> const& vis) {
+template <int32_t N>
+void to_json(nlohmann::json& j, GradientDescentVis<N> const& vis) {
     j = nlohmann::json{
         {"algorithm", "gradient descent"},
-        {"objective", Objective::name},
-        // double brackets gives us an object (as opposed to an array)
-        {"data", {{"points", nlohmann::json::array()}}}
+        {"objective", vis.log.objective_name},
+        {"data", {
+            {"points", nlohmann::json::array()}
+        }}
     };
     for (auto const& sample : vis.log.samples) {
         j["data"]["points"].push_back(sample.point);
diff --git a/optimization/optimizers/gradient_descent/main.cpp b/optimization/optimizers/gradient_descent/main.cpp
index a77ae6c9ec666e501399e99cf5b521dbbcdf4e9e..854576978eb3f9f43b2965d59ef4e81c2668ad31 100644
--- a/optimization/optimizers/gradient_descent/main.cpp
+++ b/optimization/optimizers/gradient_descent/main.cpp
@@ -34,14 +34,22 @@ int main(int const argc, char const** argv) {
         exit(1);
     }
 
-    Vector2<Scalar> initial_point = Vector2<Scalar>(x0, y0);
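+    // Start at (x0, y0) padded with -1 entries, as in the other drivers; assumes dim >= 2.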
+    VectorXs initial_point;
+    initial_point.resize(dim);
+    initial_point[0] = x0;
+    initial_point[1] = y0;
+    for (uint32_t i = 2; i < dim; ++i) {
+        initial_point[i] = -1;
+    }
 
     //using Objective = Paraboloid<Vector2<Scalar>>;
     //Objective objective(dim);
-    using Objective = Rosenbrock<Vector2<Scalar>>;
+    using Objective = Rosenbrock<-1>;
     Objective objective;
-    GradientDescent<Objective> optimizer(learning_rate, n_steps);
-    VectorNs<2> minimum = optimizer.optimize(objective, initial_point);
+    objective.dim() = dim;
+
+    GradientDescent<-1> optimizer(learning_rate, n_steps);
+    VectorXs minimum = optimizer.optimize(objective, initial_point);
     std::cout << "n evaluations: " << n_steps << '\n';
     std::cout << "final point: " << minimum << '\n';
 
@@ -52,7 +60,7 @@ int main(int const argc, char const** argv) {
     }
 
     if (!vis_file_path.empty()) {
-        json data = GradientDescentVis<Objective>{optimizer};
+        json data = GradientDescentVis<-1>{optimizer};
         std::ofstream vis_file(vis_file_path);
         vis_file << data.dump(4) << '\n';
     }
diff --git a/optimization/optimizers/line_search/line_objective.h b/optimization/optimizers/line_search/line_objective.h
index 02c148a1e699ba84a6c3cb97c8824f5a1ded4ecb..d885beb2a3ca71af0783a53dcc5b9c4226a5983a 100644
--- a/optimization/optimizers/line_search/line_objective.h
+++ b/optimization/optimizers/line_search/line_objective.h
@@ -9,7 +9,7 @@ namespace optimization {
 template <typename Objective, int32_t N>
 class LineObjective {
 public:
-    LineObjective(Objective const& o): objective_(o) {
+    LineObjective(Objective& o): objective_(o) {
         x0_.resize(objective_.dim());
         dir_.resize(objective_.dim());
         x_.resize(objective_.dim());
@@ -33,7 +33,7 @@ public:
     }
 
 private:
-    Objective const& objective_;
+    Objective& objective_;
     VectorNs<N> x0_;
     VectorNs<N> dir_;
     VectorNs<N> x_;
diff --git a/optimization/optimizers/nelder_mead/CMakeLists.txt b/optimization/optimizers/nelder_mead/CMakeLists.txt
index feb04084dd19f3189ed394f0cf7146690152956a..cd984f8c32545f0334434e890783ae9120ca55b3 100644
--- a/optimization/optimizers/nelder_mead/CMakeLists.txt
+++ b/optimization/optimizers/nelder_mead/CMakeLists.txt
@@ -3,5 +3,6 @@ if (VISUALIZE)
         main.cpp
     )
     target_link_libraries(nelder_mead optimization_lib clara)
-    make_plot_target(nelder_mead)
+    make_plot_target(nelder_mead 2d ARGS -d 2)
+    make_plot_target(nelder_mead 10d ARGS -d 10 -n 10000)
 endif()
diff --git a/optimization/optimizers/nelder_mead/main.cpp b/optimization/optimizers/nelder_mead/main.cpp
index fe5fe42122c4c7c4c8406f0b478dc2e0c8edb16b..e7522264ba4dc651d55b9e6376c55db5bc8865ad 100644
--- a/optimization/optimizers/nelder_mead/main.cpp
+++ b/optimization/optimizers/nelder_mead/main.cpp
@@ -14,12 +14,14 @@ using json = nlohmann::json;
 int main(int const argc, char const** argv) {
     std::string log_file_path;
     std::string vis_file_path;
+    uint32_t dim = 2;
     uint32_t max_evaluations = 1000;
     Scalar relative_y_tolerance = 1e-8;
 
     auto const cli =
         clara::Arg(log_file_path, "log_file_path")("Location of the optimization log") |
         clara::Arg(vis_file_path, "vis_file_path")("Location of the visualization file") |
+        clara::Opt(dim, "dim")["-d"]["--dim"]("Dimension of the search space") |
         clara::Opt(max_evaluations, "max_evaluations")["-n"]["--max-evals"]("Max number of function evaluations") |
         clara::Opt(relative_y_tolerance, "relative_y_tolerance")["-y"]["--rel-y-tol"]("Relative tolerance for function values (termination condition)");
     auto const result = cli.parse(clara::Args(argc, argv));
@@ -28,17 +30,22 @@ int main(int const argc, char const** argv) {
         exit(1);
     }
 
-    Eigen::Matrix<Scalar, 2, 3> simplex;
-    simplex.col(0) = Vector2s(-1, 2);
-    simplex.col(1) = Vector2s(-1, 1);
-    simplex.col(2) = Vector2s(-2, 2);
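+    // Initial simplex, one vertex per column: the base vertex is (-1, 2, -1, ..., -1)
+    // (matching the 2D start), and vertex i + 1 shifts coordinate i by -1. Assumes dim >= 2.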
+    MatrixXs simplex;
+    simplex.resize(dim, dim + 1);
+    simplex.fill(-1);
+    simplex.row(1).fill(2);
+    for (uint32_t i = 0; i < dim; ++i) {
+        simplex(i, i + 1) -= 1;
+    }
 
     //using Objective = Paraboloid<Vector2<Scalar>>;
     //Objective objective(dim);
-    using Objective = Rosenbrock<Vector2s>;
+    using Objective = Rosenbrock<-1>;
     Objective objective;
-    NelderMead<Objective, 2> optimizer(max_evaluations, relative_y_tolerance);
-    VectorNs<2> minimum = optimizer.optimize(objective, simplex);
+    objective.dim() = dim;
+
+    NelderMead<Objective, -1> optimizer(max_evaluations, relative_y_tolerance);
+    VectorXs minimum = optimizer.optimize(objective, simplex);
     std::cout << "n evaluations: " << optimizer.n_evaluations() << '\n';
     std::cout << "final point: " << minimum << '\n';
 
@@ -49,7 +56,7 @@ int main(int const argc, char const** argv) {
     }
 
     if (!vis_file_path.empty()) {
-        json data = NelderMeadVis<Objective, 2>{optimizer};
+        json data = NelderMeadVis<Objective, -1>{optimizer};
         std::ofstream vis_file(vis_file_path);
         vis_file << data.dump(4) << '\n';
     }
diff --git a/optimization/optimizers/nelder_mead/nelder_mead.h b/optimization/optimizers/nelder_mead/nelder_mead.h
index d60f1ed5ad2f96eb1790fef339fc137af2b6b712..662d08597816d3894afe4081882f226b3c19de75 100644
--- a/optimization/optimizers/nelder_mead/nelder_mead.h
+++ b/optimization/optimizers/nelder_mead/nelder_mead.h
@@ -29,13 +29,13 @@ public:
     VectorN const& simplex_values() const { return simplex_values_; }
 
     // Each row of simplex is a vertex.
-    VectorD optimize(Objective const& objective, MatrixDN const& simplex);
+    VectorD optimize(Objective& objective, MatrixDN const& simplex);
 
     VectorD const& min_point() const { return simplex_vertices_[i_lowest_]; }
     Scalar min_value() const { return simplex_values_[i_lowest_]; }
 
 private:
-    Scalar try_new_point(Objective const& objective, Scalar factor);
+    Scalar try_new_point(Objective& objective, Scalar factor);
 
     // For fixed size D (and thus N), dim_ and n_vertices_ are redundant.
     uint32_t dim_;
@@ -62,7 +62,7 @@ private:
 
 //..................................................................................................
 template <typename Objective, int32_t D>
-auto NelderMead<Objective, D>::optimize(Objective const& objective, MatrixDN const& simplex) -> VectorD {
+auto NelderMead<Objective, D>::optimize(Objective& objective, MatrixDN const& simplex) -> VectorD {
     simplex_vertices_ = simplex;
     n_vertices_ = simplex_vertices_.cols();
     dim_ = simplex_vertices_.rows();
@@ -152,7 +152,7 @@ auto NelderMead<Objective, D>::optimize(Objective const& objective, MatrixDN con
 
 //..................................................................................................
 template <typename Objective, int32_t D>
-Scalar NelderMead<Objective, D>::try_new_point(Objective const& objective, Scalar factor) {
+Scalar NelderMead<Objective, D>::try_new_point(Objective& objective, Scalar factor) {
     // Generate a new point by reflecting/expanding/contracting the worst point.
     Scalar const t1 = (Scalar(1) - factor) / dim_;
     Scalar const t2 = factor - t1;
diff --git a/optimization/optimizers/nelder_mead/nelder_mead_log.h b/optimization/optimizers/nelder_mead/nelder_mead_log.h
index c7f20825cdd4c32b704ebc8a50fb5ddc0ed1afdc..0e39c7dfdffd6d482c0929449ce1652aee0364cf 100644
--- a/optimization/optimizers/nelder_mead/nelder_mead_log.h
+++ b/optimization/optimizers/nelder_mead/nelder_mead_log.h
@@ -81,7 +81,7 @@ void NelderMeadLog<Objective, D>::push_back(
 }
 
 
-//..................................................................................................
+//--------------------------------------------------------------------------------------------------
 template <typename Objective, int32_t D>
 void to_json(nlohmann::json& j, NelderMeadState<Objective, D> const& state) {
     j = nlohmann::json{
@@ -90,7 +90,7 @@ void to_json(nlohmann::json& j, NelderMeadState<Objective, D> const& state) {
     };
 }
 
-//..................................................................................................
+//--------------------------------------------------------------------------------------------------
 template <typename Objective, int32_t D>
 void to_json(nlohmann::json& j, NelderMeadLog<Objective, D> const& log) {
     j = nlohmann::json{