diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4320e2f95c03210248f517fc46b8ce1826cb2d96..a7f5a9745005da971d2a7a3436dc320c9a7d61ba 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,7 +1,7 @@
 cmake_minimum_required(VERSION 3.13) # 3.13 is required for target_link_options
 project(CppStarter CXX)
 
-if(NOT CMAKE_BUILD_TYPE)
+if (NOT CMAKE_BUILD_TYPE)
     set(CMAKE_BUILD_TYPE "Release")
 endif()
 message(STATUS "Build type: ${CMAKE_BUILD_TYPE}")
@@ -10,5 +10,8 @@ find_package (Eigen3 3.3 REQUIRED NO_MODULE)
 
 include(cmake/shared_settings.cmake)
 
+option(VISUALIZE "compiles extra code to allow visualization of optimization algorithms" ON)
+
+add_subdirectory(external)
 add_subdirectory(optimization)
 add_subdirectory(plots)
diff --git a/README.md b/README.md
index 46b27b401c86ea9bbdcbd07b206245f39a92e178..b9a0537bef5f9fe1bde059ed237fdbb32b45a6fd 100644
--- a/README.md
+++ b/README.md
@@ -23,3 +23,30 @@ Tips:
 - you can build using multiple threads at once using e.g. `make -j4`
 - you can specify a specific compiler using `cmake .. -DCMAKE_CXX_COMPILER=g++-8`
 - for debugging symbols use `cmake .. -DCMAKE_BUILD_TYPE=Debug`
+
+## TODO
+
+- save data to files and run Python via a make command
+- run with a single size parameter to see what happens
+
+For the first point: an EVO experiment needs a population size, learning rate, number of
+iterations, and print stride (common to all experiments), plus a starting distribution (which
+depends on the distribution type). Separate command-line tools for each experiment?
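+
+Either way, with Clara now vendored under `external/`, parsing the common options could look
+roughly like this (option names here are placeholders, nothing is decided yet):
+
+```cpp
+#include <cstdint>
+#include <iostream>
+
+#include "clara.hpp"
+
+int main(int argc, char** argv) {
+    uint32_t pop_size = 100;
+    double learning_rate = 0.01;
+    uint32_t n_iterations = 1000;
+    uint32_t print_stride = 10;
+    bool show_help = false;
+
+    auto cli = clara::Opt(pop_size, "size")["-p"]["--pop-size"]("population size")
+             | clara::Opt(learning_rate, "rate")["-l"]["--learning-rate"]("learning rate")
+             | clara::Opt(n_iterations, "iters")["-n"]["--iterations"]("number of iterations")
+             | clara::Opt(print_stride, "stride")["-s"]["--print-stride"]("print every this many iterations")
+             | clara::Help(show_help);
+
+    auto result = cli.parse(clara::Args(argc, argv));
+    if (!result) {
+        std::cerr << result.errorMessage() << "\n";
+        return 1;
+    }
+    if (show_help) {
+        std::cout << cli;
+        return 0;
+    }
+    // ... run the experiment with the parsed parameters ...
+    return 0;
+}
+```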
+
+The objective for optimization exposes an input type and a gradient type, and can evaluate either
+one separately or both at once (sketch below).
+- should the objective expose the actual input type, or a const ref to it? The uses will always be
+  `const&`, so I'll add the `const&` where the types are used (I need to be able to construct the
+  plain type in some contexts)
+- the gradient type can be multiplied by a scalar; the input type supports `+=` with a gradient
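+
+A minimal sketch of that shape, using a toy quadratic (the names are placeholders; `Scalar` and
+`VectorX` are the aliases from `optimization/`):
+
+```cpp
+#include "scalar.h"
+#include "vector.h"
+
+namespace optimization {
+
+struct Quadratic {
+    using Input    = VectorX<Scalar>;
+    using Gradient = VectorX<Scalar>;
+
+    // evaluate the pieces separately...
+    Scalar   value(Input const& x) const    { return x.squaredNorm(); }
+    Gradient gradient(Input const& x) const { return Scalar(2) * x; }
+
+    // ...or both at once
+    void eval(Input const& x, Scalar& value_out, Gradient& gradient_out) const {
+        value_out    = x.squaredNorm();
+        gradient_out = Scalar(2) * x;
+    }
+};
+
+}
+```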
+
+Expected value estimator? (rough sketch below)
+- stores `n_samples`
+- takes an objective and a distribution (the objective only needs to evaluate points)
+- its input type is `distribution::params`
+- its gradient type is `distribution::score` (hm, should I call this gradient?)
+- can estimate the value at `params`
+- can estimate the gradient w.r.t. `params`, at `params`
+
+Then I'll only have to swap out the distribution; the expected-value machinery stays the same.
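+
+A rough sketch of that estimator (not necessarily what `sfg_estimator.h` ends up being; it also
+assumes the score type supports `+=` and comes out zeroed from `set_dim`, neither of which is true
+yet):
+
+```cpp
+#include <cstdint>
+#include <utility>
+
+#include "scalar.h"
+
+namespace optimization {
+
+template <typename Objective, typename Distribution>
+class ExpectedValueEstimator {
+public:
+    using Input    = typename Distribution::Parameters;
+    using Gradient = typename Distribution::Score;
+
+    ExpectedValueEstimator(Objective objective, Distribution distribution, uint32_t n_samples)
+        : objective_(std::move(objective)),
+          distribution_(std::move(distribution)),
+          n_samples_(n_samples) {}
+
+    // Monte Carlo estimate of E_{x ~ p(. | params)}[ f(x) ].
+    Scalar value(Input const& params) const {
+        Scalar sum = 0;
+        for (uint32_t i = 0; i < n_samples_; ++i)
+            sum += objective_(distribution_.sample(params));
+        return sum / n_samples_;
+    }
+
+    // Score-function estimate of the gradient of E[f] w.r.t. params:
+    // (1/N) * sum_i f(x_i) * score(params, x_i)  with  x_i ~ p(. | params).
+    Gradient gradient(Input const& params) const {
+        Gradient total;
+        distribution_.set_dim(total);  // assumed to leave the score zeroed
+        for (uint32_t i = 0; i < n_samples_; ++i) {
+            auto x = distribution_.sample(params);
+            total += distribution_.score(params, x) * (objective_(x) / n_samples_);  // assumes Score::operator+=
+        }
+        return total;
+    }
+
+private:
+    Objective objective_;
+    Distribution distribution_;
+    uint32_t n_samples_;
+};
+
+}
+```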
+
+How to handle data logging?
diff --git a/external/CMakeLists.txt b/external/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6dab21dc39a613666248075ec17cf114db4134cb
--- /dev/null
+++ b/external/CMakeLists.txt
@@ -0,0 +1,3 @@
+# Clara
+add_library(clara INTERFACE)
+target_include_directories(clara INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/clara)
diff --git a/external/clara/clara.hpp b/external/clara/clara.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..6be5a98b14b0b1f4ba555619f6f54658eed9ab25
--- /dev/null
+++ b/external/clara/clara.hpp
@@ -0,0 +1,1264 @@
+// Copyright 2017 Two Blue Cubes Ltd. All rights reserved.
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+// See https://github.com/philsquared/Clara for more details
+
+// Clara v1.1.5
+
+#ifndef CLARA_HPP_INCLUDED
+#define CLARA_HPP_INCLUDED
+
+#ifndef CLARA_CONFIG_CONSOLE_WIDTH
+#define CLARA_CONFIG_CONSOLE_WIDTH 80
+#endif
+
+#ifndef CLARA_TEXTFLOW_CONFIG_CONSOLE_WIDTH
+#define CLARA_TEXTFLOW_CONFIG_CONSOLE_WIDTH CLARA_CONFIG_CONSOLE_WIDTH
+#endif
+
+#ifndef CLARA_CONFIG_OPTIONAL_TYPE
+#ifdef __has_include
+#if __has_include(<optional>) && __cplusplus >= 201703L
+#include <optional>
+#define CLARA_CONFIG_OPTIONAL_TYPE std::optional
+#endif
+#endif
+#endif
+
+
+// ----------- #included from clara_textflow.hpp -----------
+
+// TextFlowCpp
+//
+// A single-header library for wrapping and laying out basic text, by Phil Nash
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+// This project is hosted at https://github.com/philsquared/textflowcpp
+
+#ifndef CLARA_TEXTFLOW_HPP_INCLUDED
+#define CLARA_TEXTFLOW_HPP_INCLUDED
+
+#include <cassert>
+#include <ostream>
+#include <sstream>
+#include <vector>
+
+#ifndef CLARA_TEXTFLOW_CONFIG_CONSOLE_WIDTH
+#define CLARA_TEXTFLOW_CONFIG_CONSOLE_WIDTH 80
+#endif
+
+
+namespace clara { namespace TextFlow {
+
+    inline auto isWhitespace( char c ) -> bool {
+        static std::string chars = " \t\n\r";
+        return chars.find( c ) != std::string::npos;
+    }
+    inline auto isBreakableBefore( char c ) -> bool {
+        static std::string chars = "[({<|";
+        return chars.find( c ) != std::string::npos;
+    }
+    inline auto isBreakableAfter( char c ) -> bool {
+        static std::string chars = "])}>.,:;*+-=&/\\";
+        return chars.find( c ) != std::string::npos;
+    }
+
+    class Columns;
+
+    class Column {
+        std::vector<std::string> m_strings;
+        size_t m_width = CLARA_TEXTFLOW_CONFIG_CONSOLE_WIDTH;
+        size_t m_indent = 0;
+        size_t m_initialIndent = std::string::npos;
+
+    public:
+        class iterator {
+            friend Column;
+
+            Column const& m_column;
+            size_t m_stringIndex = 0;
+            size_t m_pos = 0;
+
+            size_t m_len = 0;
+            size_t m_end = 0;
+            bool m_suffix = false;
+
+            iterator( Column const& column, size_t stringIndex )
+            :   m_column( column ),
+                m_stringIndex( stringIndex )
+            {}
+
+            auto line() const -> std::string const& { return m_column.m_strings[m_stringIndex]; }
+
+            auto isBoundary( size_t at ) const -> bool {
+                assert( at > 0 );
+                assert( at <= line().size() );
+
+                return at == line().size() ||
+                       ( isWhitespace( line()[at] ) && !isWhitespace( line()[at-1] ) ) ||
+                       isBreakableBefore( line()[at] ) ||
+                       isBreakableAfter( line()[at-1] );
+            }
+
+            void calcLength() {
+                assert( m_stringIndex < m_column.m_strings.size() );
+
+                m_suffix = false;
+                auto width = m_column.m_width-indent();
+                m_end = m_pos;
+                while( m_end < line().size() && line()[m_end] != '\n' )
+                    ++m_end;
+
+                if( m_end < m_pos + width ) {
+                    m_len = m_end - m_pos;
+                }
+                else {
+                    size_t len = width;
+                    while (len > 0 && !isBoundary(m_pos + len))
+                        --len;
+                    while (len > 0 && isWhitespace( line()[m_pos + len - 1] ))
+                        --len;
+
+                    if (len > 0) {
+                        m_len = len;
+                    } else {
+                        m_suffix = true;
+                        m_len = width - 1;
+                    }
+                }
+            }
+
+            auto indent() const -> size_t {
+                auto initial = m_pos == 0 && m_stringIndex == 0 ? m_column.m_initialIndent : std::string::npos;
+                return initial == std::string::npos ? m_column.m_indent : initial;
+            }
+
+            auto addIndentAndSuffix(std::string const &plain) const -> std::string {
+                return std::string( indent(), ' ' ) + (m_suffix ? plain + "-" : plain);
+            }
+
+        public:
+            using difference_type = std::ptrdiff_t;
+            using value_type = std::string;
+            using pointer = value_type*;
+            using reference = value_type&;
+            using iterator_category = std::forward_iterator_tag;
+
+            explicit iterator( Column const& column ) : m_column( column ) {
+                assert( m_column.m_width > m_column.m_indent );
+                assert( m_column.m_initialIndent == std::string::npos || m_column.m_width > m_column.m_initialIndent );
+                calcLength();
+                if( m_len == 0 )
+                    m_stringIndex++; // Empty string
+            }
+
+            auto operator *() const -> std::string {
+                assert( m_stringIndex < m_column.m_strings.size() );
+                assert( m_pos <= m_end );
+                return addIndentAndSuffix(line().substr(m_pos, m_len));
+            }
+
+            auto operator ++() -> iterator& {
+                m_pos += m_len;
+                if( m_pos < line().size() && line()[m_pos] == '\n' )
+                    m_pos += 1;
+                else
+                    while( m_pos < line().size() && isWhitespace( line()[m_pos] ) )
+                        ++m_pos;
+
+                if( m_pos == line().size() ) {
+                    m_pos = 0;
+                    ++m_stringIndex;
+                }
+                if( m_stringIndex < m_column.m_strings.size() )
+                    calcLength();
+                return *this;
+            }
+            auto operator ++(int) -> iterator {
+                iterator prev( *this );
+                operator++();
+                return prev;
+            }
+
+            auto operator ==( iterator const& other ) const -> bool {
+                return
+                    m_pos == other.m_pos &&
+                    m_stringIndex == other.m_stringIndex &&
+                    &m_column == &other.m_column;
+            }
+            auto operator !=( iterator const& other ) const -> bool {
+                return !operator==( other );
+            }
+        };
+        using const_iterator = iterator;
+
+        explicit Column( std::string const& text ) { m_strings.push_back( text ); }
+
+        auto width( size_t newWidth ) -> Column& {
+            assert( newWidth > 0 );
+            m_width = newWidth;
+            return *this;
+        }
+        auto indent( size_t newIndent ) -> Column& {
+            m_indent = newIndent;
+            return *this;
+        }
+        auto initialIndent( size_t newIndent ) -> Column& {
+            m_initialIndent = newIndent;
+            return *this;
+        }
+
+        auto width() const -> size_t { return m_width; }
+        auto begin() const -> iterator { return iterator( *this ); }
+        auto end() const -> iterator { return { *this, m_strings.size() }; }
+
+        inline friend std::ostream& operator << ( std::ostream& os, Column const& col ) {
+            bool first = true;
+            for( auto line : col ) {
+                if( first )
+                    first = false;
+                else
+                    os << "\n";
+                os <<  line;
+            }
+            return os;
+        }
+
+        auto operator + ( Column const& other ) -> Columns;
+
+        auto toString() const -> std::string {
+            std::ostringstream oss;
+            oss << *this;
+            return oss.str();
+        }
+    };
+
+    class Spacer : public Column {
+
+    public:
+        explicit Spacer( size_t spaceWidth ) : Column( "" ) {
+            width( spaceWidth );
+        }
+    };
+
+    class Columns {
+        std::vector<Column> m_columns;
+
+    public:
+
+        class iterator {
+            friend Columns;
+            struct EndTag {};
+
+            std::vector<Column> const& m_columns;
+            std::vector<Column::iterator> m_iterators;
+            size_t m_activeIterators;
+
+            iterator( Columns const& columns, EndTag )
+            :   m_columns( columns.m_columns ),
+                m_activeIterators( 0 )
+            {
+                m_iterators.reserve( m_columns.size() );
+
+                for( auto const& col : m_columns )
+                    m_iterators.push_back( col.end() );
+            }
+
+        public:
+            using difference_type = std::ptrdiff_t;
+            using value_type = std::string;
+            using pointer = value_type*;
+            using reference = value_type&;
+            using iterator_category = std::forward_iterator_tag;
+
+            explicit iterator( Columns const& columns )
+            :   m_columns( columns.m_columns ),
+                m_activeIterators( m_columns.size() )
+            {
+                m_iterators.reserve( m_columns.size() );
+
+                for( auto const& col : m_columns )
+                    m_iterators.push_back( col.begin() );
+            }
+
+            auto operator ==( iterator const& other ) const -> bool {
+                return m_iterators == other.m_iterators;
+            }
+            auto operator !=( iterator const& other ) const -> bool {
+                return m_iterators != other.m_iterators;
+            }
+            auto operator *() const -> std::string {
+                std::string row, padding;
+
+                for( size_t i = 0; i < m_columns.size(); ++i ) {
+                    auto width = m_columns[i].width();
+                    if( m_iterators[i] != m_columns[i].end() ) {
+                        std::string col = *m_iterators[i];
+                        row += padding + col;
+                        if( col.size() < width )
+                            padding = std::string( width - col.size(), ' ' );
+                        else
+                            padding = "";
+                    }
+                    else {
+                        padding += std::string( width, ' ' );
+                    }
+                }
+                return row;
+            }
+            auto operator ++() -> iterator& {
+                for( size_t i = 0; i < m_columns.size(); ++i ) {
+                    if (m_iterators[i] != m_columns[i].end())
+                        ++m_iterators[i];
+                }
+                return *this;
+            }
+            auto operator ++(int) -> iterator {
+                iterator prev( *this );
+                operator++();
+                return prev;
+            }
+        };
+        using const_iterator = iterator;
+
+        auto begin() const -> iterator { return iterator( *this ); }
+        auto end() const -> iterator { return { *this, iterator::EndTag() }; }
+
+        auto operator += ( Column const& col ) -> Columns& {
+            m_columns.push_back( col );
+            return *this;
+        }
+        auto operator + ( Column const& col ) -> Columns {
+            Columns combined = *this;
+            combined += col;
+            return combined;
+        }
+
+        inline friend std::ostream& operator << ( std::ostream& os, Columns const& cols ) {
+
+            bool first = true;
+            for( auto line : cols ) {
+                if( first )
+                    first = false;
+                else
+                    os << "\n";
+                os << line;
+            }
+            return os;
+        }
+
+        auto toString() const -> std::string {
+            std::ostringstream oss;
+            oss << *this;
+            return oss.str();
+        }
+    };
+
+    inline auto Column::operator + ( Column const& other ) -> Columns {
+        Columns cols;
+        cols += *this;
+        cols += other;
+        return cols;
+    }
+}}
+
+#endif // CLARA_TEXTFLOW_HPP_INCLUDED
+
+// ----------- end of #include from clara_textflow.hpp -----------
+// ........... back in clara.hpp
+
+
+#include <memory>
+#include <set>
+#include <algorithm>
+
+#if !defined(CLARA_PLATFORM_WINDOWS) && ( defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER) )
+#define CLARA_PLATFORM_WINDOWS
+#endif
+
+namespace clara {
+namespace detail {
+
+    // Traits for extracting arg and return type of lambdas (for single argument lambdas)
+    template<typename L>
+    struct UnaryLambdaTraits : UnaryLambdaTraits<decltype( &L::operator() )> {};
+
+    template<typename ClassT, typename ReturnT, typename... Args>
+    struct UnaryLambdaTraits<ReturnT( ClassT::* )( Args... ) const> {
+        static const bool isValid = false;
+    };
+
+    template<typename ClassT, typename ReturnT, typename ArgT>
+    struct UnaryLambdaTraits<ReturnT( ClassT::* )( ArgT ) const> {
+        static const bool isValid = true;
+        using ArgType = typename std::remove_const<typename std::remove_reference<ArgT>::type>::type;
+        using ReturnType = ReturnT;
+    };
+
+    class TokenStream;
+
+    // Transport for raw args (copied from main args, or supplied via init list for testing)
+    class Args {
+        friend TokenStream;
+        std::string m_exeName;
+        std::vector<std::string> m_args;
+
+    public:
+        Args( int argc, char const* const* argv )
+            : m_exeName(argv[0]),
+              m_args(argv + 1, argv + argc) {}
+
+        Args( std::initializer_list<std::string> args )
+        :   m_exeName( *args.begin() ),
+            m_args( args.begin()+1, args.end() )
+        {}
+
+        auto exeName() const -> std::string {
+            return m_exeName;
+        }
+    };
+
+    // Wraps a token coming from a token stream. These may not directly correspond to strings as a single string
+    // may encode an option + its argument if the : or = form is used
+    enum class TokenType {
+        Option, Argument
+    };
+    struct Token {
+        TokenType type;
+        std::string token;
+    };
+
+    inline auto isOptPrefix( char c ) -> bool {
+        return c == '-'
+#ifdef CLARA_PLATFORM_WINDOWS
+            || c == '/'
+#endif
+        ;
+    }
+
+    // Abstracts iterators into args as a stream of tokens, with option arguments uniformly handled
+    class TokenStream {
+        using Iterator = std::vector<std::string>::const_iterator;
+        Iterator it;
+        Iterator itEnd;
+        std::vector<Token> m_tokenBuffer;
+
+        void loadBuffer() {
+            m_tokenBuffer.resize( 0 );
+
+            // Skip any empty strings
+            while( it != itEnd && it->empty() )
+                ++it;
+
+            if( it != itEnd ) {
+                auto const &next = *it;
+                if( isOptPrefix( next[0] ) ) {
+                    auto delimiterPos = next.find_first_of( " :=" );
+                    if( delimiterPos != std::string::npos ) {
+                        m_tokenBuffer.push_back( { TokenType::Option, next.substr( 0, delimiterPos ) } );
+                        m_tokenBuffer.push_back( { TokenType::Argument, next.substr( delimiterPos + 1 ) } );
+                    } else {
+                        if( next[1] != '-' && next.size() > 2 ) {
+                            std::string opt = "- ";
+                            for( size_t i = 1; i < next.size(); ++i ) {
+                                opt[1] = next[i];
+                                m_tokenBuffer.push_back( { TokenType::Option, opt } );
+                            }
+                        } else {
+                            m_tokenBuffer.push_back( { TokenType::Option, next } );
+                        }
+                    }
+                } else {
+                    m_tokenBuffer.push_back( { TokenType::Argument, next } );
+                }
+            }
+        }
+
+    public:
+        explicit TokenStream( Args const &args ) : TokenStream( args.m_args.begin(), args.m_args.end() ) {}
+
+        TokenStream( Iterator it, Iterator itEnd ) : it( it ), itEnd( itEnd ) {
+            loadBuffer();
+        }
+
+        explicit operator bool() const {
+            return !m_tokenBuffer.empty() || it != itEnd;
+        }
+
+        auto count() const -> size_t { return m_tokenBuffer.size() + (itEnd - it); }
+
+        auto operator*() const -> Token {
+            assert( !m_tokenBuffer.empty() );
+            return m_tokenBuffer.front();
+        }
+
+        auto operator->() const -> Token const * {
+            assert( !m_tokenBuffer.empty() );
+            return &m_tokenBuffer.front();
+        }
+
+        auto operator++() -> TokenStream & {
+            if( m_tokenBuffer.size() >= 2 ) {
+                m_tokenBuffer.erase( m_tokenBuffer.begin() );
+            } else {
+                if( it != itEnd )
+                    ++it;
+                loadBuffer();
+            }
+            return *this;
+        }
+    };
+
+
+    class ResultBase {
+    public:
+        enum Type {
+            Ok, LogicError, RuntimeError
+        };
+
+    protected:
+        ResultBase( Type type ) : m_type( type ) {}
+        virtual ~ResultBase() = default;
+
+        virtual void enforceOk() const = 0;
+
+        Type m_type;
+    };
+
+    template<typename T>
+    class ResultValueBase : public ResultBase {
+    public:
+        auto value() const -> T const & {
+            enforceOk();
+            return m_value;
+        }
+
+    protected:
+        ResultValueBase( Type type ) : ResultBase( type ) {}
+
+        ResultValueBase( ResultValueBase const &other ) : ResultBase( other ) {
+            if( m_type == ResultBase::Ok )
+                new( &m_value ) T( other.m_value );
+        }
+
+        ResultValueBase( Type, T const &value ) : ResultBase( Ok ) {
+            new( &m_value ) T( value );
+        }
+
+        auto operator=( ResultValueBase const &other ) -> ResultValueBase & {
+            if( m_type == ResultBase::Ok )
+                m_value.~T();
+            ResultBase::operator=(other);
+            if( m_type == ResultBase::Ok )
+                new( &m_value ) T( other.m_value );
+            return *this;
+        }
+
+        ~ResultValueBase() override {
+            if( m_type == Ok )
+                m_value.~T();
+        }
+
+        union {
+            T m_value;
+        };
+    };
+
+    template<>
+    class ResultValueBase<void> : public ResultBase {
+    protected:
+        using ResultBase::ResultBase;
+    };
+
+    template<typename T = void>
+    class BasicResult : public ResultValueBase<T> {
+    public:
+        template<typename U>
+        explicit BasicResult( BasicResult<U> const &other )
+        :   ResultValueBase<T>( other.type() ),
+            m_errorMessage( other.errorMessage() )
+        {
+            assert( type() != ResultBase::Ok );
+        }
+
+        template<typename U>
+        static auto ok( U const &value ) -> BasicResult { return { ResultBase::Ok, value }; }
+        static auto ok() -> BasicResult { return { ResultBase::Ok }; }
+        static auto logicError( std::string const &message ) -> BasicResult { return { ResultBase::LogicError, message }; }
+        static auto runtimeError( std::string const &message ) -> BasicResult { return { ResultBase::RuntimeError, message }; }
+
+        explicit operator bool() const { return m_type == ResultBase::Ok; }
+        auto type() const -> ResultBase::Type { return m_type; }
+        auto errorMessage() const -> std::string { return m_errorMessage; }
+
+    protected:
+        void enforceOk() const override {
+
+            // Errors shouldn't reach this point, but if they do
+            // the actual error message will be in m_errorMessage
+            assert( m_type != ResultBase::LogicError );
+            assert( m_type != ResultBase::RuntimeError );
+            if( m_type != ResultBase::Ok )
+                std::abort();
+        }
+
+        std::string m_errorMessage; // Only populated if resultType is an error
+
+        BasicResult( ResultBase::Type type, std::string const &message )
+        :   ResultValueBase<T>(type),
+            m_errorMessage(message)
+        {
+            assert( m_type != ResultBase::Ok );
+        }
+
+        using ResultValueBase<T>::ResultValueBase;
+        using ResultBase::m_type;
+    };
+
+    enum class ParseResultType {
+        Matched, NoMatch, ShortCircuitAll, ShortCircuitSame
+    };
+
+    class ParseState {
+    public:
+
+        ParseState( ParseResultType type, TokenStream const &remainingTokens )
+        : m_type(type),
+          m_remainingTokens( remainingTokens )
+        {}
+
+        auto type() const -> ParseResultType { return m_type; }
+        auto remainingTokens() const -> TokenStream { return m_remainingTokens; }
+
+    private:
+        ParseResultType m_type;
+        TokenStream m_remainingTokens;
+    };
+
+    using Result = BasicResult<void>;
+    using ParserResult = BasicResult<ParseResultType>;
+    using InternalParseResult = BasicResult<ParseState>;
+
+    struct HelpColumns {
+        std::string left;
+        std::string right;
+    };
+
+    template<typename T>
+    inline auto convertInto( std::string const &source, T& target ) -> ParserResult {
+        std::stringstream ss;
+        ss << source;
+        ss >> target;
+        if( ss.fail() )
+            return ParserResult::runtimeError( "Unable to convert '" + source + "' to destination type" );
+        else
+            return ParserResult::ok( ParseResultType::Matched );
+    }
+    inline auto convertInto( std::string const &source, std::string& target ) -> ParserResult {
+        target = source;
+        return ParserResult::ok( ParseResultType::Matched );
+    }
+    inline auto convertInto( std::string const &source, bool &target ) -> ParserResult {
+        std::string srcLC = source;
+        std::transform( srcLC.begin(), srcLC.end(), srcLC.begin(), []( char c ) { return static_cast<char>( ::tolower(c) ); } );
+        if (srcLC == "y" || srcLC == "1" || srcLC == "true" || srcLC == "yes" || srcLC == "on")
+            target = true;
+        else if (srcLC == "n" || srcLC == "0" || srcLC == "false" || srcLC == "no" || srcLC == "off")
+            target = false;
+        else
+            return ParserResult::runtimeError( "Expected a boolean value but did not recognise: '" + source + "'" );
+        return ParserResult::ok( ParseResultType::Matched );
+    }
+#ifdef CLARA_CONFIG_OPTIONAL_TYPE
+    template<typename T>
+    inline auto convertInto( std::string const &source, CLARA_CONFIG_OPTIONAL_TYPE<T>& target ) -> ParserResult {
+        T temp;
+        auto result = convertInto( source, temp );
+        if( result )
+            target = std::move(temp);
+        return result;
+    }
+#endif // CLARA_CONFIG_OPTIONAL_TYPE
+
+    struct NonCopyable {
+        NonCopyable() = default;
+        NonCopyable( NonCopyable const & ) = delete;
+        NonCopyable( NonCopyable && ) = delete;
+        NonCopyable &operator=( NonCopyable const & ) = delete;
+        NonCopyable &operator=( NonCopyable && ) = delete;
+    };
+
+    struct BoundRef : NonCopyable {
+        virtual ~BoundRef() = default;
+        virtual auto isContainer() const -> bool { return false; }
+        virtual auto isFlag() const -> bool { return false; }
+    };
+    struct BoundValueRefBase : BoundRef {
+        virtual auto setValue( std::string const &arg ) -> ParserResult = 0;
+    };
+    struct BoundFlagRefBase : BoundRef {
+        virtual auto setFlag( bool flag ) -> ParserResult = 0;
+        virtual auto isFlag() const -> bool { return true; }
+    };
+
+    template<typename T>
+    struct BoundValueRef : BoundValueRefBase {
+        T &m_ref;
+
+        explicit BoundValueRef( T &ref ) : m_ref( ref ) {}
+
+        auto setValue( std::string const &arg ) -> ParserResult override {
+            return convertInto( arg, m_ref );
+        }
+    };
+
+    template<typename T>
+    struct BoundValueRef<std::vector<T>> : BoundValueRefBase {
+        std::vector<T> &m_ref;
+
+        explicit BoundValueRef( std::vector<T> &ref ) : m_ref( ref ) {}
+
+        auto isContainer() const -> bool override { return true; }
+
+        auto setValue( std::string const &arg ) -> ParserResult override {
+            T temp;
+            auto result = convertInto( arg, temp );
+            if( result )
+                m_ref.push_back( temp );
+            return result;
+        }
+    };
+
+    struct BoundFlagRef : BoundFlagRefBase {
+        bool &m_ref;
+
+        explicit BoundFlagRef( bool &ref ) : m_ref( ref ) {}
+
+        auto setFlag( bool flag ) -> ParserResult override {
+            m_ref = flag;
+            return ParserResult::ok( ParseResultType::Matched );
+        }
+    };
+
+    template<typename ReturnType>
+    struct LambdaInvoker {
+        static_assert( std::is_same<ReturnType, ParserResult>::value, "Lambda must return void or clara::ParserResult" );
+
+        template<typename L, typename ArgType>
+        static auto invoke( L const &lambda, ArgType const &arg ) -> ParserResult {
+            return lambda( arg );
+        }
+    };
+
+    template<>
+    struct LambdaInvoker<void> {
+        template<typename L, typename ArgType>
+        static auto invoke( L const &lambda, ArgType const &arg ) -> ParserResult {
+            lambda( arg );
+            return ParserResult::ok( ParseResultType::Matched );
+        }
+    };
+
+    template<typename ArgType, typename L>
+    inline auto invokeLambda( L const &lambda, std::string const &arg ) -> ParserResult {
+        ArgType temp{};
+        auto result = convertInto( arg, temp );
+        return !result
+           ? result
+           : LambdaInvoker<typename UnaryLambdaTraits<L>::ReturnType>::invoke( lambda, temp );
+    }
+
+
+    template<typename L>
+    struct BoundLambda : BoundValueRefBase {
+        L m_lambda;
+
+        static_assert( UnaryLambdaTraits<L>::isValid, "Supplied lambda must take exactly one argument" );
+        explicit BoundLambda( L const &lambda ) : m_lambda( lambda ) {}
+
+        auto setValue( std::string const &arg ) -> ParserResult override {
+            return invokeLambda<typename UnaryLambdaTraits<L>::ArgType>( m_lambda, arg );
+        }
+    };
+
+    template<typename L>
+    struct BoundFlagLambda : BoundFlagRefBase {
+        L m_lambda;
+
+        static_assert( UnaryLambdaTraits<L>::isValid, "Supplied lambda must take exactly one argument" );
+        static_assert( std::is_same<typename UnaryLambdaTraits<L>::ArgType, bool>::value, "flags must be boolean" );
+
+        explicit BoundFlagLambda( L const &lambda ) : m_lambda( lambda ) {}
+
+        auto setFlag( bool flag ) -> ParserResult override {
+            return LambdaInvoker<typename UnaryLambdaTraits<L>::ReturnType>::invoke( m_lambda, flag );
+        }
+    };
+
+    enum class Optionality { Optional, Required };
+
+    struct Parser;
+
+    class ParserBase {
+    public:
+        virtual ~ParserBase() = default;
+        virtual auto validate() const -> Result { return Result::ok(); }
+        virtual auto parse( std::string const& exeName, TokenStream const &tokens) const -> InternalParseResult  = 0;
+        virtual auto cardinality() const -> size_t { return 1; }
+
+        auto parse( Args const &args ) const -> InternalParseResult {
+            return parse( args.exeName(), TokenStream( args ) );
+        }
+    };
+
+    template<typename DerivedT>
+    class ComposableParserImpl : public ParserBase {
+    public:
+        template<typename T>
+        auto operator|( T const &other ) const -> Parser;
+
+		template<typename T>
+        auto operator+( T const &other ) const -> Parser;
+    };
+
+    // Common code and state for Args and Opts
+    template<typename DerivedT>
+    class ParserRefImpl : public ComposableParserImpl<DerivedT> {
+    protected:
+        Optionality m_optionality = Optionality::Optional;
+        std::shared_ptr<BoundRef> m_ref;
+        std::string m_hint;
+        std::string m_description;
+
+        explicit ParserRefImpl( std::shared_ptr<BoundRef> const &ref ) : m_ref( ref ) {}
+
+    public:
+        template<typename T>
+        ParserRefImpl( T &ref, std::string const &hint )
+        :   m_ref( std::make_shared<BoundValueRef<T>>( ref ) ),
+            m_hint( hint )
+        {}
+
+        template<typename LambdaT>
+        ParserRefImpl( LambdaT const &ref, std::string const &hint )
+        :   m_ref( std::make_shared<BoundLambda<LambdaT>>( ref ) ),
+            m_hint(hint)
+        {}
+
+        auto operator()( std::string const &description ) -> DerivedT & {
+            m_description = description;
+            return static_cast<DerivedT &>( *this );
+        }
+
+        auto optional() -> DerivedT & {
+            m_optionality = Optionality::Optional;
+            return static_cast<DerivedT &>( *this );
+        };
+
+        auto required() -> DerivedT & {
+            m_optionality = Optionality::Required;
+            return static_cast<DerivedT &>( *this );
+        };
+
+        auto isOptional() const -> bool {
+            return m_optionality == Optionality::Optional;
+        }
+
+        auto cardinality() const -> size_t override {
+            if( m_ref->isContainer() )
+                return 0;
+            else
+                return 1;
+        }
+
+        auto hint() const -> std::string { return m_hint; }
+    };
+
+    class ExeName : public ComposableParserImpl<ExeName> {
+        std::shared_ptr<std::string> m_name;
+        std::shared_ptr<BoundValueRefBase> m_ref;
+
+        template<typename LambdaT>
+        static auto makeRef(LambdaT const &lambda) -> std::shared_ptr<BoundValueRefBase> {
+            return std::make_shared<BoundLambda<LambdaT>>( lambda) ;
+        }
+
+    public:
+        ExeName() : m_name( std::make_shared<std::string>( "<executable>" ) ) {}
+
+        explicit ExeName( std::string &ref ) : ExeName() {
+            m_ref = std::make_shared<BoundValueRef<std::string>>( ref );
+        }
+
+        template<typename LambdaT>
+        explicit ExeName( LambdaT const& lambda ) : ExeName() {
+            m_ref = std::make_shared<BoundLambda<LambdaT>>( lambda );
+        }
+
+        // The exe name is not parsed out of the normal tokens, but is handled specially
+        auto parse( std::string const&, TokenStream const &tokens ) const -> InternalParseResult override {
+            return InternalParseResult::ok( ParseState( ParseResultType::NoMatch, tokens ) );
+        }
+
+        auto name() const -> std::string { return *m_name; }
+        auto set( std::string const& newName ) -> ParserResult {
+
+            auto lastSlash = newName.find_last_of( "\\/" );
+            auto filename = ( lastSlash == std::string::npos )
+                    ? newName
+                    : newName.substr( lastSlash+1 );
+
+            *m_name = filename;
+            if( m_ref )
+                return m_ref->setValue( filename );
+            else
+                return ParserResult::ok( ParseResultType::Matched );
+        }
+    };
+
+    class Arg : public ParserRefImpl<Arg> {
+    public:
+        using ParserRefImpl::ParserRefImpl;
+
+        auto parse( std::string const &, TokenStream const &tokens ) const -> InternalParseResult override {
+            auto validationResult = validate();
+            if( !validationResult )
+                return InternalParseResult( validationResult );
+
+            auto remainingTokens = tokens;
+            auto const &token = *remainingTokens;
+            if( token.type != TokenType::Argument )
+                return InternalParseResult::ok( ParseState( ParseResultType::NoMatch, remainingTokens ) );
+
+            assert( !m_ref->isFlag() );
+            auto valueRef = static_cast<detail::BoundValueRefBase*>( m_ref.get() );
+
+            auto result = valueRef->setValue( remainingTokens->token );
+            if( !result )
+                return InternalParseResult( result );
+            else
+                return InternalParseResult::ok( ParseState( ParseResultType::Matched, ++remainingTokens ) );
+        }
+    };
+
+    inline auto normaliseOpt( std::string const &optName ) -> std::string {
+#ifdef CLARA_PLATFORM_WINDOWS
+        if( optName[0] == '/' )
+            return "-" + optName.substr( 1 );
+        else
+#endif
+            return optName;
+    }
+
+    class Opt : public ParserRefImpl<Opt> {
+    protected:
+        std::vector<std::string> m_optNames;
+
+    public:
+        template<typename LambdaT>
+        explicit Opt( LambdaT const &ref ) : ParserRefImpl( std::make_shared<BoundFlagLambda<LambdaT>>( ref ) ) {}
+
+        explicit Opt( bool &ref ) : ParserRefImpl( std::make_shared<BoundFlagRef>( ref ) ) {}
+
+        template<typename LambdaT>
+        Opt( LambdaT const &ref, std::string const &hint ) : ParserRefImpl( ref, hint ) {}
+
+        template<typename T>
+        Opt( T &ref, std::string const &hint ) : ParserRefImpl( ref, hint ) {}
+
+        auto operator[]( std::string const &optName ) -> Opt & {
+            m_optNames.push_back( optName );
+            return *this;
+        }
+
+        auto getHelpColumns() const -> std::vector<HelpColumns> {
+            std::ostringstream oss;
+            bool first = true;
+            for( auto const &opt : m_optNames ) {
+                if (first)
+                    first = false;
+                else
+                    oss << ", ";
+                oss << opt;
+            }
+            if( !m_hint.empty() )
+                oss << " <" << m_hint << ">";
+            return { { oss.str(), m_description } };
+        }
+
+        auto isMatch( std::string const &optToken ) const -> bool {
+            auto normalisedToken = normaliseOpt( optToken );
+            for( auto const &name : m_optNames ) {
+                if( normaliseOpt( name ) == normalisedToken )
+                    return true;
+            }
+            return false;
+        }
+
+        using ParserBase::parse;
+
+        auto parse( std::string const&, TokenStream const &tokens ) const -> InternalParseResult override {
+            auto validationResult = validate();
+            if( !validationResult )
+                return InternalParseResult( validationResult );
+
+            auto remainingTokens = tokens;
+            if( remainingTokens && remainingTokens->type == TokenType::Option ) {
+                auto const &token = *remainingTokens;
+                if( isMatch(token.token ) ) {
+                    if( m_ref->isFlag() ) {
+                        auto flagRef = static_cast<detail::BoundFlagRefBase*>( m_ref.get() );
+                        auto result = flagRef->setFlag( true );
+                        if( !result )
+                            return InternalParseResult( result );
+                        if( result.value() == ParseResultType::ShortCircuitAll )
+                            return InternalParseResult::ok( ParseState( result.value(), remainingTokens ) );
+                    } else {
+                        auto valueRef = static_cast<detail::BoundValueRefBase*>( m_ref.get() );
+                        ++remainingTokens;
+                        if( !remainingTokens )
+                            return InternalParseResult::runtimeError( "Expected argument following " + token.token );
+                        auto const &argToken = *remainingTokens;
+                        if( argToken.type != TokenType::Argument )
+                            return InternalParseResult::runtimeError( "Expected argument following " + token.token );
+                        auto result = valueRef->setValue( argToken.token );
+                        if( !result )
+                            return InternalParseResult( result );
+                        if( result.value() == ParseResultType::ShortCircuitAll )
+                            return InternalParseResult::ok( ParseState( result.value(), remainingTokens ) );
+                    }
+                    return InternalParseResult::ok( ParseState( ParseResultType::Matched, ++remainingTokens ) );
+                }
+            }
+            return InternalParseResult::ok( ParseState( ParseResultType::NoMatch, remainingTokens ) );
+        }
+
+        auto validate() const -> Result override {
+            if( m_optNames.empty() )
+                return Result::logicError( "No options supplied to Opt" );
+            for( auto const &name : m_optNames ) {
+                if( name.empty() )
+                    return Result::logicError( "Option name cannot be empty" );
+#ifdef CLARA_PLATFORM_WINDOWS
+                if( name[0] != '-' && name[0] != '/' )
+                    return Result::logicError( "Option name must begin with '-' or '/'" );
+#else
+                if( name[0] != '-' )
+                    return Result::logicError( "Option name must begin with '-'" );
+#endif
+            }
+            return ParserRefImpl::validate();
+        }
+    };
+
+    struct Help : Opt {
+        Help( bool &showHelpFlag )
+        :   Opt([&]( bool flag ) {
+                showHelpFlag = flag;
+                return ParserResult::ok( ParseResultType::ShortCircuitAll );
+            })
+        {
+            static_cast<Opt &>( *this )
+                    ("display usage information")
+                    ["-?"]["-h"]["--help"]
+                    .optional();
+        }
+    };
+
+
+    struct Parser : ParserBase {
+
+        mutable ExeName m_exeName;
+        std::vector<Opt> m_options;
+        std::vector<Arg> m_args;
+
+        auto operator|=( ExeName const &exeName ) -> Parser & {
+            m_exeName = exeName;
+            return *this;
+        }
+
+        auto operator|=( Arg const &arg ) -> Parser & {
+            m_args.push_back(arg);
+            return *this;
+        }
+
+        auto operator|=( Opt const &opt ) -> Parser & {
+            m_options.push_back(opt);
+            return *this;
+        }
+
+        auto operator|=( Parser const &other ) -> Parser & {
+            m_options.insert(m_options.end(), other.m_options.begin(), other.m_options.end());
+            m_args.insert(m_args.end(), other.m_args.begin(), other.m_args.end());
+            return *this;
+        }
+
+        template<typename T>
+        auto operator|( T const &other ) const -> Parser {
+            return Parser( *this ) |= other;
+        }
+
+        // Forward deprecated interface with '+' instead of '|'
+        template<typename T>
+        auto operator+=( T const &other ) -> Parser & { return operator|=( other ); }
+        template<typename T>
+        auto operator+( T const &other ) const -> Parser { return operator|( other ); }
+
+        auto getHelpColumns() const -> std::vector<HelpColumns> {
+            std::vector<HelpColumns> cols;
+            for (auto const &o : m_options) {
+                auto childCols = o.getHelpColumns();
+                cols.insert( cols.end(), childCols.begin(), childCols.end() );
+            }
+            return cols;
+        }
+
+        void writeToStream( std::ostream &os ) const {
+            if (!m_exeName.name().empty()) {
+                os << "usage:\n" << "  " << m_exeName.name() << " ";
+                bool required = true, first = true;
+                for( auto const &arg : m_args ) {
+                    if (first)
+                        first = false;
+                    else
+                        os << " ";
+                    if( arg.isOptional() && required ) {
+                        os << "[";
+                        required = false;
+                    }
+                    os << "<" << arg.hint() << ">";
+                    if( arg.cardinality() == 0 )
+                        os << " ... ";
+                }
+                if( !required )
+                    os << "]";
+                if( !m_options.empty() )
+                    os << " options";
+                os << "\n\nwhere options are:" << std::endl;
+            }
+
+            auto rows = getHelpColumns();
+            size_t consoleWidth = CLARA_CONFIG_CONSOLE_WIDTH;
+            size_t optWidth = 0;
+            for( auto const &cols : rows )
+                optWidth = (std::max)(optWidth, cols.left.size() + 2);
+
+            optWidth = (std::min)(optWidth, consoleWidth/2);
+
+            for( auto const &cols : rows ) {
+                auto row =
+                        TextFlow::Column( cols.left ).width( optWidth ).indent( 2 ) +
+                        TextFlow::Spacer(4) +
+                        TextFlow::Column( cols.right ).width( consoleWidth - 7 - optWidth );
+                os << row << std::endl;
+            }
+        }
+
+        friend auto operator<<( std::ostream &os, Parser const &parser ) -> std::ostream& {
+            parser.writeToStream( os );
+            return os;
+        }
+
+        auto validate() const -> Result override {
+            for( auto const &opt : m_options ) {
+                auto result = opt.validate();
+                if( !result )
+                    return result;
+            }
+            for( auto const &arg : m_args ) {
+                auto result = arg.validate();
+                if( !result )
+                    return result;
+            }
+            return Result::ok();
+        }
+
+        using ParserBase::parse;
+
+        auto parse( std::string const& exeName, TokenStream const &tokens ) const -> InternalParseResult override {
+
+            struct ParserInfo {
+                ParserBase const* parser = nullptr;
+                size_t count = 0;
+            };
+            const size_t totalParsers = m_options.size() + m_args.size();
+            assert( totalParsers < 512 );
+            // ParserInfo parseInfos[totalParsers]; // <-- this is what we really want to do
+            ParserInfo parseInfos[512];
+
+            {
+                size_t i = 0;
+                for (auto const &opt : m_options) parseInfos[i++].parser = &opt;
+                for (auto const &arg : m_args) parseInfos[i++].parser = &arg;
+            }
+
+            m_exeName.set( exeName );
+
+            auto result = InternalParseResult::ok( ParseState( ParseResultType::NoMatch, tokens ) );
+            while( result.value().remainingTokens() ) {
+                bool tokenParsed = false;
+
+                for( size_t i = 0; i < totalParsers; ++i ) {
+                    auto&  parseInfo = parseInfos[i];
+                    if( parseInfo.parser->cardinality() == 0 || parseInfo.count < parseInfo.parser->cardinality() ) {
+                        result = parseInfo.parser->parse(exeName, result.value().remainingTokens());
+                        if (!result)
+                            return result;
+                        if (result.value().type() != ParseResultType::NoMatch) {
+                            tokenParsed = true;
+                            ++parseInfo.count;
+                            break;
+                        }
+                    }
+                }
+
+                if( result.value().type() == ParseResultType::ShortCircuitAll )
+                    return result;
+                if( !tokenParsed )
+                    return InternalParseResult::runtimeError( "Unrecognised token: " + result.value().remainingTokens()->token );
+            }
+            // !TBD Check missing required options
+            return result;
+        }
+    };
+
+    template<typename DerivedT>
+    template<typename T>
+    auto ComposableParserImpl<DerivedT>::operator|( T const &other ) const -> Parser {
+        return Parser() | static_cast<DerivedT const &>( *this ) | other;
+    }
+} // namespace detail
+
+
+// A Combined parser
+using detail::Parser;
+
+// A parser for options
+using detail::Opt;
+
+// A parser for arguments
+using detail::Arg;
+
+// Wrapper for argc, argv from main()
+using detail::Args;
+
+// Specifies the name of the executable
+using detail::ExeName;
+
+// Convenience wrapper for option parser that specifies the help option
+using detail::Help;
+
+// enum of result types from a parse
+using detail::ParseResultType;
+
+// Result type for parser operation
+using detail::ParserResult;
+
+
+} // namespace clara
+
+#endif // CLARA_HPP_INCLUDED
diff --git a/optimization/CMakeLists.txt b/optimization/CMakeLists.txt
index 223fddff193f23d18935144a42a97f25bac3f3b6..c42d8259c0d9949da5dbd5485ef6a860e04fe22a 100644
--- a/optimization/CMakeLists.txt
+++ b/optimization/CMakeLists.txt
@@ -2,10 +2,14 @@ add_library(optimization_lib
     gaussian.cpp
     gaussian.h
     matrix.h
-    rosenbrock.h
+    objective.h
     scalar.h
+    sfg_estimator.h
     vector.h
 )
 target_include_directories(optimization_lib PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
 target_link_libraries(optimization_lib PUBLIC shared_settings Eigen3::Eigen)
 target_compile_features(optimization_lib PUBLIC cxx_std_17)
+if (VISUALIZE)
+    target_compile_definitions(optimization_lib PUBLIC VISUALIZE)
+endif()
diff --git a/optimization/gaussian.cpp b/optimization/gaussian.cpp
index 2b009729ebd022287f3824ccdb6ee1ce075895b1..470266ee5cdd6c26140e246e64fcc3888ec02a66 100644
--- a/optimization/gaussian.cpp
+++ b/optimization/gaussian.cpp
@@ -4,16 +4,42 @@
 namespace optimization {
 
 //..................................................................................................
-void Gaussian::set_dim(uint32_t dim) {
-    dim_ = dim;
-    mean_.resize(dim_);
-    covariance_.resize(dim_, dim_);
-    inverse_covariance_.resize(dim_, dim_);
-    generator_.resize(dim_, dim_);
+void Gaussian::Score::set_dim(uint32_t dim) {
+    mean_.resize(dim);
+    covariance_.resize(dim, dim);
 }
 
 //..................................................................................................
-void Gaussian::compute_derived_matrices() {
+Gaussian::Score& Gaussian::Score::operator*=(Scalar x) {
+    mean_ *= x;
+    covariance_ *= x;
+    return *this;
+}
+
+//..................................................................................................
+Gaussian::Score Gaussian::Score::operator*(Scalar x) const & {
+    Score result;
+    result.mean_ = mean_ * x;
+    result.covariance_ = covariance_ * x;
+    return result;
+}
+
+//..................................................................................................
+Gaussian::Score Gaussian::Score::operator*(Scalar x) && {
+    Score result(std::move(*this));
+    result *= x;
+    return result;
+}
+
+//..................................................................................................
+void Gaussian::Parameters::set_dim(uint32_t dim) {
+    mean_.resize(dim);
+    covariance_.resize(dim, dim);
+    inverse_covariance_.resize(dim, dim);
+    generator_.resize(dim, dim);
+}
+
+//..................................................................................................
+void Gaussian::Parameters::compute_derived_matrices() {
     Eigen::SelfAdjointEigenSolver<MatrixX<Scalar>> eigenSolver(covariance_);
     inverse_covariance_ = eigenSolver.operatorInverseSqrt();
     inverse_covariance_ *= inverse_covariance_;
@@ -21,20 +47,36 @@ void Gaussian::compute_derived_matrices() {
 }
 
 //..................................................................................................
-VectorX<Scalar> Gaussian::sample() const {
-    return mean_ + generator_ * VectorX<Scalar>{mean_.size()}.unaryExpr([&](auto) {
+Gaussian::Parameters& Gaussian::Parameters::operator+=(Gaussian::Score const& score) {
+    mean_ += score.mean_;
+    covariance_ += score.covariance_;
+    compute_derived_matrices();
+    return *this;
+}
+
+//..................................................................................................
+VectorX<Scalar> Gaussian::sample(Parameters const& parameters) const {
+    return parameters.mean() + parameters.generator() * VectorX<Scalar>{dim_}.unaryExpr([&](auto) {
         return unit_normal_(gen_);
     });
 }
 
 //..................................................................................................
-VectorX<Scalar> Gaussian::grad_log_mean(VectorX<Scalar> const& x) const {
-    return (x - mean_).transpose() * inverse_covariance_;
+VectorX<Scalar> Gaussian::mean_score(Parameters const& parameters, VectorX<Scalar> const& x) const {
+    return parameters.inverse_covariance() * (x - parameters.mean());
 }
 
 //..................................................................................................
-MatrixX<Scalar> Gaussian::grad_log_covariance(VectorX<Scalar> const& x) const {
-    return -0.5 * (inverse_covariance_ + inverse_covariance_ * x * x.transpose() * inverse_covariance_);
+MatrixX<Scalar> Gaussian::covariance_score(Parameters const& parameters, VectorX<Scalar> const& x) const {
+    MatrixX<Scalar> const& inv_cov = parameters.inverse_covariance();
+    VectorX<Scalar> const centered = x - parameters.mean();
+    return -0.5 * (inv_cov - inv_cov * centered * centered.transpose() * inv_cov);
 }
 
+//..................................................................................................
+Gaussian::Score Gaussian::score(Parameters const& parameters, VectorX<Scalar> const& x) const {
+    Score result;
+    result.mean_ = mean_score(parameters, x);
+    result.covariance_ = covariance_score(parameters, x);
+    return result;
+}
+
 }
diff --git a/optimization/gaussian.h b/optimization/gaussian.h
index 57639fe347a23ef8cf9e5d337410e4588b3a5c14..bed9a07cf2244ebc862fddfee235cc555a9657f9 100644
--- a/optimization/gaussian.h
+++ b/optimization/gaussian.h
@@ -11,29 +11,52 @@ namespace optimization {
 //--------------------------------------------------------------------------------------------------
 class Gaussian {
 public:
+    struct Score {
+        void set_dim(uint32_t dim);
+        Score& operator*=(Scalar x);
+        Score operator*(Scalar x) const &;
+        Score operator*(Scalar x) &&;
+
+        VectorX<Scalar> mean_;
+        MatrixX<Scalar> covariance_;
+    };
+
+    struct Parameters {
+    public:
+        void set_dim(uint32_t dim);
+        void compute_derived_matrices();
+        Parameters& operator+=(Score const& score);
+
+        VectorX<Scalar>& mean() { return mean_; }
+        VectorX<Scalar> const& mean() const { return mean_; }
+        MatrixX<Scalar>& covariance() { return covariance_; }
+        MatrixX<Scalar> const& covariance() const { return covariance_; }
+        MatrixX<Scalar> const& inverse_covariance() const { return inverse_covariance_; }
+        MatrixX<Scalar> const& generator() const { return generator_; }
+
+    private:
+        VectorX<Scalar> mean_;
+        MatrixX<Scalar> covariance_;
+        MatrixX<Scalar> inverse_covariance_;
+        MatrixX<Scalar> generator_;
+    };
+
     Gaussian(): gen_(std::random_device{}()) {}
-    Gaussian(uint32_t dim): gen_(std::random_device{}()) { set_dim(dim); }
+    Gaussian(uint32_t dim): dim_(dim), gen_(std::random_device{}()) {}
 
-    void set_dim(uint32_t dim);
     uint32_t dim() const { return dim_; }
-    VectorX<Scalar>& mean() { return mean_; }
-    VectorX<Scalar> const& mean() const { return mean_; }
-    MatrixX<Scalar>& covariance() { return covariance_; }
-    MatrixX<Scalar> const& covariance() const { return covariance_; }
-    MatrixX<Scalar> const& inverse_covariance() const { return inverse_covariance_; }
-    void compute_derived_matrices();
+    void set_dim(uint32_t dim) { dim_ = dim; }
+    void set_dim(Score& score) const { score.set_dim(dim_); }
+    void set_dim(Parameters& parameters) const { parameters.set_dim(dim_); }
+
+    VectorX<Scalar> sample(Parameters const& parameters) const;
 
-    VectorX<Scalar> sample() const;
-    VectorX<Scalar> grad_log_mean(VectorX<Scalar> const& x) const;
-    MatrixX<Scalar> grad_log_covariance(VectorX<Scalar> const& x) const;
+    VectorX<Scalar> mean_score(Parameters const& parameters, VectorX<Scalar> const& x) const;
+    MatrixX<Scalar> covariance_score(Parameters const& parameters, VectorX<Scalar> const& x) const;
+    Score score(Parameters const& parameters, VectorX<Scalar> const& x) const;
 
 private:
     uint32_t dim_;
-    VectorX<Scalar> mean_;
-    MatrixX<Scalar> covariance_;
-    MatrixX<Scalar> inverse_covariance_;
-    MatrixX<Scalar> generator_;
-
     mutable std::mt19937 gen_;
     mutable std::normal_distribution<> unit_normal_;
 };
diff --git a/optimization/gradient_descent.h b/optimization/gradient_descent.h
new file mode 100644
index 0000000000000000000000000000000000000000..25fe0ccd4c8ffa885208320aed8a87ffac266a30
--- /dev/null
+++ b/optimization/gradient_descent.h
@@ -0,0 +1,85 @@
+#ifndef OPTIMIZATION_GRADIENT_DESCENT_H
+#define OPTIMIZATION_GRADIENT_DESCENT_H
+
+#include <cstdint>
+
+#include "scalar.h"
+#include "vis_only.h"
+
+#ifdef VISUALIZE
+#include <vector>
+#endif
+
+namespace optimization {
+
+//--------------------------------------------------------------------------------------------------
+template <typename Objective>
+struct GradientDescentLog {
+    struct Sample {
+        typename Objective::Input point;
+        Scalar value;
+        typename Objective::Gradient gradient;
+    };
+
+    void reserve(uint32_t n) VIS_ONLY_METHOD;
+    void push_back(
+        typename Objective::Input const& point,
+        Scalar value,
+        typename Objective::Gradient const& gradient
+    ) VIS_ONLY_METHOD;
+
+    #ifdef VISUALIZE
+    std::vector<Sample> samples;
+    #endif
+};
+
+#ifdef VISUALIZE
+
+//..................................................................................................
+template <typename Objective>
+void GradientDescentLog<Objective>::reserve(uint32_t n) {
+    samples.reserve(n);
+}
+
+//..................................................................................................
+template <typename Objective>
+void GradientDescentLog<Objective>::push_back(
+    typename Objective::Input const& point,
+    Scalar value,
+    typename Objective::Gradient const& gradient
+) {
+    samples.push_back({point, value, gradient});
+}
+
+#endif
+
+//--------------------------------------------------------------------------------------------------
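+// Plain gradient descent over any objective exposing Input, Gradient, and
+// eval(point, value, gradient). Intended usage, with a hypothetical objective type:
+//
+//     GradientDescent<MyObjective> gd(0.01);
+//     gd.n_steps() = 1000;
+//     auto best = gd.optimize(MyObjective{}, initial_point);
+//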
+template <typename Objective>
+class GradientDescent : public GradientDescentLog<Objective> {
+public:
+    GradientDescent() {}
+    GradientDescent(Scalar learning_rate, uint32_t n_steps)
+        : learning_rate_(learning_rate), n_steps_(n_steps) {}
+
+    Scalar& learning_rate() { return learning_rate_; }
+    Scalar learning_rate() const { return learning_rate_; }
+    uint32_t& n_steps() { return n_steps_; }
+    uint32_t n_steps() const { return n_steps_; }
+
+    typename Objective::Input optimize(
+        Objective const& objective,
+        typename Objective::Input const& initial_point
+    ) {
+        this->reserve(n_steps_ + 1);
+        typename Objective::Input point = initial_point;
+        Scalar value;
+        typename Objective::Gradient gradient;
+        objective.eval(point, value, gradient);
+        this->push_back(point, value, gradient);
+
+        for (uint32_t i = 0; i < n_steps_; ++i) {
+            point -= learning_rate_ * gradient;
+            objective.eval(point, value, gradient);
+            this->push_back(point, value, gradient);
+        }
+        return point;
+    }
+
+private:
+    Scalar learning_rate_ = 0;
+    uint32_t n_steps_ = 0;
+};
+
+}
+
+#endif
diff --git a/optimization/objective.h b/optimization/objective.h
new file mode 100644
index 0000000000000000000000000000000000000000..3fd40e20a23e2540bdd8a8828139f43dbf4091f9
--- /dev/null
+++ b/optimization/objective.h
@@ -0,0 +1,46 @@
+#ifndef OPTIMIZATION_OBJECTIVE_H
+#define OPTIMIZATION_OBJECTIVE_H
+
+#include "scalar.h"
+#include "vector.h"
+
+namespace optimization {
+
+//--------------------------------------------------------------------------------------------------
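+// Euclidean distance of the first two coordinates from the origin (minimum 0 at the origin); note
+// that the classic sphere benchmark is the sum of squares without the square root.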
+template <typename Derived>
+inline Scalar sphere(Eigen::MatrixBase<Derived> const& x) {
+    auto const x_squared = x[0] * x[0];
+    auto const y_squared = x[1] * x[1];
+    return std::sqrt(x_squared + y_squared);
+}
+
+//--------------------------------------------------------------------------------------------------
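+// Rosenbrock "banana" function; global minimum 0 at (1, 1).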
+template <typename Derived>
+inline Scalar rosenbrock(Eigen::MatrixBase<Derived> const& x) {
+    auto const x_squared = x[0] * x[0];
+    return (1 - x[0]) * (1 - x[0]) + 100 * (x[1] - x_squared) * (x[1] - x_squared);
+}
+
+//--------------------------------------------------------------------------------------------------
+enum class Objective {
+    Sphere     = 0,
+    Rosenbrock = 1
+};
+
+//--------------------------------------------------------------------------------------------------
+inline std::function<Scalar(VectorX<Scalar> const&)> make_objective(Objective objective) {
+    switch (objective) {
+        case Objective::Sphere:
+            return [](VectorX<Scalar> const& x) { return sphere(x); };
+        case Objective::Rosenbrock:
+            return [](VectorX<Scalar> const& x) { return rosenbrock(x); };
+    }
+    return [](VectorX<Scalar> const& x) { return sphere(x); };  // unreachable for valid enum values
+}
+
+}
+
+#endif
diff --git a/optimization/rosenbrock.h b/optimization/rosenbrock.h
deleted file mode 100644
index f2967ca92700862ced5ea9789aa729dba7a3ab16..0000000000000000000000000000000000000000
--- a/optimization/rosenbrock.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef OPTIMIZATION_ROSENBROCK_H
-#define OPTIMIZATION_ROSENBROCK_H
-
-#include "scalar.h"
-#include "vector.h"
-
-namespace optimization {
-
-//--------------------------------------------------------------------------------------------------
-inline Scalar rosenbrock(Vector2<Scalar> const& x) {
-    auto const x_squared = x[0] * x[0];
-    return (1 - x[0]) * (1 - x[0]) + 100 * (x[1] - x_squared) * (x[1] - x_squared);
-}
-
-//--------------------------------------------------------------------------------------------------
-template <typename Derived>
-inline Scalar rosenbrock(Eigen::MatrixBase<Derived> const& x) {
-    auto const x_squared = x[0] * x[0];
-    return (1 - x[0]) * (1 - x[0]) + 100 * (x[1] - x_squared) * (x[1] - x_squared);
-}
-
-}
-
-#endif
diff --git a/optimization/evo.h b/optimization/sfg_estimator.h
similarity index 66%
rename from optimization/evo.h
rename to optimization/sfg_estimator.h
index 3ceb3488055feb13051ddc7138bcf6d6db660209..74b4bba2b565037aa049e30bd42ba62877c71b4e 100644
--- a/optimization/evo.h
+++ b/optimization/sfg_estimator.h
@@ -9,11 +9,27 @@
 
 namespace optimization {
 
+//--------------------------------------------------------------------------------------------------
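+// Monte Carlo estimate of the score-function (log-derivative / REINFORCE) gradient:
+//   grad_theta E_{x ~ p_theta}[f(x)] ~= (1/N) * sum_i f(x_i) * grad_theta log p_theta(x_i).
+// The ScoreFunctionGradientEstimator below folds the 1/N factor into its learning rate instead.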
+template <typename Objective, typename Distribution>
+void score_function_gradient_estimate(
+    Objective const& objective,
+    Distribution const& distribution,
+    uint32_t n_samples,
+    typename Distribution::Gradient& gradient
+) {
+    gradient.setZero();
+    for (uint32_t i = 0; i < n_samples; ++i) {
+        auto const sample = distribution.sample();
+        Scalar const value = objective(sample);
+        gradient += distribution.gradient_log(sample) * value;
+    }
+    gradient /= static_cast<Scalar>(n_samples);  // average over samples
+}
+
 //--------------------------------------------------------------------------------------------------
 template <typename Distribution>
-class ExpectedValueOptimizer {
+class ScoreFunctionGradientEstimator {
 public:
-    ExpectedValueOptimizer(uint32_t pop_size, Scalar learning_rate)
+    ScoreFunctionGradientEstimator(uint32_t pop_size, Scalar learning_rate)
         : population_size_(pop_size), learning_rate_(learning_rate / pop_size)
     {
         distribution_.initialize(gradient_);
@@ -38,7 +54,7 @@ private:
 
 //..................................................................................................
 template <typename Distribution>
-void ExpectedValueOptimizer<Distribution>::step() {
+void ScoreFunctionGradientEstimator<Distribution>::step() {
     gradient_.setZero();
     for (uint32_t i = 0; i < population_size_; ++i) {
         sample_ = distribution_.sample();
diff --git a/optimization/vis_only.h b/optimization/vis_only.h
new file mode 100644
index 0000000000000000000000000000000000000000..b913ae8689a13406a2e5b9ba71141df986421e0e
--- /dev/null
+++ b/optimization/vis_only.h
@@ -0,0 +1,16 @@
+#ifndef OPTIMIZATION_VIS_ONLY_H
+#define OPTIMIZATION_VIS_ONLY_H
+
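+// VIS_ONLY_STATEMENT(X) runs X only in visualization builds; otherwise X is still type-checked but
+// hidden behind `if (0)` so it never executes. VIS_ONLY_METHOD makes a declaration an empty inline
+// definition when visualization is disabled.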
+#ifdef VISUALIZE
+
+#define VIS_ONLY_STATEMENT(X) X
+#define VIS_ONLY_METHOD
+
+#else
+
+#define VIS_ONLY_STATEMENT(X) do { if (0) { X } } while(0)
+#define VIS_ONLY_METHOD {}
+
+#endif
+
+#endif
diff --git a/plots/evo_experiment.h b/plots/evo_experiment.h
new file mode 100644
index 0000000000000000000000000000000000000000..c86d0a42720e2dca67fbbe6f45bd1602dba0dd80
--- /dev/null
+++ b/plots/evo_experiment.h
@@ -0,0 +1,48 @@
+#include "rosenbrock.h"
+#include "evo.h"
+
+namespace optimization {
+
+//--------------------------------------------------------------------------------------------------
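+// Runs the score-function-gradient (EVO) loop on the Rosenbrock objective, starting the Gaussian
+// mean at (x, y), and prints the recorded means and standard deviations as Python assignments.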
+template <typename Distribution>
+void evo_experiment(
+    Scalar x,
+    Scalar y,
+    uint32_t pop_size,
+    Scalar learning_rate,
+    uint32_t n_iterations,
+    uint32_t print_stride
+) {
+    ScoreFunctionGradientEstimator<Distribution> optimizer1(pop_size, learning_rate);
+    optimizer1.objective() = [](VectorX<Scalar> const& x) {
+        return rosenbrock(x);
+    };
+    optimizer1.distribution().gaussian().mean()[0] = x;
+    optimizer1.distribution().gaussian().mean()[1] = y;
+
+    std::vector<std::pair<Scalar, Scalar>> means;
+    means.reserve(n_iterations / print_stride + 1);
+    std::vector<Scalar> std_devs;
+    std_devs.reserve(n_iterations / print_stride + 1);
+    for (uint32_t i = 0; i < n_iterations; ++i) {
+        if (i % print_stride == 0) {
+            auto const& mean = optimizer1.distribution().gaussian().mean();
+            auto const& covar = optimizer1.distribution().gaussian().covariance();
+            means.emplace_back(mean[0], mean[1]);
+            std_devs.push_back(std::sqrt(covar(0, 0)));
+            //std::cout << i << ": " << mean[0] << ", " << mean[1] << ": " << optimizer1.objective()(mean) << '\n';
+            //std::cout << optimizer1.distribution().gaussian().covariance().format(matrix_format) << '\n';
+        }
+        optimizer1.step();
+    }
+    auto const& mean = optimizer1.distribution().gaussian().mean();
+    auto const& covar = optimizer1.distribution().gaussian().covariance();
+    means.emplace_back(mean[0], mean[1]);
+    std_devs.push_back(std::sqrt(covar(0, 0)));
+    //std::cout << n_iterations << ": " << mean[0] << ", " << mean[1] << ": " << optimizer1.objective()(mean) << '\n';
+    //std::cout << optimizer1.distribution().gaussian().covariance().format(matrix_format) << '\n';
+
+    //std::ofstream data_file(...);
+    std::cout << PyAssign("means", means)
+        << PyAssign("std_devs", std_devs);
+}
+
+}
+
+#endif
diff --git a/plots/gaussian_mean.cpp b/plots/gaussian_mean.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b1b1b56fbf14619b5640d3d1bf8a4a779440981a
--- /dev/null
+++ b/plots/gaussian_mean.cpp
@@ -0,0 +1,52 @@
+#include "py_print.h"
+#include "rosenbrock.h"
+#include "evo.h"
+#include "gaussian.h"
+#include <iostream>
+#include <string>
+#include <vector>
+
+using namespace optimization;
+
+//..................................................................................................
+// This distribution only updates the mean.
+class Distribution1 {
+public:
+    Distribution1(): gaussian_(2) {
+        gaussian_.mean()[0] = 0;
+        gaussian_.mean()[1] = 0.5;
+        gaussian_.covariance().setIdentity();
+        gaussian_.covariance() *= 0.001;
+        gaussian_.compute_derived_matrices();
+    }
+
+    using Gradient = VectorX<Scalar>;
+
+    Gaussian& gaussian() { return gaussian_; }
+    Gaussian const& gaussian() const { return gaussian_; }
+    void initialize(Gradient& gradient) { gradient.resize(2); }
+
+    VectorX<Scalar> sample() const { return gaussian_.sample(); }
+    VectorX<Scalar> gradient_log(VectorX<Scalar> const& x) const { return gaussian_.grad_log_mean(x); }
+    void update(Gradient const& step) { gaussian_.mean() += step; }
+
+private:
+    Gaussian gaussian_;
+};
+
+//--------------------------------------------------------------------------------------------------
+int main(int argc, char** argv) {
+    // Define CLI interface.
+    uint32_t max_regions = std::max(1u, std::thread::hardware_concurrency());
+    int verbosity = 2; // 0 => nothing, 1 => final results only, 2 => intermediate results as well
+    uint32_t n_frames = 1000;
+    auto const cli =
+        clara::Opt(max_regions, "max_regions")["-r"]["--max-regions"]("Max number of Regions") |
+        clara::Opt(verbosity, "verbosity")["-v"]["--verbosity"]("Controls the amount of output") |
+        clara::Opt(n_frames, "n_frames")["-n"]["--n-frames"]("Number of frame evaluations");
+
+    // Parse arguments.
+    auto const result = cli.parse(clara::Args(argc, argv));
+    if (!result) {
+        std::cerr << "Error in command line: " << result.errorMessage() << std::endl;
+        return 1;
+    }
+
+    // The experiment itself is not wired up yet; Distribution1 and the parsed options are unused.
+    return 0;
+}
diff --git a/plots/main.cpp b/plots/main.cpp
index 193eda7fc6432d968476e3069cdc23cdc8fde0a9..038f85f2d238ec311e1498b168092111ad150055 100644
--- a/plots/main.cpp
+++ b/plots/main.cpp
@@ -97,50 +97,6 @@ private:
     Gaussian gaussian_;
 };
 
-//--------------------------------------------------------------------------------------------------
-template <typename Distribution>
-void run_experiment(
-    Scalar x,
-    Scalar y,
-    uint32_t pop_size,
-    Scalar learning_rate,
-    uint32_t n_iterations,
-    uint32_t print_stride
-) {
-    ExpectedValueOptimizer<Distribution> optimizer1(pop_size, learning_rate);
-    optimizer1.objective() = [](VectorX<Scalar> const& x) {
-        return rosenbrock(x);
-    };
-    optimizer1.distribution().gaussian().mean()[0] = x;
-    optimizer1.distribution().gaussian().mean()[1] = y;
-
-    std::vector<std::pair<Scalar, Scalar>> means;
-    means.reserve(n_iterations / print_stride + 1);
-    std::vector<Scalar> std_devs;
-    std_devs.reserve(n_iterations / print_stride + 1);
-    for (uint32_t i = 0; i < n_iterations; ++i) {
-        if (i % print_stride == 0) {
-            auto const& mean = optimizer1.distribution().gaussian().mean();
-            auto const& covar = optimizer1.distribution().gaussian().covariance();
-            means.emplace_back(mean[0], mean[1]);
-            std_devs.push_back(std::sqrt(covar(0, 0)));
-            //std::cout << i << ": " << mean[0] << ", " << mean[1] << ": " << optimizer1.objective()(mean) << '\n';
-            //std::cout << optimizer1.distribution().gaussian().covariance().format(matrix_format) << '\n';
-        }
-        optimizer1.step();
-    }
-    auto const& mean = optimizer1.distribution().gaussian().mean();
-    auto const& covar = optimizer1.distribution().gaussian().covariance();
-    means.emplace_back(mean[0], mean[1]);
-    std_devs.push_back(std::sqrt(covar(0, 0)));
-    //std::cout << n_iterations << ": " << mean[0] << ", " << mean[1] << ": " << optimizer1.objective()(mean) << '\n';
-    //std::cout << optimizer1.distribution().gaussian().covariance().format(matrix_format) << '\n';
-
-    //std::ofstream data_file(...);
-    std::cout << PyAssign("means", means)
-        << PyAssign("std_devs", std_devs);
-}
-
 //--------------------------------------------------------------------------------------------------
 int main() {
     //Eigen::IOFormat matrix_format(4, 0, ", ", "\n", "[", "]");