Skip to content
Snippets Groups Projects
Commit f36a3193 authored by Erik Strand's avatar Erik Strand
Browse files

Change termination condition for part f

parent 50540719
No related branches found
No related tags found
No related merge requests found
......@@ -104,7 +104,7 @@ int main() {
python_print("recovered_sample_values", recovered_sample_values);
// Part (d)
constexpr uint32_t n_subsamples = 100;
constexpr uint32_t n_subsamples = 200;
std::vector<uint32_t> const subset_indices = select_subsample(n_samples, n_subsamples);
Vector subset_sample_times = vector_subset(sample_times, subset_indices);
Vector subset_sample_values = vector_subset(sample_values, subset_indices);
......@@ -119,7 +119,7 @@ int main() {
Vector subset_differences = subset_sample_values - subset_recovered_sample_values;
Scalar loss = subset_differences.squaredNorm();
Vector gradient = -2 * subset_dct_matrix * subset_differences;
constexpr Scalar learning_rate = 0.5;
constexpr Scalar learning_rate = 0.1;
while (loss > 1e-6) {
recovered_dct -= learning_rate * gradient;
recovered_sample_values = dct_matrix.transpose() * recovered_dct;
......@@ -139,17 +139,13 @@ int main() {
subset_differences = subset_sample_values - subset_recovered_sample_values;
loss = subset_differences.squaredNorm() + recovered_dct.squaredNorm();
gradient = -2 * subset_dct_matrix * subset_differences + 2 * recovered_dct;
Scalar last_loss = std::numeric_limits<Scalar>::infinity();
Scalar relative_change = (loss - last_loss) / loss;
while (relative_change > 1e-6) {
while (gradient.squaredNorm() > 1e-3) {
recovered_dct -= learning_rate * gradient;
recovered_sample_values = dct_matrix.transpose() * recovered_dct;
subset_recovered_sample_values = vector_subset(recovered_sample_values, subset_indices);
subset_differences = subset_sample_values - subset_recovered_sample_values;
loss = subset_differences.squaredNorm() + recovered_dct.squaredNorm();
gradient = -2 * subset_dct_matrix * subset_differences + 2 * recovered_dct;
last_loss = std::numeric_limits<Scalar>::infinity();
relative_change = (loss - last_loss) / loss;
//std::cout << loss << '\n';
}
Scalar const final_loss_f = loss;
......@@ -163,8 +159,8 @@ int main() {
loss = subset_differences.squaredNorm() + recovered_dct.cwiseAbs().sum();
gradient = -2 * subset_dct_matrix * subset_differences
+ (recovered_dct.array() / recovered_dct.cwiseAbs().array()).matrix();
last_loss = std::numeric_limits<Scalar>::infinity();
relative_change = (loss - last_loss) / loss;
Scalar last_loss = std::numeric_limits<Scalar>::infinity();
Scalar relative_change = (loss - last_loss) / loss;
while (relative_change > 1e-6) {
recovered_dct -= learning_rate * gradient;
recovered_sample_values = dct_matrix.transpose() * recovered_dct;
......
This diff is collapsed.
0% Loading or loading failed. Reload the page to try again.
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment