#!/usr/bin/env python
# coding: utf-8

# # Convergence of Steepest Descent
# 
# Copyright (C) 2026 Andreas Kloeckner
# 
# <details>
# <summary>MIT License</summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# 
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# 
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# </details>

# In[1]:


import sympy as sp


# In[115]:


# positive=True makes the eigenvalues of A positive (so A is symmetric positive
# definite) and helps sympy simplify later on; restricting to u, v > 0 loses no
# generality below, since the error ratio depends only on u**2 and v**2
lam1, lam2 = sp.symbols("lambda_1, lambda_2", positive=True)
u, v = sp.symbols("u, v", positive=True)

def grad(expr, vec):
    # gradient of a scalar expression with respect to the entries of vec
    return sp.Matrix([expr.diff(comp) for comp in vec])
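

# A quick sanity check of the helper on a simple expression (added here):

# In[ ]:


grad(u**2 + 3*v, sp.Matrix([u, v]))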


# In[116]:


A = sp.Matrix([[lam1, 0], [0, lam2]])
A


# In[117]:


x = sp.Matrix([u,v])
x


# In[118]:


# the quadratic objective f(x) = 1/2 x^T A x; the [0] extracts the scalar
# entry of the 1x1 matrix product
objective = (sp.Rational(1, 2) * x.T @ A @ x)[0]
objective


# **Question:** What is the minimizer we're looking for?
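#
# Since $\lambda_1, \lambda_2 > 0$, the objective is strictly convex with unique minimizer $x^* = 0$. As a quick added check, its Hessian should come out to be exactly $A$:

# In[ ]:


sp.hessian(objective, (u, v))
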

# In[119]:


grad_objective = grad(objective, x)
grad_objective


# **Question:** What does this coincide with? Can you prove it?
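#
# As an added check: the gradient should coincide with the matrix-vector product $A x$, i.e. the following should be the zero vector:

# In[ ]:


sp.simplify(grad_objective - A @ x)

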
# 
# Set up the line search as `line` and `line_objective`:

# In[120]:


# steepest descent: move along the negative gradient
descent_direction = -grad_objective

alpha = sp.Symbol("alpha")
line = x + alpha * descent_direction
# restrict the objective to the search line; A is diagonal, so line[0]
# involves only u and line[1] only v, making the sequential subs safe
line_objective = objective.subs(u, line[0]).subs(v, line[1])
line_objective


# And find the optimal $\alpha$:

# In[121]:


alpha_opt = sp.solve(line_objective.diff(alpha), alpha)[0]
alpha_opt


# **Question:** What is this in general? Can you prove it?
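#
# A candidate answer to check (added): with $d = -\nabla f(x)$, the optimal step for this quadratic objective should be $\alpha = (d^T d)/(d^T A d)$, so the following should simplify to zero:

# In[ ]:


d = descent_direction
sp.simplify(alpha_opt - (d.T @ d)[0] / (d.T @ A @ d)[0])

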
# 
# Next, find the next iterate:

# In[122]:


x_next = line.subs(alpha, alpha_opt)
x_next
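

# A property worth verifying (added check): with an exact line search, the new gradient $A x_{\text{next}}$ is orthogonal to the old descent direction, so this should simplify to zero:

# In[ ]:


sp.simplify((descent_direction.T @ A @ x_next)[0])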


# Next, consider the decrease in the energy (A-norm) error. Since the minimizer is $x^* = 0$, the error at $x$ is just $x$ itself:

# In[140]:


def energy_error_squared(vec):
    # squared energy norm of the error: ||vec - x*||_A^2 = vec^T A vec (here x* = 0)
    return (vec.T @ A @ vec)[0]

ratio = sp.factor(energy_error_squared(x_next)/energy_error_squared(x))
ratio
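

# Note that the ratio is homogeneous of degree zero in $x$: it depends only on the direction of $x$, not its length. A quick added check, rescaling $x$ by a fresh positive symbol $t$:

# In[ ]:


t = sp.Symbol("t", positive=True)
sp.simplify(ratio.subs(u, t*u).subs(v, t*v) - ratio)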


# To find the worst-case direction, take the gradient of the ratio with respect to $(u, v)$ and find the critical point (a ray, by the scale invariance above):

# In[145]:


bad_soln, = sp.solve(grad(ratio, x), x)
x_bad = sp.Matrix(bad_soln)
x_bad
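

# The worst-case direction balances the energy between the two eigendirections, i.e. $\lambda_1 u^2 = \lambda_2 v^2$ (an added check; this should be zero):

# In[ ]:


sp.simplify(lam1 * x_bad[0]**2 - lam2 * x_bad[1]**2)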


# In[147]:


worst_ratio = sp.factor(ratio.subs(u, x_bad[0]).subs(v, x_bad[1]))
worst_ratio
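

# In terms of the condition number $\kappa = \lambda_1/\lambda_2$ (assuming $\lambda_1 \ge \lambda_2$; `kappa` is introduced here as an added step), this should recover the classical worst-case factor $\left(\frac{\kappa - 1}{\kappa + 1}\right)^2$:

# In[ ]:


kappa = sp.Symbol("kappa", positive=True)
sp.simplify(worst_ratio.subs(lam1, kappa * lam2))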

