# ## MIT License

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#

# In[104]:
import numpy as np
import numpy.linalg as la
np.set_printoptions(precision=3)
# Let's prepare a matrix with some random or deliberately chosen eigenvalues:
# In[105]:
n = 6

# Toggle: the `if 1:` branch builds A with known, real eigenvalues;
# switch to the `else:` branch for a generic matrix (complex spectrum).
if 1:
    np.random.seed(70)
    # Construct A = V diag(w) V^{-1}, so A's eigenvalues are exactly `eigvals`
    # and its eigenvectors are the columns of `eigvecs`.
    eigvecs = np.random.randn(n, n)
    eigvals = np.sort(np.random.randn(n))
    # Uncomment for near-duplicate largest-magnitude eigenvalue
    # eigvals[1] = eigvals[0] + 1e-3

    A = eigvecs.dot(np.diag(eigvals)).dot(la.inv(eigvecs))
    print(eigvals)
else:
    # Complex eigenvalues
    np.random.seed(40)
    A = np.random.randn(n, n)
    print(la.eig(A)[0])
# Let's also pick an initial vector:
# In[106]:
# Random starting vector; almost surely it has a nonzero component along
# every eigenvector of A, which the iterations below rely on.
x0 = np.random.randn(n)
x0  # bare expression: displays the vector when run as a notebook cell
# ### Power iteration
# In[107]:
# Start the iteration from the (unnormalized) initial vector.
x = x0
# Now implement plain power iteration.
# In[127]:
# Plain power iteration: repeatedly multiply by A and print each iterate.
# The component along the dominant eigenvector grows fastest, but the
# iterate's magnitude blows up (|lambda_max| > 1) or decays to zero
# (|lambda_max| < 1) because nothing rescales it.
for i in range(20):
    x = A @ x
    print(x)
# * What's the problem with this method?
# * Does anything useful come of this?
# * How do we fix it?
# ### Normalized power iteration
# Back to the beginning: Reset to the initial vector.
# In[128]:
x = x0/la.norm(x0)

# Implement normalized power iteration.
# In[129]:
# Rescale to unit length every step: the direction of x converges to the
# dominant eigenvector, while `nrm` (the norm of A @ x for unit x)
# converges to |lambda_max|.
for i in range(10):
    x = A @ x
    nrm = la.norm(x)
    x = x/nrm
    print(x)
    print(nrm)
# * What do you observe about the norm?
# * What about the sign?
# * What is the vector $x$ now?
#
# Extensions:
#
# * Now try the matrix variants above.
# * Suggest a better way of estimating the eigenvalue. [Hint](https://en.wikipedia.org/wiki/Rayleigh_quotient)
# ------
# ### Inverse iteration
#
# What if we want the smallest eigenvalue (by magnitude)?
#
# Once again, reset to the beginning.
# In[143]:
x = x0/la.norm(x0)

# In[144]:
# Inverse iteration: apply A^{-1} (via a linear solve, never an explicit
# inverse). The eigenvalues of A^{-1} are 1/lambda_i, so the
# smallest-magnitude eigenvalue of A dominates and the iterate converges
# to its eigenvector. Cost per iteration: one dense solve, O(n^3) here
# (O(n^2) if a factorization of A were reused).
for i in range(30):
    x = la.solve(A, x)
    nrm = la.norm(x)
    x = x/nrm
    print(x)
# * What's the computational cost per iteration?
# * Can we make this method search for a specific eigenvalue?
# ------
# ### Inverse Shift iteration
#
# What if we want the eigenvalue closest to a given value $\sigma$?
#
# Once again, reset to the beginning.
# In[148]:
x = x0/la.norm(x0)

# In[149]:
sigma = 1
# Shift-invert: the eigenvalues of (A - sigma*I)^{-1} are
# 1/(lambda_i - sigma), so the eigenvalue of A closest to sigma becomes
# dominant and the iteration converges to its eigenvector.
A_sigma = A-sigma*np.eye(A.shape[0])
for i in range(30):
    x = la.solve(A_sigma, x)
    nrm = la.norm(x)
    x = x/nrm
    print(x)
# --------------
# ### Rayleigh quotient iteration
#
# Can we feed an estimate of the current approximate eigenvalue back into the calculation? (Hint: Rayleigh quotient)
#
# Reset once more.
# In[153]:
x = x0/la.norm(x0)

# Run this cell in-place (Ctrl-Enter) many times.
# In[154]:
# Rayleigh quotient iteration: re-estimate the eigenvalue from the current
# iterate each step (sigma = x^T A x / x^T x) and shift-invert with it.
# Converges very rapidly near a solution, but the shifted matrix changes
# every iteration, so each step needs a fresh O(n^3) solve/factorization.
for i in range(10):
    sigma = np.dot(x, np.dot(A, x))/np.dot(x, x)
    x = la.solve(A-sigma*np.eye(n), x)
    x = x/la.norm(x)
    print(x)
# * What's a reasonable stopping criterion?
# * Computational downside of this iteration?