#!/usr/bin/env python
# coding: utf-8

# # Power Iteration and its Variants
# 
# Copyright (C) 2020 Andreas Kloeckner
# 
# <details>
# <summary>MIT License</summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# 
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# 
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# </details>

# In[9]:


import numpy as np
import numpy.linalg as la
np.set_printoptions(precision=3, linewidth=120)


# Let's prepare a matrix with some random or deliberately chosen eigenvalues:

# In[95]:


n = 6

if True:
    # Manufacture a matrix with known real eigenvalues: draw a random
    # eigenvector basis V and a sorted set of eigenvalues, then form
    # A = V diag(eigvals) V^{-1}.
    np.random.seed(70)
    eigvecs = np.random.randn(n, n)
    eigvals = np.sort(np.random.randn(n))
    # Uncomment for near-duplicate largest-magnitude eigenvalue
    # eigvals[1] = eigvals[0] + 1e-3

    A = eigvecs @ np.diag(eigvals) @ la.inv(eigvecs)
    print(eigvals)

else:
    # Complex eigenvalues
    np.random.seed(40)
    A = np.random.randn(n, n)
    print(la.eig(A)[0])


# Let's also pick an initial vector:

# In[96]:


# Random starting vector: with probability 1 it has a nonzero component
# along every eigenvector, which power iteration relies on.
x0 = np.random.randn(n)
x0


# ### Power iteration

# In[97]:


x = x0


# Now implement plain power iteration.

# In[98]:


# Plain power iteration: repeatedly multiply by A. The direction of the
# iterate aligns with the dominant eigenvector, but its magnitude grows
# or decays like |lambda_max|^k -- which is the motivation for
# normalizing each step below.
for _ in range(20):
    x = A @ x
    print(x)


# * What's the problem with this method?
# * Does anything useful come of this?
# * How do we fix it?

# ### Normalized power iteration

# Back to the beginning: Reset to the initial vector.

# In[99]:


# Fresh start: draw a new initial vector and reset the iterate.
x0 = np.random.randn(n)
x = x0


# Implement normalized power iteration.

# In[100]:


# Normalized power iteration: rescale after every multiply so the iterate
# stays O(1). Printing the coefficients of x in the eigenvector basis
# shows every component except the dominant one dying out.
for _ in range(10):
    x = A @ x
    norm_x = la.norm(x)
    x = x / norm_x
    print(la.solve(eigvecs, x))
    #print(x)

# Once converged, ||A x|| for unit x approximates |lambda_max|.
print(norm_x)


# ### Checking convergence

# In[101]:


x = x0
errors = []
# Expansion coefficients of x0 in the eigenvector basis.
coeffs = la.solve(eigvecs, x0)

for i in range(10):
    x = A @ x
    # Undo the growth by lambda_0^k, then measure the distance to the
    # limit: the x0-component along the first eigenvector.
    rescaled = x / eigvals[0] ** (i + 1)
    errors.append(la.norm(rescaled - coeffs[0] * eigvecs[:, 0], 2))
    print("coefficients:", la.solve(eigvecs, x / la.norm(x, 2)))

# The ratio of the two relevant eigenvalues governs the (linear)
# convergence rate.
conv_factor = eigvals[1]/eigvals[0]

# Observed per-step error ratios should approach conv_factor.
errors = np.array(errors)
for i in range(len(errors)-1):
    print(f"{i=}: {errors[i]=:.6e}, {errors[i+1]/errors[i]=:.6g}, {conv_factor=:.6g}")


# * Now try the matrix variants above.

# ------
# ### Inverse iteration
# 
# What if we want the eigenvalue closest to a given value $\sigma$?
# 
# Once again, reset to the beginning.

# In[148]:


x = x0/la.norm(x0)


# In[149]:


# Inverse iteration: power iteration applied to (A - sigma*I)^{-1}, whose
# dominant eigenvalue 1/(lambda_j - sigma) corresponds to the eigenvalue
# lambda_j of A closest to the shift sigma.
sigma = 1
A_sigma = A - sigma * np.eye(A.shape[0])
for _ in range(30):
    # Apply the inverse by solving, rather than forming it explicitly.
    y = la.solve(A_sigma, x)
    x = y / la.norm(y)
    print(x)


# --------------
# ### Rayleigh quotient iteration
# 
# Can we feed an estimate of the current approximate eigenvalue back into the calculation? (Hint: Rayleigh quotient)
# 
# Reset once more.

# In[153]:


x = x0/la.norm(x0)


# Run this cell in-place (Ctrl-Enter) many times.

# In[154]:


# Rayleigh quotient iteration: re-estimate the eigenvalue from the current
# iterate via the Rayleigh quotient and feed it back in as the shift.
# Converges very rapidly, but each step requires solving with a newly
# shifted matrix A - sigma*I.
for _ in range(10):
    Ax = np.dot(A, x)
    sigma = np.dot(x, Ax) / np.dot(x, x)
    x = la.solve(A - sigma * np.eye(n), x)
    x = x / la.norm(x)
    print(x)


# * What's a reasonable stopping criterion?
# * Computational downside of this iteration?
