import numpy as np
import numpy.linalg as la
import scipy.linalg as spla
# Build a tall-and-skinny (6x4) test matrix A with a nontrivial nullspace,
# plus a right-hand side b, for a rank-deficient least-squares demo.
np.random.seed(12)
A = np.random.randn(6, 4)
b = np.random.randn(6)
# NOTE: statement order matters below — A[1] is overwritten first and the
# later rows are built from the already-modified rows.
A[3] = A[4] + A[5]
A[1] = A[5] + A[1]
A[2] = A[3] + A[1]
# Rows 0 and 2 both equal A[3] + A[1] (duplicate rows), so rank(A) == 3.
A[0] = A[3] + A[1]
# Compute the full SVD of A, storing it in U, sigma, VT:
# Full SVD: U is 6x6 orthogonal, VT is 4x4 orthogonal, and sigma holds the
# 4 singular values in descending order (full_matrices=True is the default).
U, sigma, VT = la.svd(A, full_matrices=True)
# Display the singular values — the last one should be ~0 (rank deficiency).
sigma
# Compute the reduced SVD matrices: rU, rSigma, rVT:
# Reduced ("economy") SVD: keep only the first 4 columns of U and the
# matching rows of VT, and put sigma on the diagonal of a 4x4 matrix,
# so that A == rU @ rSigma @ rVT.
rU = U[:, :4]
rSigma = np.diag(sigma)
rVT = VT[:4]
# And check that we've actually factorized A:
# Sanity check: rU @ rSigma @ rVT should reproduce A, so the rounded
# difference displays as all zeros.
np.round(rU @ rSigma @ rVT - A, 4)
# Now define Sigma_pinv as the "pseudo-"inverse of Sigma,
# where "pseudo" means "don't divide by zero":
# "Pseudo-"inverse of Sigma: invert the nonzero singular values and leave
# the (numerically) zero ones at zero instead of dividing by zero.
# Improvement over the original: detect the numerical rank with the standard
# relative tolerance (as numpy.linalg.matrix_rank/pinv do) rather than
# hard-coding 3 nonzero singular values — same result here since rank(A)=3,
# but robust if A changes.
Sigma_pinv = np.zeros((4, 4))
tol = sigma.max() * max(A.shape) * np.finfo(sigma.dtype).eps
rank = int((sigma > tol).sum())
Sigma_pinv[:rank, :rank] = np.diag(1 / sigma[:rank])
# Display it — the last diagonal entry stays 0.
Sigma_pinv.round(3)
# Now compute the SVD-based solution for the least-squares problem:
# SVD-based least-squares solution: x = V Sigma^+ U^T b, the
# minimum-norm minimizer of ||A x - b||_2.
x_svd = rVT.T.dot(Sigma_pinv).dot(rU.T).dot(b)
# Residual norm ||A x - b||_2 (nonzero: b is not in range(A))...
la.norm(A @ x_svd - b, 2)
# ...and the solution norm ||x||_2 (minimal among all minimizers).
la.norm(x_svd, 2)