Least Squares using the Reduced SVD

In [2]:
import numpy as np
import numpy.linalg as la
import scipy.linalg as spla
In [3]:
# tall and skinny w/nullspace
np.random.seed(12)
A = np.random.randn(6, 4)
b = np.random.randn(6)
A[3] = A[4] + A[5]
A[1] = A[5] + A[1]
A[2] = A[3] + A[1]
A[0] = A[3] + A[1]

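Because rows 0 and 2 are both set to A[3] + A[1], the rows are linearly dependent: A has rank 3 and therefore a nontrivial nullspace. A quick sanity check (not part of the method itself):

In [ ]:
la.matrix_rank(A)
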
Compute the full SVD of A and store it in U, sigma, VT:

In [17]:
U, sigma, VT = la.svd(A)
sigma
Out[17]:
array([  6.79251394e+00,   1.60288467e+00,   9.47584950e-01,
         3.56139308e-16])

The last singular value is at the level of round-off, so A has numerical rank 3, as expected. Now compute the reduced SVD matrices rU, rSigma, rVT: the full U is 6x6, so keep only its first four columns; sigma and VT already have the reduced sizes.

In [7]:
rU = U[:,0:4]            # first four columns of U: 6x4
rSigma = np.diag(sigma)  # 4x4 diagonal matrix of singular values
rVT = VT[0:4,:]          # all of VT, which is already 4x4

And check that we've actually factorized A:

In [8]:
(rU.dot(rSigma).dot(rVT) - A).round(4)
Out[8]:
array([[ 0., -0.,  0.,  0.],
       [ 0.,  0., -0.,  0.],
       [ 0., -0.,  0.,  0.],
       [ 0., -0.,  0.,  0.],
       [ 0., -0.,  0.,  0.],
       [ 0., -0., -0.,  0.]])

Now define Sigma_pinv as the "pseudo-"inverse of rSigma, where "pseudo" means "don't divide by zero": invert the three nonzero singular values and leave the (numerically) zero one at zero:

In [11]:
Sigma_pinv = np.zeros((4,4))
Sigma_pinv[:3,:3] = np.diag(1/sigma[:3])  # invert only the three nonzero singular values
Sigma_pinv.round(3)
Out[11]:
array([[ 0.147,  0.   ,  0.   ,  0.   ],
       [ 0.   ,  0.624,  0.   ,  0.   ],
       [ 0.   ,  0.   ,  1.055,  0.   ],
       [ 0.   ,  0.   ,  0.   ,  0.   ]])
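
Hard-coding the cutoff at three nonzero singular values works here because we looked at sigma above. A more general version (a sketch) picks the numerical rank from a tolerance relative to the largest singular value:

In [ ]:
# treat singular values below a relative tolerance as zero
tol = sigma.max() * max(A.shape) * np.finfo(A.dtype).eps
r = np.sum(sigma > tol)
Sigma_pinv2 = np.zeros((4,4))
Sigma_pinv2[:r,:r] = np.diag(1/sigma[:r])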

Now compute the SVD-based solution x = V Sigma_pinv U^T b of the least-squares problem:

In [12]:
x_svd = rVT.T@Sigma_pinv@rU.T@b
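
The matrix rVT.T @ Sigma_pinv @ rU.T is the (Moore-Penrose) pseudoinverse of A, so as a cross-check the result should match numpy's la.pinv applied to b (la.pinv uses its own default tolerance to decide which singular values count as zero):

In [ ]:
la.norm(x_svd - la.pinv(A)@b, 2)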
In [13]:
la.norm(A.dot(x_svd)-b, 2)
Out[13]:
2.1267152888030978
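
This is the least-squares residual. numpy's built-in solver should reproduce the same residual and, since it also truncates the tiny singular value, the same minimum-norm solution up to round-off:

In [ ]:
x_lstsq = la.lstsq(A, b, rcond=None)[0]
la.norm(x_svd - x_lstsq, 2), la.norm(A.dot(x_lstsq)-b, 2)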
In [16]:
la.norm(x_svd,2)
Out[16]:
0.77354943014895849
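
Among all least-squares solutions, x_svd is the one with the smallest 2-norm. Adding a nullspace vector (the last right singular vector, VT[3]) leaves the residual essentially unchanged but makes the solution longer:

In [ ]:
x_other = x_svd + VT[3]
la.norm(A.dot(x_other)-b, 2), la.norm(x_other, 2)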