import numpy as np
import numpy.typing as npt
from numpy.linalg import norm
from scipy.linalg import svd


def rpca_pcp_ialm(
    observations: npt.ArrayLike,
    sparsity_factor: float,
    max_iter: int = 1000,
    mu: float | None = None,
    mu_upper_bound: float | None = None,
    rho: float = 1.5,
    tol: float = 1e-7,
    verbose: bool = True,
) -> tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]:
    """
    Solve the Principal Component Pursuit (PCP) convex relaxation of Robust PCA
    using the Inexact Augmented Lagrange Multiplier (IALM) method.

    PCP decomposes the observations D into A + E by solving

        min ||A||_* + lambda * ||E||_1   subject to   D = A + E.

    See README for algorithmic details and references.

    `mu` is increased every iteration by the factor `rho` until it reaches
    `mu_upper_bound`.

    Parameters:
        observations: The m x n input matrix to decompose ('D' in the IALM paper).
        sparsity_factor: Weight on the sparse term in the objective ('lambda' in the IALM paper).
        max_iter: Maximum number of iterations to perform.
        mu: Initial value for the penalty parameter. If None, defaults to 1.25 / spectral norm of observations.
        mu_upper_bound: Maximum allowed value for `mu`. If None, defaults to `mu * 1e7`.
        rho: Multiplicative factor to increase `mu` in each iteration.
        tol: Tolerance for the stopping criterion (relative Frobenius norm of the residual).
        verbose: If True, print status and debug information during optimization.

    Returns:
        low_rank_component: The recovered low-rank matrix ('A' in the IALM paper).
        sparse_component: The recovered sparse matrix ('E' in the IALM paper).
    """
    observations = np.asarray(observations, dtype=float)

    if mu is None:
        mu = float(1.25 / norm(observations, ord=2))
    if mu_upper_bound is None:
        mu_upper_bound = mu * 1e7

    norm_fro_obs = norm(observations, ord="fro")

    # Initialize the dual variable as Y0 = D / J(D), where
    # J(D) = max(||D||_2, ||D||_inf / lambda) and ||D||_inf is the entrywise
    # maximum absolute value, following the IALM paper. Note that
    # norm(observations, ord=np.inf) would compute the maximum absolute
    # row sum of the matrix instead, which is not what the paper uses.
    dual = observations / np.maximum(
        norm(observations, ord=2), np.abs(observations).max() / sparsity_factor
    )
    sparse = np.zeros_like(observations)
    i = 0
    while True:
        # Update the low-rank component A by singular value thresholding:
        # shrink the singular values of (D - E + Y/mu) by 1/mu.
        u, s, vh = svd(observations - sparse + 1.0 / mu * dual, full_matrices=False)
        s_thresholded = np.maximum(s - 1.0 / mu, 0)
        low_rank = (u * s_thresholded) @ vh

        # Update the sparse component E by entrywise soft-thresholding
        # of (D - A + Y/mu) at level lambda/mu.
        residual_for_sparse = observations - low_rank + 1.0 / mu * dual
        sparse = np.sign(residual_for_sparse) * np.maximum(
            np.abs(residual_for_sparse) - sparsity_factor / mu, 0
        )
        # Check convergence: relative Frobenius norm of the residual D - A - E.
        residual = observations - low_rank - sparse
        err = norm(residual, ord="fro") / norm_fro_obs

        i += 1

        if verbose:
            print(f"iter {i:<4} | err {err:<25} | mu {mu:<25}")

        if err < tol:
            if verbose:
                print("Finished optimization. Error smaller than tolerance.")
            break
        if i == max_iter:
            if verbose:
                print("Finished optimization. Max iterations reached.")
            break

        # Dual ascent step on the Lagrange multiplier, then increase mu.
        dual = dual + mu * residual
        mu = min(mu * rho, mu_upper_bound)
    return low_rank, sparse
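

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original file):
    # recover a synthetic low-rank matrix corrupted by sparse outliers.
    # The choice lambda = 1 / sqrt(max(m, n)) follows Candes et al.,
    # "Robust Principal Component Analysis?" (2011).
    rng = np.random.default_rng(0)
    m, n, rank = 200, 100, 5

    # Ground-truth low-rank matrix and sparse corruption on ~5% of entries.
    low_rank_true = rng.normal(size=(m, rank)) @ rng.normal(size=(rank, n))
    sparse_true = np.zeros((m, n))
    mask = rng.random((m, n)) < 0.05
    sparse_true[mask] = rng.uniform(-10, 10, size=mask.sum())

    observations = low_rank_true + sparse_true
    lam = 1.0 / np.sqrt(max(m, n))

    low_rank_hat, sparse_hat = rpca_pcp_ialm(observations, lam, verbose=False)
    rel_err = norm(low_rank_hat - low_rank_true, ord="fro") / norm(
        low_rank_true, ord="fro"
    )
    print(f"relative recovery error of the low-rank part: {rel_err:.2e}")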