# (C) Copyright IBM 2024.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

from __future__ import annotations

from typing import Callable

import numpy as np
from scipy.optimize import OptimizeResult
from scipy.sparse.linalg import LinearOperator

from ffsim import states


class WrappedCallable:
    """Callable wrapper used to count function calls."""

    def __init__(
        self, func: Callable[[np.ndarray], np.ndarray], optimize_result: OptimizeResult
    ):
        self.func = func
        self.optimize_result = optimize_result

    def __call__(self, x: np.ndarray) -> np.ndarray:
        # Record the evaluation before delegating to the wrapped function.
        self.optimize_result.nfev += 1
        return self.func(x)


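# Illustrative sketch, not part of the original module: a hypothetical usage of
# WrappedCallable, assuming the ``nfev`` counter has been initialized on the
# OptimizeResult before wrapping.
def _example_wrapped_callable() -> None:
    result = OptimizeResult(nfev=0)
    func = WrappedCallable(np.sin, result)
    func(np.zeros(3))
    func(np.ones(3))
    assert result.nfev == 2  # both calls were counted

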
class WrappedLinearOperator:
    """LinearOperator wrapper used to count LinearOperator applications."""

    def __init__(self, linop: LinearOperator, optimize_result: OptimizeResult):
        self.linop = linop
        self.optimize_result = optimize_result

    def __matmul__(self, other: np.ndarray):
        if len(other.shape) == 1:
            # Vector operand: one operator application.
            self.optimize_result.nlinop += 1
        else:
            # Matrix operand: one operator application per column.
            _, n = other.shape
            self.optimize_result.nlinop += n
        return self.linop @ other

    def __rmatmul__(self, other: np.ndarray):
        if len(other.shape) == 1:
            self.optimize_result.nlinop += 1
        else:
            # Matrix operand: one operator application per row.
            n, _ = other.shape
            self.optimize_result.nlinop += n
        return other @ self.linop


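# Illustrative sketch, not part of the original module: a hypothetical usage of
# WrappedLinearOperator, assuming the ``nlinop`` counter has been initialized.
# Applying the operator to a matrix counts one application per column.
def _example_wrapped_linear_operator() -> None:
    from scipy.sparse.linalg import aslinearoperator

    result = OptimizeResult(nlinop=0)
    linop = WrappedLinearOperator(aslinearoperator(np.eye(4)), result)
    linop @ np.ones(4)  # one matrix-vector product
    linop @ np.ones((4, 3))  # three matrix-vector products, one per column
    assert result.nlinop == 4

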
def gradient_finite_diff(
    params_to_vec: Callable[[np.ndarray], np.ndarray],
    theta: np.ndarray,
    index: int,
    epsilon: float,
) -> np.ndarray:
    """Return the gradient of a function with respect to one of its parameters.

    Given a function that maps a vector of "parameters" to an output vector, return
    the derivative of the output vector with respect to a single parameter,
    estimated using a central finite difference.

    Args:
        params_to_vec: Function that maps a parameter vector to an output vector.
        theta: The parameters at which to evaluate the gradient.
        index: The index of the parameter to differentiate with respect to.
        epsilon: Finite difference step size.

    Returns:
        The derivative of the output vector with respect to the indexed parameter.
    """
    # Perturb only the indexed parameter, by +/- epsilon.
    unit = states.one_hot(len(theta), index, dtype=float)
    plus = theta + epsilon * unit
    minus = theta - epsilon * unit
    return (params_to_vec(plus) - params_to_vec(minus)) / (2 * epsilon)


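# Illustrative sketch, not part of the original module: a central-difference
# derivative of a hypothetical two-parameter function with respect to parameter 0.
def _example_gradient_finite_diff() -> None:
    def params_to_vec(theta: np.ndarray) -> np.ndarray:
        return np.array([np.sin(theta[0]), np.cos(theta[1])])

    theta = np.zeros(2)
    grad = gradient_finite_diff(params_to_vec, theta, index=0, epsilon=1e-6)
    # d/d(theta_0) [sin(theta_0), cos(theta_1)] at theta = 0 is [1, 0].
    np.testing.assert_allclose(grad, [1.0, 0.0], atol=1e-6)

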
def jacobian_finite_diff(
    params_to_vec: Callable[[np.ndarray], np.ndarray],
    theta: np.ndarray,
    dim: int,
    epsilon: float,
) -> np.ndarray:
    """Return the Jacobian matrix of a function.

    Given a function that maps a vector of "parameters" to an output vector, return
    the matrix whose :math:`i`-th column contains the derivative of the output
    vector with respect to the :math:`i`-th parameter.

    Args:
        params_to_vec: Function that maps a parameter vector to an output vector.
        theta: The parameters at which to evaluate the Jacobian.
        dim: The dimension of an output vector of the function.
        epsilon: Finite difference step size.

    Returns:
        The Jacobian matrix.
    """
    jac = np.zeros((dim, len(theta)), dtype=complex)
    for i in range(len(theta)):
        jac[:, i] = gradient_finite_diff(params_to_vec, theta, i, epsilon)
    return jac


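# Illustrative sketch, not part of the original module: the Jacobian of the same
# hypothetical function, with one column per parameter.
def _example_jacobian_finite_diff() -> None:
    def params_to_vec(theta: np.ndarray) -> np.ndarray:
        return np.array([np.sin(theta[0]), np.cos(theta[1])])

    theta = np.zeros(2)
    jac = jacobian_finite_diff(params_to_vec, theta, dim=2, epsilon=1e-6)
    np.testing.assert_allclose(jac, [[1.0, 0.0], [0.0, 0.0]], atol=1e-6)

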
def orthogonalize_columns(mat: np.ndarray, vec: np.ndarray) -> np.ndarray:
    """Orthogonalize the columns of a matrix with respect to a vector.

    Given a matrix and a vector, return a new matrix whose columns contain the
    components of the old columns orthogonal to the vector. The vector is assumed
    to be normalized.

    Args:
        mat: The matrix.
        vec: The vector.

    Returns:
        The new matrix with columns orthogonal to the vector.
    """
    # Project each column onto ``vec`` and subtract the projection.
    coeffs = vec.T.conj() @ mat
    return mat - vec.reshape((-1, 1)) * coeffs.reshape((1, -1))
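

# Illustrative sketch, not part of the original module: every column of the
# result is orthogonal to the (normalized) vector.
def _example_orthogonalize_columns() -> None:
    rng = np.random.default_rng(1234)
    mat = rng.standard_normal((5, 3))
    vec = rng.standard_normal(5)
    vec /= np.linalg.norm(vec)  # the projection formula assumes a unit vector
    new_mat = orthogonalize_columns(mat, vec)
    np.testing.assert_allclose(vec.conj() @ new_mat, np.zeros(3), atol=1e-12)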