From 4ae6149843aecd0fafaef3620dde1902dd5aab68 Mon Sep 17 00:00:00 2001 From: abhishekmittal15 <55945158+abhishekmittal15@users.noreply.github.com> Date: Mon, 7 Nov 2022 08:08:48 +0530 Subject: [PATCH 01/13] Implemented the projection gradient Algorithm --- procrustes/psdp.py | 110 +++++++- procrustes/test/test_psdp.py | 498 +++++++++++++++++++++-------------- 2 files changed, 402 insertions(+), 206 deletions(-) diff --git a/procrustes/psdp.py b/procrustes/psdp.py index 4d15464..2cd7c3f 100644 --- a/procrustes/psdp.py +++ b/procrustes/psdp.py @@ -37,6 +37,114 @@ "psdp_opt" ] +def init_procustes_projgrad( + A: np.ndarray, + B: np.ndarray, + choice: int = 0 +) -> np.ndarray: + r""" + Returns the starting point of the transformation S of the projection gradient algorithm + """ + n, m = A.shape + + # We will find two choices S1 and S2 and return the one that gives a lower error in the minimization function + + # Finding S1 + S1T = np.linalg.lstsq(A.conj().T, B.conj().T, rcond = 1)[0] + S1 = S1T.conj().T + S1 = _psd_proj(S1) + e1 = np.linalg.norm(S1@A-B, 'fro') + + # Finding S2 + eps = 1e-6 + S2 = np.zeros((n, n)) + for i in range(n): + S2[i, i] = max(0, (A[i,:]@B[i,:].conj().T) / (np.linalg.norm(A[i,:])**2 + eps)) # Adding eps to avoid 0 division + e2 = np.linalg.norm(S2@A-B, "fro") + + if e1 < e2 or choice == 1: + return S1 + elif e2 < e1 or choice == 2: + return S2 + +def psdp_projgrad( + a: np.ndarray, + b: np.ndarray, + max_iterations: int = 1000, + delta: float = 1e-6, + tol: float = 1e-6, + pad: bool = True, + translate: bool = False, + scale: bool = False, + unpad_col: bool = False, + unpad_row: bool = False, + check_finite: bool = True, + weight: Optional[np.ndarray] = None, +) -> ProcrustesResult: + r""" + We want to minimize the function ||SAT-B||_F where A and B are n*m matrices and S is n*n transformation we want to find that minimizes the above function and T is just an identity matrix for now. We are only considering left transformation. 
+ + The paper minimizes the following function ||AX-B||_F, so the mapping between the problem statements are + Term used in paper Term used in our implementation + -------------------------------------------------------------- + A S + X A + B B + """ + A, B = setup_input_arrays( + a, + b, + unpad_col, + unpad_row, + pad, + translate, + scale, + check_finite, + weight, + ) + + if A.shape != B.shape: + raise ValueError( + f"Shape of A and B does not match: {a.shape} != {b.shape} " + "Check pad, unpad_col, and unpad_row arguments." + ) + + n, m = A.shape + + S = init_procustes_projgrad(A, B) + + # Performing some precomputations + AAT = A@A.conj().T + x, _ = np.linalg.eig(AAT) + max_eig = np.max(x) ** 2 # Not sure if these can be complex too, if they are complex, then need to take norm + min_eig = np.min(x) ** 2 # Same as above + BAT = B@A.conj().T + + # Initialization of the algorithm + i = 1 + eps = 1 + eps0 = 0 + err = np.zeros((max_iterations + 1, 1)) + + while i <= max_iterations and eps >= delta*eps0: + S_old = S + # Projected gradient step + S = _psd_proj(S - (S@AAT - BAT)/max_eig) + if i ==1: + eps0 = np.linalg.norm(S-S_old, 'fro') + eps = np.linalg.norm(S-S_old, 'fro') + err[i] = np.linalg.norm(S@A-B, 'fro') + if err[i] < tol: + break + i += 1 + + return ProcrustesResult( + new_a=A, + new_b=B, + error=compute_error(a=a, b=b, t=np.eye(m), s=S), + t=np.eye(m), + s=S, + ) def psdp_opt( a: np.ndarray, @@ -813,4 +921,4 @@ def _psd_proj(arr: np.ndarray, do_cholesky: bool = True) -> np.ndarray: _ = np.linalg.cholesky(arr) return arr except np.linalg.LinAlgError: - return _make_positive(arr) + return _make_positive(arr) \ No newline at end of file diff --git a/procrustes/test/test_psdp.py b/procrustes/test/test_psdp.py index cf6c903..9862054 100644 --- a/procrustes/test/test_psdp.py +++ b/procrustes/test/test_psdp.py @@ -23,78 +23,35 @@ r"""Testings for PSDP (positive semi-definite Procrustes) module.""" import numpy as np -from numpy.testing import 
assert_almost_equal -from procrustes.psdp import psdp_opt, psdp_peng, psdp_woodgate +from numpy.testing import assert_almost_equal, assert_array_less +from procrustes.psdp import psdp_opt, psdp_peng, psdp_woodgate, psdp_projgrad - -def test_psdp_woodgate_identity(n=np.random.randint(50, 100)): +def test_psdp_projgrad_identity(n=np.random.randint(50, 100)): r"""Test Woodgate's algorithm for PSDP with identity matrix.""" a = np.eye(n) b = np.eye(n) - res = psdp_woodgate(a=a, b=b) - s, error = res["s"], res["error"] - assert_almost_equal(s, np.eye(n)) - assert_almost_equal(error, 0.0) - - -def test_psdp_peng_identity(n=np.random.randint(50, 100)): - r"""Test Peng's algorithm for PSDP with identity matrix.""" - a = np.eye(n) - b = np.eye(n) - res = psdp_peng(a=a, b=b) - s, error = res["s"], res["error"] - assert_almost_equal(s, np.eye(n)) - assert_almost_equal(error, 0.0) - - -def test_psdp_opt_identity(n=np.random.randint(50, 100)): - r"""Test OptPSDP with identity matrix.""" - a = np.eye(n) - b = np.eye(n) - res = psdp_opt(a=a, b=b) + res = psdp_projgrad(a=a, b=b) s, error = res["s"], res["error"] assert_almost_equal(s, np.eye(n)) assert_almost_equal(error, 0.0) -def test_psdp_woodgate_diagonal(): +def test_psdp_projgrad_diagonal(): r"""Test Woodgate's algorithm for PSDP with diagonal matrix.""" a = np.diag([1, 2, 3, 4]) b = np.eye(4) - res = psdp_woodgate(a=a, b=b) + res = psdp_projgrad(a=a, b=b) s, error = res["s"], res["error"] actual_result = np.diag([0.99999, 0.5, 0.33333, 0.25]) assert_almost_equal(s, actual_result, decimal=5) assert_almost_equal(error, 0.0, decimal=3) -def test_psdp_peng_diagonal(): - r"""Test Peng's algorithm for PSDP with diagonal matrix.""" - a = np.diag([1, 2, 3, 4]) - b = np.eye(4) - res = psdp_peng(a=a, b=b) - s, error = res["s"], res["error"] - actual_result = np.diag([0.99999, 0.5, 0.33333, 0.25]) - assert_almost_equal(s, actual_result, decimal=5) - assert_almost_equal(error, 0.0, decimal=3) - - -def test_psdp_opt_diagonal(): - 
r"""Test OptPSDP with diagonal matrix.""" - a = np.diag([1, 2, 3, 4]) - b = np.eye(4) - res = psdp_opt(a=a, b=b) - s, error = res["s"], res["error"] - actual_result = np.diag([1, 0.5, 0.33333, 0.25]) - assert_almost_equal(s, actual_result, decimal=1) - assert_almost_equal(error, 0.0, decimal=3) - - -def test_psdp_woodgate_generic_square(): +def test_psdp_projgrad_generic_square(): r"""Test Woodgate's algorithm for PSDP with 2 generic square matrices.""" a = np.array([[1, 6, 0], [4, 3, 0], [0, 0, -0.5]]) b = np.array([[1, 0, 0], [0, -2, 3], [0, 2, 4]]) - res = psdp_woodgate(a=a, b=b) + res = psdp_projgrad(a=a, b=b, max_iterations= 10000) s, error = res["s"], res["error"] actual_result = np.array( [ @@ -103,91 +60,26 @@ def test_psdp_woodgate_generic_square(): [0.24342428, -0.12044658, 0.26510708], ] ) - assert_almost_equal(s, actual_result) + assert_almost_equal(s, actual_result, decimal=4) assert_almost_equal(error, 31.371190566800497, decimal=3) -def test_psdp_peng_generic_square(): - r"""Test Peng's algorithm for PSDP with 2 generic square matrices.""" - a = np.array([[1, 6, 0], [4, 3, 0], [0, 0, -0.5]]) - b = np.array([[1, 0, 0], [0, -2, 3], [0, 2, 4]]) - res = psdp_peng(a=a, b=b) - s, error = res["s"], res["error"] - actual_result = np.array( - [ - [0.1440107, -0.05853613, 0.00939016], - [-0.05853613, 0.02379322, -0.00381683], - [0.00939016, -0.00381683, 0.00061228], - ] - ) - assert_almost_equal(s, actual_result) - assert_almost_equal(error, 33.43617791613022, decimal=3) - - -def test_psdp_opt_generic_square(): - r"""Test OptPSDP with 2 generic square matrices.""" - a = np.array([[1, 6, 0], [4, 3, 0], [0, 0, -0.5]]) - b = np.array([[1, 0, 0], [0, -2, 3], [0, 2, 4]]) - res = psdp_opt(a=a, b=b) - s, error = res["s"], res["error"] - actual_result = np.array( - [ - [0.22423989, -0.11129353, 0.24612947], - [-0.11129353, 0.0552366, -0.12215765], - [0.24612947, -0.12215765, 0.27015584], - ] - ) - assert_almost_equal(s, actual_result, decimal=3) - 
assert_almost_equal(error, 31.371443352940343, decimal=3) - - -def test_psdp_woodgate_generic_non_square(): +def test_psdp_projgrad_generic_non_square(): r"""Test Woodgate's algorithm for PSDP with 2 generic non-square matrices.""" a = np.array([[5, 1, 6, -1], [3, 2, 0, 2], [2, 4, 3, -3]]) b = np.array([[15, 1, 15 - 3, 2 + 5], [10, 5, 6, 3], [-3, 3, -3, -2 + 4]]) - res = psdp_woodgate(a=a, b=b) + res = psdp_projgrad(a=a, b=b, max_iterations=10000) s, error = res["s"], res["error"] - actual_result = np.array( - [ - [2.57997197, 1.11007896, -1.08770156], - [1.11007896, 1.68429863, 0.12829214], - [-1.08770156, 0.12829214, 0.75328052], - ] + actual_result = np.array([ + [ 2.58262946, 1.10868691, -1.08661918], + [ 1.10868691, 1.67636517, 0.13242428], + [-1.08661918, 0.13242428, 0.75597659]] ) assert_almost_equal(s, actual_result, decimal=5) - assert_almost_equal(error, 32.200295757989856, decimal=3) + assert_array_less(error, 32.200295757989856) -def test_psdp_peng_generic_non_square(): - r"""Test Peng's algorithm for PSDP with 2 generic non-square matrices.""" - a = np.array([[5, 1, 6, -1], [3, 2, 0, 2], [2, 4, 3, -3]]) - b = np.array([[15, 1, 15 - 3, 2 + 5], [10, 5, 6, 3], [-3, 3, -3, -2 + 4]]) - res = psdp_peng(a=a, b=b) - s, error = res["s"], res["error"] - actual_result = np.array( - [ - [2.58773004, 1.10512076, -1.07911235], - [1.10512076, 1.65881535, 0.14189328], - [-1.07911235, 0.14189328, 0.75610083], - ] - ) - assert_almost_equal(s, actual_result, decimal=5) - assert_almost_equal(error, 32.21671819685297) - - -def test_psdp_opt_generic_non_square(): - r"""Test OptPSDP with 2 generic non-square matrices.""" - a = np.array([[5, 1, 6, -1], [3, 2, 0, 2], [2, 4, 3, -3]]) - b = np.array([[15, 1, 15 - 3, 2 + 5], [10, 5, 6, 3], [-3, 3, -3, -2 + 4]]) - res = psdp_opt(a=a, b=b) - error = res["error"] - # Keeping error assertion to be less than a threshold value - # rather than "almost equal" to a given value because increasing - # the number of iterations might reduce 
the error obtained. - assert error <= 32.2 - - -def test_psdp_woodgate_non_full_rank(): +def test_psdp_projgrad_non_full_rank(): r"""Test Woodgate's algorithm for PSDP for full rank matrix.""" a = np.array( [ @@ -209,89 +101,285 @@ def test_psdp_woodgate_non_full_rank(): [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], ] ) - res = psdp_woodgate(a=a, b=b) + res = psdp_projgrad(a=a, b=b, max_iterations=10000) s, error = res["s"], res["error"] - actual_result = np.array( - [ - [5.41919155, 1.62689999, -0.31097211, 3.87095011, 5.4023016, 1.6426171], - [1.62689999, 9.65181183, -3.52330026, 2.48010767, 1.64006384, 9.61898593], - [-0.31097211, -3.52330026, 2.71903863, 0.02576251, -0.30656676, -3.5354399], - [3.87095011, 2.48010767, 0.02576251, 5.97846041, 3.85902148, 2.48086299], - [5.4023016, 1.64006384, -0.30656676, 3.85902148, 5.40096355, 1.62207837], - [1.6426171, 9.61898593, -3.5354399, 2.48086299, 1.62207837, 9.65931602], - ] - ) + actual_result = np.array([ + [ 5.40878932, 1.63338805, -0.30680274, 3.87229356, 5.40863988, 1.63366874], + [ 1.63338805, 9.63678713, -3.53016912, 2.47908485, 1.63323779, 9.63660762], + [-0.30680274, -3.53016912, 2.71131028, 0.02464064, -0.30684737, -3.53027101], + [ 3.87229356, 2.47908485, 0.02464064, 5.9697877, 3.87199514, 2.47930511], + [ 5.40863988, 1.63323779, -0.30684737, 3.87199514, 5.40849846, 1.63356974], + [ 1.63366874, 9.63660762, -3.53027101, 2.47930511, 1.63356974, 9.63675614] + ]) assert_almost_equal(s, actual_result, decimal=2) - assert_almost_equal(error, 0.0, decimal=2) + assert_almost_equal(error, 0.0, decimal=5) -def test_psdp_peng_non_full_rank(): - r"""Test Peng's algorithm for PSDP for full rank matrix.""" - a = np.array( - [ - [0.3452, -0.9897, 0.8082, -0.1739, -1.4692, -0.2531, 1.0339], - [0.2472, -1.4457, -0.6672, -0.5790, 1.2516, -0.8184, -0.4790], - [-1.3567, -0.9348, 0.7573, 1.7002, -0.9627, -0.5655, 2.5222], - [1.6639, 0.6111, -0.1858, 0.0485, 0.1136, 0.1372, -0.0149], - [-0.1400, -0.3303, 
-0.2965, 0.0218, 0.0565, -0.1907, -0.2544], - [-1.2662, 0.1905, 0.3302, -0.4041, 1.1479, -1.4716, 0.0857], - ] - ) - b = np.array( - [ - [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], - [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], - [-0.1030, 2.3164, 3.0813, 8.1280, -10.6447, 6.6903, 7.9874], - [8.1678, -4.5977, 0.0559, -2.6948, 1.1326, -6.5904, 2.0167], - [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], - [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], - ] - ) - res = psdp_peng(a=a, b=b) - s, error = res["s"], res["error"] - actual_result = np.array( - [ - [5.40904359, 1.63342796, -0.30678904, 3.87229001, 5.40837406, 1.63361756], - [1.63342796, 9.63675505, -3.53016762, 2.47911674, 1.63339076, 9.63661932], - [ - -0.30678904, - -3.53016762, - 2.71130391, - 0.02463814, - -0.30688059, - -3.53026462, - ], - [3.87229001, 2.47911674, 0.02463814, 5.96975888, 3.87182355, 2.47929281], - [5.40837406, 1.63339076, -0.30688059, 3.87182355, 5.40770783, 1.6335514], - [1.63361756, 9.63661932, -3.53026462, 2.47929281, 1.6335514, 9.63674522], - ] - ) - assert_almost_equal(s, actual_result, decimal=2) - assert_almost_equal(error, 0.0, decimal=2) +# def test_psdp_woodgate_identity(n=np.random.randint(50, 100)): +# r"""Test Woodgate's algorithm for PSDP with identity matrix.""" +# a = np.eye(n) +# b = np.eye(n) +# res = psdp_woodgate(a=a, b=b) +# s, error = res["s"], res["error"] +# assert_almost_equal(s, np.eye(n)) +# assert_almost_equal(error, 0.0) -def test_psdp_opt_non_full_rank(): - r"""Test OptPSDP when the to be transformed matrix doesn't have full rank.""" - a = np.array( - [ - [0.3452, -0.9897, 0.8082, -0.1739, -1.4692, -0.2531, 1.0339], - [0.2472, -1.4457, -0.6672, -0.5790, 1.2516, -0.8184, -0.4790], - [-1.3567, -0.9348, 0.7573, 1.7002, -0.9627, -0.5655, 2.5222], - [1.6639, 0.6111, -0.1858, 0.0485, 0.1136, 0.1372, -0.0149], - [-0.1400, -0.3303, -0.2965, 0.0218, 0.0565, -0.1907, -0.2544], - [-1.2662, 
0.1905, 0.3302, -0.4041, 1.1479, -1.4716, 0.0857], - ] - ) - b = np.array( - [ - [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], - [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], - [-0.1030, 2.3164, 3.0813, 8.1280, -10.6447, 6.6903, 7.9874], - [8.1678, -4.5977, 0.0559, -2.6948, 1.1326, -6.5904, 2.0167], - [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], - [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], - ] - ) - res = psdp_opt(a=a, b=b) - error = res["error"] - assert_almost_equal(error, 0.0, decimal=2) +# def test_psdp_peng_identity(n=np.random.randint(50, 100)): +# r"""Test Peng's algorithm for PSDP with identity matrix.""" +# a = np.eye(n) +# b = np.eye(n) +# res = psdp_peng(a=a, b=b) +# s, error = res["s"], res["error"] +# assert_almost_equal(s, np.eye(n)) +# assert_almost_equal(error, 0.0) + + +# def test_psdp_opt_identity(n=np.random.randint(50, 100)): +# r"""Test OptPSDP with identity matrix.""" +# a = np.eye(n) +# b = np.eye(n) +# res = psdp_opt(a=a, b=b) +# s, error = res["s"], res["error"] +# assert_almost_equal(s, np.eye(n)) +# assert_almost_equal(error, 0.0) + + +# def test_psdp_woodgate_diagonal(): +# r"""Test Woodgate's algorithm for PSDP with diagonal matrix.""" +# a = np.diag([1, 2, 3, 4]) +# b = np.eye(4) +# res = psdp_woodgate(a=a, b=b) +# s, error = res["s"], res["error"] +# actual_result = np.diag([0.99999, 0.5, 0.33333, 0.25]) +# assert_almost_equal(s, actual_result, decimal=5) +# assert_almost_equal(error, 0.0, decimal=3) + + +# def test_psdp_peng_diagonal(): +# r"""Test Peng's algorithm for PSDP with diagonal matrix.""" +# a = np.diag([1, 2, 3, 4]) +# b = np.eye(4) +# res = psdp_peng(a=a, b=b) +# s, error = res["s"], res["error"] +# actual_result = np.diag([0.99999, 0.5, 0.33333, 0.25]) +# assert_almost_equal(s, actual_result, decimal=5) +# assert_almost_equal(error, 0.0, decimal=3) + + +# def test_psdp_opt_diagonal(): +# r"""Test OptPSDP with diagonal matrix.""" +# a = 
np.diag([1, 2, 3, 4]) +# b = np.eye(4) +# res = psdp_opt(a=a, b=b) +# s, error = res["s"], res["error"] +# actual_result = np.diag([1, 0.5, 0.33333, 0.25]) +# assert_almost_equal(s, actual_result, decimal=1) +# assert_almost_equal(error, 0.0, decimal=3) + + +# def test_psdp_woodgate_generic_square(): +# r"""Test Woodgate's algorithm for PSDP with 2 generic square matrices.""" +# a = np.array([[1, 6, 0], [4, 3, 0], [0, 0, -0.5]]) +# b = np.array([[1, 0, 0], [0, -2, 3], [0, 2, 4]]) +# res = psdp_woodgate(a=a, b=b) +# s, error = res["s"], res["error"] +# actual_result = np.array( +# [ +# [0.22351489, -0.11059539, 0.24342428], +# [-0.11059539, 0.05472271, -0.12044658], +# [0.24342428, -0.12044658, 0.26510708], +# ] +# ) +# assert_almost_equal(s, actual_result) +# assert_almost_equal(error, 31.371190566800497, decimal=3) + + +# def test_psdp_peng_generic_square(): +# r"""Test Peng's algorithm for PSDP with 2 generic square matrices.""" +# a = np.array([[1, 6, 0], [4, 3, 0], [0, 0, -0.5]]) +# b = np.array([[1, 0, 0], [0, -2, 3], [0, 2, 4]]) +# res = psdp_peng(a=a, b=b) +# s, error = res["s"], res["error"] +# actual_result = np.array( +# [ +# [0.1440107, -0.05853613, 0.00939016], +# [-0.05853613, 0.02379322, -0.00381683], +# [0.00939016, -0.00381683, 0.00061228], +# ] +# ) +# assert_almost_equal(s, actual_result) +# assert_almost_equal(error, 33.43617791613022, decimal=3) + + +# def test_psdp_opt_generic_square(): +# r"""Test OptPSDP with 2 generic square matrices.""" +# a = np.array([[1, 6, 0], [4, 3, 0], [0, 0, -0.5]]) +# b = np.array([[1, 0, 0], [0, -2, 3], [0, 2, 4]]) +# res = psdp_opt(a=a, b=b) +# s, error = res["s"], res["error"] +# actual_result = np.array( +# [ +# [0.22423989, -0.11129353, 0.24612947], +# [-0.11129353, 0.0552366, -0.12215765], +# [0.24612947, -0.12215765, 0.27015584], +# ] +# ) +# assert_almost_equal(s, actual_result, decimal=3) +# assert_almost_equal(error, 31.371443352940343, decimal=3) + + +# def test_psdp_woodgate_generic_non_square(): +# 
r"""Test Woodgate's algorithm for PSDP with 2 generic non-square matrices.""" +# a = np.array([[5, 1, 6, -1], [3, 2, 0, 2], [2, 4, 3, -3]]) +# b = np.array([[15, 1, 15 - 3, 2 + 5], [10, 5, 6, 3], [-3, 3, -3, -2 + 4]]) +# res = psdp_woodgate(a=a, b=b) +# s, error = res["s"], res["error"] +# actual_result = np.array( +# [ +# [2.57997197, 1.11007896, -1.08770156], +# [1.11007896, 1.68429863, 0.12829214], +# [-1.08770156, 0.12829214, 0.75328052], +# ] +# ) +# assert_almost_equal(s, actual_result, decimal=5) +# assert_almost_equal(error, 32.200295757989856, decimal=3) + + +# def test_psdp_peng_generic_non_square(): +# r"""Test Peng's algorithm for PSDP with 2 generic non-square matrices.""" +# a = np.array([[5, 1, 6, -1], [3, 2, 0, 2], [2, 4, 3, -3]]) +# b = np.array([[15, 1, 15 - 3, 2 + 5], [10, 5, 6, 3], [-3, 3, -3, -2 + 4]]) +# res = psdp_peng(a=a, b=b) +# s, error = res["s"], res["error"] +# actual_result = np.array( +# [ +# [2.58773004, 1.10512076, -1.07911235], +# [1.10512076, 1.65881535, 0.14189328], +# [-1.07911235, 0.14189328, 0.75610083], +# ] +# ) +# assert_almost_equal(s, actual_result, decimal=5) +# assert_almost_equal(error, 32.21671819685297) + + +# def test_psdp_opt_generic_non_square(): +# r"""Test OptPSDP with 2 generic non-square matrices.""" +# a = np.array([[5, 1, 6, -1], [3, 2, 0, 2], [2, 4, 3, -3]]) +# b = np.array([[15, 1, 15 - 3, 2 + 5], [10, 5, 6, 3], [-3, 3, -3, -2 + 4]]) +# res = psdp_opt(a=a, b=b) +# error = res["error"] +# # Keeping error assertion to be less than a threshold value +# # rather than "almost equal" to a given value because increasing +# # the number of iterations might reduce the error obtained. 
+# assert error <= 32.2 + + +# def test_psdp_woodgate_non_full_rank(): +# r"""Test Woodgate's algorithm for PSDP for full rank matrix.""" +# a = np.array( +# [ +# [0.3452, -0.9897, 0.8082, -0.1739, -1.4692, -0.2531, 1.0339], +# [0.2472, -1.4457, -0.6672, -0.5790, 1.2516, -0.8184, -0.4790], +# [-1.3567, -0.9348, 0.7573, 1.7002, -0.9627, -0.5655, 2.5222], +# [1.6639, 0.6111, -0.1858, 0.0485, 0.1136, 0.1372, -0.0149], +# [-0.1400, -0.3303, -0.2965, 0.0218, 0.0565, -0.1907, -0.2544], +# [-1.2662, 0.1905, 0.3302, -0.4041, 1.1479, -1.4716, 0.0857], +# ] +# ) +# b = np.array( +# [ +# [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], +# [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], +# [-0.1030, 2.3164, 3.0813, 8.1280, -10.6447, 6.6903, 7.9874], +# [8.1678, -4.5977, 0.0559, -2.6948, 1.1326, -6.5904, 2.0167], +# [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], +# [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], +# ] +# ) +# res = psdp_woodgate(a=a, b=b) +# s, error = res["s"], res["error"] +# actual_result = np.array( +# [ +# [5.41919155, 1.62689999, -0.31097211, 3.87095011, 5.4023016, 1.6426171], +# [1.62689999, 9.65181183, -3.52330026, 2.48010767, 1.64006384, 9.61898593], +# [-0.31097211, -3.52330026, 2.71903863, 0.02576251, -0.30656676, -3.5354399], +# [3.87095011, 2.48010767, 0.02576251, 5.97846041, 3.85902148, 2.48086299], +# [5.4023016, 1.64006384, -0.30656676, 3.85902148, 5.40096355, 1.62207837], +# [1.6426171, 9.61898593, -3.5354399, 2.48086299, 1.62207837, 9.65931602], +# ] +# ) +# assert_almost_equal(s, actual_result, decimal=2) +# assert_almost_equal(error, 0.0, decimal=2) + + +# def test_psdp_peng_non_full_rank(): +# r"""Test Peng's algorithm for PSDP for full rank matrix.""" +# a = np.array( +# [ +# [0.3452, -0.9897, 0.8082, -0.1739, -1.4692, -0.2531, 1.0339], +# [0.2472, -1.4457, -0.6672, -0.5790, 1.2516, -0.8184, -0.4790], +# [-1.3567, -0.9348, 0.7573, 1.7002, -0.9627, -0.5655, 2.5222], +# 
[1.6639, 0.6111, -0.1858, 0.0485, 0.1136, 0.1372, -0.0149], +# [-0.1400, -0.3303, -0.2965, 0.0218, 0.0565, -0.1907, -0.2544], +# [-1.2662, 0.1905, 0.3302, -0.4041, 1.1479, -1.4716, 0.0857], +# ] +# ) +# b = np.array( +# [ +# [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], +# [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], +# [-0.1030, 2.3164, 3.0813, 8.1280, -10.6447, 6.6903, 7.9874], +# [8.1678, -4.5977, 0.0559, -2.6948, 1.1326, -6.5904, 2.0167], +# [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], +# [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], +# ] +# ) +# res = psdp_peng(a=a, b=b) +# s, error = res["s"], res["error"] +# actual_result = np.array( +# [ +# [5.40904359, 1.63342796, -0.30678904, 3.87229001, 5.40837406, 1.63361756], +# [1.63342796, 9.63675505, -3.53016762, 2.47911674, 1.63339076, 9.63661932], +# [ +# -0.30678904, +# -3.53016762, +# 2.71130391, +# 0.02463814, +# -0.30688059, +# -3.53026462, +# ], +# [3.87229001, 2.47911674, 0.02463814, 5.96975888, 3.87182355, 2.47929281], +# [5.40837406, 1.63339076, -0.30688059, 3.87182355, 5.40770783, 1.6335514], +# [1.63361756, 9.63661932, -3.53026462, 2.47929281, 1.6335514, 9.63674522], +# ] +# ) +# assert_almost_equal(s, actual_result, decimal=2) +# assert_almost_equal(error, 0.0, decimal=2) + + +# def test_psdp_opt_non_full_rank(): +# r"""Test OptPSDP when the to be transformed matrix doesn't have full rank.""" +# a = np.array( +# [ +# [0.3452, -0.9897, 0.8082, -0.1739, -1.4692, -0.2531, 1.0339], +# [0.2472, -1.4457, -0.6672, -0.5790, 1.2516, -0.8184, -0.4790], +# [-1.3567, -0.9348, 0.7573, 1.7002, -0.9627, -0.5655, 2.5222], +# [1.6639, 0.6111, -0.1858, 0.0485, 0.1136, 0.1372, -0.0149], +# [-0.1400, -0.3303, -0.2965, 0.0218, 0.0565, -0.1907, -0.2544], +# [-1.2662, 0.1905, 0.3302, -0.4041, 1.1479, -1.4716, 0.0857], +# ] +# ) +# b = np.array( +# [ +# [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], +# [-0.5694, -9.4371, -5.5455, 
-15.6041, 24.4958, -20.4567, -11.4576], +# [-0.1030, 2.3164, 3.0813, 8.1280, -10.6447, 6.6903, 7.9874], +# [8.1678, -4.5977, 0.0559, -2.6948, 1.1326, -6.5904, 2.0167], +# [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], +# [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], +# ] +# ) +# res = psdp_opt(a=a, b=b) +# error = res["error"] +# assert_almost_equal(error, 0.0, decimal=2) From 4ca6379664b1df289fcf6ec5ccb6cb9cee492315 Mon Sep 17 00:00:00 2001 From: abhishekmittal15 <55945158+abhishekmittal15@users.noreply.github.com> Date: Mon, 7 Nov 2022 08:11:26 +0530 Subject: [PATCH 02/13] Cleaned the code a bit --- procrustes/test/test_psdp.py | 546 +++++++++++++++++------------------ 1 file changed, 273 insertions(+), 273 deletions(-) diff --git a/procrustes/test/test_psdp.py b/procrustes/test/test_psdp.py index 9862054..dc41634 100644 --- a/procrustes/test/test_psdp.py +++ b/procrustes/test/test_psdp.py @@ -27,7 +27,7 @@ from procrustes.psdp import psdp_opt, psdp_peng, psdp_woodgate, psdp_projgrad def test_psdp_projgrad_identity(n=np.random.randint(50, 100)): - r"""Test Woodgate's algorithm for PSDP with identity matrix.""" + r"""Test Projected Gradient's algorithm for PSDP with identity matrix.""" a = np.eye(n) b = np.eye(n) res = psdp_projgrad(a=a, b=b) @@ -37,7 +37,7 @@ def test_psdp_projgrad_identity(n=np.random.randint(50, 100)): def test_psdp_projgrad_diagonal(): - r"""Test Woodgate's algorithm for PSDP with diagonal matrix.""" + r"""Test Projected Gradient's algorithm for PSDP with diagonal matrix.""" a = np.diag([1, 2, 3, 4]) b = np.eye(4) res = psdp_projgrad(a=a, b=b) @@ -48,7 +48,7 @@ def test_psdp_projgrad_diagonal(): def test_psdp_projgrad_generic_square(): - r"""Test Woodgate's algorithm for PSDP with 2 generic square matrices.""" + r"""Test Projected Gradient's algorithm for PSDP with 2 generic square matrices.""" a = np.array([[1, 6, 0], [4, 3, 0], [0, 0, -0.5]]) b = np.array([[1, 0, 0], [0, -2, 3], [0, 2, 4]]) res = 
psdp_projgrad(a=a, b=b, max_iterations= 10000) @@ -65,7 +65,7 @@ def test_psdp_projgrad_generic_square(): def test_psdp_projgrad_generic_non_square(): - r"""Test Woodgate's algorithm for PSDP with 2 generic non-square matrices.""" + r"""Test Projected Grdient's algorithm for PSDP with 2 generic non-square matrices.""" a = np.array([[5, 1, 6, -1], [3, 2, 0, 2], [2, 4, 3, -3]]) b = np.array([[15, 1, 15 - 3, 2 + 5], [10, 5, 6, 3], [-3, 3, -3, -2 + 4]]) res = psdp_projgrad(a=a, b=b, max_iterations=10000) @@ -80,7 +80,7 @@ def test_psdp_projgrad_generic_non_square(): def test_psdp_projgrad_non_full_rank(): - r"""Test Woodgate's algorithm for PSDP for full rank matrix.""" + r"""Test Projected Gradient's algorithm for PSDP for full rank matrix.""" a = np.array( [ [0.3452, -0.9897, 0.8082, -0.1739, -1.4692, -0.2531, 1.0339], @@ -115,271 +115,271 @@ def test_psdp_projgrad_non_full_rank(): assert_almost_equal(error, 0.0, decimal=5) -# def test_psdp_woodgate_identity(n=np.random.randint(50, 100)): -# r"""Test Woodgate's algorithm for PSDP with identity matrix.""" -# a = np.eye(n) -# b = np.eye(n) -# res = psdp_woodgate(a=a, b=b) -# s, error = res["s"], res["error"] -# assert_almost_equal(s, np.eye(n)) -# assert_almost_equal(error, 0.0) - - -# def test_psdp_peng_identity(n=np.random.randint(50, 100)): -# r"""Test Peng's algorithm for PSDP with identity matrix.""" -# a = np.eye(n) -# b = np.eye(n) -# res = psdp_peng(a=a, b=b) -# s, error = res["s"], res["error"] -# assert_almost_equal(s, np.eye(n)) -# assert_almost_equal(error, 0.0) - - -# def test_psdp_opt_identity(n=np.random.randint(50, 100)): -# r"""Test OptPSDP with identity matrix.""" -# a = np.eye(n) -# b = np.eye(n) -# res = psdp_opt(a=a, b=b) -# s, error = res["s"], res["error"] -# assert_almost_equal(s, np.eye(n)) -# assert_almost_equal(error, 0.0) - - -# def test_psdp_woodgate_diagonal(): -# r"""Test Woodgate's algorithm for PSDP with diagonal matrix.""" -# a = np.diag([1, 2, 3, 4]) -# b = np.eye(4) -# res = 
psdp_woodgate(a=a, b=b) -# s, error = res["s"], res["error"] -# actual_result = np.diag([0.99999, 0.5, 0.33333, 0.25]) -# assert_almost_equal(s, actual_result, decimal=5) -# assert_almost_equal(error, 0.0, decimal=3) - - -# def test_psdp_peng_diagonal(): -# r"""Test Peng's algorithm for PSDP with diagonal matrix.""" -# a = np.diag([1, 2, 3, 4]) -# b = np.eye(4) -# res = psdp_peng(a=a, b=b) -# s, error = res["s"], res["error"] -# actual_result = np.diag([0.99999, 0.5, 0.33333, 0.25]) -# assert_almost_equal(s, actual_result, decimal=5) -# assert_almost_equal(error, 0.0, decimal=3) - - -# def test_psdp_opt_diagonal(): -# r"""Test OptPSDP with diagonal matrix.""" -# a = np.diag([1, 2, 3, 4]) -# b = np.eye(4) -# res = psdp_opt(a=a, b=b) -# s, error = res["s"], res["error"] -# actual_result = np.diag([1, 0.5, 0.33333, 0.25]) -# assert_almost_equal(s, actual_result, decimal=1) -# assert_almost_equal(error, 0.0, decimal=3) - - -# def test_psdp_woodgate_generic_square(): -# r"""Test Woodgate's algorithm for PSDP with 2 generic square matrices.""" -# a = np.array([[1, 6, 0], [4, 3, 0], [0, 0, -0.5]]) -# b = np.array([[1, 0, 0], [0, -2, 3], [0, 2, 4]]) -# res = psdp_woodgate(a=a, b=b) -# s, error = res["s"], res["error"] -# actual_result = np.array( -# [ -# [0.22351489, -0.11059539, 0.24342428], -# [-0.11059539, 0.05472271, -0.12044658], -# [0.24342428, -0.12044658, 0.26510708], -# ] -# ) -# assert_almost_equal(s, actual_result) -# assert_almost_equal(error, 31.371190566800497, decimal=3) - - -# def test_psdp_peng_generic_square(): -# r"""Test Peng's algorithm for PSDP with 2 generic square matrices.""" -# a = np.array([[1, 6, 0], [4, 3, 0], [0, 0, -0.5]]) -# b = np.array([[1, 0, 0], [0, -2, 3], [0, 2, 4]]) -# res = psdp_peng(a=a, b=b) -# s, error = res["s"], res["error"] -# actual_result = np.array( -# [ -# [0.1440107, -0.05853613, 0.00939016], -# [-0.05853613, 0.02379322, -0.00381683], -# [0.00939016, -0.00381683, 0.00061228], -# ] -# ) -# assert_almost_equal(s, 
actual_result) -# assert_almost_equal(error, 33.43617791613022, decimal=3) - - -# def test_psdp_opt_generic_square(): -# r"""Test OptPSDP with 2 generic square matrices.""" -# a = np.array([[1, 6, 0], [4, 3, 0], [0, 0, -0.5]]) -# b = np.array([[1, 0, 0], [0, -2, 3], [0, 2, 4]]) -# res = psdp_opt(a=a, b=b) -# s, error = res["s"], res["error"] -# actual_result = np.array( -# [ -# [0.22423989, -0.11129353, 0.24612947], -# [-0.11129353, 0.0552366, -0.12215765], -# [0.24612947, -0.12215765, 0.27015584], -# ] -# ) -# assert_almost_equal(s, actual_result, decimal=3) -# assert_almost_equal(error, 31.371443352940343, decimal=3) - - -# def test_psdp_woodgate_generic_non_square(): -# r"""Test Woodgate's algorithm for PSDP with 2 generic non-square matrices.""" -# a = np.array([[5, 1, 6, -1], [3, 2, 0, 2], [2, 4, 3, -3]]) -# b = np.array([[15, 1, 15 - 3, 2 + 5], [10, 5, 6, 3], [-3, 3, -3, -2 + 4]]) -# res = psdp_woodgate(a=a, b=b) -# s, error = res["s"], res["error"] -# actual_result = np.array( -# [ -# [2.57997197, 1.11007896, -1.08770156], -# [1.11007896, 1.68429863, 0.12829214], -# [-1.08770156, 0.12829214, 0.75328052], -# ] -# ) -# assert_almost_equal(s, actual_result, decimal=5) -# assert_almost_equal(error, 32.200295757989856, decimal=3) - - -# def test_psdp_peng_generic_non_square(): -# r"""Test Peng's algorithm for PSDP with 2 generic non-square matrices.""" -# a = np.array([[5, 1, 6, -1], [3, 2, 0, 2], [2, 4, 3, -3]]) -# b = np.array([[15, 1, 15 - 3, 2 + 5], [10, 5, 6, 3], [-3, 3, -3, -2 + 4]]) -# res = psdp_peng(a=a, b=b) -# s, error = res["s"], res["error"] -# actual_result = np.array( -# [ -# [2.58773004, 1.10512076, -1.07911235], -# [1.10512076, 1.65881535, 0.14189328], -# [-1.07911235, 0.14189328, 0.75610083], -# ] -# ) -# assert_almost_equal(s, actual_result, decimal=5) -# assert_almost_equal(error, 32.21671819685297) - - -# def test_psdp_opt_generic_non_square(): -# r"""Test OptPSDP with 2 generic non-square matrices.""" -# a = np.array([[5, 1, 6, -1], [3, 2, 
0, 2], [2, 4, 3, -3]]) -# b = np.array([[15, 1, 15 - 3, 2 + 5], [10, 5, 6, 3], [-3, 3, -3, -2 + 4]]) -# res = psdp_opt(a=a, b=b) -# error = res["error"] -# # Keeping error assertion to be less than a threshold value -# # rather than "almost equal" to a given value because increasing -# # the number of iterations might reduce the error obtained. -# assert error <= 32.2 - - -# def test_psdp_woodgate_non_full_rank(): -# r"""Test Woodgate's algorithm for PSDP for full rank matrix.""" -# a = np.array( -# [ -# [0.3452, -0.9897, 0.8082, -0.1739, -1.4692, -0.2531, 1.0339], -# [0.2472, -1.4457, -0.6672, -0.5790, 1.2516, -0.8184, -0.4790], -# [-1.3567, -0.9348, 0.7573, 1.7002, -0.9627, -0.5655, 2.5222], -# [1.6639, 0.6111, -0.1858, 0.0485, 0.1136, 0.1372, -0.0149], -# [-0.1400, -0.3303, -0.2965, 0.0218, 0.0565, -0.1907, -0.2544], -# [-1.2662, 0.1905, 0.3302, -0.4041, 1.1479, -1.4716, 0.0857], -# ] -# ) -# b = np.array( -# [ -# [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], -# [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], -# [-0.1030, 2.3164, 3.0813, 8.1280, -10.6447, 6.6903, 7.9874], -# [8.1678, -4.5977, 0.0559, -2.6948, 1.1326, -6.5904, 2.0167], -# [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], -# [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], -# ] -# ) -# res = psdp_woodgate(a=a, b=b) -# s, error = res["s"], res["error"] -# actual_result = np.array( -# [ -# [5.41919155, 1.62689999, -0.31097211, 3.87095011, 5.4023016, 1.6426171], -# [1.62689999, 9.65181183, -3.52330026, 2.48010767, 1.64006384, 9.61898593], -# [-0.31097211, -3.52330026, 2.71903863, 0.02576251, -0.30656676, -3.5354399], -# [3.87095011, 2.48010767, 0.02576251, 5.97846041, 3.85902148, 2.48086299], -# [5.4023016, 1.64006384, -0.30656676, 3.85902148, 5.40096355, 1.62207837], -# [1.6426171, 9.61898593, -3.5354399, 2.48086299, 1.62207837, 9.65931602], -# ] -# ) -# assert_almost_equal(s, actual_result, decimal=2) -# assert_almost_equal(error, 
0.0, decimal=2) - - -# def test_psdp_peng_non_full_rank(): -# r"""Test Peng's algorithm for PSDP for full rank matrix.""" -# a = np.array( -# [ -# [0.3452, -0.9897, 0.8082, -0.1739, -1.4692, -0.2531, 1.0339], -# [0.2472, -1.4457, -0.6672, -0.5790, 1.2516, -0.8184, -0.4790], -# [-1.3567, -0.9348, 0.7573, 1.7002, -0.9627, -0.5655, 2.5222], -# [1.6639, 0.6111, -0.1858, 0.0485, 0.1136, 0.1372, -0.0149], -# [-0.1400, -0.3303, -0.2965, 0.0218, 0.0565, -0.1907, -0.2544], -# [-1.2662, 0.1905, 0.3302, -0.4041, 1.1479, -1.4716, 0.0857], -# ] -# ) -# b = np.array( -# [ -# [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], -# [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], -# [-0.1030, 2.3164, 3.0813, 8.1280, -10.6447, 6.6903, 7.9874], -# [8.1678, -4.5977, 0.0559, -2.6948, 1.1326, -6.5904, 2.0167], -# [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], -# [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], -# ] -# ) -# res = psdp_peng(a=a, b=b) -# s, error = res["s"], res["error"] -# actual_result = np.array( -# [ -# [5.40904359, 1.63342796, -0.30678904, 3.87229001, 5.40837406, 1.63361756], -# [1.63342796, 9.63675505, -3.53016762, 2.47911674, 1.63339076, 9.63661932], -# [ -# -0.30678904, -# -3.53016762, -# 2.71130391, -# 0.02463814, -# -0.30688059, -# -3.53026462, -# ], -# [3.87229001, 2.47911674, 0.02463814, 5.96975888, 3.87182355, 2.47929281], -# [5.40837406, 1.63339076, -0.30688059, 3.87182355, 5.40770783, 1.6335514], -# [1.63361756, 9.63661932, -3.53026462, 2.47929281, 1.6335514, 9.63674522], -# ] -# ) -# assert_almost_equal(s, actual_result, decimal=2) -# assert_almost_equal(error, 0.0, decimal=2) - - -# def test_psdp_opt_non_full_rank(): -# r"""Test OptPSDP when the to be transformed matrix doesn't have full rank.""" -# a = np.array( -# [ -# [0.3452, -0.9897, 0.8082, -0.1739, -1.4692, -0.2531, 1.0339], -# [0.2472, -1.4457, -0.6672, -0.5790, 1.2516, -0.8184, -0.4790], -# [-1.3567, -0.9348, 0.7573, 1.7002, -0.9627, 
-0.5655, 2.5222], -# [1.6639, 0.6111, -0.1858, 0.0485, 0.1136, 0.1372, -0.0149], -# [-0.1400, -0.3303, -0.2965, 0.0218, 0.0565, -0.1907, -0.2544], -# [-1.2662, 0.1905, 0.3302, -0.4041, 1.1479, -1.4716, 0.0857], -# ] -# ) -# b = np.array( -# [ -# [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], -# [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], -# [-0.1030, 2.3164, 3.0813, 8.1280, -10.6447, 6.6903, 7.9874], -# [8.1678, -4.5977, 0.0559, -2.6948, 1.1326, -6.5904, 2.0167], -# [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], -# [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], -# ] -# ) -# res = psdp_opt(a=a, b=b) -# error = res["error"] -# assert_almost_equal(error, 0.0, decimal=2) +def test_psdp_woodgate_identity(n=np.random.randint(50, 100)): + r"""Test Woodgate's algorithm for PSDP with identity matrix.""" + a = np.eye(n) + b = np.eye(n) + res = psdp_woodgate(a=a, b=b) + s, error = res["s"], res["error"] + assert_almost_equal(s, np.eye(n)) + assert_almost_equal(error, 0.0) + + +def test_psdp_peng_identity(n=np.random.randint(50, 100)): + r"""Test Peng's algorithm for PSDP with identity matrix.""" + a = np.eye(n) + b = np.eye(n) + res = psdp_peng(a=a, b=b) + s, error = res["s"], res["error"] + assert_almost_equal(s, np.eye(n)) + assert_almost_equal(error, 0.0) + + +def test_psdp_opt_identity(n=np.random.randint(50, 100)): + r"""Test OptPSDP with identity matrix.""" + a = np.eye(n) + b = np.eye(n) + res = psdp_opt(a=a, b=b) + s, error = res["s"], res["error"] + assert_almost_equal(s, np.eye(n)) + assert_almost_equal(error, 0.0) + + +def test_psdp_woodgate_diagonal(): + r"""Test Woodgate's algorithm for PSDP with diagonal matrix.""" + a = np.diag([1, 2, 3, 4]) + b = np.eye(4) + res = psdp_woodgate(a=a, b=b) + s, error = res["s"], res["error"] + actual_result = np.diag([0.99999, 0.5, 0.33333, 0.25]) + assert_almost_equal(s, actual_result, decimal=5) + assert_almost_equal(error, 0.0, decimal=3) + + +def 
test_psdp_peng_diagonal(): + r"""Test Peng's algorithm for PSDP with diagonal matrix.""" + a = np.diag([1, 2, 3, 4]) + b = np.eye(4) + res = psdp_peng(a=a, b=b) + s, error = res["s"], res["error"] + actual_result = np.diag([0.99999, 0.5, 0.33333, 0.25]) + assert_almost_equal(s, actual_result, decimal=5) + assert_almost_equal(error, 0.0, decimal=3) + + +def test_psdp_opt_diagonal(): + r"""Test OptPSDP with diagonal matrix.""" + a = np.diag([1, 2, 3, 4]) + b = np.eye(4) + res = psdp_opt(a=a, b=b) + s, error = res["s"], res["error"] + actual_result = np.diag([1, 0.5, 0.33333, 0.25]) + assert_almost_equal(s, actual_result, decimal=1) + assert_almost_equal(error, 0.0, decimal=3) + + +def test_psdp_woodgate_generic_square(): + r"""Test Woodgate's algorithm for PSDP with 2 generic square matrices.""" + a = np.array([[1, 6, 0], [4, 3, 0], [0, 0, -0.5]]) + b = np.array([[1, 0, 0], [0, -2, 3], [0, 2, 4]]) + res = psdp_woodgate(a=a, b=b) + s, error = res["s"], res["error"] + actual_result = np.array( + [ + [0.22351489, -0.11059539, 0.24342428], + [-0.11059539, 0.05472271, -0.12044658], + [0.24342428, -0.12044658, 0.26510708], + ] + ) + assert_almost_equal(s, actual_result) + assert_almost_equal(error, 31.371190566800497, decimal=3) + + +def test_psdp_peng_generic_square(): + r"""Test Peng's algorithm for PSDP with 2 generic square matrices.""" + a = np.array([[1, 6, 0], [4, 3, 0], [0, 0, -0.5]]) + b = np.array([[1, 0, 0], [0, -2, 3], [0, 2, 4]]) + res = psdp_peng(a=a, b=b) + s, error = res["s"], res["error"] + actual_result = np.array( + [ + [0.1440107, -0.05853613, 0.00939016], + [-0.05853613, 0.02379322, -0.00381683], + [0.00939016, -0.00381683, 0.00061228], + ] + ) + assert_almost_equal(s, actual_result) + assert_almost_equal(error, 33.43617791613022, decimal=3) + + +def test_psdp_opt_generic_square(): + r"""Test OptPSDP with 2 generic square matrices.""" + a = np.array([[1, 6, 0], [4, 3, 0], [0, 0, -0.5]]) + b = np.array([[1, 0, 0], [0, -2, 3], [0, 2, 4]]) + res = 
psdp_opt(a=a, b=b) + s, error = res["s"], res["error"] + actual_result = np.array( + [ + [0.22423989, -0.11129353, 0.24612947], + [-0.11129353, 0.0552366, -0.12215765], + [0.24612947, -0.12215765, 0.27015584], + ] + ) + assert_almost_equal(s, actual_result, decimal=3) + assert_almost_equal(error, 31.371443352940343, decimal=3) + + +def test_psdp_woodgate_generic_non_square(): + r"""Test Woodgate's algorithm for PSDP with 2 generic non-square matrices.""" + a = np.array([[5, 1, 6, -1], [3, 2, 0, 2], [2, 4, 3, -3]]) + b = np.array([[15, 1, 15 - 3, 2 + 5], [10, 5, 6, 3], [-3, 3, -3, -2 + 4]]) + res = psdp_woodgate(a=a, b=b) + s, error = res["s"], res["error"] + actual_result = np.array( + [ + [2.57997197, 1.11007896, -1.08770156], + [1.11007896, 1.68429863, 0.12829214], + [-1.08770156, 0.12829214, 0.75328052], + ] + ) + assert_almost_equal(s, actual_result, decimal=5) + assert_almost_equal(error, 32.200295757989856, decimal=3) + + +def test_psdp_peng_generic_non_square(): + r"""Test Peng's algorithm for PSDP with 2 generic non-square matrices.""" + a = np.array([[5, 1, 6, -1], [3, 2, 0, 2], [2, 4, 3, -3]]) + b = np.array([[15, 1, 15 - 3, 2 + 5], [10, 5, 6, 3], [-3, 3, -3, -2 + 4]]) + res = psdp_peng(a=a, b=b) + s, error = res["s"], res["error"] + actual_result = np.array( + [ + [2.58773004, 1.10512076, -1.07911235], + [1.10512076, 1.65881535, 0.14189328], + [-1.07911235, 0.14189328, 0.75610083], + ] + ) + assert_almost_equal(s, actual_result, decimal=5) + assert_almost_equal(error, 32.21671819685297) + + +def test_psdp_opt_generic_non_square(): + r"""Test OptPSDP with 2 generic non-square matrices.""" + a = np.array([[5, 1, 6, -1], [3, 2, 0, 2], [2, 4, 3, -3]]) + b = np.array([[15, 1, 15 - 3, 2 + 5], [10, 5, 6, 3], [-3, 3, -3, -2 + 4]]) + res = psdp_opt(a=a, b=b) + error = res["error"] + # Keeping error assertion to be less than a threshold value + # rather than "almost equal" to a given value because increasing + # the number of iterations might reduce the error 
obtained. + assert error <= 32.2 + + +def test_psdp_woodgate_non_full_rank(): + r"""Test Woodgate's algorithm for PSDP for full rank matrix.""" + a = np.array( + [ + [0.3452, -0.9897, 0.8082, -0.1739, -1.4692, -0.2531, 1.0339], + [0.2472, -1.4457, -0.6672, -0.5790, 1.2516, -0.8184, -0.4790], + [-1.3567, -0.9348, 0.7573, 1.7002, -0.9627, -0.5655, 2.5222], + [1.6639, 0.6111, -0.1858, 0.0485, 0.1136, 0.1372, -0.0149], + [-0.1400, -0.3303, -0.2965, 0.0218, 0.0565, -0.1907, -0.2544], + [-1.2662, 0.1905, 0.3302, -0.4041, 1.1479, -1.4716, 0.0857], + ] + ) + b = np.array( + [ + [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], + [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], + [-0.1030, 2.3164, 3.0813, 8.1280, -10.6447, 6.6903, 7.9874], + [8.1678, -4.5977, 0.0559, -2.6948, 1.1326, -6.5904, 2.0167], + [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], + [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], + ] + ) + res = psdp_woodgate(a=a, b=b) + s, error = res["s"], res["error"] + actual_result = np.array( + [ + [5.41919155, 1.62689999, -0.31097211, 3.87095011, 5.4023016, 1.6426171], + [1.62689999, 9.65181183, -3.52330026, 2.48010767, 1.64006384, 9.61898593], + [-0.31097211, -3.52330026, 2.71903863, 0.02576251, -0.30656676, -3.5354399], + [3.87095011, 2.48010767, 0.02576251, 5.97846041, 3.85902148, 2.48086299], + [5.4023016, 1.64006384, -0.30656676, 3.85902148, 5.40096355, 1.62207837], + [1.6426171, 9.61898593, -3.5354399, 2.48086299, 1.62207837, 9.65931602], + ] + ) + assert_almost_equal(s, actual_result, decimal=2) + assert_almost_equal(error, 0.0, decimal=2) + + +def test_psdp_peng_non_full_rank(): + r"""Test Peng's algorithm for PSDP for full rank matrix.""" + a = np.array( + [ + [0.3452, -0.9897, 0.8082, -0.1739, -1.4692, -0.2531, 1.0339], + [0.2472, -1.4457, -0.6672, -0.5790, 1.2516, -0.8184, -0.4790], + [-1.3567, -0.9348, 0.7573, 1.7002, -0.9627, -0.5655, 2.5222], + [1.6639, 0.6111, -0.1858, 0.0485, 0.1136, 
0.1372, -0.0149], + [-0.1400, -0.3303, -0.2965, 0.0218, 0.0565, -0.1907, -0.2544], + [-1.2662, 0.1905, 0.3302, -0.4041, 1.1479, -1.4716, 0.0857], + ] + ) + b = np.array( + [ + [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], + [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], + [-0.1030, 2.3164, 3.0813, 8.1280, -10.6447, 6.6903, 7.9874], + [8.1678, -4.5977, 0.0559, -2.6948, 1.1326, -6.5904, 2.0167], + [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], + [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], + ] + ) + res = psdp_peng(a=a, b=b) + s, error = res["s"], res["error"] + actual_result = np.array( + [ + [5.40904359, 1.63342796, -0.30678904, 3.87229001, 5.40837406, 1.63361756], + [1.63342796, 9.63675505, -3.53016762, 2.47911674, 1.63339076, 9.63661932], + [ + -0.30678904, + -3.53016762, + 2.71130391, + 0.02463814, + -0.30688059, + -3.53026462, + ], + [3.87229001, 2.47911674, 0.02463814, 5.96975888, 3.87182355, 2.47929281], + [5.40837406, 1.63339076, -0.30688059, 3.87182355, 5.40770783, 1.6335514], + [1.63361756, 9.63661932, -3.53026462, 2.47929281, 1.6335514, 9.63674522], + ] + ) + assert_almost_equal(s, actual_result, decimal=2) + assert_almost_equal(error, 0.0, decimal=2) + + +def test_psdp_opt_non_full_rank(): + r"""Test OptPSDP when the to be transformed matrix doesn't have full rank.""" + a = np.array( + [ + [0.3452, -0.9897, 0.8082, -0.1739, -1.4692, -0.2531, 1.0339], + [0.2472, -1.4457, -0.6672, -0.5790, 1.2516, -0.8184, -0.4790], + [-1.3567, -0.9348, 0.7573, 1.7002, -0.9627, -0.5655, 2.5222], + [1.6639, 0.6111, -0.1858, 0.0485, 0.1136, 0.1372, -0.0149], + [-0.1400, -0.3303, -0.2965, 0.0218, 0.0565, -0.1907, -0.2544], + [-1.2662, 0.1905, 0.3302, -0.4041, 1.1479, -1.4716, 0.0857], + ] + ) + b = np.array( + [ + [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], + [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], + [-0.1030, 2.3164, 3.0813, 8.1280, -10.6447, 6.6903, 
7.9874], + [8.1678, -4.5977, 0.0559, -2.6948, 1.1326, -6.5904, 2.0167], + [6.3043, -6.5364, 1.2659, -2.7625, -2.9861, -5.4362, 2.7422], + [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], + ] + ) + res = psdp_opt(a=a, b=b) + error = res["error"] + assert_almost_equal(error, 0.0, decimal=2) From 350037951f50c50d0cfe77589a1a63b09686f5a6 Mon Sep 17 00:00:00 2001 From: abhishekmittal15 <55945158+abhishekmittal15@users.noreply.github.com> Date: Mon, 7 Nov 2022 08:20:36 +0530 Subject: [PATCH 03/13] Added psdp_projgrad to the __all__ list --- procrustes/psdp.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/procrustes/psdp.py b/procrustes/psdp.py index 2cd7c3f..014ee05 100644 --- a/procrustes/psdp.py +++ b/procrustes/psdp.py @@ -34,7 +34,8 @@ __all__ = [ "psdp_woodgate", "psdp_peng", - "psdp_opt" + "psdp_opt", + "psdp_projgrad" ] def init_procustes_projgrad( From dc728c58d861e5e566acaa9ad28092b3433a7c1a Mon Sep 17 00:00:00 2001 From: abhishekmittal15 <55945158+abhishekmittal15@users.noreply.github.com> Date: Mon, 7 Nov 2022 08:34:47 +0530 Subject: [PATCH 04/13] Moved the initialisation function of the proj grad algorithm to the end of the file --- procrustes/psdp.py | 65 ++++++++++++++++++++++++---------------------- 1 file changed, 34 insertions(+), 31 deletions(-) diff --git a/procrustes/psdp.py b/procrustes/psdp.py index 014ee05..2731f6e 100644 --- a/procrustes/psdp.py +++ b/procrustes/psdp.py @@ -38,35 +38,6 @@ "psdp_projgrad" ] -def init_procustes_projgrad( - A: np.ndarray, - B: np.ndarray, - choice: int = 0 -) -> np.ndarray: - r""" - Returns the starting point of the transformation S of the projection gradient algorithm - """ - n, m = A.shape - - # We will find two choices S1 and S2 and return the one that gives a lower error in the minimization function - - # Finding S1 - S1T = np.linalg.lstsq(A.conj().T, B.conj().T, rcond = 1)[0] - S1 = S1T.conj().T - S1 = _psd_proj(S1) - e1 = np.linalg.norm(S1@A-B, 'fro') - - # Finding S2 - 
eps = 1e-6 - S2 = np.zeros((n, n)) - for i in range(n): - S2[i, i] = max(0, (A[i,:]@B[i,:].conj().T) / (np.linalg.norm(A[i,:])**2 + eps)) # Adding eps to avoid 0 division - e2 = np.linalg.norm(S2@A-B, "fro") - - if e1 < e2 or choice == 1: - return S1 - elif e2 < e1 or choice == 2: - return S2 def psdp_projgrad( a: np.ndarray, @@ -112,7 +83,7 @@ def psdp_projgrad( n, m = A.shape - S = init_procustes_projgrad(A, B) + S = _init_procustes_projgrad(A, B) # Performing some precomputations AAT = A@A.conj().T @@ -922,4 +893,36 @@ def _psd_proj(arr: np.ndarray, do_cholesky: bool = True) -> np.ndarray: _ = np.linalg.cholesky(arr) return arr except np.linalg.LinAlgError: - return _make_positive(arr) \ No newline at end of file + return _make_positive(arr) + + + +def _init_procustes_projgrad( + A: np.ndarray, + B: np.ndarray, + choice: int = 0 +) -> np.ndarray: + r""" + Returns the starting point of the transformation S of the projection gradient algorithm + """ + n, m = A.shape + + # We will find two choices S1 and S2 and return the one that gives a lower error in the minimization function + + # Finding S1 + S1T = np.linalg.lstsq(A.conj().T, B.conj().T, rcond = 1)[0] + S1 = S1T.conj().T + S1 = _psd_proj(S1) + e1 = np.linalg.norm(S1@A-B, 'fro') + + # Finding S2 + eps = 1e-6 + S2 = np.zeros((n, n)) + for i in range(n): + S2[i, i] = max(0, (A[i,:]@B[i,:].conj().T) / (np.linalg.norm(A[i,:])**2 + eps)) # Adding eps to avoid 0 division + e2 = np.linalg.norm(S2@A-B, "fro") + + if e1 < e2 or choice == 1: + return S1 + elif e2 < e1 or choice == 2: + return S2 \ No newline at end of file From 288e6786dec244ab906e7fe9c9c5b513ad726505 Mon Sep 17 00:00:00 2001 From: abhishekmittal15 <55945158+abhishekmittal15@users.noreply.github.com> Date: Thu, 17 Nov 2022 15:30:47 +0530 Subject: [PATCH 05/13] Refactored projgrad Improved readability of psdp_projgrad and added explicit stop conditions for the algorithm --- procrustes/psdp.py | 115 +++++++++++++++++++++++++++++++---- 
procrustes/test/test_psdp.py | 6 +- 2 files changed, 107 insertions(+), 14 deletions(-) diff --git a/procrustes/psdp.py b/procrustes/psdp.py index 2731f6e..6d127d1 100644 --- a/procrustes/psdp.py +++ b/procrustes/psdp.py @@ -35,16 +35,14 @@ "psdp_woodgate", "psdp_peng", "psdp_opt", - "psdp_projgrad" + "psdp_projgrad", ] def psdp_projgrad( a: np.ndarray, b: np.ndarray, - max_iterations: int = 1000, - delta: float = 1e-6, - tol: float = 1e-6, + options_dict: Dict = {}, pad: bool = True, translate: bool = False, scale: bool = False, @@ -54,7 +52,9 @@ def psdp_projgrad( weight: Optional[np.ndarray] = None, ) -> ProcrustesResult: r""" - We want to minimize the function ||SAT-B||_F where A and B are n*m matrices and S is n*n transformation we want to find that minimizes the above function and T is just an identity matrix for now. We are only considering left transformation. + Projected gradient method for the positive semi-definite Procrustes problem. + + We want to minimize the function F = ||SAT-B||_F where A and B are n*m matrices and S is n*n transformation we want to find that minimizes the above function and T is just an identity matrix for now. We are only considering left transformation. The paper minimizes the following function ||AX-B||_F, so the mapping between the problem statements are Term used in paper Term used in our implementation @@ -62,6 +62,72 @@ def psdp_projgrad( A S X A B B + + Parameters + ---------- + a : np.ndarray + The matrix :math:`\mathbf{A}` which is to be transformed. + + b : np.ndarray + The target matrix :math:`\mathbf{B}`. + + options : Dict, optional + Dictionary with fields that serve as parameters for the algorithm. + + max_iter : int + Maximum number of iterations. + Default value is 10000. + + s_tol : float + Stop control for ||S_i - S_{i-1}||_F / ||S_1 - S_0||_F + Defaut value is 1e-5. Should be kept below 1 + + f_tol : float + Stop control for ||F_i - F_{i-1}||_F/(1+||F_{i-1}||_F). + Default value is 1e-12. 
Should be kept way less than 1 + + pad : bool, optional + Add zero rows (at the bottom) and/or columns (to the right-hand side) of matrices + :math:`\mathbf{A}` and :math:`\mathbf{B}` so that they have the same shape. + + translate : bool, optional + If True, both arrays are centered at origin (columns of the arrays will have mean zero). + + scale : bool, optional + If True, both arrays are normalized with respect to the Frobenius norm, i.e., + :math:`\text{Tr}\left[\mathbf{A}^\dagger\mathbf{A}\right] = 1` and + :math:`\text{Tr}\left[\mathbf{B}^\dagger\mathbf{B}\right] = 1`. + + unpad_col : bool, optional + If True, zero columns (with values less than 1.0e-8) on the right-hand side of the intial + :math:`\mathbf{A}` and :math:`\mathbf{B}` matrices are removed. + + unpad_row : bool, optional + If True, zero rows (with values less than 1.0e-8) at the bottom of the intial + :math:`\mathbf{A}` and :math:`\mathbf{B}` matrices are removed. + + check_finite : bool, optional + If True, convert the input to an array, checking for NaNs or Infs. + + weight : ndarray, optional + The 1D-array representing the weights of each row of :math:`\mathbf{A}`. This defines the + elements of the diagonal matrix :math:`\mathbf{W}` that is multiplied by :math:`\mathbf{A}` + matrix, i.e., :math:`\mathbf{A} \rightarrow \mathbf{WA}`. + + Returns + ------- + ProcrustesResult + The result of the Procrustes transformation. + + Notes + ----- + The Projected Gradient algorithm (on which this implementation is based) is defined well in p. 131 + of [1]_. + + References + ---------- + .. 
[1] Nicolas Gillis, Punit Sharma, "A semi-analytical approach for the positive semidefinite + Procrustesproblem, Linear Algebra and its Applications, Volume 540, 2018, Pages 112-137 """ A, B = setup_input_arrays( a, @@ -83,31 +149,58 @@ def psdp_projgrad( n, m = A.shape - S = _init_procustes_projgrad(A, B) + # Specifies the default parameters of the algorithm + options = { + # The maximum number of iterations the gradient descent algorithm must run for + "max_iter": 10000, + # The minimum ratio of ||S_i - S_{i - 1}||_F with ||S_1 - S_0||_F, below which algorithm terminates + "s_tol": 1e-5, + # The minimum ratio of ||F_i - F_{i - 1}||_F with 1 + ||F_{i - 1}||_F, below which algorithm terminates + "f_tol": 1e-12 + } + + options.update(options_dict) # Performing some precomputations AAT = A@A.conj().T x, _ = np.linalg.eig(AAT) max_eig = np.max(x) ** 2 # Not sure if these can be complex too, if they are complex, then need to take norm - min_eig = np.min(x) ** 2 # Same as above BAT = B@A.conj().T # Initialization of the algorithm i = 1 + err = np.zeros((options["max_iter"] + 1, 1)) + # S is the right transformation in our problem statement + S = _init_procustes_projgrad(A, B) + # F is the function whose norm we want to minimize, F = S@A - B + F = S @ A - B + # eps = ||S_i - S_{i - 1}||_F eps = 1 + # eps0 = ||S_1 - S_0||_F eps0 = 0 - err = np.zeros((max_iterations + 1, 1)) - while i <= max_iterations and eps >= delta*eps0: + # Algorithm is run until the max iterations + while i <= options["max_iter"]: S_old = S + F_old = F # Projected gradient step S = _psd_proj(S - (S@AAT - BAT)/max_eig) + F = S @ A - B + err[i] = np.linalg.norm(F, 'fro') + + # Stop conditions to check if the algorithm should be terminated + + # If the ||F_i - F_{i - 1}||_F / (1 + ||F_old||_F) is lesser than some tolerance level, then we terminate the algorithm, as there is not much improvement gain in the transformation S for the extra iterations we perform + if np.linalg.norm(F - F_old, 'fro') / (1 + 
np.linalg.norm(F_old, 'fro')) < options["f_tol"]: + break + + # If the ||S_i - S_{i - 1}||_F / ||S_1 - S_0||_F is lesser than some tolerance level, then we terminate the algorithm. TODO: to decide if this is useful or not. if i ==1: eps0 = np.linalg.norm(S-S_old, 'fro') eps = np.linalg.norm(S-S_old, 'fro') - err[i] = np.linalg.norm(S@A-B, 'fro') - if err[i] < tol: + if eps >= options["s_tol"] * eps0: break + i += 1 return ProcrustesResult( diff --git a/procrustes/test/test_psdp.py b/procrustes/test/test_psdp.py index dc41634..f959955 100644 --- a/procrustes/test/test_psdp.py +++ b/procrustes/test/test_psdp.py @@ -51,7 +51,7 @@ def test_psdp_projgrad_generic_square(): r"""Test Projected Gradient's algorithm for PSDP with 2 generic square matrices.""" a = np.array([[1, 6, 0], [4, 3, 0], [0, 0, -0.5]]) b = np.array([[1, 0, 0], [0, -2, 3], [0, 2, 4]]) - res = psdp_projgrad(a=a, b=b, max_iterations= 10000) + res = psdp_projgrad(a=a, b=b) s, error = res["s"], res["error"] actual_result = np.array( [ @@ -68,7 +68,7 @@ def test_psdp_projgrad_generic_non_square(): r"""Test Projected Grdient's algorithm for PSDP with 2 generic non-square matrices.""" a = np.array([[5, 1, 6, -1], [3, 2, 0, 2], [2, 4, 3, -3]]) b = np.array([[15, 1, 15 - 3, 2 + 5], [10, 5, 6, 3], [-3, 3, -3, -2 + 4]]) - res = psdp_projgrad(a=a, b=b, max_iterations=10000) + res = psdp_projgrad(a=a, b=b) s, error = res["s"], res["error"] actual_result = np.array([ [ 2.58262946, 1.10868691, -1.08661918], @@ -101,7 +101,7 @@ def test_psdp_projgrad_non_full_rank(): [-0.5694, -9.4371, -5.5455, -15.6041, 24.4958, -20.4567, -11.4576], ] ) - res = psdp_projgrad(a=a, b=b, max_iterations=10000) + res = psdp_projgrad(a=a, b=b) s, error = res["s"], res["error"] actual_result = np.array([ [ 5.40878932, 1.63338805, -0.30680274, 3.87229356, 5.40863988, 1.63366874], From 0d467ff162340e9471e4809b2314aca36d8bf179 Mon Sep 17 00:00:00 2001 From: abhishekmittal15 <55945158+abhishekmittal15@users.noreply.github.com> Date: Fri, 18 Nov 
2022 15:37:38 +0530 Subject: [PATCH 06/13] Fixed the issue causing the tests to fail --- procrustes/psdp.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/procrustes/psdp.py b/procrustes/psdp.py index 6d127d1..60aa015 100644 --- a/procrustes/psdp.py +++ b/procrustes/psdp.py @@ -190,15 +190,15 @@ def psdp_projgrad( # Stop conditions to check if the algorithm should be terminated - # If the ||F_i - F_{i - 1}||_F / (1 + ||F_old||_F) is lesser than some tolerance level, then we terminate the algorithm, as there is not much improvement gain in the transformation S for the extra iterations we perform + # If the ||F_i - F_{i - 1}||_F / (1 + ||F_old||_F) is lesser than some tolerance level, then we terminate the algorithm, as there is not much improvement gain in the optimisation problem for the extra iterations we perform if np.linalg.norm(F - F_old, 'fro') / (1 + np.linalg.norm(F_old, 'fro')) < options["f_tol"]: break # If the ||S_i - S_{i - 1}||_F / ||S_1 - S_0||_F is lesser than some tolerance level, then we terminate the algorithm. TODO: to decide if this is useful or not. 
- if i ==1: + if i == 1: eps0 = np.linalg.norm(S-S_old, 'fro') eps = np.linalg.norm(S-S_old, 'fro') - if eps >= options["s_tol"] * eps0: + if eps < options["s_tol"] * eps0: break i += 1 From cb6141f0cbe3468c1075538c356cb4818e6a7a09 Mon Sep 17 00:00:00 2001 From: abhishekmittal15 <55945158+abhishekmittal15@users.noreply.github.com> Date: Fri, 18 Nov 2022 16:40:09 +0530 Subject: [PATCH 07/13] Fixed linting errors and warnings --- procrustes/psdp.py | 66 ++++++++++++++++++++++-------------- procrustes/test/test_psdp.py | 15 ++++---- 2 files changed, 48 insertions(+), 33 deletions(-) diff --git a/procrustes/psdp.py b/procrustes/psdp.py index 60aa015..2535167 100644 --- a/procrustes/psdp.py +++ b/procrustes/psdp.py @@ -42,7 +42,7 @@ def psdp_projgrad( a: np.ndarray, b: np.ndarray, - options_dict: Dict = {}, + options_dict: Dict = None, pad: bool = True, translate: bool = False, scale: bool = False, @@ -54,9 +54,12 @@ def psdp_projgrad( r""" Projected gradient method for the positive semi-definite Procrustes problem. - We want to minimize the function F = ||SAT-B||_F where A and B are n*m matrices and S is n*n transformation we want to find that minimizes the above function and T is just an identity matrix for now. We are only considering left transformation. + We want to minimize the function F = ||SAT-B||_F where A and B are n*m matrices and S is n*n + transformation we want to find that minimizes the above function and T is just an identity + matrix for now. We are only considering left transformation. 
- The paper minimizes the following function ||AX-B||_F, so the mapping between the problem statements are + The paper minimizes the following function ||AX-B||_F, so the mapping between the problem + statements are Term used in paper Term used in our implementation -------------------------------------------------------------- A S @@ -121,7 +124,8 @@ def psdp_projgrad( Notes ----- - The Projected Gradient algorithm (on which this implementation is based) is defined well in p. 131 + The Projected Gradient algorithm (on which this implementation is based) is defined well in + p. 131 of [1]_. References @@ -140,31 +144,36 @@ def psdp_projgrad( check_finite, weight, ) - + if A.shape != B.shape: raise ValueError( - f"Shape of A and B does not match: {a.shape} != {b.shape} " + f"Shape of A and B does not match: {A.shape} != {B.shape} " "Check pad, unpad_col, and unpad_row arguments." ) - n, m = A.shape + # A.shape = (n, m) + _, m = A.shape # Specifies the default parameters of the algorithm options = { # The maximum number of iterations the gradient descent algorithm must run for "max_iter": 10000, - # The minimum ratio of ||S_i - S_{i - 1}||_F with ||S_1 - S_0||_F, below which algorithm terminates + # The minimum ratio of ||S_i - S_{i - 1}||_F with ||S_1 - S_0||_F, below which algorithm + # terminates "s_tol": 1e-5, - # The minimum ratio of ||F_i - F_{i - 1}||_F with 1 + ||F_{i - 1}||_F, below which algorithm terminates + # The minimum ratio of ||F_i - F_{i - 1}||_F with 1 + ||F_{i - 1}||_F, below which + # algorithm terminates "f_tol": 1e-12 } - options.update(options_dict) - + if options_dict: + options.update(options_dict) + # Performing some precomputations AAT = A@A.conj().T x, _ = np.linalg.eig(AAT) - max_eig = np.max(x) ** 2 # Not sure if these can be complex too, if they are complex, then need to take norm + max_eig = np.max(x) ** 2 # Not sure if these can be complex too, if they are complex, then + # need to take norm BAT = B@A.conj().T # Initialization of the 
algorithm @@ -173,28 +182,31 @@ def psdp_projgrad( # S is the right transformation in our problem statement S = _init_procustes_projgrad(A, B) # F is the function whose norm we want to minimize, F = S@A - B - F = S @ A - B + F = S@A - B # eps = ||S_i - S_{i - 1}||_F - eps = 1 + eps = 1 # eps0 = ||S_1 - S_0||_F eps0 = 0 # Algorithm is run until the max iterations - while i <= options["max_iter"]: + while i <= options["max_iter"]: S_old = S F_old = F # Projected gradient step S = _psd_proj(S - (S@AAT - BAT)/max_eig) - F = S @ A - B + F = S @ A - B err[i] = np.linalg.norm(F, 'fro') - + # Stop conditions to check if the algorithm should be terminated - - # If the ||F_i - F_{i - 1}||_F / (1 + ||F_old||_F) is lesser than some tolerance level, then we terminate the algorithm, as there is not much improvement gain in the optimisation problem for the extra iterations we perform + + # If the ||F_i - F_{i - 1}||_F / (1 + ||F_old||_F) is lesser than some tolerance level, + # then we terminate the algorithm, as there is not much improvement gain in the + # optimisation problem for the extra iterations we perform if np.linalg.norm(F - F_old, 'fro') / (1 + np.linalg.norm(F_old, 'fro')) < options["f_tol"]: break - - # If the ||S_i - S_{i - 1}||_F / ||S_1 - S_0||_F is lesser than some tolerance level, then we terminate the algorithm. TODO: to decide if this is useful or not. + + # If the ||S_i - S_{i - 1}||_F / ||S_1 - S_0||_F is lesser than some tolerance level, then + # we terminate the algorithm. TODO: to decide if this is useful or not. 
if i == 1: eps0 = np.linalg.norm(S-S_old, 'fro') eps = np.linalg.norm(S-S_old, 'fro') @@ -211,6 +223,7 @@ def psdp_projgrad( s=S, ) + def psdp_opt( a: np.ndarray, b: np.ndarray, @@ -989,7 +1002,6 @@ def _psd_proj(arr: np.ndarray, do_cholesky: bool = True) -> np.ndarray: return _make_positive(arr) - def _init_procustes_projgrad( A: np.ndarray, B: np.ndarray, @@ -1000,10 +1012,11 @@ def _init_procustes_projgrad( """ n, m = A.shape - # We will find two choices S1 and S2 and return the one that gives a lower error in the minimization function + # We will find two choices S1 and S2 and return the one that gives a lower error in the + # minimization function # Finding S1 - S1T = np.linalg.lstsq(A.conj().T, B.conj().T, rcond = 1)[0] + S1T = np.linalg.lstsq(A.conj().T, B.conj().T, rcond=1)[0] S1 = S1T.conj().T S1 = _psd_proj(S1) e1 = np.linalg.norm(S1@A-B, 'fro') @@ -1012,10 +1025,11 @@ def _init_procustes_projgrad( eps = 1e-6 S2 = np.zeros((n, n)) for i in range(n): - S2[i, i] = max(0, (A[i,:]@B[i,:].conj().T) / (np.linalg.norm(A[i,:])**2 + eps)) # Adding eps to avoid 0 division + S2[i, i] = max(0, (A[i, :]@B[i, :].conj().T) / (np.linalg.norm(A[i, :])**2 + eps)) + # Adding eps to avoid 0 division e2 = np.linalg.norm(S2@A-B, "fro") if e1 < e2 or choice == 1: return S1 elif e2 < e1 or choice == 2: - return S2 \ No newline at end of file + return S2 diff --git a/procrustes/test/test_psdp.py b/procrustes/test/test_psdp.py index f959955..cb4bdf1 100644 --- a/procrustes/test/test_psdp.py +++ b/procrustes/test/test_psdp.py @@ -26,6 +26,7 @@ from numpy.testing import assert_almost_equal, assert_array_less from procrustes.psdp import psdp_opt, psdp_peng, psdp_woodgate, psdp_projgrad + def test_psdp_projgrad_identity(n=np.random.randint(50, 100)): r"""Test Projected Gradient's algorithm for PSDP with identity matrix.""" a = np.eye(n) @@ -71,8 +72,8 @@ def test_psdp_projgrad_generic_non_square(): res = psdp_projgrad(a=a, b=b) s, error = res["s"], res["error"] actual_result = 
np.array([ - [ 2.58262946, 1.10868691, -1.08661918], - [ 1.10868691, 1.67636517, 0.13242428], + [2.58262946, 1.10868691, -1.08661918], + [1.10868691, 1.67636517, 0.13242428], [-1.08661918, 0.13242428, 0.75597659]] ) assert_almost_equal(s, actual_result, decimal=5) @@ -104,12 +105,12 @@ def test_psdp_projgrad_non_full_rank(): res = psdp_projgrad(a=a, b=b) s, error = res["s"], res["error"] actual_result = np.array([ - [ 5.40878932, 1.63338805, -0.30680274, 3.87229356, 5.40863988, 1.63366874], - [ 1.63338805, 9.63678713, -3.53016912, 2.47908485, 1.63323779, 9.63660762], + [5.40878932, 1.63338805, -0.30680274, 3.87229356, 5.40863988, 1.63366874], + [1.63338805, 9.63678713, -3.53016912, 2.47908485, 1.63323779, 9.63660762], [-0.30680274, -3.53016912, 2.71131028, 0.02464064, -0.30684737, -3.53027101], - [ 3.87229356, 2.47908485, 0.02464064, 5.9697877, 3.87199514, 2.47930511], - [ 5.40863988, 1.63323779, -0.30684737, 3.87199514, 5.40849846, 1.63356974], - [ 1.63366874, 9.63660762, -3.53027101, 2.47930511, 1.63356974, 9.63675614] + [3.87229356, 2.47908485, 0.02464064, 5.9697877, 3.87199514, 2.47930511], + [5.40863988, 1.63323779, -0.30684737, 3.87199514, 5.40849846, 1.63356974], + [1.63366874, 9.63660762, -3.53027101, 2.47930511, 1.63356974, 9.63675614] ]) assert_almost_equal(s, actual_result, decimal=2) assert_almost_equal(error, 0.0, decimal=5) From c954fffc04443d9f5d7d9a320c3090584f6097d5 Mon Sep 17 00:00:00 2001 From: abhishekmittal15 <55945158+abhishekmittal15@users.noreply.github.com> Date: Sat, 3 Dec 2022 17:01:39 +0530 Subject: [PATCH 08/13] Fixed some linting errors --- procrustes/psdp.py | 122 +++++++++++++++++++---------------- procrustes/test/test_psdp.py | 17 ++--- 2 files changed, 77 insertions(+), 62 deletions(-) diff --git a/procrustes/psdp.py b/procrustes/psdp.py index 60aa015..c6b8e96 100644 --- a/procrustes/psdp.py +++ b/procrustes/psdp.py @@ -42,7 +42,7 @@ def psdp_projgrad( a: np.ndarray, b: np.ndarray, - options_dict: Dict = {}, + options_dict: Dict = 
None, pad: bool = True, translate: bool = False, scale: bool = False, @@ -54,9 +54,13 @@ def psdp_projgrad( r""" Projected gradient method for the positive semi-definite Procrustes problem. - We want to minimize the function F = ||SAT-B||_F where A and B are n*m matrices and S is n*n transformation we want to find that minimizes the above function and T is just an identity matrix for now. We are only considering left transformation. + We want to minimize the function F = ||SAT-B||_F where A and B are n*m matrices and S is n*n + transformation we want to find that minimizes the above function and T is just an identity + matrix for now. We are only considering left transformation. + + The paper minimizes the following function ||AX-B||_F, so the mapping between the problem + statements are - The paper minimizes the following function ||AX-B||_F, so the mapping between the problem statements are Term used in paper Term used in our implementation -------------------------------------------------------------- A S @@ -121,15 +125,15 @@ def psdp_projgrad( Notes ----- - The Projected Gradient algorithm (on which this implementation is based) is defined well in p. 131 - of [1]_. + The Projected Gradient algorithm (on which this implementation is based) is defined well in + p. 131 of [1]_. References ---------- .. [1] Nicolas Gillis, Punit Sharma, "A semi-analytical approach for the positive semidefinite Procrustesproblem, Linear Algebra and its Applications, Volume 540, 2018, Pages 112-137 """ - A, B = setup_input_arrays( + a, b = setup_input_arrays( a, b, unpad_col, @@ -140,77 +144,83 @@ def psdp_projgrad( check_finite, weight, ) - - if A.shape != B.shape: + + if a.shape != b.shape: raise ValueError( f"Shape of A and B does not match: {a.shape} != {b.shape} " "Check pad, unpad_col, and unpad_row arguments." 
) - n, m = A.shape + _, m = a.shape # Specifies the default parameters of the algorithm options = { # The maximum number of iterations the gradient descent algorithm must run for "max_iter": 10000, - # The minimum ratio of ||S_i - S_{i - 1}||_F with ||S_1 - S_0||_F, below which algorithm terminates + # The minimum ratio of ||S_i - S_{i - 1}||_F with ||S_1 - S_0||_F, below which algorithm + # terminates "s_tol": 1e-5, - # The minimum ratio of ||F_i - F_{i - 1}||_F with 1 + ||F_{i - 1}||_F, below which algorithm terminates + # The minimum ratio of ||F_i - F_{i - 1}||_F with 1 + ||F_{i - 1}||_F, below which algorithm + # terminates "f_tol": 1e-12 } options.update(options_dict) - + # Performing some precomputations - AAT = A@A.conj().T - x, _ = np.linalg.eig(AAT) - max_eig = np.max(x) ** 2 # Not sure if these can be complex too, if they are complex, then need to take norm - BAT = B@A.conj().T + aat = a@a.conj().T + x, _ = np.linalg.eig(aat) + max_eig = np.max(x) ** 2 # if they are complex, then need to take norm + bat = b@a.conj().T # Initialization of the algorithm i = 1 err = np.zeros((options["max_iter"] + 1, 1)) # S is the right transformation in our problem statement - S = _init_procustes_projgrad(A, B) + s = _init_procustes_projgrad(a, b) # F is the function whose norm we want to minimize, F = S@A - B - F = S @ A - B + f = s @ a - b # eps = ||S_i - S_{i - 1}||_F - eps = 1 + eps = 1 # eps0 = ||S_1 - S_0||_F eps0 = 0 # Algorithm is run until the max iterations - while i <= options["max_iter"]: - S_old = S - F_old = F + while i <= options["max_iter"]: + s_old = s + f_old = f # Projected gradient step - S = _psd_proj(S - (S@AAT - BAT)/max_eig) - F = S @ A - B - err[i] = np.linalg.norm(F, 'fro') - + s = _psd_proj(s - (s@aat - bat)/max_eig) + f = s @ a - b + err[i] = np.linalg.norm(f, 'fro') + # Stop conditions to check if the algorithm should be terminated - - # If the ||F_i - F_{i - 1}||_F / (1 + ||F_old||_F) is lesser than some tolerance level, then we terminate the 
algorithm, as there is not much improvement gain in the optimisation problem for the extra iterations we perform - if np.linalg.norm(F - F_old, 'fro') / (1 + np.linalg.norm(F_old, 'fro')) < options["f_tol"]: + + # If the ||F_i - F_{i - 1}||_F / (1 + ||F_old||_F) is lesser than some tolerance level, + # then we terminate the algorithm, as there is not much improvement gain in the + # optimisation problem for the extra iterations we perform + if np.linalg.norm(f - f_old, 'fro') / (1 + np.linalg.norm(f_old, 'fro')) < options["f_tol"]: break - - # If the ||S_i - S_{i - 1}||_F / ||S_1 - S_0||_F is lesser than some tolerance level, then we terminate the algorithm. TODO: to decide if this is useful or not. + + # If the ||S_i - S_{i - 1}||_F / ||S_1 - S_0||_F is lesser than some tolerance level, + # then we terminate the algorithm. TODO: to decide if this is useful or not. if i == 1: - eps0 = np.linalg.norm(S-S_old, 'fro') - eps = np.linalg.norm(S-S_old, 'fro') + eps0 = np.linalg.norm(s - s_old, 'fro') + eps = np.linalg.norm(s - s_old, 'fro') if eps < options["s_tol"] * eps0: break i += 1 return ProcrustesResult( - new_a=A, - new_b=B, - error=compute_error(a=a, b=b, t=np.eye(m), s=S), + new_a=a, + new_b=b, + error=compute_error(a=a, b=b, t=np.eye(m), s=s), t=np.eye(m), - s=S, + s=s, ) + def psdp_opt( a: np.ndarray, b: np.ndarray, @@ -989,33 +999,37 @@ def _psd_proj(arr: np.ndarray, do_cholesky: bool = True) -> np.ndarray: return _make_positive(arr) - def _init_procustes_projgrad( - A: np.ndarray, - B: np.ndarray, + a: np.ndarray, + b: np.ndarray, choice: int = 0 ) -> np.ndarray: - r""" - Returns the starting point of the transformation S of the projection gradient algorithm - """ - n, m = A.shape + r"""Return the starting point of the transformation S of the projection gradient algorithm.""" + + n, _ = a.shape - # We will find two choices S1 and S2 and return the one that gives a lower error in the minimization function + # We will find two choices S1 and S2 and return the 
one that gives a lower error in the + # minimization function # Finding S1 - S1T = np.linalg.lstsq(A.conj().T, B.conj().T, rcond = 1)[0] - S1 = S1T.conj().T - S1 = _psd_proj(S1) - e1 = np.linalg.norm(S1@A-B, 'fro') + s1t = np.linalg.lstsq(a.conj().T, b.conj().T, rcond=1)[0] + s1 = s1t.conj().T + s1 = _psd_proj(s1) + e1 = np.linalg.norm(s1@a-b, 'fro') # Finding S2 eps = 1e-6 - S2 = np.zeros((n, n)) + s2 = np.zeros((n, n)) for i in range(n): - S2[i, i] = max(0, (A[i,:]@B[i,:].conj().T) / (np.linalg.norm(A[i,:])**2 + eps)) # Adding eps to avoid 0 division - e2 = np.linalg.norm(S2@A-B, "fro") + s2[i, i] = max(0, (a[i, :]@b[i, :].conj().T) / (np.linalg.norm(a[i, :])**2 + eps)) + # Adding eps to avoid 0 division + e2 = np.linalg.norm(s2@a-b, "fro") + + s_init = None if e1 < e2 or choice == 1: - return S1 + s_init = s1 elif e2 < e1 or choice == 2: - return S2 \ No newline at end of file + s_init = s2 + + return s_init diff --git a/procrustes/test/test_psdp.py b/procrustes/test/test_psdp.py index f959955..fd458c8 100644 --- a/procrustes/test/test_psdp.py +++ b/procrustes/test/test_psdp.py @@ -24,7 +24,8 @@ import numpy as np from numpy.testing import assert_almost_equal, assert_array_less -from procrustes.psdp import psdp_opt, psdp_peng, psdp_woodgate, psdp_projgrad +from procrustes.psdp import psdp_opt, psdp_peng, psdp_projgrad, psdp_woodgate + def test_psdp_projgrad_identity(n=np.random.randint(50, 100)): r"""Test Projected Gradient's algorithm for PSDP with identity matrix.""" @@ -71,8 +72,8 @@ def test_psdp_projgrad_generic_non_square(): res = psdp_projgrad(a=a, b=b) s, error = res["s"], res["error"] actual_result = np.array([ - [ 2.58262946, 1.10868691, -1.08661918], - [ 1.10868691, 1.67636517, 0.13242428], + [2.58262946, 1.10868691, -1.08661918], + [1.10868691, 1.67636517, 0.13242428], [-1.08661918, 0.13242428, 0.75597659]] ) assert_almost_equal(s, actual_result, decimal=5) @@ -104,12 +105,12 @@ def test_psdp_projgrad_non_full_rank(): res = psdp_projgrad(a=a, b=b) s, 
error = res["s"], res["error"] actual_result = np.array([ - [ 5.40878932, 1.63338805, -0.30680274, 3.87229356, 5.40863988, 1.63366874], - [ 1.63338805, 9.63678713, -3.53016912, 2.47908485, 1.63323779, 9.63660762], + [5.40878932, 1.63338805, -0.30680274, 3.87229356, 5.40863988, 1.63366874], + [1.63338805, 9.63678713, -3.53016912, 2.47908485, 1.63323779, 9.63660762], [-0.30680274, -3.53016912, 2.71131028, 0.02464064, -0.30684737, -3.53027101], - [ 3.87229356, 2.47908485, 0.02464064, 5.9697877, 3.87199514, 2.47930511], - [ 5.40863988, 1.63323779, -0.30684737, 3.87199514, 5.40849846, 1.63356974], - [ 1.63366874, 9.63660762, -3.53027101, 2.47930511, 1.63356974, 9.63675614] + [3.87229356, 2.47908485, 0.02464064, 5.9697877, 3.87199514, 2.47930511], + [5.40863988, 1.63323779, -0.30684737, 3.87199514, 5.40849846, 1.63356974], + [1.63366874, 9.63660762, -3.53027101, 2.47930511, 1.63356974, 9.63675614] ]) assert_almost_equal(s, actual_result, decimal=2) assert_almost_equal(error, 0.0, decimal=5) From 4b7cf27b646e098bb481b5ac1e22fa0b6a7ce43d Mon Sep 17 00:00:00 2001 From: Abhishek Mittal Date: Sat, 3 Dec 2022 17:17:58 +0530 Subject: [PATCH 09/13] Added a check if options dict is provided or not --- procrustes/psdp.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/procrustes/psdp.py b/procrustes/psdp.py index c6b8e96..92d160e 100644 --- a/procrustes/psdp.py +++ b/procrustes/psdp.py @@ -165,7 +165,8 @@ def psdp_projgrad( "f_tol": 1e-12 } - options.update(options_dict) + if options_dict: + options.update(options_dict) # Performing some precomputations aat = a@a.conj().T From 25b43870a1c80ca9cd750554a7959b90f637675d Mon Sep 17 00:00:00 2001 From: Abhishek Mittal Date: Sat, 3 Dec 2022 17:20:38 +0530 Subject: [PATCH 10/13] Fixed a minor linting error --- procrustes/test/test_psdp.py | 1 - 1 file changed, 1 deletion(-) diff --git a/procrustes/test/test_psdp.py b/procrustes/test/test_psdp.py index b460fc6..fd458c8 100644 --- a/procrustes/test/test_psdp.py +++ 
b/procrustes/test/test_psdp.py @@ -27,7 +27,6 @@ from procrustes.psdp import psdp_opt, psdp_peng, psdp_projgrad, psdp_woodgate - def test_psdp_projgrad_identity(n=np.random.randint(50, 100)): r"""Test Projected Gradient's algorithm for PSDP with identity matrix.""" a = np.eye(n) From bb13af4265b89769def727bc301c0699439bbb62 Mon Sep 17 00:00:00 2001 From: Fanwang Meng Date: Mon, 2 Jan 2023 20:52:10 -0500 Subject: [PATCH 11/13] Increase number of lines allowed for one module --- tox.ini | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tox.ini b/tox.ini index 20e71f3..1c40b7e 100644 --- a/tox.ini +++ b/tox.ini @@ -182,6 +182,8 @@ extension-pkg-whitelist=numpy [FORMAT] # Maximum number of characters on a single line. max-line-length=100 +# Maximum number of lines in a module +max-module-lines=2000 [MESSAGES CONTROL] disable= From 8e1c8337a944b8e14c8a947214d391374c040290 Mon Sep 17 00:00:00 2001 From: Fanwang Meng Date: Mon, 2 Jan 2023 21:02:15 -0500 Subject: [PATCH 12/13] Drop support of Python 3.6 This is because 1. https://github.com/actions/setup-python/issues/544 2. 
Python 3.6 has come to the end of its life and is no longer supported --- .github/workflows/ci_tox.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_tox.yml b/.github/workflows/ci_tox.yml index f464451..18cd604 100644 --- a/.github/workflows/ci_tox.yml +++ b/.github/workflows/ci_tox.yml @@ -11,7 +11,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest, windows-latest] - python-version: ["3.6", "3.7", "3.8", "3.9", "3.10"] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] runs-on: ${{ matrix.os }} steps: From ea5941ba8ef0d704398520654f78426e9cc09fe5 Mon Sep 17 00:00:00 2001 From: Fanwang Meng Date: Mon, 2 Jan 2023 21:17:41 -0500 Subject: [PATCH 13/13] Use the latest version of `setup-python` --- .github/workflows/ci_tox.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_tox.yml b/.github/workflows/ci_tox.yml index 18cd604..f75230c 100644 --- a/.github/workflows/ci_tox.yml +++ b/.github/workflows/ci_tox.yml @@ -17,7 +17,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: x64