diff --git a/README.rst b/README.rst
index d82d5b3..383600c 100644
--- a/README.rst
+++ b/README.rst
@@ -63,7 +63,7 @@ pip
 Otherwise, to install ``picard``, you first need to install its
 dependencies::
 
-    $ pip install numpy matplotlib numexpr scipy
+    $ pip install numpy matplotlib scipy
 
 Then install Picard with pip::
 
@@ -137,9 +137,11 @@
 These are the dependencies to use Picard:
 
 * numpy (>=1.8)
 * matplotlib (>=1.3)
-* numexpr (>= 2.0)
 * scipy (>=0.19)
 
+Optionally, to get faster computations, you can install:
+
+* numexpr (>= 2.0)
 
 These are the dependencies to run the EEG example:
diff --git a/picard/densities.py b/picard/densities.py
index 03f4424..2317df1 100644
--- a/picard/densities.py
+++ b/picard/densities.py
@@ -5,8 +5,10 @@
 # License: BSD (3-clause)
 
 import numpy as np
-import numexpr as ne
-
+try:
+    import numexpr as ne
+except ImportError:
+    ne = None
 from scipy.optimize import check_grad
 from numpy.testing import assert_allclose
 
@@ -40,11 +42,17 @@ def __init__(self, params=None):
 
     def log_lik(self, Y):
         alpha = self.alpha  # noqa
+        if ne is None:
+            absY = np.abs(Y)
+            return absY + np.log1p(np.exp(-2. * alpha * absY)) / alpha
         return ne.evaluate('abs(Y) + log1p(exp(-2. * alpha * abs(Y))) / alpha')
 
     def score_and_der(self, Y):
         alpha = self.alpha
-        score = ne.evaluate('tanh(alpha * Y)')
+        if ne is None:
+            score = np.tanh(alpha * Y)
+        else:
+            score = ne.evaluate('tanh(alpha * Y)')
         return score, alpha - alpha * score ** 2
 
 
@@ -57,18 +66,29 @@ def __init__(self, params=None):
 
     def log_lik(self, Y):
         a = self.alpha  # noqa
+        if ne is None:
+            return -np.exp(- a * Y ** 2 / 2.) / a
         return ne.evaluate('-exp(- a * Y ** 2 / 2.) / a')
 
     def score_and_der(self, Y):
         a = self.alpha  # noqa
-        Y_sq = ne.evaluate('Y ** 2')  # noqa
-        K = ne.evaluate('exp(- a / 2. * Y_sq)')  # noqa
-        return ne.evaluate('Y * K'), ne.evaluate('(1- a * Y_sq) * K')
+        if ne is None:
+            Y_sq = Y ** 2
+            K = np.exp(- a / 2. * Y_sq)
+            return Y * K, (1 - a * Y_sq) * K
+        else:
+            Y_sq = ne.evaluate('Y ** 2')  # noqa
+            K = ne.evaluate('exp(- a / 2. * Y_sq)')  # noqa
+            return ne.evaluate('Y * K'), ne.evaluate('(1 - a * Y_sq) * K')
 
 
 class Cube(object):
     def log_lik(self, Y):
+        if ne is None:
+            return Y ** 4 / 4
         return ne.evaluate('Y ** 4 / 4')
 
     def score_and_der(self, Y):
+        if ne is None:
+            return Y ** 3, 3 * Y ** 2
         return ne.evaluate('Y ** 3'), ne.evaluate('3 * Y ** 2')
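The change to ``picard/densities.py`` makes ``numexpr`` a soft dependency: the module binds ``ne = None`` when the import fails, and each density then checks that sentinel to choose between the numexpr and plain-NumPy backends. A minimal standalone sketch of the pattern (the function name is illustrative, not part of the patch):

```python
import numpy as np

try:
    import numexpr as ne
except ImportError:
    ne = None  # numexpr is optional; fall back to plain NumPy


def tanh_score(Y, alpha=1.0):
    """Score of the Tanh density and its derivative, backend-agnostic."""
    if ne is None:
        score = np.tanh(alpha * Y)
    else:
        # numexpr compiles the expression and evaluates it multithreaded,
        # which is typically faster on large arrays.
        score = ne.evaluate('tanh(alpha * Y)')
    return score, alpha - alpha * score ** 2
```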
diff --git a/picard/solver.py b/picard/solver.py
index 6c666cb..63f1323 100644
--- a/picard/solver.py
+++ b/picard/solver.py
@@ -202,7 +202,7 @@ def picard(X, fun='tanh', n_components=None, ortho=True, extended=None,
               'verbose': verbose, 'ortho': ortho, 'extended': extended,
               'covariance': covariance}
 
-    with np.errstate(all='raise'):  # So code breaks if overflow/Nans
+    with np.errstate(over='raise', invalid='raise'):
         Y, W, infos = core_picard(X1, **kwargs)
 
     del X1
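Narrowing ``np.errstate`` goes hand in hand with the NumPy fallbacks: expressions such as ``np.exp(-2. * alpha * absY)`` legitimately underflow for large ``|Y|``, and the old ``all='raise'`` turned that harmless condition into a ``FloatingPointError``, whereas ``over='raise', invalid='raise'`` still fails hard on overflow and NaNs. A small illustration of the difference (assuming underflow is the only condition hit):

```python
import numpy as np

x = np.array([-1000.0])

# Underflow flushes to zero and is tolerated under the new settings.
with np.errstate(over='raise', invalid='raise'):
    print(np.exp(x))  # [0.]

# Under the old all='raise', the same computation aborts.
try:
    with np.errstate(all='raise'):
        np.exp(x)
except FloatingPointError as e:
    print('raised:', e)  # underflow encountered in exp
```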
diff --git a/picard/tests/test_solver.py b/picard/tests/test_solver.py
index b512fd5..4d64cd1 100644
--- a/picard/tests/test_solver.py
+++ b/picard/tests/test_solver.py
@@ -3,10 +3,10 @@
 #
 # License: BSD (3-clause)
 import warnings
-from itertools import product
 
 import numpy as np
 from numpy.testing import assert_allclose
+import pytest
 
 from picard import picard, permute, amari_distance
 from picard.densities import Tanh, Exp, Cube, check_density
@@ -19,87 +19,93 @@ def test_dimension_reduction():
     S = rng.laplace(size=(N, T))
     A = rng.randn(N, N)
     X = np.dot(A, S)
-    K, W, Y = picard(X, n_components=n_components, ortho=False,
-                     random_state=rng, max_iter=2)
+    K, W, Y = picard(
+        X, n_components=n_components, ortho=False, random_state=rng, max_iter=2
+    )
     assert K.shape == (n_components, N)
     assert W.shape == (n_components, n_components)
     assert Y.shape, (n_components, T)
     with warnings.catch_warnings(record=True) as w:
-        K, W, Y = picard(X, n_components=n_components, ortho=False,
-                         whiten=False, max_iter=1)
+        K, W, Y = picard(
+            X, n_components=n_components, ortho=False, whiten=False, max_iter=1
+        )
         assert len(w) == 2
 
 
-def test_dots():
+@pytest.mark.parametrize("n_component", [5, 3])
+@pytest.mark.parametrize("whiten", [False, True])
+@pytest.mark.parametrize("ortho", [False, True])
+@pytest.mark.parametrize("w_init", [None, "id"])
+def test_dots(n_component, whiten, ortho, w_init):
     N, T = 5, 100
     rng = np.random.RandomState(42)
     S = rng.laplace(size=(N, T))
     A = rng.randn(N, N)
     X = np.dot(A, S)
-    n_components = [N, 3]
-    tf = [False, True]
-    w_inits = [None, 'id']
-    for n_component, ortho, whiten, w_init in product(n_components, tf, tf,
-                                                      w_inits):
-        if w_init == 'id':
-            if whiten:
-                w_init = np.eye(n_component)
-            else:
-                w_init = np.eye(N)
-        with warnings.catch_warnings(record=True):
-            K, W, Y, X_mean = picard(X, ortho=ortho, whiten=whiten,
-                                     return_X_mean=True, w_init=w_init,
-                                     n_components=n_component,
-                                     random_state=rng, max_iter=2,
-                                     verbose=False)
-        if not whiten:
-            K = np.eye(N)
-        if ortho and whiten:
-            assert_allclose(Y.dot(Y.T) / T, np.eye(n_component), atol=1e-8)
-        Y_prime = np.dot(W, K).dot(X - X_mean[:, None])
-        assert_allclose(Y, Y_prime, atol=1e-7)
+    if w_init == "id":
+        if whiten:
+            w_init = np.eye(n_component)
+        else:
+            w_init = np.eye(N)
+    with warnings.catch_warnings(record=True):
+        K, W, Y, X_mean = picard(
+            X,
+            ortho=ortho,
+            whiten=whiten,
+            return_X_mean=True,
+            w_init=w_init,
+            n_components=n_component,
+            random_state=rng,
+            max_iter=2,
+            verbose=False,
+        )
+    if not whiten:
+        K = np.eye(N)
+    if ortho and whiten:
+        assert_allclose(Y.dot(Y.T) / T, np.eye(n_component), atol=1e-8)
+    Y_prime = np.dot(W, K).dot(X - X_mean[:, None])
+    assert_allclose(Y, Y_prime, atol=1e-7)
 
 
 def test_pre_fastica():
     N, T = 3, 1000
     rng = np.random.RandomState(42)
-    names = ['tanh', 'cube']
-    for j, fun in enumerate([Tanh(params=dict(alpha=0.5)), 'cube']):
+    names = ["tanh", "cube"]
+    for j, fun in enumerate([Tanh(params=dict(alpha=0.5)), "cube"]):
         if j == 0:
             S = rng.laplace(size=(N, T))
         else:
             S = rng.uniform(low=-1, high=1, size=(N, T))
         A = rng.randn(N, N)
         X = np.dot(A, S)
-        K, W, Y = picard(X, fun=fun, ortho=False, random_state=0,
-                         fastica_it=10)
-        if fun == 'tanh':
+        K, W, Y = picard(
+            X, fun=fun, ortho=False, random_state=0, fastica_it=10
+        )
+        if fun == "tanh":
             fun = Tanh()
-        elif fun == 'exp':
+        elif fun == "exp":
             fun = Exp()
-        elif fun == 'cube':
+        elif fun == "cube":
             fun = Cube()
         # Get the final gradient norm
         psiY = fun.score_and_der(Y)[0]
         G = np.inner(psiY, Y) / float(T) - np.eye(N)
-        err_msg = 'fun %s, gradient norm greater than tol' % names[j]
-        assert_allclose(G, np.zeros((N, N)), atol=1e-7,
-                        err_msg=err_msg)
+        err_msg = "fun %s, gradient norm greater than tol" % names[j]
+        assert_allclose(G, np.zeros((N, N)), atol=1e-7, err_msg=err_msg)
         assert Y.shape == X.shape
         assert W.shape == A.shape
         assert K.shape == A.shape
         WA = W.dot(K).dot(A)
         WA = permute(WA)  # Permute and scale
-        err_msg = 'fun %s, wrong unmixing matrix' % names[j]
-        assert_allclose(WA, np.eye(N), rtol=0, atol=1e-1,
-                        err_msg=err_msg)
+        err_msg = "fun %s, wrong unmixing matrix" % names[j]
+        assert_allclose(WA, np.eye(N), rtol=0, atol=1e-1, err_msg=err_msg)
 
 
 def test_picard():
     N, T = 3, 1000
     rng = np.random.RandomState(42)
-    names = ['tanh', 'cube']
-    for j, fun in enumerate([Tanh(params=dict(alpha=0.5)), 'cube']):
+    names = ["tanh", "cube"]
+    for j, fun in enumerate([Tanh(params=dict(alpha=0.5)), "cube"]):
         if j == 0:
             S = rng.laplace(size=(N, T))
         else:
@@ -107,26 +113,24 @@ def test_picard():
         A = rng.randn(N, N)
         X = np.dot(A, S)
         K, W, Y = picard(X, fun=fun, ortho=False, random_state=0)
-        if fun == 'tanh':
+        if fun == "tanh":
             fun = Tanh()
-        elif fun == 'exp':
+        elif fun == "exp":
             fun = Exp()
-        elif fun == 'cube':
+        elif fun == "cube":
             fun = Cube()
         # Get the final gradient norm
         psiY = fun.score_and_der(Y)[0]
         G = np.inner(psiY, Y) / float(T) - np.eye(N)
-        err_msg = 'fun %s, gradient norm greater than tol' % names[j]
-        assert_allclose(G, np.zeros((N, N)), atol=1e-7,
-                        err_msg=err_msg)
+        err_msg = "fun %s, gradient norm greater than tol" % names[j]
+        assert_allclose(G, np.zeros((N, N)), atol=1e-7, err_msg=err_msg)
         assert Y.shape == X.shape
         assert W.shape == A.shape
         assert K.shape == A.shape
         WA = W.dot(K).dot(A)
         WA = permute(WA)  # Permute and scale
-        err_msg = 'fun %s, wrong unmixing matrix' % names[j]
-        assert_allclose(WA, np.eye(N), rtol=0, atol=1e-1,
-                        err_msg=err_msg)
+        err_msg = "fun %s, wrong unmixing matrix" % names[j]
+        assert_allclose(WA, np.eye(N), rtol=0, atol=1e-1, err_msg=err_msg)
 
 
 def test_extended():
@@ -134,22 +138,21 @@ def test_extended():
     n = N // 2
     rng = np.random.RandomState(42)
 
-    S = np.concatenate((rng.laplace(size=(n, T)),
-                        rng.uniform(low=-1, high=1, size=(n, T))),
-                       axis=0)
+    S = np.concatenate(
+        (rng.laplace(size=(n, T)), rng.uniform(low=-1, high=1, size=(n, T))),
+        axis=0,
+    )
     print(S.shape)
     A = rng.randn(N, N)
     X = np.dot(A, S)
-    K, W, Y = picard(X, ortho=False, random_state=0,
-                     extended=True)
+    K, W, Y = picard(X, ortho=False, random_state=0, extended=True)
     assert Y.shape == X.shape
     assert W.shape == A.shape
     assert K.shape == A.shape
     WA = W.dot(K).dot(A)
     WA = permute(WA)  # Permute and scale
-    err_msg = 'wrong unmixing matrix'
-    assert_allclose(WA, np.eye(N), rtol=0, atol=1e-1,
-                    err_msg=err_msg)
+    err_msg = "wrong unmixing matrix"
+    assert_allclose(WA, np.eye(N), rtol=0, atol=1e-1, err_msg=err_msg)
 
 
 def test_shift():
unmixing matrix" + assert_allclose(WA, np.eye(N), rtol=0, atol=1e-1, err_msg=err_msg) def test_shift(): @@ -159,15 +162,21 @@ def test_shift(): A = rng.randn(N, N) offset = rng.randn(N) X = np.dot(A, S) + offset[:, None] - _, W, Y, X_mean = picard(X, ortho=False, whiten=False, - return_X_mean=True, random_state=rng) + _, W, Y, X_mean = picard( + X, ortho=False, whiten=False, return_X_mean=True, random_state=rng + ) assert_allclose(offset, X_mean, rtol=0, atol=0.2) WA = W.dot(A) WA = permute(WA) assert_allclose(WA, np.eye(N), rtol=0, atol=0.2) - _, W, Y, X_mean = picard(X, ortho=False, whiten=False, - centering=False, return_X_mean=True, - random_state=rng) + _, W, Y, X_mean = picard( + X, + ortho=False, + whiten=False, + centering=False, + return_X_mean=True, + random_state=rng, + ) assert_allclose(X_mean, 0) @@ -177,44 +186,47 @@ def test_picardo(): S = rng.laplace(size=(N, T)) A = rng.randn(N, N) X = np.dot(A, S) - names = ['tanh', 'exp', 'cube'] + names = ["tanh", "exp", "cube"] for fastica_it in [None, 2]: for fun in names: print(fun) - K, W, Y = picard(X, fun=fun, ortho=True, random_state=rng, - fastica_it=fastica_it, verbose=True, - extended=True) - if fun == 'tanh': + K, W, Y = picard( + X, + fun=fun, + ortho=True, + random_state=rng, + fastica_it=fastica_it, + verbose=True, + extended=True, + ) + if fun == "tanh": fun = Tanh() - elif fun == 'exp': - fun = Exp(params={'alpha': 0.1}) - elif fun == 'cube': + elif fun == "exp": + fun = Exp(params={"alpha": 0.1}) + elif fun == "cube": fun = Cube() # Get the final gradient norm psiY = fun.score_and_der(Y)[0] G = np.inner(psiY, Y) / float(T) - np.eye(N) - G = (G - G.T) / 2. # take skew-symmetric part - err_msg = 'fun %s, gradient norm greater than tol' % fun - assert_allclose(G, np.zeros((N, N)), atol=1e-7, - err_msg=err_msg) + G = (G - G.T) / 2.0 # take skew-symmetric part + err_msg = "fun %s, gradient norm greater than tol" % fun + assert_allclose(G, np.zeros((N, N)), atol=1e-7, err_msg=err_msg) assert Y.shape == X.shape assert W.shape == A.shape assert K.shape == A.shape WA = W.dot(K).dot(A) WA = permute(WA) # Permute and scale - err_msg = 'fun %s, wrong unmixing matrix' % fun - assert_allclose(WA, np.eye(N), rtol=0, atol=0.1, - err_msg=err_msg) + err_msg = "fun %s, wrong unmixing matrix" % fun + assert_allclose(WA, np.eye(N), rtol=0, atol=0.1, err_msg=err_msg) def test_bad_custom_density(): - class CustomDensity(object): def log_lik(self, Y): - return Y ** 4 / 4 + return Y**4 / 4 def score_and_der(self, Y): - return Y ** 3, 3 * Y ** 2 + 2. + return Y**3, 3 * Y**2 + 2.0 fun = CustomDensity() X = np.random.randn(2, 10) @@ -223,7 +235,7 @@ def score_and_der(self, Y): except AssertionError: pass else: - raise (AssertionError, 'Bad function undetected') + raise (AssertionError, "Bad function undetected") def test_fun(): @@ -231,32 +243,33 @@ def test_fun(): check_density(fun) -def test_no_regression(): +@pytest.mark.parametrize("mode", ["lap", "gauss"]) +@pytest.mark.parametrize("ortho", [True, False]) +def test_no_regression(mode, ortho): n_tests = 10 baseline = {} - baseline['lap', True] = 17. - baseline['lap', False] = 23. - baseline['gauss', True] = 58. - baseline['gauss', False] = 60. 
+ baseline["lap", True] = 17.0 + baseline["lap", False] = 23.0 + baseline["gauss", True] = 58.0 + baseline["gauss", False] = 60.0 N, T = 10, 1000 - for mode in ['lap', 'gauss']: - for ortho in [True, False]: - n_iters = [] - for i in range(n_tests): - rng = np.random.RandomState(i) - if mode == 'lap': - S = rng.laplace(size=(N, T)) - else: - S = rng.randn(N, T) - A = rng.randn(N, N) - X = np.dot(A, S) - _, _, _, n_iter = picard(X, return_n_iter=True, - ortho=ortho, random_state=rng) - n_iters.append(n_iter) - n_mean = np.mean(n_iters) - nb_mean = baseline[mode, ortho] - err_msg = 'mode=%s, ortho=%s. %d iterations, expecting <%d.' - assert n_mean < nb_mean, err_msg % (mode, ortho, n_mean, nb_mean) + n_iters = [] + for i in range(n_tests): + rng = np.random.RandomState(i) + if mode == "lap": + S = rng.laplace(size=(N, T)) + else: + S = rng.randn(N, T) + A = rng.randn(N, N) + X = np.dot(A, S) + _, _, _, n_iter = picard( + X, return_n_iter=True, ortho=ortho, random_state=rng + ) + n_iters.append(n_iter) + n_mean = np.mean(n_iters) + nb_mean = baseline[mode, ortho] + err_msg = "mode=%s, ortho=%s. %d iterations, expecting <%d." + assert n_mean < nb_mean, err_msg % (mode, ortho, n_mean, nb_mean) def test_amari_distance(): diff --git a/requirements.txt b/requirements.txt index 740900e..eeb9883 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,4 @@ numpy >=1.8 matplotlib >=1.3 scipy >=0.16 -numexpr >= 2.0 scikit-learn>=0.23 diff --git a/setup.py b/setup.py index d0fc268..198c2dc 100644 --- a/setup.py +++ b/setup.py @@ -65,7 +65,6 @@ def package_tree(pkgroot): install_requires=[ 'numpy', 'scipy', - 'numexpr', 'scikit-learn' ], packages=package_tree('picard'),