diff --git a/README.rst b/README.rst
index a90f0d37..56aad44c 100644
--- a/README.rst
+++ b/README.rst
@@ -47,9 +47,13 @@ More recently, Cieslak et al. [#r3]_ integrated both approaches in *SHORELine*,
 the work of ``eddy`` and *SHORELine*, while generalizing these methods to multiple acquisition schemes
 (single-shell, multi-shell, and diffusion spectrum imaging) using diffusion models available with DIPY [#r5]_.
 
+.. BEGIN FLOWCHART
+
 .. image:: https://raw.githubusercontent.com/nipreps/eddymotion/507fc9bab86696d5330fd6a86c3870968243aea8/docs/_static/eddymotion-flowchart.svg
    :alt: The eddymotion flowchart
 
+.. END FLOWCHART
+
 .. [#r1] S. Ben-Amitay et al., Motion correction and registration of high b-value diffusion weighted images, Magnetic Resonance in Medicine 67:1694–1702 (2012)
 
 .. [#r2] J. L. R. Andersson. et al., An integrated approach to correction for off-resonance effects and subject movement
diff --git a/docs/conf.py b/docs/conf.py
index 6826697d..e9cd3542 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -234,6 +234,11 @@
 apidoc_separate_modules = True
 
 apidoc_extra_args = ["--module-first", "-d 1", "-T"]
+
+# -- Options for autodoc extension -------------------------------------------
+autoclass_content = "both"
+
+
 # -- Options for intersphinx extension ---------------------------------------
 
 # Example configuration for intersphinx: refer to the Python standard library.
@@ -253,3 +258,25 @@
 
 # -- Options for versioning extension ----------------------------------------
 scv_show_banner = True
+
+
+# -- Special functions -------------------------------------------------------
+import inspect
+
+
+def autodoc_process_signature(app, what, name, obj, options, signature, return_annotation):
+    """Replace the class signature by the signature from cls.__init__"""
+
+    if what == "class" and hasattr(obj, "__init__"):
+        try:
+            init_signature = inspect.signature(obj.__init__)
+            # Convert the Signature object to a string
+            return str(init_signature), return_annotation
+        except ValueError:
+            # Handle cases where `inspect.signature` fails
+            return signature, return_annotation
+    return signature, return_annotation
+
+
+def setup(app):
+    app.connect("autodoc-process-signature", autodoc_process_signature)
diff --git a/docs/index.rst b/docs/index.rst
index 8f3de245..0467a8e4 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,9 +1,8 @@
 .. include:: links.rst
 .. include:: ../README.rst
-   :end-line: 29
+   :end-before: BEGIN FLOWCHART
 
 .. include:: ../README.rst
-   :start-line: 34
-
+   :start-after: END FLOWCHART
 .. image:: _static/eddymotion-flowchart.svg
    :alt: The eddymotion flowchart
diff --git a/src/eddymotion/model/gpr.py b/src/eddymotion/model/gpr.py
index 8f098a20..769242bc 100644
--- a/src/eddymotion/model/gpr.py
+++ b/src/eddymotion/model/gpr.py
@@ -64,7 +64,7 @@
 
 class EddyMotionGPR(GaussianProcessRegressor):
     r"""
-    A GP regressor specialized for eddymotion.
+    A Gaussian process (GP) regressor specialized for eddymotion.
 
     This specialization of the default GP regressor is created to allow
     the following extended behaviors:
@@ -80,22 +80,21 @@ class EddyMotionGPR(GaussianProcessRegressor):
 
     In principle, Scikit-Learn's implementation normalizes the training data
     as in [Andersson15]_ (see
-    `FSL's souce code `__).
+    `FSL's source code `__).
     From their paper (p. 167, end of first column):
 
-        Typically one just substracts the mean (:math:`\bar{\mathbf{f}}`)
+        Typically one just subtracts the mean (:math:`\bar{\mathbf{f}}`)
         from :math:`\mathbf{f}` and then add it back to :math:`f^{*}`, which is
         analogous to what is often done in "traditional" regression.
 
     Finally, the parameter :math:`\sigma^2` maps on to Scikit-learn's ``alpha``
-    of the regressor.
-    Because it is not a parameter of the kernel, hyperparameter selection
-    through gradient-descent with analytical gradient calculations
-    would not work (the derivative of the kernel w.r.t. alpha is zero).
+    of the regressor. Because it is not a parameter of the kernel, hyperparameter
+    selection through gradient-descent with analytical gradient calculations
+    would not work (the derivative of the kernel w.r.t. ``alpha`` is zero).
 
-    I believe this is overlooked in [Andersson15]_, or they actually did not
-    use analytical gradient-descent:
+    This might have been overlooked in [Andersson15]_, or else they actually did
+    not use analytical gradient-descent:
 
         *A note on optimisation*
@@ -105,13 +104,12 @@ class EddyMotionGPR(GaussianProcessRegressor):
         The reason for that is that such methods typically use fewer steps, and
         when the cost of calculating the derivatives is small/moderate compared
         to calculating the functions itself (as is the case for Eq. (12)) then
-        execution time can be much shorter.
-        However, we found that for the multi-shell case a heuristic optimisation
-        method such as the Nelder-Mead simplex method (Nelder and Mead, 1965) was
-        frequently better at avoiding local maxima.
-        Hence, that was the method we used for all optimisations in the present
-        paper.
-
+        execution time can be much shorter. However, we found that for the
+        multi-shell case a heuristic optimisation method such as the Nelder-Mead
+        simplex method (Nelder and Mead, 1965) was frequently better at avoiding
+        local maxima. Hence, that was the method we used for all optimisations
+        in the present paper.
+
     **Multi-shell regression (TODO).**
     For multi-shell modeling, the kernel :math:`k(\textbf{x}, \textbf{x'})`
     is updated following Eq. (14) in [Andersson15]_.
@@ -264,7 +262,6 @@ def __init__(
         l_bounds: tuple[float, float] = BOUNDS_LAMBDA,
     ):
         r"""
-        Initialize an exponential Kriging kernel.
 
         Parameters
         ----------
@@ -273,7 +270,7 @@ def __init__(
         beta_l : :obj:`float`, optional
             The :math:`\lambda` hyperparameter.
         a_bounds : :obj:`tuple`, optional
-            Bounds for the a parameter.
+            Bounds for the ``a`` parameter.
         l_bounds : :obj:`tuple`, optional
             Bounds for the :math:`\lambda` hyperparameter.
 
@@ -310,10 +307,10 @@ def __call__(
 
         Returns
         -------
-        K : ndarray of shape (n_samples_X, n_samples_Y)
+        K : :obj:`~numpy.ndarray` of shape (n_samples_X, n_samples_Y)
             Kernel k(X, Y)
 
-        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
+        K_gradient : :obj:`~numpy.ndarray` of shape (n_samples_X, n_samples_X, n_dims),\
             optional
             The gradient of the kernel k(X, X) with respect to the log of the
             hyperparameter of the kernel. Only returned when `eval_gradient`
@@ -341,12 +338,12 @@ def diag(self, X: np.ndarray) -> np.ndarray:
 
         Parameters
         ----------
-        X : ndarray of shape (n_samples_X, n_features)
+        X : :obj:`~numpy.ndarray` of shape (n_samples_X, n_features)
             Left argument of the returned kernel k(X, Y)
 
         Returns
        -------
-        K_diag : ndarray of shape (n_samples_X,)
+        K_diag : :obj:`~numpy.ndarray` of shape (n_samples_X,)
             Diagonal of kernel k(X, X)
         """
         return self.beta_l * np.ones(X.shape[0])
@@ -370,7 +367,6 @@ def __init__(
         l_bounds: tuple[float, float] = BOUNDS_LAMBDA,
     ):
         r"""
-        Initialize a spherical Kriging kernel.
 
         Parameters
         ----------
@@ -416,10 +412,10 @@ def __call__(
 
         Returns
         -------
-        K : ndarray of shape (n_samples_X, n_samples_Y)
+        K : :obj:`~numpy.ndarray` of shape (n_samples_X, n_samples_Y)
             Kernel k(X, Y)
 
-        K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\
+        K_gradient : :obj:`~numpy.ndarray` of shape (n_samples_X, n_samples_X, n_dims),\
             optional
             The gradient of the kernel k(X, X) with respect to the log of the
             hyperparameter of the kernel. Only returned when ``eval_gradient``
@@ -452,12 +448,12 @@ def diag(self, X: np.ndarray) -> np.ndarray:
 
         Parameters
         ----------
-        X : ndarray of shape (n_samples_X, n_features)
+        X : :obj:`~numpy.ndarray` of shape (n_samples_X, n_features)
             Left argument of the returned kernel k(X, Y)
 
         Returns
         -------
-        K_diag : ndarray of shape (n_samples_X,)
+        K_diag : :obj:`~numpy.ndarray` of shape (n_samples_X,)
             Diagonal of kernel k(X, X)
         """
         return self.beta_l * np.ones(X.shape[0])
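
The ``autodoc-process-signature`` hook added to ``docs/conf.py`` above simply returns ``str(inspect.signature(obj.__init__))`` for classes. A minimal, hypothetical check of that behaviour (not part of the patch; ``DemoKernel`` and its parameters are made-up stand-ins for the Kriging kernel classes)::

    import inspect


    class DemoKernel:
        """Stand-in for the kernel classes documented by autodoc."""

        def __init__(self, beta_a: float = 0.01, beta_l: float = 2.0):
            self.beta_a = beta_a
            self.beta_l = beta_l


    # This is exactly the string the hook returns when ``what == "class"``,
    # so the rendered class signature mirrors ``__init__``.
    print(inspect.signature(DemoKernel.__init__))
    # prints: (self, beta_a: float = 0.01, beta_l: float = 2.0)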
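
The ``EddyMotionGPR`` docstring above argues that ``alpha`` (:math:`\sigma^2`) is not a kernel hyperparameter, so analytical-gradient selection cannot act on it, and quotes [Andersson15]_ preferring the Nelder-Mead simplex for the multi-shell case. Below is a hedged sketch of that optimisation strategy using only stock Scikit-Learn and SciPy pieces: the ``RBF`` kernel and synthetic data are stand-ins (not the eddymotion kernels or API), ``nelder_mead_optimizer`` is an assumed helper name, and passing ``bounds`` to Nelder-Mead requires SciPy >= 1.7::

    import numpy as np
    from scipy.optimize import minimize
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import RBF


    def nelder_mead_optimizer(obj_func, initial_theta, bounds):
        """Scikit-Learn ``optimizer`` callable: must return (theta_opt, func_min)."""
        result = minimize(
            lambda theta: obj_func(theta, eval_gradient=False),  # scalar objective, no gradients
            initial_theta,
            method="Nelder-Mead",
            bounds=bounds,  # log-scale hyperparameter bounds provided by the kernel
        )
        return result.x, result.fun


    rng = np.random.default_rng(1234)
    X = rng.random((40, 3))
    y = np.sin(X.sum(axis=1)) + 0.05 * rng.standard_normal(40)

    # ``alpha`` (sigma^2) is fixed a priori; it is not part of ``kernel.theta``,
    # so neither gradient-based nor simplex optimisation will change it.
    gpr = GaussianProcessRegressor(kernel=RBF(1.0), alpha=0.5, optimizer=nelder_mead_optimizer)
    gpr.fit(X, y)
    print(gpr.kernel_)  # kernel with the Nelder-Mead-selected length scale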