From d1af0d2bd03e116bb01c2d375e440fc90b9706f6 Mon Sep 17 00:00:00 2001
From: Martin Stancsics
Date: Mon, 9 Sep 2024 13:28:02 +0200
Subject: [PATCH] Combine with other compatibility checks

---
 src/glum/_glm.py | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)

diff --git a/src/glum/_glm.py b/src/glum/_glm.py
index 0d962dd1..c3f6e134 100644
--- a/src/glum/_glm.py
+++ b/src/glum/_glm.py
@@ -27,7 +27,6 @@
 import scipy.sparse as sps
 import scipy.sparse.linalg as splinalg
 import sklearn as skl
-import sklearn.utils.validation
 import tabmat as tm
 from formulaic import Formula, FormulaSpec
 from formulaic.parser import DefaultFormulaParser
@@ -75,18 +74,14 @@
 
 if version.parse(skl.__version__).release < (1, 6):
     keyword_finiteness = "force_all_finite"
-else:
-    keyword_finiteness = "ensure_all_finite"
-
-if hasattr(sklearn.utils.validation, "validate_data"):
-    validate_data = sklearn.utils.validation.validate_data
-else:
+    _check_n_features = BaseEstimator._check_n_features
     validate_data = BaseEstimator._validate_data
-
-if hasattr(sklearn.utils.validation, "_check_n_features"):
-    _check_n_features = sklearn.utils.validation._check_n_features
 else:
-    _check_n_features = BaseEstimator._check_n_features
+    keyword_finiteness = "ensure_all_finite"
+    from sklearn.utils.validation import (  # type: ignore
+        _check_n_features,
+        validate_data,
+    )
 
 _float_itemsize_to_dtype = {8: np.float64, 4: np.float32, 2: np.float16}
 