diff --git a/pypots/base.py b/pypots/base.py index 71647319..d10c7c6e 100644 --- a/pypots/base.py +++ b/pypots/base.py @@ -349,7 +349,7 @@ def fit( train_set : The dataset for model training, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for training, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -358,7 +358,7 @@ def fit( val_set : The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -383,7 +383,7 @@ def predict( test_set : The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. 
If it is a path string, the path should point to a data file, e.g. a h5 file, which contains diff --git a/pypots/classification/base.py b/pypots/classification/base.py index 817302ec..a758fed3 100644 --- a/pypots/classification/base.py +++ b/pypots/classification/base.py @@ -81,7 +81,7 @@ def fit( train_set : The dataset for model training, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for training, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -90,7 +90,7 @@ def fit( val_set : The dataset for model validating, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -113,15 +113,15 @@ def predict( @abstractmethod def classify( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Classify the input data with the trained model. 
Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -132,8 +132,7 @@ def classify( array-like, shape [n_samples], Classification results of the given samples. """ - # this is for old API compatibility, will be removed in the future. - # Please implement predict() instead. + raise NotImplementedError @@ -395,7 +394,7 @@ def fit( train_set : The dataset for model training, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for training, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -404,7 +403,7 @@ def fit( val_set : The dataset for model validating, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. 
a h5 file, which contains @@ -427,19 +426,17 @@ def predict( @abstractmethod def classify( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Classify the input data with the trained model. - Warnings - -------- - The method classify is deprecated. Please use `predict()` instead. + Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -450,6 +447,5 @@ def classify( array-like, shape [n_samples], Classification results of the given samples. """ - # this is for old API compatibility, will be removed in the future. - # Please implement predict() instead. + raise NotImplementedError diff --git a/pypots/classification/brits/data.py b/pypots/classification/brits/data.py index ec296513..8d26afdc 100644 --- a/pypots/classification/brits/data.py +++ b/pypots/classification/brits/data.py @@ -20,7 +20,7 @@ class DatasetForBRITS(DatasetForBRITS_Imputation): data : The dataset for model input, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for input, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. 
a h5 file, which contains diff --git a/pypots/classification/brits/model.py b/pypots/classification/brits/model.py index 537970e8..b52bf4d5 100644 --- a/pypots/classification/brits/model.py +++ b/pypots/classification/brits/model.py @@ -17,7 +17,6 @@ from ..base import BaseNNClassifier from ...optim.adam import Adam from ...optim.base import Optimizer -from ...utils.logging import logger class BRITS(BaseNNClassifier): @@ -257,19 +256,15 @@ def predict( def classify( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Classify the input data with the trained model. - Warnings - -------- - The method classify is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -280,8 +275,6 @@ def classify( array-like, shape [n_samples], Classification results of the given samples. """ - logger.warning( - "🚨DeprecationWarning: The method classify is deprecated. Please use `predict` instead." - ) - result_dict = self.predict(X, file_type=file_type) + + result_dict = self.predict(test_set, file_type=file_type) return result_dict["classification"] diff --git a/pypots/classification/grud/data.py b/pypots/classification/grud/data.py index cf743993..fc23132e 100644 --- a/pypots/classification/grud/data.py +++ b/pypots/classification/grud/data.py @@ -23,7 +23,7 @@ class DatasetForGRUD(BaseDataset): data : The dataset for model input, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. 
- If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for input, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains diff --git a/pypots/classification/grud/model.py b/pypots/classification/grud/model.py index e7b2ef91..f6413d9e 100644 --- a/pypots/classification/grud/model.py +++ b/pypots/classification/grud/model.py @@ -18,7 +18,6 @@ from ..base import BaseNNClassifier from ...optim.adam import Adam from ...optim.base import Optimizer -from ...utils.logging import logger class GRUD(BaseNNClassifier): @@ -234,19 +233,15 @@ def predict( def classify( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Classify the input data with the trained model. - Warnings - -------- - The method classify is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -257,8 +252,6 @@ def classify( array-like, shape [n_samples], Classification results of the given samples. """ - logger.warning( - "🚨DeprecationWarning: The method classify is deprecated. Please use `predict` instead." 
- ) - result_dict = self.predict(X, file_type=file_type) + + result_dict = self.predict(test_set, file_type=file_type) return result_dict["classification"] diff --git a/pypots/classification/raindrop/data.py b/pypots/classification/raindrop/data.py index 429546ef..90150056 100644 --- a/pypots/classification/raindrop/data.py +++ b/pypots/classification/raindrop/data.py @@ -19,7 +19,7 @@ class DatasetForRaindrop(DatasetForGRUD): data : The dataset for model input, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for input, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains diff --git a/pypots/classification/raindrop/model.py b/pypots/classification/raindrop/model.py index 58f90930..78d64267 100644 --- a/pypots/classification/raindrop/model.py +++ b/pypots/classification/raindrop/model.py @@ -19,7 +19,6 @@ from ...classification.base import BaseNNClassifier from ...optim.adam import Adam from ...optim.base import Optimizer -from ...utils.logging import logger class Raindrop(BaseNNClassifier): @@ -279,19 +278,15 @@ def predict( def classify( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Classify the input data with the trained model. - Warnings - -------- - The method classify is deprecated. Please use `predict()` instead. 
- Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -302,8 +297,6 @@ def classify( array-like, shape [n_samples], Classification results of the given samples. """ - logger.warning( - "🚨DeprecationWarning: The method classify is deprecated. Please use `predict` instead." - ) - result_dict = self.predict(X, file_type=file_type) + + result_dict = self.predict(test_set, file_type=file_type) return result_dict["classification"] diff --git a/pypots/classification/template/model.py b/pypots/classification/template/model.py index c1cf194f..dec46806 100644 --- a/pypots/classification/template/model.py +++ b/pypots/classification/template/model.py @@ -88,3 +88,10 @@ def predict( file_type: str = "hdf5", ) -> dict: raise NotImplementedError + + def classify( + self, + test_set: Union[dict, str], + file_type: str = "hdf5", + ) -> dict: + raise NotImplementedError diff --git a/pypots/clustering/base.py b/pypots/clustering/base.py index 2ecc46e3..bdf68645 100644 --- a/pypots/clustering/base.py +++ b/pypots/clustering/base.py @@ -81,7 +81,7 @@ def fit( train_set : The dataset for model training, should be a dictionary including the key 'X', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for training, can contain missing values. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains key-value pairs like a dict, and it has to include the key 'X'. 
@@ -89,7 +89,7 @@ def fit( val_set : The dataset for model validating, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -112,15 +112,15 @@ def predict( @abstractmethod def cluster( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Cluster the input with the trained model. Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -131,8 +131,7 @@ def cluster( array-like, Clustering results. """ - # this is for old API compatibility, will be removed in the future. - # Please implement predict() instead. + raise NotImplementedError @@ -388,7 +387,7 @@ def fit( train_set : The dataset for model training, should be a dictionary including the key 'X', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for training, can contain missing values. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains key-value pairs like a dict, and it has to include the key 'X'. 
@@ -396,7 +395,7 @@ def fit( val_set : The dataset for model validating, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -419,19 +418,17 @@ def predict( @abstractmethod def cluster( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Cluster the input with the trained model. - Warnings - -------- - The method cluster is deprecated. Please use `predict()` instead. + Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -442,6 +439,5 @@ def cluster( array-like, Clustering results. """ - # this is for old API compatibility, will be removed in the future. - # Please implement predict() instead. + raise NotImplementedError diff --git a/pypots/clustering/crli/data.py b/pypots/clustering/crli/data.py index d64a8ae9..5a252a66 100644 --- a/pypots/clustering/crli/data.py +++ b/pypots/clustering/crli/data.py @@ -19,7 +19,7 @@ class DatasetForCRLI(BaseDataset): data : The dataset for model input, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. 
- If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for input, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains diff --git a/pypots/clustering/crli/model.py b/pypots/clustering/crli/model.py index f6076f0c..abe4e655 100644 --- a/pypots/clustering/crli/model.py +++ b/pypots/clustering/crli/model.py @@ -381,7 +381,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -441,19 +441,15 @@ def predict( def cluster( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Cluster the input with the trained model. - Warnings - -------- - The method cluster is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -465,9 +461,6 @@ def cluster( Clustering results. 
""" - logger.warning( - "🚨DeprecationWarning: The method cluster is deprecated. Please use `predict` instead." - ) - result_dict = self.predict(X, file_type) + result_dict = self.predict(test_set, file_type=file_type) return result_dict["clustering"] diff --git a/pypots/clustering/vader/data.py b/pypots/clustering/vader/data.py index 21b27024..fdfdb497 100644 --- a/pypots/clustering/vader/data.py +++ b/pypots/clustering/vader/data.py @@ -19,7 +19,7 @@ class DatasetForVaDER(BaseDataset): data : The dataset for model input, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for input, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains diff --git a/pypots/clustering/vader/model.py b/pypots/clustering/vader/model.py index 3a895ca8..cfc85f97 100644 --- a/pypots/clustering/vader/model.py +++ b/pypots/clustering/vader/model.py @@ -394,7 +394,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. 
a h5 file, which contains @@ -493,19 +493,15 @@ def func_to_apply( def cluster( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> Union[np.ndarray]: """Cluster the input with the trained model. - Warnings - -------- - The method cluster is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -517,9 +513,6 @@ def cluster( Clustering results. """ - logger.warning( - "🚨DeprecationWarning: The method cluster is deprecated. Please use `predict` instead." - ) - result_dict = self.predict(X, file_type) + result_dict = self.predict(test_set, file_type=file_type) return result_dict["clustering"] diff --git a/pypots/data/dataset/base.py b/pypots/data/dataset/base.py index 9388c351..b2cbbbf7 100644 --- a/pypots/data/dataset/base.py +++ b/pypots/data/dataset/base.py @@ -255,12 +255,17 @@ def _check_array_input( Parameters ---------- X : - Time-series data that must have a shape like [n_samples, expected_n_steps, expected_n_features]. + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), + n_features], or a path string locating a data file, e.g. h5 file. X_ori : If X is with artificial missingness, X_ori is the original X without artificial missing values. It must have the same shape as X. If X_ori is with original missing values, should be left as NaN. + X_pred : + The forecasting results of X, should be array-like of shape [n_samples, sequence length (n_steps), + n_features], or a path string locating a data file, e.g. h5 file. + y : Labels of time-series samples (X) that must have a shape like [n_samples] or [n_samples, n_classes]. 
diff --git a/pypots/forecasting/base.py b/pypots/forecasting/base.py index f1ef2e8f..0b8a153d 100644 --- a/pypots/forecasting/base.py +++ b/pypots/forecasting/base.py @@ -76,7 +76,7 @@ def fit( train_set : The dataset for model training, should be a dictionary including the key 'X', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for training, can contain missing values. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains key-value pairs like a dict, and it has to include the key 'X'. @@ -84,7 +84,7 @@ def fit( val_set : The dataset for model validating, should be a dictionary including the key 'X', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validation, can contain missing values. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains key-value pairs like a dict, and it has to include the key 'X'. @@ -106,26 +106,26 @@ def predict( @abstractmethod def forecast( self, - X: dict or str, + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Forecast the future the input with the trained model. Parameters ---------- - X : - Time-series data containing missing values. Shape [n_samples, sequence length (time steps), n_features]. + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), + n_features], or a path string locating a data file, e.g. h5 file. file_type : The type of the given file if X is a path string. 
Returns ------- - array-like, shape [n_samples, prediction_horizon, n_features], + array-like, shape [n_samples, n_pred_steps, n_features], Forecasting results. """ - # this is for old API compatibility, will be removed in the future. - # Please implement predict() instead. + raise NotImplementedError @@ -389,7 +389,7 @@ def fit( train_set : The dataset for model training, should be a dictionary including the key 'X', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for training, can contain missing values. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains key-value pairs like a dict, and it has to include the key 'X'. @@ -397,7 +397,7 @@ def fit( val_set : The dataset for model validating, should be a dictionary including the key 'X', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validation, can contain missing values. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains key-value pairs like a dict, and it has to include the key 'X'. @@ -419,28 +419,23 @@ def predict( @abstractmethod def forecast( self, - X: dict or str, + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Forecast the future the input with the trained model. - Warnings - -------- - The method forecast is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - Time-series data containing missing values. Shape [n_samples, sequence length (time steps), n_features]. 
+ test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), + n_features], or a path string locating a data file, e.g. h5 file. file_type : The type of the given file if X is a path string. Returns ------- - array-like, shape [n_samples, prediction_horizon, n_features], + array-like, shape [n_samples, n_pred_steps, n_features], Forecasting results. """ - # this is for old API compatibility, will be removed in the future. - # Please implement predict() instead. raise NotImplementedError diff --git a/pypots/forecasting/bttf/model.py b/pypots/forecasting/bttf/model.py index 40d5e714..af900f10 100644 --- a/pypots/forecasting/bttf/model.py +++ b/pypots/forecasting/bttf/model.py @@ -15,7 +15,6 @@ from .core import BTTF_forecast from ..base import BaseForecaster -from ...utils.logging import logger class BTTF(BaseForecaster): @@ -131,31 +130,25 @@ def predict( def forecast( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: - """Forecast the future the input with the trained model. - - Warnings - -------- - The method forecast is deprecated. Please use `predict()` instead. + """Forecast the future of the input with the trained model. Parameters ---------- - X : - Time-series data containing missing values. Shape [n_samples, sequence length (time steps), n_features]. + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), + n_features], or a path string locating a data file, e.g. h5 file. file_type : The type of the given file if X is a path string. Returns ------- - array-like, shape [n_samples, prediction_horizon, n_features], + array-like, shape [n_samples, n_pred_steps, n_features], Forecasting results. """ - logger.warning( - "🚨DeprecationWarning: The method forecast is deprecated. Please use `predict` instead." 
- ) - result_dict = self.predict(X, file_type=file_type) - forecasting = result_dict["forecasting"] - return forecasting + + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["forecasting"] diff --git a/pypots/forecasting/csdi/model.py b/pypots/forecasting/csdi/model.py index 21db4719..77d32d86 100644 --- a/pypots/forecasting/csdi/model.py +++ b/pypots/forecasting/csdi/model.py @@ -398,7 +398,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -458,19 +458,15 @@ def predict( def forecast( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: - """Impute missing values in the given data with the trained model. - - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. + """Forecast the future of the input with the trained model. Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -478,11 +474,9 @@ def forecast( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], - Imputed data. + array-like, shape [n_samples, n_pred_steps, n_features], + Forecasting results. 
""" - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." - ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["forecasting"] + + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["forecasting"] diff --git a/pypots/imputation/autoformer/model.py b/pypots/imputation/autoformer/model.py index fb695885..dcdc8b64 100644 --- a/pypots/imputation/autoformer/model.py +++ b/pypots/imputation/autoformer/model.py @@ -19,7 +19,6 @@ from ...data.dataset import BaseDataset from ...optim.adam import Adam from ...optim.base import Optimizer -from ...utils.logging import logger class Autoformer(BaseNNImputer): @@ -249,7 +248,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -297,19 +296,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. 
file_type : @@ -317,12 +312,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." - ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/base.py b/pypots/imputation/base.py index 18218c3e..f08d310f 100644 --- a/pypots/imputation/base.py +++ b/pypots/imputation/base.py @@ -77,7 +77,7 @@ def fit( train_set : The dataset for model training, should be a dictionary including the key 'X', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for training, can contain missing values. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains key-value pairs like a dict, and it has to include the key 'X'. @@ -85,7 +85,7 @@ def fit( val_set : The dataset for model validating, should be a dictionary including the key 'X', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains key-value pairs like a dict, and it has to include the key 'X'. 
@@ -107,15 +107,15 @@ def predict( @abstractmethod def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -123,11 +123,10 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - # this is for old API compatibility, will be removed in the future. - # Please implement predict() instead. + raise NotImplementedError @@ -392,7 +391,7 @@ def fit( train_set : The dataset for model training, should be a dictionary including the key 'X', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for training, can contain missing values. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains key-value pairs like a dict, and it has to include the key 'X'. @@ -400,7 +399,7 @@ def fit( val_set : The dataset for model validating, should be a dictionary including the key 'X', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values. 
If it is a path string, the path should point to a data file, e.g. a h5 file, which contains key-value pairs like a dict, and it has to include the key 'X'. @@ -422,19 +421,15 @@ def predict( @abstractmethod def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -442,9 +437,8 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - # this is for old API compatibility, will be removed in the future. - # Please implement predict() instead. + raise NotImplementedError diff --git a/pypots/imputation/brits/data.py b/pypots/imputation/brits/data.py index 72915017..285bea47 100644 --- a/pypots/imputation/brits/data.py +++ b/pypots/imputation/brits/data.py @@ -22,7 +22,7 @@ class DatasetForBRITS(BaseDataset): data : The dataset for model input, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for input, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. 
a h5 file, which contains diff --git a/pypots/imputation/brits/model.py b/pypots/imputation/brits/model.py index 7c382402..5f1676cf 100644 --- a/pypots/imputation/brits/model.py +++ b/pypots/imputation/brits/model.py @@ -18,7 +18,6 @@ from ...data.checking import key_in_data_set from ...optim.adam import Adam from ...optim.base import Optimizer -from ...utils.logging import logger class BRITS(BaseNNImputer): @@ -251,19 +250,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -271,11 +266,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." 
- ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/crossformer/model.py b/pypots/imputation/crossformer/model.py index cd248096..7db9aaba 100644 --- a/pypots/imputation/crossformer/model.py +++ b/pypots/imputation/crossformer/model.py @@ -19,7 +19,6 @@ from ...data.dataset import BaseDataset from ...optim.adam import Adam from ...optim.base import Optimizer -from ...utils.logging import logger class Crossformer(BaseNNImputer): @@ -255,7 +254,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -303,19 +302,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. 
file_type : @@ -323,12 +318,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." - ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/csdi/model.py b/pypots/imputation/csdi/model.py index fca7d554..33c1535b 100644 --- a/pypots/imputation/csdi/model.py +++ b/pypots/imputation/csdi/model.py @@ -382,7 +382,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -435,19 +435,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. 
file_type : @@ -455,11 +451,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." - ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/dlinear/model.py b/pypots/imputation/dlinear/model.py index 5eb969bb..3721eead 100644 --- a/pypots/imputation/dlinear/model.py +++ b/pypots/imputation/dlinear/model.py @@ -19,7 +19,6 @@ from ...data.dataset import BaseDataset from ...optim.adam import Adam from ...optim.base import Optimizer -from ...utils.logging import logger class DLinear(BaseNNImputer): @@ -226,7 +225,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -274,19 +273,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. 
- Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -294,12 +289,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." - ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/etsformer/model.py b/pypots/imputation/etsformer/model.py index 6a6fae30..6dbb2fbc 100644 --- a/pypots/imputation/etsformer/model.py +++ b/pypots/imputation/etsformer/model.py @@ -19,7 +19,6 @@ from ...data.dataset import BaseDataset from ...optim.adam import Adam from ...optim.base import Optimizer -from ...utils.logging import logger class ETSformer(BaseNNImputer): @@ -249,7 +248,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. 
a h5 file, which contains @@ -297,19 +296,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -317,12 +312,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." - ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/fedformer/model.py b/pypots/imputation/fedformer/model.py index ea750e9b..2d8ca073 100644 --- a/pypots/imputation/fedformer/model.py +++ b/pypots/imputation/fedformer/model.py @@ -19,7 +19,6 @@ from ...data.dataset import BaseDataset from ...optim.adam import Adam from ...optim.base import Optimizer -from ...utils.logging import logger class FEDformer(BaseNNImputer): @@ -263,7 +262,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). 
- If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -311,19 +310,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -331,12 +326,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." - ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/gpvae/data.py b/pypots/imputation/gpvae/data.py index 66665c9e..af61ace3 100644 --- a/pypots/imputation/gpvae/data.py +++ b/pypots/imputation/gpvae/data.py @@ -20,7 +20,7 @@ class DatasetForGPVAE(BaseDataset): data : The dataset for model input, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. 
- If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for input, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains diff --git a/pypots/imputation/gpvae/model.py b/pypots/imputation/gpvae/model.py index 5e8058bb..0af6a73d 100644 --- a/pypots/imputation/gpvae/model.py +++ b/pypots/imputation/gpvae/model.py @@ -404,7 +404,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -454,20 +454,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", - n_sampling_times: int = 1, ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. 
file_type : @@ -475,13 +470,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." - ) - results_dict = self.predict( - X, file_type=file_type, n_sampling_times=n_sampling_times - ) + + results_dict = self.predict(test_set, file_type=file_type) return results_dict["imputation"] diff --git a/pypots/imputation/informer/model.py b/pypots/imputation/informer/model.py index 457a383e..85b2b1be 100644 --- a/pypots/imputation/informer/model.py +++ b/pypots/imputation/informer/model.py @@ -19,7 +19,6 @@ from ...data.dataset import BaseDataset from ...optim.adam import Adam from ...optim.base import Optimizer -from ...utils.logging import logger class Informer(BaseNNImputer): @@ -243,7 +242,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -291,19 +290,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. 
- Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -311,12 +306,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." - ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/locf/model.py b/pypots/imputation/locf/model.py index b88e9e7a..d20ebcfc 100644 --- a/pypots/imputation/locf/model.py +++ b/pypots/imputation/locf/model.py @@ -15,7 +15,6 @@ from .core import locf_numpy, locf_torch from ..base import BaseImputer -from ...utils.logging import logger class LOCF(BaseImputer): @@ -82,7 +81,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. 
a h5 file, which contains @@ -127,19 +126,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -147,11 +142,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." - ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/mean/model.py b/pypots/imputation/mean/model.py index 33582f8d..129f15ec 100644 --- a/pypots/imputation/mean/model.py +++ b/pypots/imputation/mean/model.py @@ -14,7 +14,6 @@ import torch from ..base import BaseImputer -from ...utils.logging import logger class Mean(BaseImputer): @@ -56,7 +55,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). 
- If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -113,19 +112,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -133,11 +128,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." 
- ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/median/model.py b/pypots/imputation/median/model.py index 6295aa5f..ffa315e4 100644 --- a/pypots/imputation/median/model.py +++ b/pypots/imputation/median/model.py @@ -14,7 +14,6 @@ import torch from ..base import BaseImputer -from ...utils.logging import logger class Median(BaseImputer): @@ -56,7 +55,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -114,19 +113,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. 
file_type : @@ -134,11 +129,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." - ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/mrnn/data.py b/pypots/imputation/mrnn/data.py index 2d9a39d7..624efa3c 100644 --- a/pypots/imputation/mrnn/data.py +++ b/pypots/imputation/mrnn/data.py @@ -22,7 +22,7 @@ class DatasetForMRNN(BaseDataset): data : The dataset for model input, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for input, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains diff --git a/pypots/imputation/mrnn/model.py b/pypots/imputation/mrnn/model.py index 46f35a03..e3527432 100644 --- a/pypots/imputation/mrnn/model.py +++ b/pypots/imputation/mrnn/model.py @@ -19,7 +19,6 @@ from ...data.checking import key_in_data_set from ...optim.adam import Adam from ...optim.base import Optimizer -from ...utils.logging import logger class MRNN(BaseNNImputer): @@ -253,19 +252,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. 
Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -273,11 +268,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." - ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/patchtst/model.py b/pypots/imputation/patchtst/model.py index 5d3f4bf4..f4033a49 100644 --- a/pypots/imputation/patchtst/model.py +++ b/pypots/imputation/patchtst/model.py @@ -280,7 +280,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -328,19 +328,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. 
- Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -348,12 +344,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." - ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/saits/data.py b/pypots/imputation/saits/data.py index ebc1b163..d8893207 100644 --- a/pypots/imputation/saits/data.py +++ b/pypots/imputation/saits/data.py @@ -23,7 +23,7 @@ class DatasetForSAITS(BaseDataset): data : The dataset for model input, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for input, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. 
a h5 file, which contains diff --git a/pypots/imputation/saits/model.py b/pypots/imputation/saits/model.py index a1ce27cb..ad0fd97b 100644 --- a/pypots/imputation/saits/model.py +++ b/pypots/imputation/saits/model.py @@ -286,7 +286,7 @@ def predict( test_set : The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -370,19 +370,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -390,12 +386,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." 
- ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/timesnet/model.py b/pypots/imputation/timesnet/model.py index 846a6c5a..34aba691 100644 --- a/pypots/imputation/timesnet/model.py +++ b/pypots/imputation/timesnet/model.py @@ -15,11 +15,10 @@ from .core import _TimesNet from .data import DatasetForTimesNet from ..base import BaseNNImputer -from ...data.dataset import BaseDataset from ...data.checking import key_in_data_set +from ...data.dataset import BaseDataset from ...optim.adam import Adam from ...optim.base import Optimizer -from ...utils.logging import logger class TimesNet(BaseNNImputer): @@ -239,7 +238,7 @@ def predict( test_set : dict or str The dataset for model validating, should be a dictionary including keys as 'X', or a path string locating a data file supported by PyPOTS (e.g. h5 file). - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for validating, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains @@ -287,19 +286,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. 
- Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -307,12 +302,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." - ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/transformer/model.py b/pypots/imputation/transformer/model.py index 047caccc..76d04fac 100644 --- a/pypots/imputation/transformer/model.py +++ b/pypots/imputation/transformer/model.py @@ -296,19 +296,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. file_type : @@ -316,11 +312,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." 
- ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"] diff --git a/pypots/imputation/usgan/data.py b/pypots/imputation/usgan/data.py index 9c50bffb..570ff6a2 100644 --- a/pypots/imputation/usgan/data.py +++ b/pypots/imputation/usgan/data.py @@ -18,7 +18,7 @@ class DatasetForUSGAN(DatasetForBRITS): data : The dataset for model input, should be a dictionary including keys as 'X' and 'y', or a path string locating a data file. - If it is a dict, X should be array-like of shape [n_samples, sequence length (time steps), n_features], + If it is a dict, X should be array-like of shape [n_samples, sequence length (n_steps), n_features], which is time-series data for input, can contain missing values, and y should be array-like of shape [n_samples], which is classification labels of X. If it is a path string, the path should point to a data file, e.g. a h5 file, which contains diff --git a/pypots/imputation/usgan/model.py b/pypots/imputation/usgan/model.py index 7a4aeded..69f3bdd1 100644 --- a/pypots/imputation/usgan/model.py +++ b/pypots/imputation/usgan/model.py @@ -437,19 +437,15 @@ def predict( def impute( self, - X: Union[dict, str], + test_set: Union[dict, str], file_type: str = "hdf5", ) -> np.ndarray: """Impute missing values in the given data with the trained model. - Warnings - -------- - The method impute is deprecated. Please use `predict()` instead. - Parameters ---------- - X : - The data samples for testing, should be array-like of shape [n_samples, sequence length (time steps), + test_set : + The data samples for testing, should be array-like of shape [n_samples, sequence length (n_steps), n_features], or a path string locating a data file, e.g. h5 file. 
file_type : @@ -457,11 +453,9 @@ def impute( Returns ------- - array-like, shape [n_samples, sequence length (time steps), n_features], + array-like, shape [n_samples, sequence length (n_steps), n_features], Imputed data. """ - logger.warning( - "🚨DeprecationWarning: The method impute is deprecated. Please use `predict` instead." - ) - results_dict = self.predict(X, file_type=file_type) - return results_dict["imputation"] + + result_dict = self.predict(test_set, file_type=file_type) + return result_dict["imputation"]