From a341d2045795af084adf0448d227eca3d5d48044 Mon Sep 17 00:00:00 2001
From: Ashish Peruri
Date: Tue, 6 Aug 2024 15:48:24 -0700
Subject: [PATCH] fix CI

NumPy 2.0 removed the capitalized np.Inf alias, so these call sites
raise AttributeError under newer NumPy. Switch to the lowercase
np.inf, which works across NumPy versions.

---
 mabwiser/mab.py       | 4 ++--
 tests/test_invalid.py | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/mabwiser/mab.py b/mabwiser/mab.py
index 7d8f95e..d2aa4a6 100755
--- a/mabwiser/mab.py
+++ b/mabwiser/mab.py
@@ -1295,7 +1295,7 @@ def _validate_mab_args(arms, learning_policy, neighborhood_policy, seed, n_jobs,
         check_true(isinstance(arms, list), TypeError("The arms should be provided in a list."))
         check_false(None in arms, ValueError("The arm list cannot contain None."))
         check_false(np.nan in arms, ValueError("The arm list cannot contain NaN."))
-        check_false(np.Inf in arms, ValueError("The arm list cannot contain Infinity."))
+        check_false(np.inf in arms, ValueError("The arm list cannot contain Infinity."))
         check_true(len(arms) == len(set(arms)), ValueError("The list of arms cannot contain duplicate values."))

         # Learning Policy type
@@ -1405,7 +1405,7 @@ def _validate_arm(arm):
         """
         check_false(arm is None, ValueError("The arm cannot be None."))
         check_false(np.nan in [arm], ValueError("The arm cannot be NaN."))
-        check_false(np.Inf in [arm], ValueError("The arm cannot be Infinity."))
+        check_false(np.inf in [arm], ValueError("The arm cannot be Infinity."))

     @staticmethod
     def _convert_array(array_like) -> np.ndarray:
diff --git a/tests/test_invalid.py b/tests/test_invalid.py
index 4bfcb09..e0564f4 100755
--- a/tests/test_invalid.py
+++ b/tests/test_invalid.py
@@ -476,14 +476,14 @@ def test_rewards_null_df(self):

     def test_rewards_inf_array(self):
         decisions = np.asarray([1, 1, 1, 2, 2, 2, 3, 3, 3])
-        rewards = np.asarray([0, 0, 0, 0, 0, 0, 1, 1, np.Inf])
+        rewards = np.asarray([0, 0, 0, 0, 0, 0, 1, 1, np.inf])
         mab = MAB([1, 2, 3], LearningPolicy.EpsilonGreedy(epsilon=0))
         with self.assertRaises(TypeError):
             mab.fit(decisions, rewards)

     def test_rewards_inf_df(self):
         history = pd.DataFrame({'decision': [1, 1, 1, 2, 2, 2, 3, 3, 3],
-                                'reward': [0, 0, 0, 0, 0, 0, 1, 1, np.Inf]})
+                                'reward': [0, 0, 0, 0, 0, 0, 1, 1, np.inf]})
         mab = MAB([1, 2, 3], LearningPolicy.EpsilonGreedy(epsilon=0))
         with self.assertRaises(TypeError):
             mab.fit(history['decision'], history['reward'])
@@ -562,7 +562,7 @@ def test_invalid_add_arm(self):
         with self.assertRaises(ValueError):
             mab.add_arm(np.nan)
         with self.assertRaises(ValueError):
-            mab.add_arm(np.Inf)
+            mab.add_arm(np.inf)
         with self.assertRaises(ValueError):
             mab.add_arm(3)
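
Note: a minimal sketch of the underlying incompatibility, assuming the CI
environment installs NumPy >= 2.0 (where the capitalized alias was removed):

    import numpy as np

    # On NumPy >= 2.0 the capitalized alias no longer exists, so this
    # raises AttributeError; on older NumPy it still evaluates to inf.
    try:
        value = np.Inf
    except AttributeError:
        value = np.inf  # the lowercase spelling works on every NumPy version

    print(value == float("inf"))  # True on both old and new NumPy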