diff --git a/examples/mqoptimizer.py b/examples/mqoptimizer.py
index b5c6960..edb83c2 100644
--- a/examples/mqoptimizer.py
+++ b/examples/mqoptimizer.py
@@ -19,7 +19,7 @@
 model = "lightgbm"  # Options: "lightgbm" or "xgboost"
 
 # Set objective function
-objective = "check"  # Options: "check", "huber", or "phuber"
+objective = "check"  # Options: "check", "huber", or "approx"
 
 # Set dataset
 train_dataset = MQDataset(data=x, label=y, alphas=alphas, model=model)
diff --git a/examples/mqregressor.py b/examples/mqregressor.py
index 6e258c4..2dfa811 100644
--- a/examples/mqregressor.py
+++ b/examples/mqregressor.py
@@ -16,8 +16,8 @@
 model = "lightgbm"  # Options: "lightgbm" or "xgboost"
 
 # Set objective function
-objective = "huber"  # Options: "check", "huber", or "phuber"
-delta = 0.01  # Set when objective is "huber" or "phuber", default is 0.01
+objective = "huber"  # Options: "check", "huber", or "approx"
+delta = 0.01  # Set when objective is "huber", default is 0.01
 
 # Train the model with fixed parameters
 # Initialize the LightGBM-based quantile regressor
diff --git a/mqboost/__init__.py b/mqboost/__init__.py
index 9953caa..02b8039 100644
--- a/mqboost/__init__.py
+++ b/mqboost/__init__.py
@@ -1,3 +1,4 @@
+# flake8: noqa
 from mqboost.dataset import MQDataset
 from mqboost.optimize import MQOptimizer
 from mqboost.regressor import MQRegressor
diff --git a/mqboost/objective.py b/mqboost/objective.py
index 900effc..3de5c5f 100644
--- a/mqboost/objective.py
+++ b/mqboost/objective.py
@@ -46,29 +46,29 @@ def _rho(u: np.ndarray, alpha: float) -> np.ndarray:
     return -u * _grad_rho(u=u, alpha=alpha)
 
 
-def _grad_majorizer(u: np.ndarray, alpha: float, epsilon: float = 1e-5):
+def _grad_approx(u: np.ndarray, alpha: float, epsilon: float = 1e-5):
     """
-    Compute the gradient of the majorizer of the smooth approximated check loss function.
+    Compute the gradient of the smooth approximation of the check loss function.
     Args:
         u (np.ndarray): The error term.
         alpha (float): The quantile level.
         epsilon (float, optional): The perturbation imposing smoothness. Defaults to 1e-5.
     Returns:
-        np.ndarray: The gradient of the majorizer of the smooth approximated check loss function.
+        np.ndarray: The gradient of the smooth approximation of the check loss function.
     """
-    _grad = (1 - 2 * alpha - u / (epsilon + np.abs(u))) / 2
+    _grad = 0.5 * (1 - 2 * alpha - u / (epsilon + np.abs(u)))
     return _grad
 
 
-def _hess_majorizer(u: np.ndarray, alpha: float, epsilon: float = 1e-5):
+def _hess_approx(u: np.ndarray, alpha: float, epsilon: float = 1e-5):
     """
-    Compute the Hessian of the majorizer of the smooth approximated check loss function.
+    Compute the Hessian of the smooth approximation of the check loss function.
     Args:
         u (np.ndarray): The error term.
         alpha (float): The quantile level.
         epsilon (float, optional): The perturbation imposing smoothness. Defaults to 1e-5.
     Returns:
-        np.ndarray: The Hessian of the majorizer of the smooth approximated check loss function.
+        np.ndarray: The Hessian of the smooth approximation of the check loss function.
""" _hess = 1 / (2 * (epsilon + np.abs(u))) return _hess @@ -163,8 +163,8 @@ def _compute_grads_hess( huber_loss_grad_hess: Callable = partial( _compute_grads_hess, grad_fn=_grad_huber, hess_fn=_hess_rho ) -majorizer_loss_grad_hess: Callable = partial( - _compute_grads_hess, grad_fn=_grad_majorizer, hess_fn=_hess_majorizer +approx_loss_grad_hess: Callable = partial( + _compute_grads_hess, grad_fn=_grad_approx, hess_fn=_hess_approx ) @@ -255,6 +255,7 @@ def __init__( objective: ObjectiveName, model: ModelName, delta: float, + epsilon: float, ) -> None: """Initialize the MQObjective.""" if objective == ObjectiveName.huber: @@ -263,10 +264,7 @@ def __init__( elif objective == ObjectiveName.check: self._fobj = partial(check_loss_grad_hess, alphas=alphas) elif objective == ObjectiveName.approx: - self._delta = delta_validate(delta=delta) - self._fobj = partial( - majorizer_loss_grad_hess, alphas=alphas, epsilon=self._epsilon - ) + self._fobj = partial(approx_loss_grad_hess, alphas=alphas, epsilon=epsilon) self._eval_name = CHECK_LOSS if model == ModelName.lightgbm: diff --git a/mqboost/regressor.py b/mqboost/regressor.py index ebca184..075352f 100644 --- a/mqboost/regressor.py +++ b/mqboost/regressor.py @@ -45,7 +45,7 @@ def __init__( model: str = ModelName.lightgbm.value, objective: str = ObjectiveName.check.value, delta: float = 0.01, - epsilon: float = 1e-5 + epsilon: float = 1e-5, ) -> None: """Initialize the MQRegressor.""" self._params = params @@ -81,6 +81,7 @@ def fit( objective=self._objective, model=self._model, delta=self._delta, + epsilon=self._epsilon, ) if self.__is_lgb: params.update({MQStr.obj.value: self._MQObj.fobj})