Commit: update approx
RektPunk committed Aug 19, 2024
1 parent 748378c commit 82172cb
Showing 4 changed files with 16 additions and 17 deletions.
2 changes: 1 addition & 1 deletion examples/mqoptimizer.py
@@ -19,7 +19,7 @@
 model = "lightgbm"  # Options: "lightgbm" or "xgboost"

 # Set objective function
-objective = "check"  # Options: "check", "huber", or "phuber"
+objective = "check"  # Options: "check", "huber", or "approx"

 # Set dataset
 train_dataset = MQDataset(data=x, label=y, alphas=alphas, model=model)
4 changes: 2 additions & 2 deletions examples/mqregressor.py
@@ -16,8 +16,8 @@
 model = "lightgbm"  # Options: "lightgbm" or "xgboost"

 # Set objective function
-objective = "huber"  # Options: "check", "huber", or "phuber"
-delta = 0.01  # Set when objective is "huber" or "phuber", default is 0.01
+objective = "huber"  # Options: "check", "huber", or "approx"
+delta = 0.01  # Set when objective is "huber", default is 0.01

 # Train the model with fixed parameters
 # Initialize the LightGBM-based quantile regressor
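For context, here is a minimal usage sketch of the renamed option, pieced together from the two example diffs above. The `mqboost` import path, the booster `params`, and the exact `fit` signature are assumptions not shown in this commit:

```python
import numpy as np
from mqboost import MQDataset, MQRegressor  # import path assumed

x = np.random.randn(100, 4)
y = np.random.randn(100)
alphas = [0.1, 0.5, 0.9]

# Dataset construction as in examples/mqoptimizer.py
train_dataset = MQDataset(data=x, label=y, alphas=alphas, model="lightgbm")

# "approx" replaces the old "phuber" option; epsilon is its smoothing term
regressor = MQRegressor(
    params={"learning_rate": 0.05},  # illustrative booster parameters
    model="lightgbm",
    objective="approx",
    epsilon=1e-5,
)
regressor.fit(dataset=train_dataset)  # fit signature assumed
```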
24 changes: 11 additions & 13 deletions mqboost/objective.py
@@ -46,29 +46,29 @@ def _rho(u: np.ndarray, alpha: float) -> np.ndarray:
     return -u * _grad_rho(u=u, alpha=alpha)


-def _grad_majorizer(u: np.ndarray, alpha: float, epsilon: float = 1e-5):
+def _grad_approx(u: np.ndarray, alpha: float, epsilon: float = 1e-5):
     """
-    Compute the gradient of the majorizer of the smooth approximated check loss function.
+    Compute the gradient of the approx of the smooth approximated check loss function.
     Args:
         u (np.ndarray): The error term.
         alpha (float): The quantile level.
         epsilon (float, optional): The perturbation imposing smoothness. Defaults to 1e-5.
     Returns:
-        np.ndarray: The gradient of the majorizer of the smooth approximated check loss function.
+        np.ndarray: The gradient of the approx of the smooth approximated check loss function.
     """
-    _grad = (1 - 2 * alpha - u / (epsilon + np.abs(u))) / 2
+    _grad = 0.5 * (1 - 2 * alpha - u / (epsilon + np.abs(u)))
     return _grad


-def _hess_majorizer(u: np.ndarray, alpha: float, epsilon: float = 1e-5):
+def _hess_approx(u: np.ndarray, alpha: float, epsilon: float = 1e-5):
     """
-    Compute the Hessian of the majorizer of the smooth approximated check loss function.
+    Compute the Hessian of the approx of the smooth approximated check loss function.
     Args:
         u (np.ndarray): The error term.
         alpha (float): The quantile level.
         epsilon (float, optional): The perturbation imposing smoothness. Defaults to 1e-5.
     Returns:
-        np.ndarray: The Hessian of the majorizer of the smooth approximated check loss function.
+        np.ndarray: The Hessian of the approx of the smooth approximated check loss function.
     """
     _hess = 1 / (2 * (epsilon + np.abs(u)))
     return _hess
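The pair of functions above implements a smoothed check loss: replacing |u| with epsilon + |u| removes the kink at u = 0, so the gradient moves continuously between 1 - alpha and -alpha and the Hessian stays strictly positive, which second-order boosters need. A self-contained numeric sketch, assuming u is the error y_true - y_pred:

```python
import numpy as np

def grad_approx(u: np.ndarray, alpha: float, epsilon: float = 1e-5) -> np.ndarray:
    # Tends to 1 - alpha for u << 0 and to -alpha for u >> 0
    return 0.5 * (1 - 2 * alpha - u / (epsilon + np.abs(u)))

def hess_approx(u: np.ndarray, alpha: float, epsilon: float = 1e-5) -> np.ndarray:
    # Strictly positive; peaks at 1 / (2 * epsilon) when u == 0
    return 1 / (2 * (epsilon + np.abs(u)))

u = np.array([-1.0, -0.01, 0.0, 0.01, 1.0])
print(grad_approx(u, alpha=0.9))  # ~[0.1, 0.0995, -0.4, -0.8995, -0.9]
print(hess_approx(u, alpha=0.9))  # ~[0.5, 49.95, 50000.0, 49.95, 0.5]
```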
@@ -163,8 +163,8 @@ def _compute_grads_hess(
 huber_loss_grad_hess: Callable = partial(
     _compute_grads_hess, grad_fn=_grad_huber, hess_fn=_hess_rho
 )
-majorizer_loss_grad_hess: Callable = partial(
-    _compute_grads_hess, grad_fn=_grad_majorizer, hess_fn=_hess_majorizer
+approx_loss_grad_hess: Callable = partial(
+    _compute_grads_hess, grad_fn=_grad_approx, hess_fn=_hess_approx
 )


@@ -255,6 +255,7 @@ def __init__(
         objective: ObjectiveName,
         model: ModelName,
         delta: float,
+        epsilon: float,
     ) -> None:
         """Initialize the MQObjective."""
         if objective == ObjectiveName.huber:
@@ -263,10 +263,7 @@
         elif objective == ObjectiveName.check:
             self._fobj = partial(check_loss_grad_hess, alphas=alphas)
         elif objective == ObjectiveName.approx:
-            self._delta = delta_validate(delta=delta)
-            self._fobj = partial(
-                majorizer_loss_grad_hess, alphas=alphas, epsilon=self._epsilon
-            )
+            self._fobj = partial(approx_loss_grad_hess, alphas=alphas, epsilon=epsilon)

         self._eval_name = CHECK_LOSS
         if model == ModelName.lightgbm:
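The approx branch above pre-binds `alphas` and `epsilon` with `functools.partial`, leaving a callable the booster can invoke with predictions and labels alone. A minimal sketch of that pattern; `loss_grad_hess` is an illustrative stand-in, since the real signature of `approx_loss_grad_hess` is determined by `_compute_grads_hess`:

```python
from functools import partial
import numpy as np

def loss_grad_hess(y_pred: np.ndarray, y_true: np.ndarray, alphas, epsilon):
    # Stand-in objective: one (grad, hess) block per quantile level, stacked
    u = y_true - y_pred
    grad = np.concatenate(
        [0.5 * (1 - 2 * a - u / (epsilon + np.abs(u))) for a in alphas]
    )
    hess = np.tile(1 / (2 * (epsilon + np.abs(u))), len(alphas))
    return grad, hess

# alphas and epsilon are fixed once; only (y_pred, y_true) remain free
fobj = partial(loss_grad_hess, alphas=[0.1, 0.5, 0.9], epsilon=1e-5)
grad, hess = fobj(np.zeros(6), np.ones(6))
print(grad.shape, hess.shape)  # (18,) (18,)
```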
3 changes: 2 additions & 1 deletion mqboost/regressor.py
@@ -45,7 +45,7 @@ def __init__(
         model: str = ModelName.lightgbm.value,
         objective: str = ObjectiveName.check.value,
         delta: float = 0.01,
-        epsilon: float = 1e-5
+        epsilon: float = 1e-5,
     ) -> None:
         """Initialize the MQRegressor."""
         self._params = params
@@ -81,6 +81,7 @@ def fit(
             objective=self._objective,
             model=self._model,
             delta=self._delta,
+            epsilon=self._epsilon,
         )
         if self.__is_lgb:
             params.update({MQStr.obj.value: self._MQObj.fobj})
