update objective
RektPunk committed Jul 11, 2024
1 parent 96591b0 commit 75c4cc0
Showing 4 changed files with 35 additions and 83 deletions.
README.md (4 changes: 2 additions & 2 deletions)
@@ -71,10 +71,10 @@ monotonic_quantile_xgb = QuantileRegressorXgb(
y=y_test,
alphas=alphas
)
params = {
xgb_params = {
"learning_rate": 0.65,
"max_depth": 10,
}
monotonic_quantile_xgb.train(params=params)
monotonic_quantile_xgb.train(params=xgb_params)
preds_xgb = monotonic_quantile_xgb.predict(x=x_test, alphas=alphas)
```
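For context, a self-contained version of the renamed snippet might look like the sketch below. The synthetic data, the `x=x_test` argument (which sits above the hunk shown here), and the top-level import path are assumptions for illustration, not taken from the README.

```python
import numpy as np
from quantile_tree import QuantileRegressorXgb  # assumed import path

alphas = [0.1, 0.5, 0.9]  # quantile levels fitted jointly

# toy sine-wave data; purely illustrative
x_test = np.linspace(-5.0, 5.0, 500).reshape(-1, 1)
y_test = np.sin(x_test).ravel() + np.random.uniform(-0.4, 0.4, size=len(x_test))

monotonic_quantile_xgb = QuantileRegressorXgb(
    x=x_test,  # assumed keyword; not visible in the hunk above
    y=y_test,
    alphas=alphas,
)
xgb_params = {
    "learning_rate": 0.65,
    "max_depth": 10,
}
monotonic_quantile_xgb.train(params=xgb_params)
preds_xgb = monotonic_quantile_xgb.predict(x=x_test, alphas=alphas)
```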
quantile_tree/engine.py (8 changes: 5 additions & 3 deletions)
@@ -31,6 +31,7 @@ class MonotoneQuantileRegressor:
alphas: AlphaLike
objective: ObjectiveName
_model_name: ModelName
**kwargs: Any
Methods
-------
@@ -45,17 +46,18 @@ def __init__(
alphas: AlphaLike,
objective: ObjectiveName,
_model_name: ModelName,
**kwargs,
**kwargs: Any,
):
"""
Set objevtive, dataset
Set objective, dataset
Args:
x (XdataLike)
y (YdataLike)
alphas (AlphaLike)
objective (ObjectiveName)
_model_name (ModelName)
**kwargs (Any)
"""
alphas = alpha_validate(alphas)
self._model_name = _model_name
@@ -69,7 +71,7 @@
)
else:
self.fobj = partial(OBJECTIVE_FUNC.get(objective), alphas=alphas)
# self.feval = partial(check_loss_eval, alphas=alphas)
# self.feval = partial(check_loss_eval, alphas=alphas) #TODO
self.dataset = TRAIN_DATASET_FUNC.get(self._model_name)(
data=self.x_train, label=self.y_train
)
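The `self.fobj = partial(OBJECTIVE_FUNC.get(objective), alphas=alphas)` line relies on a name-keyed registry of objective functions plus `functools.partial` to freeze the non-training arguments, so the bound callable matches the custom-objective signature LightGBM and XGBoost expect. A simplified sketch of that pattern follows; the registry keys, the `bind_objective` helper, and the `delta` default of 0.05 are assumptions for illustration, not the package's exact code.

```python
from functools import partial
from typing import Any, List

# assumed importable: the objective.py diff below ends by defining these names
from quantile_tree.objective import check_loss_grad_hess, huber_loss_grad_hess

# hypothetical registry keyed by objective name
OBJECTIVE_FUNC = {
    "check": check_loss_grad_hess,
    "huber": huber_loss_grad_hess,
}

def bind_objective(objective: str, alphas: List[float], **kwargs: Any):
    """Freeze non-training arguments so the result can be passed as fobj."""
    if objective == "huber":
        delta = kwargs.get("delta", 0.05)  # default noted in the model.py docstrings
        return partial(OBJECTIVE_FUNC[objective], alphas=alphas, delta=delta)
    return partial(OBJECTIVE_FUNC[objective], alphas=alphas)
```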
quantile_tree/model.py (18 changes: 11 additions & 7 deletions)
@@ -14,15 +14,16 @@

class QuantileRegressorLgb(MonotoneQuantileRegressor):
"""
Monotone quantile regressor which preserving monotonicity among quantiles
Monotone quantile regressor which preserving monotonicity among quantiles with LightGBM
Attributes
----------
x: XdataLike
y: YdataLike
alphas: AlphaLike
objective: ObjectiveName:
Determine objective function. default = "check"
objective: ObjectiveName
Determine objective function. options: "check" (default), "huber"
If objective is "huber", you can set "delta" (default = 0.05)
**kwargs: Any
Methods
-------
@@ -36,7 +37,7 @@ def __init__(
y: YdataLike,
alphas: AlphaLike,
objective: ObjectiveName = ObjectiveName.check,
**kwargs,
**kwargs: Any,
):
super().__init__(
x=x,
@@ -69,13 +70,16 @@ def train(self, params: Dict[str, Any]) -> lgb.basic.Booster:

class QuantileRegressorXgb(MonotoneQuantileRegressor):
"""
Monotone quantile regressor which preserving monotonicity among quantiles
Monotone quantile regressor which preserving monotonicity among quantiles with XGBoost
Attributes
----------
x: XdataLike
y: YdataLike
alphas: AlphaLike
objective: ObjectiveName: determine objective. default = "check"
objective: ObjectiveName
Determine objective function. options: "check" (default), "huber"
If objective is "huber", you can set "delta" (default = 0.05)
**kwargs: Any
Methods
-------
@@ -89,7 +93,7 @@ def __init__(
y: YdataLike,
alphas: AlphaLike,
objective: ObjectiveName = ObjectiveName.check,
**kwargs,
**kwargs: Any,
):
super().__init__(
x=x,
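Given the updated docstrings, using the "huber" objective would presumably look like the sketch below. The synthetic data, the LightGBM parameters, the top-level import path, and the assumption that `ObjectiveName` accepts its string value are all illustrative, not taken from the package.

```python
import numpy as np
from quantile_tree import QuantileRegressorLgb  # assumed import path

# illustrative data only
x = np.random.randn(1_000, 3)
y = x @ np.array([1.0, -2.0, 0.5]) + np.random.randn(1_000)
alphas = [0.1, 0.5, 0.9]

model = QuantileRegressorLgb(
    x=x,
    y=y,
    alphas=alphas,
    objective="huber",  # assumes ObjectiveName accepts its string value
    delta=0.05,         # Huber threshold; 0.05 is the documented default
)
model.train(params={"learning_rate": 0.05, "num_leaves": 15})
preds = model.predict(x=x, alphas=alphas)
```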
quantile_tree/objective.py (88 changes: 17 additions & 71 deletions)
@@ -1,6 +1,7 @@
from typing import List, Tuple, Union
import numpy as np
from typing import Any, Callable, List, Tuple, Union
from functools import partial

import numpy as np
import lightgbm as lgb
import xgboost as xgb

@@ -37,89 +38,34 @@ def _train_pred_reshape(
return _y_train.reshape(len_alpha, -1), y_pred.reshape(len_alpha, -1)


def check_loss_grad_hess(
def _compute_grads_hess(
y_pred: np.ndarray,
dtrain: _DtrainLike,
alphas: List[float],
) -> Tuple[np.ndarray, np.ndarray]:
grad_fn: Callable[[np.ndarray, float, Any], np.ndarray],
**kwargs: Any
) -> np.ndarray:
"""
Return gradient and hessin of composite check quanitle loss
Compute gradients for given loss function
Args:
y_train (np.ndarray)
y_pred (np.ndarray)
dtrain (_DtrainLike)
alphas (List[float])
grad_fn (callable)
**kwargs (Any): Additional arguments for grad_fn
Returns:
Tuple[np.ndarray, np.ndarray]:
gradient
hessian
np.ndarray
"""
_len_alpha = len(alphas)
_y_train, _y_pred = _train_pred_reshape(y_pred, dtrain, _len_alpha)
grads = []
for alpha_inx in range(_len_alpha):
for alpha_inx in range(len(alphas)):
_err_for_alpha = _y_train[alpha_inx] - _y_pred[alpha_inx]
_grad = _grad_rho(_err_for_alpha, alphas[alpha_inx])
_grad = grad_fn(u=_err_for_alpha, alpha=alphas[alpha_inx], **kwargs)
grads.append(_grad)
return np.concatenate(grads), np.ones_like(y_pred)

grad = np.concatenate(grads)
hess = np.ones(y_pred.shape)

return grad, hess


def huber_loss_grad_hess(
y_pred: np.ndarray,
dtrain: _DtrainLike,
alphas: List[float],
delta: float,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Return gradient and hessin of composite huber quanitle loss
Args:
y_pred (np.ndarray)
dtrain (_DtrainLike)
alphas (List[float])
delta (float)
Returns:
Tuple[np.ndarray, np.ndarray]:
gradient
hessian
"""
_len_alpha = len(alphas)
_y_train, _y_pred = _train_pred_reshape(y_pred, dtrain, _len_alpha)

grads = []
for alpha_inx in range(_len_alpha):
_err_for_alpha = _y_train[alpha_inx] - _y_pred[alpha_inx]
_grad = _grad_huber(_err_for_alpha, alphas[alpha_inx], delta)
grads.append(_grad)

grad = np.concatenate(grads)
hess = np.ones(y_pred.shape)

return grad, hess


# def check_loss_eval(
# y_pred: np.ndarray,dtrain: _DtrainLike, alphas: List[float],
# ) -> Tuple[str, np.ndarray, bool]:
# """
# Return composite quantile loss
# Args:
# dtrain (_DtrainLike)
# y_pred (np.ndarray)
# alphas (List[float])

# Returns:
# Tuple[str, np.ndarray, bool]
# """
# _len_alpha = len(alphas)
# _y_train, _y_pred = _train_pred_reshape( y_pred, dtrain,_len_alpha)
# loss = []
# for alpha_inx in range(_len_alpha):
# _err_for_alpha = _y_train[alpha_inx] - _y_pred[alpha_inx]
# loss.append(_rho(_err_for_alpha, alphas[alpha_inx]))
# loss = np.concatenate(loss)
# return "loss", loss.mean(), False
check_loss_grad_hess = partial(_compute_grads_hess, grad_fn=_grad_rho)
huber_loss_grad_hess = partial(_compute_grads_hess, grad_fn=_grad_huber)
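The bodies of `_grad_rho` and `_grad_huber` are not part of this diff. The sketch below fills them in with one common convention (gradient of the loss with respect to the prediction, where `u = y_true - y_pred`) so that `_compute_grads_hess` above can be read end to end; the exact sign convention and the Huber smoothing are assumptions, not the package's code.

```python
import numpy as np

def _grad_rho(u: np.ndarray, alpha: float) -> np.ndarray:
    # Check (pinball) loss: rho_alpha(u) = u * (alpha - 1{u < 0}).
    # Its gradient w.r.t. the prediction is -(alpha - 1{u < 0}).
    return (u < 0).astype(float) - alpha

def _grad_huber(u: np.ndarray, alpha: float, delta: float) -> np.ndarray:
    # Huber-smoothed variant: inside the |u| < delta band the check gradient
    # is scaled by |u| / delta so the composite loss is smooth at zero error.
    _grad = _grad_rho(u, alpha)
    return np.where(np.abs(u) < delta, np.abs(u) / delta * _grad, _grad)
```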
