Optional normalization for learning to rank. (#10094)
trivialfis authored Mar 8, 2024
1 parent bc51619 commit e14c3b9
Showing 8 changed files with 44 additions and 5 deletions.
6 changes: 5 additions & 1 deletion doc/parameter.rst
@@ -500,7 +500,11 @@ These are parameters specific to learning to rank task. See :doc:`Learning to Ra

It specifies the number of pairs sampled for each document when the pair method is ``mean``, or the truncation level for queries when the pair method is ``topk``. For example, to train with ``ndcg@6``, set ``lambdarank_num_pair_per_sample`` to :math:`6` and ``lambdarank_pair_method`` to ``topk``.

* ``lambdarank_normalization`` [default = ``true``]

Whether to normalize the leaf value by the lambda gradient. This normalization can sometimes cause training progress to stagnate; a usage sketch follows below.

* ``lambdarank_unbiased`` [default = ``false``]

Specify whether we need to debias input click data.

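A minimal usage sketch with these parameters (the toy arrays below are illustrative stand-ins, not part of the XGBoost documentation):

.. code-block:: python

    import numpy as np
    import xgboost as xgb

    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 10))    # 100 documents, 10 features
    y = rng.integers(0, 4, size=100)  # graded relevance labels
    qid = np.repeat([0, 1], 50)       # two query groups of 50 documents each

    Xy = xgb.DMatrix(X, label=y, qid=qid)
    params = {
        "objective": "rank:ndcg",
        "lambdarank_pair_method": "topk",
        "lambdarank_num_pair_per_sample": 6,  # targets ndcg@6, per the note above
        "lambdarank_normalization": False,    # turn off the leaf value normalization
    }
    booster = xgb.train(params, Xy, num_boost_round=10)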
5 changes: 3 additions & 2 deletions doc/tutorials/learning_to_rank.rst
@@ -48,7 +48,7 @@ Notice that the samples are sorted based on their query index in a non-decreasin
import xgboost as xgb
# Make a synthetic ranking dataset for demonstration
seed = 1994
X, y = make_classification(random_state=seed)
rng = np.random.default_rng(seed)
n_query_groups = 3
@@ -146,7 +146,8 @@ The consideration of effective pairs also applies to the choice of pair method (

When using the mean strategy for generating pairs, where the target metric (like ``NDCG``) is computed over the whole query list, users can specify how many pairs should be generated for each document by setting ``lambdarank_num_pair_per_sample``. XGBoost will randomly sample ``lambdarank_num_pair_per_sample`` pairs for each element in the query group (:math:`|pairs| = |query| \times num\_pairsample`). Often, setting it to 1 can produce reasonable results. In cases where performance is inadequate due to an insufficient number of effective pairs being generated, set ``lambdarank_num_pair_per_sample`` to a higher value. As more document pairs are generated, more effective pairs will be generated as well; a sketch follows below.

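A brief sketch of the ``mean`` strategy, reusing the synthetic dataset pattern from the snippet at the top of this tutorial (the query-id assignment here is illustrative):

.. code-block:: python

    import numpy as np
    import xgboost as xgb
    from sklearn.datasets import make_classification

    seed = 1994
    X, y = make_classification(random_state=seed)
    rng = np.random.default_rng(seed)
    # Assign each sample to one of 3 query groups, then sort by query id.
    qid = rng.integers(0, 3, size=X.shape[0])
    idx = np.argsort(qid)
    X, y, qid = X[idx], y[idx], qid[idx]

    ranker = xgb.XGBRanker(
        objective="rank:ndcg",
        lambdarank_pair_method="mean",
        lambdarank_num_pair_per_sample=2,  # sample 2 pairs per document
    )
    ranker.fit(X, y, qid=qid)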
On the other hand, if you are prioritizing the top :math:`k` documents, ``lambdarank_num_pair_per_sample`` should be set slightly higher than :math:`k` (with a few more documents) to obtain a good training result. Lastly, XGBoost employs additional regularization for learning-to-rank objectives, which can be disabled by setting ``lambdarank_normalization`` to ``False``; see the sketch below.

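Continuing the sketch above with the ``topk`` method: to prioritize the top :math:`k = 6` documents and switch the normalization off (parameter availability as of this commit):

.. code-block:: python

    ranker = xgb.XGBRanker(
        objective="rank:ndcg",
        eval_metric="ndcg@6",
        lambdarank_pair_method="topk",
        lambdarank_num_pair_per_sample=8,  # slightly higher than k = 6
        lambdarank_normalization=False,    # disable the extra regularization
    )
    ranker.fit(X, y, qid=qid)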

**Summary** If you have large amount of training data:

18 changes: 18 additions & 0 deletions python-package/xgboost/testing/ranking.py
@@ -100,3 +100,21 @@ def run_ranking_categorical(device: str) -> None:
    scores = cross_val_score(ltr, X, y)
    for s in scores:
        assert s > 0.7


def run_normalization(device: str) -> None:
    """Test normalization."""
    X, y, qid, _ = tm.make_ltr(2048, 4, 64, 3)
    # Train with the default normalization enabled.
    ltr = xgb.XGBRanker(objective="rank:pairwise", n_estimators=4, device=device)
    ltr.fit(X, y, qid=qid, eval_set=[(X, y)], eval_qid=[qid])
    e0 = ltr.evals_result()

    # Train again with normalization disabled.
    ltr = xgb.XGBRanker(
        objective="rank:pairwise",
        n_estimators=4,
        device=device,
        lambdarank_normalization=False,
    )
    ltr.fit(X, y, qid=qid, eval_set=[(X, y)], eval_qid=[qid])
    e1 = ltr.evals_result()
    # On this synthetic data, the unnormalized run ends with a higher NDCG@32.
    assert e1["validation_0"]["ndcg@32"][-1] > e0["validation_0"]["ndcg@32"][-1]
5 changes: 5 additions & 0 deletions src/common/ranking_utils.h
@@ -78,6 +78,7 @@ struct LambdaRankParam : public XGBoostParameter<LambdaRankParam> {

  // unbiased
  bool lambdarank_unbiased{false};
  bool lambdarank_normalization{true};
  double lambdarank_bias_norm{1.0};
  // ndcg
  bool ndcg_exp_gain{true};
@@ -86,6 +87,7 @@ struct LambdaRankParam : public XGBoostParameter<LambdaRankParam> {
    return lambdarank_pair_method == that.lambdarank_pair_method &&
           lambdarank_num_pair_per_sample == that.lambdarank_num_pair_per_sample &&
           lambdarank_unbiased == that.lambdarank_unbiased &&
           lambdarank_normalization == that.lambdarank_normalization &&
           lambdarank_bias_norm == that.lambdarank_bias_norm && ndcg_exp_gain == that.ndcg_exp_gain;
  }
  bool operator!=(LambdaRankParam const& that) const { return !(*this == that); }
@@ -134,6 +136,9 @@ struct LambdaRankParam : public XGBoostParameter<LambdaRankParam> {
    DMLC_DECLARE_FIELD(lambdarank_unbiased)
        .set_default(false)
        .describe("Unbiased lambda mart. Use extended IPW to debias click position");
    DMLC_DECLARE_FIELD(lambdarank_normalization)
        .set_default(true)
        .describe("Whether to normalize the leaf value for lambda rank.");
    DMLC_DECLARE_FIELD(lambdarank_bias_norm)
        .set_default(1.0)
        .set_lower_bound(0.0)
2 changes: 1 addition & 1 deletion src/objective/lambdarank_obj.cc
@@ -222,7 +222,7 @@ class LambdaRankObj : public FitIntercept {
    };

    MakePairs(ctx_, iter, p_cache_, g, g_label, g_rank, loop);
    // Optionally rescale this group's gradients by log2(1 + sum_lambda) / sum_lambda.
    if (sum_lambda > 0.0 && param_.lambdarank_normalization) {
      double norm = std::log2(1.0 + sum_lambda) / sum_lambda;
      std::transform(g_gpair.Values().data(), g_gpair.Values().data() + g_gpair.Size(),
                     g_gpair.Values().data(), [norm](GradientPair const& g) { return g * norm; });
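For reference, the scaling applied in the branch above, writing :math:`\Lambda` for the code's ``sum_lambda`` accumulated over a query group: each gradient pair :math:`g` in the group is rescaled as

.. math::

    g \leftarrow g \cdot \frac{\log_2(1 + \Lambda)}{\Lambda}

whenever :math:`\Lambda > 0` and ``lambdarank_normalization`` is enabled.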
3 changes: 2 additions & 1 deletion src/objective/lambdarank_obj.cu
@@ -266,12 +266,13 @@ void CalcGrad(Context const* ctx, MetaInfo const& info, std::shared_ptr<ltr::Ran
   */
  auto d_weights = common::MakeOptionalWeights(ctx, info.weights_);
  auto w_norm = p_cache->WeightNorm();
  auto norm = p_cache->Param().lambdarank_normalization;
  thrust::for_each_n(ctx->CUDACtx()->CTP(), thrust::make_counting_iterator(0ul), d_gpair.Size(),
                     [=] XGBOOST_DEVICE(std::size_t i) mutable {
                       auto g = dh::SegmentId(d_gptr, i);
                       auto sum_lambda = thrust::get<2>(d_max_lambdas[g]);
                       // Rescale the gradient by log2(1 + sum_lambda) / sum_lambda
                       // when normalization is enabled.
                       if (sum_lambda > 0.0 && norm) {
                         double scale = std::log2(1.0 + sum_lambda) / sum_lambda;
                         d_gpair(i, 0) *= scale;
                       }
5 changes: 5 additions & 0 deletions tests/python-gpu/test_gpu_ranking.py
@@ -6,6 +6,7 @@

import xgboost
from xgboost import testing as tm
from xgboost.testing.ranking import run_normalization

pytestmark = tm.timeout(30)

@@ -126,3 +127,7 @@ def test_with_mq2008(objective, metric) -> None:
    dtest = xgboost.DMatrix(x_test, y_test, qid=qid_test)

    comp_training_with_rank_objective(dtrain, dtest, objective, metric)


def test_normalization() -> None:
run_normalization("cuda")
5 changes: 5 additions & 0 deletions tests/python/test_ranking.py
@@ -13,6 +13,7 @@
from xgboost import testing as tm
from xgboost.testing.data import RelDataCV, simulate_clicks, sort_ltr_samples
from xgboost.testing.params import lambdarank_parameter_strategy
from xgboost.testing.ranking import run_normalization


def test_ndcg_custom_gain():
@@ -188,6 +189,10 @@ def after_training(self, model) -> bool:
    assert df["ti+"].iloc[-1] < df["ti+"].iloc[0]


def test_normalization() -> None:
run_normalization("cpu")


class TestRanking:
    @classmethod
    def setup_class(cls):
