From 8cad6b5c78ed204fd85a3044ede46a55517ef085 Mon Sep 17 00:00:00 2001 From: James Lamb Date: Sat, 28 Sep 2019 22:17:51 -0500 Subject: [PATCH] updated roxygen files --- R-package/man/getinfo.Rd | 4 +- R-package/man/lgb.Dataset.Rd | 13 +++-- R-package/man/lgb.cv.Rd | 64 +++++++++++++++--------- R-package/man/lgb.dump.Rd | 16 +++--- R-package/man/lgb.get.eval.result.Rd | 25 +++++---- R-package/man/lgb.importance.Rd | 11 ++-- R-package/man/lgb.load.Rd | 16 +++--- R-package/man/lgb.model.dt.tree.Rd | 11 ++-- R-package/man/lgb.plot.importance.Rd | 9 +++- R-package/man/lgb.plot.interpretation.Rd | 24 ++++++--- R-package/man/lgb.prepare.Rd | 7 ++- R-package/man/lgb.prepare2.Rd | 10 +++- R-package/man/lgb.prepare_rules.Rd | 8 ++- R-package/man/lgb.prepare_rules2.Rd | 46 ++++------------- R-package/man/lgb.save.Rd | 16 +++--- R-package/man/lgb.train.Rd | 56 +++++++++++++-------- R-package/man/lgb.unloader.Rd | 26 ++++++---- R-package/man/lgb_shared_params.Rd | 10 ++-- R-package/man/lightgbm.Rd | 28 +++++++---- R-package/man/predict.lgb.Booster.Rd | 34 ++++++++----- R-package/man/readRDS.lgb.Booster.Rd | 16 +++--- R-package/man/saveRDS.lgb.Booster.Rd | 33 ++++++++---- R-package/man/setinfo.Rd | 4 +- R-package/man/slice.Rd | 4 +- 24 files changed, 294 insertions(+), 197 deletions(-) diff --git a/R-package/man/getinfo.Rd b/R-package/man/getinfo.Rd index b5b8112bd2d3..8bdca02ca940 100644 --- a/R-package/man/getinfo.Rd +++ b/R-package/man/getinfo.Rd @@ -12,9 +12,9 @@ getinfo(dataset, ...) \arguments{ \item{dataset}{Object of class \code{lgb.Dataset}} -\item{...}{other parameters} - \item{name}{the name of the information field to get (see details)} + +\item{...}{other parameters} } \value{ info data diff --git a/R-package/man/lgb.Dataset.Rd b/R-package/man/lgb.Dataset.Rd index dd74fdf6626f..1aa7197f8237 100644 --- a/R-package/man/lgb.Dataset.Rd +++ b/R-package/man/lgb.Dataset.Rd @@ -4,9 +4,16 @@ \alias{lgb.Dataset} \title{Construct \code{lgb.Dataset} object} \usage{ -lgb.Dataset(data, params = list(), reference = NULL, colnames = NULL, - categorical_feature = NULL, free_raw_data = TRUE, info = list(), - ...) +lgb.Dataset( + data, + params = list(), + reference = NULL, + colnames = NULL, + categorical_feature = NULL, + free_raw_data = TRUE, + info = list(), + ... +) } \arguments{ \item{data}{a \code{matrix} object, a \code{dgCMatrix} object or a character representing a filename} diff --git a/R-package/man/lgb.cv.Rd b/R-package/man/lgb.cv.Rd index 5be785fa18d2..a419122592ff 100644 --- a/R-package/man/lgb.cv.Rd +++ b/R-package/man/lgb.cv.Rd @@ -4,13 +4,29 @@ \alias{lgb.cv} \title{Main CV logic for LightGBM} \usage{ -lgb.cv(params = list(), data, nrounds = 10, nfold = 3, - label = NULL, weight = NULL, obj = NULL, eval = NULL, - verbose = 1, record = TRUE, eval_freq = 1L, showsd = TRUE, - stratified = TRUE, folds = NULL, init_model = NULL, - colnames = NULL, categorical_feature = NULL, - early_stopping_rounds = NULL, callbacks = list(), - reset_data = FALSE, ...) +lgb.cv( + params = list(), + data, + nrounds = 10, + nfold = 3, + label = NULL, + weight = NULL, + obj = NULL, + eval = NULL, + verbose = 1, + record = TRUE, + eval_freq = 1L, + showsd = TRUE, + stratified = TRUE, + folds = NULL, + init_model = NULL, + colnames = NULL, + categorical_feature = NULL, + early_stopping_rounds = NULL, + callbacks = list(), + reset_data = FALSE, + ... 
+) } \arguments{ \item{params}{List of parameters} @@ -27,7 +43,7 @@ lgb.cv(params = list(), data, nrounds = 10, nfold = 3, \item{obj}{objective function, can be character or custom objective function. Examples include \code{regression}, \code{regression_l1}, \code{huber}, -\code{binary}, \code{lambdarank}, \code{multiclass}, \code{multiclass}} + \code{binary}, \code{lambdarank}, \code{multiclass}, \code{multiclass}} \item{eval}{evaluation function, can be (list of) character or custom eval function} @@ -54,17 +70,15 @@ the \code{nfold} and \code{stratified} parameters are ignored.} type int represents index, type str represents feature names} -\item{early_stopping_rounds}{int -Activates early stopping. -Requires at least one validation data and one metric -If there's more than one, will check all of them except the training data -Returns the model with (best_iter + early_stopping_rounds) -If early stopping occurs, the model will have 'best_iter' field} +\item{early_stopping_rounds}{int. Activates early stopping. Requires at least one validation data +and one metric. If there's more than one, will check all of them +except the training data. Returns the model with (best_iter + early_stopping_rounds). +If early stopping occurs, the model will have 'best_iter' field.} -\item{callbacks}{list of callback functions -List of callback functions that are applied at each iteration.} +\item{callbacks}{List of callback functions that are applied at each iteration.} -\item{reset_data}{Boolean, setting it to TRUE (not the default value) will transform the booster model into a predictor model which frees up memory and the original datasets} +\item{reset_data}{Boolean, setting it to TRUE (not the default value) will transform the booster model +into a predictor model which frees up memory and the original datasets} \item{...}{other parameters, see Parameters.rst for more information. 
A few key parameters: \itemize{ @@ -89,11 +103,13 @@ data(agaricus.train, package = "lightgbm") train <- agaricus.train dtrain <- lgb.Dataset(train$data, label = train$label) params <- list(objective = "regression", metric = "l2") -model <- lgb.cv(params, - dtrain, - 10, - nfold = 3, - min_data = 1, - learning_rate = 1, - early_stopping_rounds = 5) +model <- lgb.cv( + params = params + , data = dtrain + , nrounds = 10 + , nfold = 3 + , min_data = 1 + , learning_rate = 1 + , early_stopping_rounds = 5 +) } diff --git a/R-package/man/lgb.dump.Rd b/R-package/man/lgb.dump.Rd index 73226a36b1b2..c03dcdd7fc6b 100644 --- a/R-package/man/lgb.dump.Rd +++ b/R-package/man/lgb.dump.Rd @@ -27,13 +27,15 @@ test <- agaricus.test dtest <- lgb.Dataset.create.valid(dtrain, test$data, label = test$label) params <- list(objective = "regression", metric = "l2") valids <- list(test = dtest) -model <- lgb.train(params, - dtrain, - 10, - valids, - min_data = 1, - learning_rate = 1, - early_stopping_rounds = 5) +model <- lgb.train( + params = params + , data = dtrain + , nrounds = 10 + , valids = valids + , min_data = 1 + , learning_rate = 1 + , early_stopping_rounds = 5 +) json_model <- lgb.dump(model) } diff --git a/R-package/man/lgb.get.eval.result.Rd b/R-package/man/lgb.get.eval.result.Rd index b13c151c9b57..0b124ffb62cb 100644 --- a/R-package/man/lgb.get.eval.result.Rd +++ b/R-package/man/lgb.get.eval.result.Rd @@ -4,8 +4,13 @@ \alias{lgb.get.eval.result} \title{Get record evaluation result from booster} \usage{ -lgb.get.eval.result(booster, data_name, eval_name, iters = NULL, - is_err = FALSE) +lgb.get.eval.result( + booster, + data_name, + eval_name, + iters = NULL, + is_err = FALSE +) } \arguments{ \item{booster}{Object of class \code{lgb.Booster}} @@ -34,12 +39,14 @@ test <- agaricus.test dtest <- lgb.Dataset.create.valid(dtrain, test$data, label = test$label) params <- list(objective = "regression", metric = "l2") valids <- list(test = dtest) -model <- lgb.train(params, - dtrain, - 10, - valids, - min_data = 1, - learning_rate = 1, - early_stopping_rounds = 5) +model <- lgb.train( + params = params + , data = dtrain + , nrounds = 10 + , valids = valids + , min_data = 1 + , learning_rate = 1 + , early_stopping_rounds = 5 +) lgb.get.eval.result(model, "test", "l2") } diff --git a/R-package/man/lgb.importance.Rd b/R-package/man/lgb.importance.Rd index 6955fad34129..2a75e1c14eb3 100644 --- a/R-package/man/lgb.importance.Rd +++ b/R-package/man/lgb.importance.Rd @@ -29,9 +29,14 @@ data(agaricus.train, package = "lightgbm") train <- agaricus.train dtrain <- lgb.Dataset(train$data, label = train$label) -params <- list(objective = "binary", - learning_rate = 0.01, num_leaves = 63, max_depth = -1, - min_data_in_leaf = 1, min_sum_hessian_in_leaf = 1) +params <- list( + objective = "binary" + , learning_rate = 0.01 + , num_leaves = 63 + , max_depth = -1 + , min_data_in_leaf = 1 + , min_sum_hessian_in_leaf = 1 +) model <- lgb.train(params, dtrain, 10) tree_imp1 <- lgb.importance(model, percentage = TRUE) diff --git a/R-package/man/lgb.load.Rd b/R-package/man/lgb.load.Rd index 943eb81ee2e3..ebe14d0e39bc 100644 --- a/R-package/man/lgb.load.Rd +++ b/R-package/man/lgb.load.Rd @@ -29,13 +29,15 @@ test <- agaricus.test dtest <- lgb.Dataset.create.valid(dtrain, test$data, label = test$label) params <- list(objective = "regression", metric = "l2") valids <- list(test = dtest) -model <- lgb.train(params, - dtrain, - 10, - valids, - min_data = 1, - learning_rate = 1, - early_stopping_rounds = 5) +model <- lgb.train( + params = 
params + , data = dtrain + , nrounds = 10 + , valids = valids + , min_data = 1 + , learning_rate = 1 + , early_stopping_rounds = 5 +) lgb.save(model, "model.txt") load_booster <- lgb.load(filename = "model.txt") model_string <- model$save_model_to_string(NULL) # saves best iteration diff --git a/R-package/man/lgb.model.dt.tree.Rd b/R-package/man/lgb.model.dt.tree.Rd index c25d139fa12c..fc85105eee50 100644 --- a/R-package/man/lgb.model.dt.tree.Rd +++ b/R-package/man/lgb.model.dt.tree.Rd @@ -44,9 +44,14 @@ data(agaricus.train, package = "lightgbm") train <- agaricus.train dtrain <- lgb.Dataset(train$data, label = train$label) -params <- list(objective = "binary", - learning_rate = 0.01, num_leaves = 63, max_depth = -1, - min_data_in_leaf = 1, min_sum_hessian_in_leaf = 1) +params <- list( + objective = "binary" + , learning_rate = 0.01 + , num_leaves = 63 + , max_depth = -1 + , min_data_in_leaf = 1 + , min_sum_hessian_in_leaf = 1 +) model <- lgb.train(params, dtrain, 10) tree_dt <- lgb.model.dt.tree(model) diff --git a/R-package/man/lgb.plot.importance.Rd b/R-package/man/lgb.plot.importance.Rd index 84493b1e9e6c..cc419ab0ae8d 100644 --- a/R-package/man/lgb.plot.importance.Rd +++ b/R-package/man/lgb.plot.importance.Rd @@ -4,8 +4,13 @@ \alias{lgb.plot.importance} \title{Plot feature importance as a bar graph} \usage{ -lgb.plot.importance(tree_imp, top_n = 10, measure = "Gain", - left_margin = 10, cex = NULL) +lgb.plot.importance( + tree_imp, + top_n = 10, + measure = "Gain", + left_margin = 10, + cex = NULL +) } \arguments{ \item{tree_imp}{a \code{data.table} returned by \code{\link{lgb.importance}}.} diff --git a/R-package/man/lgb.plot.interpretation.Rd b/R-package/man/lgb.plot.interpretation.Rd index 6d3637e9ea41..a026d619c8f9 100644 --- a/R-package/man/lgb.plot.interpretation.Rd +++ b/R-package/man/lgb.plot.interpretation.Rd @@ -4,8 +4,13 @@ \alias{lgb.plot.interpretation} \title{Plot feature contribution as a bar graph} \usage{ -lgb.plot.interpretation(tree_interpretation_dt, top_n = 10, cols = 1, - left_margin = 10, cex = NULL) +lgb.plot.interpretation( + tree_interpretation_dt, + top_n = 10, + cols = 1, + left_margin = 10, + cex = NULL +) } \arguments{ \item{tree_interpretation_dt}{a \code{data.table} returned by \code{\link{lgb.interprete}}.} @@ -25,8 +30,8 @@ The \code{lgb.plot.interpretation} function creates a \code{barplot}. Plot previously calculated feature contribution as a bar graph. } \details{ -The graph represents each feature as a horizontal bar of length proportional to the defined contribution of a feature. -Features are shown ranked in a decreasing contribution order. +The graph represents each feature as a horizontal bar of length proportional to the defined +contribution of a feature. Features are shown ranked in a decreasing contribution order. 
} \examples{ library(lightgbm) @@ -39,9 +44,14 @@ setinfo(dtrain, "init_score", rep(Logit(mean(train$label)), length(train$label)) data(agaricus.test, package = "lightgbm") test <- agaricus.test -params <- list(objective = "binary", - learning_rate = 0.01, num_leaves = 63, max_depth = -1, - min_data_in_leaf = 1, min_sum_hessian_in_leaf = 1) +params <- list( + objective = "binary" + , learning_rate = 0.01 + , num_leaves = 63 + , max_depth = -1 + , min_data_in_leaf = 1 + , min_sum_hessian_in_leaf = 1 +) model <- lgb.train(params, dtrain, 10) tree_interpretation <- lgb.interprete(model, test$data, 1:5) diff --git a/R-package/man/lgb.prepare.Rd b/R-package/man/lgb.prepare.Rd index fd38d5046eb6..262a52aa1f21 100644 --- a/R-package/man/lgb.prepare.Rd +++ b/R-package/man/lgb.prepare.Rd @@ -10,10 +10,13 @@ lgb.prepare(data) \item{data}{A data.frame or data.table to prepare.} } \value{ -The cleaned dataset. It must be converted to a matrix format (\code{as.matrix}) for input in \code{lgb.Dataset}. +The cleaned dataset. It must be converted to a matrix format (\code{as.matrix}) + for input in \code{lgb.Dataset}. } \description{ -Attempts to prepare a clean dataset to prepare to put in a \code{lgb.Dataset}. Factors and characters are converted to numeric without integers. Please use \code{lgb.prepare_rules} if you want to apply this transformation to other datasets. +Attempts to prepare a clean dataset to prepare to put in a \code{lgb.Dataset}. +Factors and characters are converted to numeric without integers. Please use +\code{lgb.prepare_rules} if you want to apply this transformation to other datasets. } \examples{ library(lightgbm) diff --git a/R-package/man/lgb.prepare2.Rd b/R-package/man/lgb.prepare2.Rd index 106ab21d587f..1d8400220b64 100644 --- a/R-package/man/lgb.prepare2.Rd +++ b/R-package/man/lgb.prepare2.Rd @@ -10,10 +10,16 @@ lgb.prepare2(data) \item{data}{A data.frame or data.table to prepare.} } \value{ -The cleaned dataset. It must be converted to a matrix format (\code{as.matrix}) for input in \code{lgb.Dataset}. +The cleaned dataset. It must be converted to a matrix format (\code{as.matrix}) + for input in \code{lgb.Dataset}. } \description{ -Attempts to prepare a clean dataset to prepare to put in a \code{lgb.Dataset}. Factors and characters are converted to numeric (specifically: integer). Please use \code{lgb.prepare_rules2} if you want to apply this transformation to other datasets. This is useful if you have a specific need for integer dataset instead of numeric dataset. Note that there are programs which do not support integer-only input. Consider this as a half memory technique which is dangerous, especially for LightGBM. +Attempts to prepare a clean dataset to prepare to put in a \code{lgb.Dataset}. +Factors and characters are converted to numeric (specifically: integer). +Please use \code{lgb.prepare_rules2} if you want to apply this transformation to other datasets. +This is useful if you have a specific need for integer dataset instead of numeric dataset. +Note that there are programs which do not support integer-only input. Consider this as a half +memory technique which is dangerous, especially for LightGBM. 
} \examples{ library(lightgbm) diff --git a/R-package/man/lgb.prepare_rules.Rd b/R-package/man/lgb.prepare_rules.Rd index 939cb1fd58ef..d8caa74475aa 100644 --- a/R-package/man/lgb.prepare_rules.Rd +++ b/R-package/man/lgb.prepare_rules.Rd @@ -12,10 +12,14 @@ lgb.prepare_rules(data, rules = NULL) \item{rules}{A set of rules from the data preparator, if already used.} } \value{ -A list with the cleaned dataset (\code{data}) and the rules (\code{rules}). The data must be converted to a matrix format (\code{as.matrix}) for input in \code{lgb.Dataset}. +A list with the cleaned dataset (\code{data}) and the rules (\code{rules}). + The data must be converted to a matrix format (\code{as.matrix}) for input + in \code{lgb.Dataset}. } \description{ -Attempts to prepare a clean dataset to prepare to put in a \code{lgb.Dataset}. Factors and characters are converted to numeric. In addition, keeps rules created so you can convert other datasets using this converter. +Attempts to prepare a clean dataset to prepare to put in a \code{lgb.Dataset}. +Factors and characters are converted to numeric. In addition, keeps rules created +so you can convert other datasets using this converter. } \examples{ library(lightgbm) diff --git a/R-package/man/lgb.prepare_rules2.Rd b/R-package/man/lgb.prepare_rules2.Rd index 5e2d909e9d6f..64ccd2041e1e 100644 --- a/R-package/man/lgb.prepare_rules2.Rd +++ b/R-package/man/lgb.prepare_rules2.Rd @@ -12,43 +12,15 @@ lgb.prepare_rules2(data, rules = NULL) \item{rules}{A set of rules from the data preparator, if already used.} } \value{ -A list with the cleaned dataset (\code{data}) and the rules (\code{rules}). The data must be converted to a matrix format (\code{as.matrix}) for input in \code{lgb.Dataset}. +A list with the cleaned dataset (\code{data}) and the rules (\code{rules}). + The data must be converted to a matrix format (\code{as.matrix}) for input in + \code{lgb.Dataset}. } \description{ -Attempts to prepare a clean dataset to prepare to put in a \code{lgb.Dataset}. Factors and characters are converted to numeric (specifically: integer). In addition, keeps rules created so you can convert other datasets using this converter. This is useful if you have a specific need for integer dataset instead of numeric dataset. Note that there are programs which do not support integer-only input. Consider this as a half memory technique which is dangerous, especially for LightGBM. -} -\examples{ -library(lightgbm) -data(iris) - -str(iris) - -new_iris <- lgb.prepare_rules2(data = iris) # Autoconverter -str(new_iris$data) - -data(iris) # Erase iris dataset -iris$Species[1] <- "NEW FACTOR" # Introduce junk factor (NA) - -# Use conversion using known rules -# Unknown factors become 0, excellent for sparse datasets -newer_iris <- lgb.prepare_rules2(data = iris, rules = new_iris$rules) - -# Unknown factor is now zero, perfect for sparse datasets -newer_iris$data[1, ] # Species became 0 as it is an unknown factor - -newer_iris$data[1, 5] <- 1 # Put back real initial value - -# Is the newly created dataset equal? YES! -all.equal(new_iris$data, newer_iris$data) - -# Can we test our own rules? -data(iris) # Erase iris dataset - -# We remapped values differently -personal_rules <- list(Species = c("setosa" = 3L, - "versicolor" = 2L, - "virginica" = 1L)) -newest_iris <- lgb.prepare_rules2(data = iris, rules = personal_rules) -str(newest_iris$data) # SUCCESS! - +Attempts to prepare a clean dataset to prepare to put in a \code{lgb.Dataset}. 
+Factors and characters are converted to numeric (specifically: integer). +In addition, keeps rules created so you can convert other datasets using this converter. +This is useful if you have a specific need for integer dataset instead of numeric dataset. +Note that there are programs which do not support integer-only input. +Consider this as a half memory technique which is dangerous, especially for LightGBM. } diff --git a/R-package/man/lgb.save.Rd b/R-package/man/lgb.save.Rd index f0ab4c593b42..df483f6513bb 100644 --- a/R-package/man/lgb.save.Rd +++ b/R-package/man/lgb.save.Rd @@ -29,13 +29,15 @@ test <- agaricus.test dtest <- lgb.Dataset.create.valid(dtrain, test$data, label = test$label) params <- list(objective = "regression", metric = "l2") valids <- list(test = dtest) -model <- lgb.train(params, - dtrain, - 10, - valids, - min_data = 1, - learning_rate = 1, - early_stopping_rounds = 5) +model <- lgb.train( + params = params + , data = dtrain + , nrounds = 10 + , valids = valids + , min_data = 1 + , learning_rate = 1 + , early_stopping_rounds = 5 +) lgb.save(model, "model.txt") } diff --git a/R-package/man/lgb.train.Rd b/R-package/man/lgb.train.Rd index 79b16ef88910..f7096389d428 100644 --- a/R-package/man/lgb.train.Rd +++ b/R-package/man/lgb.train.Rd @@ -4,11 +4,24 @@ \alias{lgb.train} \title{Main training logic for LightGBM} \usage{ -lgb.train(params = list(), data, nrounds = 10, valids = list(), - obj = NULL, eval = NULL, verbose = 1, record = TRUE, - eval_freq = 1L, init_model = NULL, colnames = NULL, - categorical_feature = NULL, early_stopping_rounds = NULL, - callbacks = list(), reset_data = FALSE, ...) +lgb.train( + params = list(), + data, + nrounds = 10, + valids = list(), + obj = NULL, + eval = NULL, + verbose = 1, + record = TRUE, + eval_freq = 1L, + init_model = NULL, + colnames = NULL, + categorical_feature = NULL, + early_stopping_rounds = NULL, + callbacks = list(), + reset_data = FALSE, + ... +) } \arguments{ \item{params}{List of parameters} @@ -39,17 +52,16 @@ lgb.train(params = list(), data, nrounds = 10, valids = list(), type int represents index, type str represents feature names} -\item{early_stopping_rounds}{int -Activates early stopping. -Requires at least one validation data and one metric -If there's more than one, will check all of them except the training data -Returns the model with (best_iter + early_stopping_rounds) -If early stopping occurs, the model will have 'best_iter' field} +\item{early_stopping_rounds}{int. Activates early stopping. Requires at least one validation data +and one metric. If there's more than one, will check all of them +except the training data. Returns the model with (best_iter + early_stopping_rounds). +If early stopping occurs, the model will have 'best_iter' field.} -\item{callbacks}{list of callback functions -List of callback functions that are applied at each iteration.} +\item{callbacks}{List of callback functions that are applied at each iteration.} -\item{reset_data}{Boolean, setting it to TRUE (not the default value) will transform the booster model into a predictor model which frees up memory and the original datasets} +\item{reset_data}{Boolean, setting it to TRUE (not the default value) will transform the +booster model into a predictor model which frees up memory and the +original datasets} \item{...}{other parameters, see Parameters.rst for more information. 
A few key parameters: \itemize{ @@ -78,11 +90,13 @@ test <- agaricus.test dtest <- lgb.Dataset.create.valid(dtrain, test$data, label = test$label) params <- list(objective = "regression", metric = "l2") valids <- list(test = dtest) -model <- lgb.train(params, - dtrain, - 10, - valids, - min_data = 1, - learning_rate = 1, - early_stopping_rounds = 5) +model <- lgb.train( + params = params + , data = dtrain + , nrounds = 10 + , valids = valids + , min_data = 1 + , learning_rate = 1 + , early_stopping_rounds = 5 +) } diff --git a/R-package/man/lgb.unloader.Rd b/R-package/man/lgb.unloader.Rd index ab12dfe2cf23..b77046cf5644 100644 --- a/R-package/man/lgb.unloader.Rd +++ b/R-package/man/lgb.unloader.Rd @@ -7,11 +7,15 @@ lgb.unloader(restore = TRUE, wipe = FALSE, envir = .GlobalEnv) } \arguments{ -\item{restore}{Whether to reload \code{LightGBM} immediately after detaching from R. Defaults to \code{TRUE} which means automatically reload \code{LightGBM} once unloading is performed.} +\item{restore}{Whether to reload \code{LightGBM} immediately after detaching from R. +Defaults to \code{TRUE} which means automatically reload \code{LightGBM} once +unloading is performed.} -\item{wipe}{Whether to wipe all \code{lgb.Dataset} and \code{lgb.Booster} from the global environment. Defaults to \code{FALSE} which means to not remove them.} +\item{wipe}{Whether to wipe all \code{lgb.Dataset} and \code{lgb.Booster} from the global +environment. Defaults to \code{FALSE} which means to not remove them.} -\item{envir}{The environment to perform wiping on if \code{wipe == TRUE}. Defaults to \code{.GlobalEnv} which is the global environment.} +\item{envir}{The environment to perform wiping on if \code{wipe == TRUE}. Defaults to +\code{.GlobalEnv} which is the global environment.} } \value{ NULL invisibly. @@ -29,13 +33,15 @@ test <- agaricus.test dtest <- lgb.Dataset.create.valid(dtrain, test$data, label = test$label) params <- list(objective = "regression", metric = "l2") valids <- list(test = dtest) -model <- lgb.train(params, - dtrain, - 10, - valids, - min_data = 1, - learning_rate = 1, - early_stopping_rounds = 5) +model <- lgb.train( + params = params + , data = dtrain + , nrounds = 10 + , valids = valids + , min_data = 1 + , learning_rate = 1 + , early_stopping_rounds = 5 +) \dontrun{ lgb.unloader(restore = FALSE, wipe = FALSE, envir = .GlobalEnv) diff --git a/R-package/man/lgb_shared_params.Rd b/R-package/man/lgb_shared_params.Rd index 0a40c2ae31ed..3dcad6f47544 100644 --- a/R-package/man/lgb_shared_params.Rd +++ b/R-package/man/lgb_shared_params.Rd @@ -9,12 +9,10 @@ List of callback functions that are applied at each iteration.} \item{data}{a \code{lgb.Dataset} object, used for training} -\item{early_stopping_rounds}{int -Activates early stopping. -Requires at least one validation data and one metric -If there's more than one, will check all of them except the training data -Returns the model with (best_iter + early_stopping_rounds) -If early stopping occurs, the model will have 'best_iter' field} +\item{early_stopping_rounds}{int. Activates early stopping. Requires at least one validation data +and one metric. If there's more than one, will check all of them +except the training data. Returns the model with (best_iter + early_stopping_rounds). 
+If early stopping occurs, the model will have 'best_iter' field.} \item{eval_freq}{evaluation output frequency, only effect when verbose > 0} diff --git a/R-package/man/lightgbm.Rd b/R-package/man/lightgbm.Rd index d0582110a156..3f424dfc7542 100644 --- a/R-package/man/lightgbm.Rd +++ b/R-package/man/lightgbm.Rd @@ -4,10 +4,20 @@ \alias{lightgbm} \title{Train a LightGBM model} \usage{ -lightgbm(data, label = NULL, weight = NULL, params = list(), - nrounds = 10, verbose = 1, eval_freq = 1L, - early_stopping_rounds = NULL, save_name = "lightgbm.model", - init_model = NULL, callbacks = list(), ...) +lightgbm( + data, + label = NULL, + weight = NULL, + params = list(), + nrounds = 10, + verbose = 1, + eval_freq = 1L, + early_stopping_rounds = NULL, + save_name = "lightgbm.model", + init_model = NULL, + callbacks = list(), + ... +) } \arguments{ \item{data}{a \code{lgb.Dataset} object, used for training} @@ -24,12 +34,10 @@ lightgbm(data, label = NULL, weight = NULL, params = list(), \item{eval_freq}{evaluation output frequency, only effect when verbose > 0} -\item{early_stopping_rounds}{int -Activates early stopping. -Requires at least one validation data and one metric -If there's more than one, will check all of them except the training data -Returns the model with (best_iter + early_stopping_rounds) -If early stopping occurs, the model will have 'best_iter' field} +\item{early_stopping_rounds}{int. Activates early stopping. Requires at least one validation data +and one metric. If there's more than one, will check all of them +except the training data. Returns the model with (best_iter + early_stopping_rounds). +If early stopping occurs, the model will have 'best_iter' field.} \item{save_name}{File name to use when writing the trained model to disk. Should end in ".model".} diff --git a/R-package/man/predict.lgb.Booster.Rd b/R-package/man/predict.lgb.Booster.Rd index 480311889c76..e2359c225390 100644 --- a/R-package/man/predict.lgb.Booster.Rd +++ b/R-package/man/predict.lgb.Booster.Rd @@ -4,9 +4,17 @@ \alias{predict.lgb.Booster} \title{Predict method for LightGBM model} \usage{ -\method{predict}{lgb.Booster}(object, data, num_iteration = NULL, - rawscore = FALSE, predleaf = FALSE, predcontrib = FALSE, - header = FALSE, reshape = FALSE, ...) +\method{predict}{lgb.Booster}( + object, + data, + num_iteration = NULL, + rawscore = FALSE, + predleaf = FALSE, + predcontrib = FALSE, + header = FALSE, + reshape = FALSE, + ... +) } \arguments{ \item{object}{Object of class \code{lgb.Booster}} @@ -16,8 +24,8 @@ \item{num_iteration}{number of iteration want to predict with, NULL or <= 0 means use best iteration} \item{rawscore}{whether the prediction should be returned in the for of original untransformed -sum of predictions from boosting iterations' results. E.g., setting \code{rawscore=TRUE} for -logistic regression would result in predictions for log-odds instead of probabilities.} +sum of predictions from boosting iterations' results. 
E.g., setting \code{rawscore=TRUE} +for logistic regression would result in predictions for log-odds instead of probabilities.} \item{predleaf}{whether predict leaf index instead.} @@ -53,13 +61,15 @@ test <- agaricus.test dtest <- lgb.Dataset.create.valid(dtrain, test$data, label = test$label) params <- list(objective = "regression", metric = "l2") valids <- list(test = dtest) -model <- lgb.train(params, - dtrain, - 10, - valids, - min_data = 1, - learning_rate = 1, - early_stopping_rounds = 5) +model <- lgb.train( + params = params + , data = dtrain + , nrounds = 10 + , valids = valids + , min_data = 1 + , learning_rate = 1 + , early_stopping_rounds = 5 +) preds <- predict(model, test$data) } diff --git a/R-package/man/readRDS.lgb.Booster.Rd b/R-package/man/readRDS.lgb.Booster.Rd index cef1679ce53d..a8fc219ffd95 100644 --- a/R-package/man/readRDS.lgb.Booster.Rd +++ b/R-package/man/readRDS.lgb.Booster.Rd @@ -27,13 +27,15 @@ test <- agaricus.test dtest <- lgb.Dataset.create.valid(dtrain, test$data, label = test$label) params <- list(objective = "regression", metric = "l2") valids <- list(test = dtest) -model <- lgb.train(params, - dtrain, - 10, - valids, - min_data = 1, - learning_rate = 1, - early_stopping_rounds = 5) +model <- lgb.train( + params = params + , data = dtrain + , nrounds = 10 + , valids = valids + , min_data = 1 + , learning_rate = 1 + , early_stopping_rounds = 5 +) saveRDS.lgb.Booster(model, "model.rds") new_model <- readRDS.lgb.Booster("model.rds") diff --git a/R-package/man/saveRDS.lgb.Booster.Rd b/R-package/man/saveRDS.lgb.Booster.Rd index 54fd1d07867c..afa639d4245c 100644 --- a/R-package/man/saveRDS.lgb.Booster.Rd +++ b/R-package/man/saveRDS.lgb.Booster.Rd @@ -4,19 +4,31 @@ \alias{saveRDS.lgb.Booster} \title{saveRDS for \code{lgb.Booster} models} \usage{ -saveRDS.lgb.Booster(object, file = "", ascii = FALSE, version = NULL, - compress = TRUE, refhook = NULL, raw = TRUE) +saveRDS.lgb.Booster( + object, + file = "", + ascii = FALSE, + version = NULL, + compress = TRUE, + refhook = NULL, + raw = TRUE +) } \arguments{ \item{object}{R object to serialize.} \item{file}{a connection or the name of the file where the R object is saved to or read from.} -\item{ascii}{a logical. If TRUE or NA, an ASCII representation is written; otherwise (default), a binary one is used. See the comments in the help for save.} +\item{ascii}{a logical. If TRUE or NA, an ASCII representation is written; otherwise (default), +a binary one is used. See the comments in the help for save.} -\item{version}{the workspace format version to use. \code{NULL} specifies the current default version (2). Versions prior to 2 are not supported, so this will only be relevant when there are later versions.} +\item{version}{the workspace format version to use. \code{NULL} specifies the current default +version (2). Versions prior to 2 are not supported, so this will only be relevant +when there are later versions.} -\item{compress}{a logical specifying whether saving to a named file is to use "gzip" compression, or one of \code{"gzip"}, \code{"bzip2"} or \code{"xz"} to indicate the type of compression to be used. Ignored if file is a connection.} +\item{compress}{a logical specifying whether saving to a named file is to use "gzip" compression, +or one of \code{"gzip"}, \code{"bzip2"} or \code{"xz"} to indicate the type of +compression to be used. 
Ignored if file is a connection.} \item{refhook}{a hook function for handling reference objects.} @@ -26,7 +38,8 @@ saveRDS.lgb.Booster(object, file = "", ascii = FALSE, version = NULL, NULL invisibly. } \description{ -Attempts to save a model using RDS. Has an additional parameter (\code{raw}) which decides whether to save the raw model or not. +Attempts to save a model using RDS. Has an additional parameter (\code{raw}) which decides +whether to save the raw model or not. } \examples{ library(lightgbm) @@ -39,10 +52,10 @@ dtest <- lgb.Dataset.create.valid(dtrain, test$data, label = test$label) params <- list(objective = "regression", metric = "l2") valids <- list(test = dtest) model <- lgb.train( - params - , dtrain - , 10 - , valids + params = params + , data = dtrain + , nrounds = 10 + , valids = valids , min_data = 1 , learning_rate = 1 , early_stopping_rounds = 5 diff --git a/R-package/man/setinfo.Rd b/R-package/man/setinfo.Rd index 861e5f0219ef..5b0a23f6992b 100644 --- a/R-package/man/setinfo.Rd +++ b/R-package/man/setinfo.Rd @@ -12,11 +12,11 @@ setinfo(dataset, ...) \arguments{ \item{dataset}{Object of class \code{lgb.Dataset}} -\item{...}{other parameters} - \item{name}{the name of the field to get} \item{info}{the specific field of information to set} + +\item{...}{other parameters} } \value{ passed object diff --git a/R-package/man/slice.Rd b/R-package/man/slice.Rd index 10040d11a2bc..c759ee06c989 100644 --- a/R-package/man/slice.Rd +++ b/R-package/man/slice.Rd @@ -12,9 +12,9 @@ slice(dataset, ...) \arguments{ \item{dataset}{Object of class \code{lgb.Dataset}} -\item{...}{other parameters (currently not used)} - \item{idxset}{a integer vector of indices of rows needed} + +\item{...}{other parameters (currently not used)} } \value{ constructed sub dataset