From da31733a54f314ebc2d788c7d4261651fd4e6b73 Mon Sep 17 00:00:00 2001
From: Xiangrui Meng
Date: Wed, 9 Apr 2014 01:03:12 -0700
Subject: [PATCH] update developer and experimental tags

---
 .../apache/spark/mllib/api/python/PythonMLLibAPI.scala    | 2 +-
 .../apache/spark/mllib/classification/NaiveBayes.scala    | 2 +-
 .../scala/org/apache/spark/mllib/clustering/KMeans.scala  | 4 ++--
 .../org/apache/spark/mllib/optimization/Gradient.scala    | 8 ++++----
 .../apache/spark/mllib/optimization/GradientDescent.scala | 4 ++--
 .../org/apache/spark/mllib/optimization/Optimizer.scala   | 2 +-
 .../org/apache/spark/mllib/optimization/Updater.scala     | 8 ++++----
 .../scala/org/apache/spark/mllib/recommendation/ALS.scala | 2 +-
 .../mllib/recommendation/MatrixFactorizationModel.scala   | 2 +-
 .../mllib/regression/GeneralizedLinearAlgorithm.scala     | 2 +-
 .../scala/org/apache/spark/mllib/tree/DecisionTree.scala  | 2 +-
 .../org/apache/spark/mllib/tree/configuration/Algo.scala  | 2 +-
 .../spark/mllib/tree/configuration/FeatureType.scala      | 2 +-
 .../spark/mllib/tree/configuration/QuantileStrategy.scala | 2 +-
 .../apache/spark/mllib/tree/configuration/Strategy.scala  | 2 +-
 .../org/apache/spark/mllib/tree/impurity/Entropy.scala    | 4 ++--
 .../scala/org/apache/spark/mllib/tree/impurity/Gini.scala | 4 ++--
 .../org/apache/spark/mllib/tree/impurity/Impurity.scala   | 6 +++---
 .../org/apache/spark/mllib/tree/impurity/Variance.scala   | 4 ++--
 .../apache/spark/mllib/tree/model/DecisionTreeModel.scala | 2 +-
 .../spark/mllib/tree/model/InformationGainStats.scala     | 2 +-
 .../scala/org/apache/spark/mllib/tree/model/Node.scala    | 2 +-
 .../scala/org/apache/spark/mllib/tree/model/Split.scala   | 2 +-
 .../org/apache/spark/mllib/util/DataValidators.scala      | 2 +-
 .../org/apache/spark/mllib/util/KMeansDataGenerator.scala | 2 +-
 .../org/apache/spark/mllib/util/LinearDataGenerator.scala | 2 +-
 .../mllib/util/LogisticRegressionDataGenerator.scala      | 2 +-
 .../org/apache/spark/mllib/util/MFDataGenerator.scala     | 2 +-
 .../main/scala/org/apache/spark/mllib/util/MLUtils.scala  | 4 ++--
 .../org/apache/spark/mllib/util/SVMDataGenerator.scala    | 2 +-
 30 files changed, 44 insertions(+), 44 deletions(-)

diff --git a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala b/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
index ac0631abe5f8f..3e3732db0be6f 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/api/python/PythonMLLibAPI.scala
@@ -28,7 +28,7 @@ import org.apache.spark.mllib.regression._
 import org.apache.spark.rdd.RDD
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * The Java stubs necessary for the Python mllib bindings.
  */
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/classification/NaiveBayes.scala b/mllib/src/main/scala/org/apache/spark/mllib/classification/NaiveBayes.scala
index 14c6517f96936..eaf2cfb8b7c12 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/classification/NaiveBayes.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/classification/NaiveBayes.scala
@@ -27,7 +27,7 @@ import org.apache.spark.mllib.util.MLUtils
 import org.apache.spark.rdd.RDD
 
 /**
- * EXPERIMENTAL
+ * :: Experimental ::
  *
  * Model for Naive Bayes Classifiers.
  *
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
index c4d346f614dd8..b0820819a945f 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
@@ -76,7 +76,7 @@ class KMeans private (
   }
 
   /**
-   * EXPERIMENTAL
+   * :: Experimental ::
    *
    * Set the number of runs of the algorithm to execute in parallel. We initialize the algorithm
    * this many times with random starting conditions (configured by the initialization mode), then
@@ -398,7 +398,7 @@
   }
 
   /**
-   * EXPERIMENTAL
+   * :: Experimental ::
    */
   def main(args: Array[String]) {
     if (args.length < 4) {
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/optimization/Gradient.scala b/mllib/src/main/scala/org/apache/spark/mllib/optimization/Gradient.scala
index 8a6d20f6f6ae8..e2e7190fb3b0a 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/optimization/Gradient.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/optimization/Gradient.scala
@@ -22,7 +22,7 @@ import breeze.linalg.{axpy => brzAxpy}
 import org.apache.spark.mllib.linalg.{Vectors, Vector}
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Class used to compute the gradient for a loss function, given a single data point.
  */
@@ -53,7 +53,7 @@ abstract class Gradient extends Serializable {
 }
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Compute gradient and loss for a logistic loss function, as used in binary classification.
  * See also the documentation for the precise formulation.
@@ -96,7 +96,7 @@ class LogisticGradient extends Gradient {
 }
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Compute gradient and loss for a Least-squared loss function, as used in linear regression.
  * This is correct for the averaged least squares loss function (mean squared error)
@@ -130,7 +130,7 @@ class LeastSquaresGradient extends Gradient {
 }
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Compute gradient and loss for a Hinge loss function, as used in SVM binary classification.
  * See also the documentation for the precise formulation.
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/optimization/GradientDescent.scala b/mllib/src/main/scala/org/apache/spark/mllib/optimization/GradientDescent.scala
index 60cad435be5c9..16e3ebc0df491 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/optimization/GradientDescent.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/optimization/GradientDescent.scala
@@ -26,7 +26,7 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.mllib.linalg.{Vectors, Vector}
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Class used to solve an optimization problem using Gradient Descent.
  * @param gradient Gradient function to be used.
@@ -110,7 +110,7 @@ class GradientDescent(private var gradient: Gradient, private var updater: Updat
 }
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Top-level method to run gradient descent.
  */
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/optimization/Optimizer.scala b/mllib/src/main/scala/org/apache/spark/mllib/optimization/Optimizer.scala
index a655a8bb7a4ed..57eb2afe8c6d6 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/optimization/Optimizer.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/optimization/Optimizer.scala
@@ -22,7 +22,7 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.mllib.linalg.Vector
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Trait for optimization problem solvers.
  */
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/optimization/Updater.scala b/mllib/src/main/scala/org/apache/spark/mllib/optimization/Updater.scala
index a241bfd4e5858..3963088553536 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/optimization/Updater.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/optimization/Updater.scala
@@ -24,7 +24,7 @@ import breeze.linalg.{norm => brzNorm, axpy => brzAxpy, Vector => BV}
 import org.apache.spark.mllib.linalg.{Vectors, Vector}
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Class used to perform steps (weight update) using Gradient Descent methods.
  *
@@ -61,7 +61,7 @@ abstract class Updater extends Serializable {
 }
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * A simple updater for gradient descent *without* any regularization.
  * Uses a step-size decreasing with the square root of the number of iterations.
@@ -82,7 +82,7 @@ class SimpleUpdater extends Updater {
 }
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Updater for L1 regularized problems.
  * R(w) = ||w||_1
@@ -126,7 +126,7 @@ class L1Updater extends Updater {
 }
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Updater for L2 regularized problems.
  * R(w) = 1/2 ||w||^2
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/recommendation/ALS.scala b/mllib/src/main/scala/org/apache/spark/mllib/recommendation/ALS.scala
index 9046960110af2..39d87e2061d79 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/recommendation/ALS.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/recommendation/ALS.scala
@@ -138,7 +138,7 @@ class ALS private (
   }
 
   /**
-   * EXPERIMENTAL
+   * :: Experimental ::
    *
    * Sets the constant used in computing confidence in implicit ALS. Default: 1.0.
    */
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
index 80590c7974949..da2e70d2182d5 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/recommendation/MatrixFactorizationModel.scala
@@ -68,7 +68,7 @@ class MatrixFactorizationModel(
   }
 
   /**
-   * DEVELOPER API
+   * :: DeveloperApi ::
    *
    * Predict the rating of many users for many products.
    * This is a Java stub for python predictAll()
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/regression/GeneralizedLinearAlgorithm.scala b/mllib/src/main/scala/org/apache/spark/mllib/regression/GeneralizedLinearAlgorithm.scala
index 1d5b2f036155d..83c4b08006cfe 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/regression/GeneralizedLinearAlgorithm.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/regression/GeneralizedLinearAlgorithm.scala
@@ -101,7 +101,7 @@ abstract class GeneralizedLinearAlgorithm[M <: GeneralizedLinearModel]
   }
 
   /**
-   * EXPERIMENTAL
+   * :: Experimental ::
    *
    * Set if the algorithm should validate data before training. Default true.
    */
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/DecisionTree.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/DecisionTree.scala
index 4fc50dfa2fd69..118f93b08cf07 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/DecisionTree.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/DecisionTree.scala
@@ -33,7 +33,7 @@ import org.apache.spark.util.random.XORShiftRandom
 import org.apache.spark.mllib.linalg.{Vector, Vectors}
 
 /**
- * EXPERIMENTAL
+ * :: Experimental ::
  *
  * A class that implements a decision tree algorithm for classification and regression. It
  * supports both continuous and categorical features.
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/Algo.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/Algo.scala
index 332062de7463d..8b907f3a1e97e 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/Algo.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/Algo.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.mllib.tree.configuration
 
 /**
- * EXPERIMENTAL
+ * :: Experimental ::
  *
  * Enum to select the algorithm for the decision tree
  */
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/FeatureType.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/FeatureType.scala
index e2a57837d5cef..db1d4fbbf4c34 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/FeatureType.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/FeatureType.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.mllib.tree.configuration
 
 /**
- * EXPERIMENTAL
+ * :: Experimental ::
  *
  * Enum to describe whether a feature is "continuous" or "categorical"
  */
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/QuantileStrategy.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/QuantileStrategy.scala
index 95319b739ab32..7c777207a747d 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/QuantileStrategy.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/QuantileStrategy.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.mllib.tree.configuration
 
 /**
- * EXPERIMENTAL
+ * :: Experimental ::
  *
  * Enum for selecting the quantile calculation strategy
  */
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/Strategy.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/Strategy.scala
index 13cd656128768..6a4c54f0d6d12 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/Strategy.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/configuration/Strategy.scala
@@ -22,7 +22,7 @@ import org.apache.spark.mllib.tree.configuration.Algo._
 import org.apache.spark.mllib.tree.configuration.QuantileStrategy._
 
 /**
- * EXPERIMENTAL
+ * :: Experimental ::
  *
  * Stores all the configuration options for tree construction
  * @param algo classification or regression
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Entropy.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Entropy.scala
index beec48bb3a108..c77939809ca60 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Entropy.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Entropy.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.mllib.tree.impurity
 
 /**
- * EXPERIMENTAL
+ * :: Experimental ::
  *
  * Class for calculating [[http://en.wikipedia.org/wiki/Binary_entropy_function entropy]] during
  * binary classification.
@@ -28,7 +28,7 @@ object Entropy extends Impurity {
   private[tree] def log2(x: Double) = scala.math.log(x) / scala.math.log(2)
 
   /**
-   * DEVELOPER API
+   * :: DeveloperApi ::
    *
    * entropy calculation
    * @param c0 count of instances with label 0
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Gini.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Gini.scala
index 5babe7d10d111..c2422f9d4f82c 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Gini.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Gini.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.mllib.tree.impurity
 
 /**
- * EXPERIMENTAL
+ * :: Experimental ::
  *
  * Class for calculating the
 * [[http://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity Gini impurity]]
@@ -27,7 +27,7 @@ package org.apache.spark.mllib.tree.impurity
 object Gini extends Impurity {
 
   /**
-   * DEVELOPER API
+   * :: DeveloperApi ::
    *
    * Gini coefficient calculation
    * @param c0 count of instances with label 0
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurity.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurity.scala
index e6fa115030e7a..496c8b1e910a8 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurity.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurity.scala
@@ -18,14 +18,14 @@
 package org.apache.spark.mllib.tree.impurity
 
 /**
- * EXPERIMENTAL
+ * :: Experimental ::
  *
  * Trait for calculating information gain.
  */
 trait Impurity extends Serializable {
 
   /**
-   * DEVELOPER API
+   * :: DeveloperApi ::
    *
    * information calculation for binary classification
    * @param c0 count of instances with label 0
@@ -35,7 +35,7 @@ trait Impurity extends Serializable {
   def calculate(c0 : Double, c1 : Double): Double
 
   /**
-   * DEVELOPER API
+   * :: DeveloperApi ::
    *
    * information calculation for regression
    * @param count number of instances
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Variance.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Variance.scala
index 7be3b9236ecd9..9a0a943a10e33 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Variance.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Variance.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.mllib.tree.impurity
 
 /**
- * EXPERIMENTAL
+ * :: Experimental ::
  *
  * Class for calculating variance during regression
  */
@@ -27,7 +27,7 @@ object Variance extends Impurity {
     throw new UnsupportedOperationException("Variance.calculate")
 
   /**
-   * DEVELOPER API
+   * :: DeveloperApi ::
    *
    * variance calculation
    * @param count number of instances
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala
index e336ea74e3b76..ace83dd7fdc33 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/DecisionTreeModel.scala
@@ -22,7 +22,7 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.mllib.linalg.Vector
 
 /**
- * EXPERIMENTAL
+ * :: Experimental ::
  *
  * Model to store the decision tree parameters
 * @param topNode root node
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/InformationGainStats.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/InformationGainStats.scala
index aa1a478ea41b5..13d2ab6203ba7 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/InformationGainStats.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/InformationGainStats.scala
@@ -18,7 +18,7 @@
 package org.apache.spark.mllib.tree.model
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Information gain statistics for each split
 * @param gain information gain value
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/Node.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/Node.scala
index 6b644e7657f40..0e645fef1a321 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/Node.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/Node.scala
@@ -22,7 +22,7 @@ import org.apache.spark.mllib.tree.configuration.FeatureType._
 import org.apache.spark.mllib.linalg.Vector
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Node in a decision tree
 * @param id integer node id
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/Split.scala b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/Split.scala
index f8f4e5abfa6a1..5cedbda86a4b5 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/tree/model/Split.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/tree/model/Split.scala
@@ -20,7 +20,7 @@ package org.apache.spark.mllib.tree.model
 import org.apache.spark.mllib.tree.configuration.FeatureType.FeatureType
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Split applied to a feature
 * @param feature feature index
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/DataValidators.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/DataValidators.scala
index 75909884aed98..3b4652290bc9c 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/DataValidators.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/DataValidators.scala
@@ -22,7 +22,7 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.mllib.regression.LabeledPoint
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * A collection of methods used to validate data before applying ML algorithms.
  */
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/KMeansDataGenerator.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/KMeansDataGenerator.scala
index f5db8a2a493c4..267fb529ba47f 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/KMeansDataGenerator.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/KMeansDataGenerator.scala
@@ -23,7 +23,7 @@ import org.apache.spark.SparkContext
 import org.apache.spark.rdd.RDD
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Generate test data for KMeans. This class first chooses k cluster centers
 * from a d-dimensional Gaussian distribution scaled by factor r and then creates a Gaussian
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/LinearDataGenerator.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/LinearDataGenerator.scala
index c6561f3bdc13d..1e237d02fa985 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/LinearDataGenerator.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/LinearDataGenerator.scala
@@ -28,7 +28,7 @@ import org.apache.spark.mllib.linalg.Vectors
 import org.apache.spark.mllib.regression.LabeledPoint
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Generate sample data used for Linear Data. This class generates
 * uniformly random values for every feature and adds Gaussian noise with mean `eps` to the
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/LogisticRegressionDataGenerator.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/LogisticRegressionDataGenerator.scala
index 41fe234491e89..11bcea3565acd 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/LogisticRegressionDataGenerator.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/LogisticRegressionDataGenerator.scala
@@ -25,7 +25,7 @@ import org.apache.spark.mllib.regression.LabeledPoint
 import org.apache.spark.mllib.linalg.Vectors
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Generate test data for LogisticRegression. This class chooses positive labels
 * with probability `probOne` and scales features for positive examples by `eps`.
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala
index e2430f8052640..67dc1c6a33e28 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala
@@ -25,7 +25,7 @@ import org.apache.spark.SparkContext
 import org.apache.spark.rdd.RDD
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Generate RDD(s) containing data for Matrix Factorization.
  *
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
index c77dd5e6dc71b..65042b6feee56 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
@@ -122,7 +122,7 @@ object MLUtils {
     loadLibSVMData(sc, path, labelParser, numFeatures, sc.defaultMinSplits)
 
   /**
-   * EXPERIMENTAL
+   * :: Experimental ::
    *
    * Load labeled data from a file. The data format used here is
    * , ...
@@ -143,7 +143,7 @@
   }
 
   /**
-   * EXPERIMENTAL
+   * :: Experimental ::
    *
    * Save labeled data to a file. The data format used here is
    * , ...
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/SVMDataGenerator.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/SVMDataGenerator.scala
index 5e591fc4199fc..1f748031fdbcb 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/SVMDataGenerator.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/SVMDataGenerator.scala
@@ -27,7 +27,7 @@ import org.apache.spark.mllib.linalg.Vectors
 import org.apache.spark.mllib.regression.LabeledPoint
 
 /**
- * DEVELOPER API
+ * :: DeveloperApi ::
  *
  * Generate sample data used for SVM. This class generates uniform random values
 * for the features and adds Gaussian noise with weight 0.1 to generate labels.
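
The ":: DeveloperApi ::" and ":: Experimental ::" markers adopted above sit on the first
line of a Scaladoc comment and are intended to mirror an audience annotation on the
declaration itself. Below is a minimal sketch of that pairing; it assumes the
@DeveloperApi/@Experimental annotations from org.apache.spark.annotation (which ship with
Spark but are not touched by this patch), and the class and method are invented purely
for illustration.

// Hypothetical sketch only -- not part of this patch.
// Assumes spark-core on the classpath for org.apache.spark.annotation.
import org.apache.spark.annotation.{DeveloperApi, Experimental}

/**
 * :: Experimental ::
 *
 * A toy class used only to show the documentation convention: the Scaladoc
 * tag on the first line mirrors the annotation on the declaration.
 */
@Experimental
class ToyEstimator {

  /**
   * :: DeveloperApi ::
   *
   * Low-level hook exposed for advanced users; may change between releases.
   */
  @DeveloperApi
  def lowLevelHook(): Unit = ()
}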