diff --git a/python/docs/pyspark.ml.rst b/python/docs/pyspark.ml.rst
index 9015a3c15edb2..f20453e624296 100644
--- a/python/docs/pyspark.ml.rst
+++ b/python/docs/pyspark.ml.rst
@@ -5,9 +5,33 @@ Submodules
 ----------
 
 pyspark.ml module
--------------------------
+-----------------
 
 .. automodule:: pyspark.ml
     :members:
     :undoc-members:
     :show-inheritance:
+
+pyspark.ml.param module
+-----------------------
+
+.. automodule:: pyspark.ml.param
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+pyspark.ml.feature module
+-------------------------
+
+.. automodule:: pyspark.ml.feature
+    :members:
+    :undoc-members:
+    :show-inheritance:
+
+pyspark.ml.classification module
+--------------------------------
+
+.. automodule:: pyspark.ml.classification
+    :members:
+    :undoc-members:
+    :show-inheritance:
diff --git a/python/pyspark/ml/__init__.py b/python/pyspark/ml/__init__.py
index df39515d81433..83351dc523ce1 100644
--- a/python/pyspark/ml/__init__.py
+++ b/python/pyspark/ml/__init__.py
@@ -22,7 +22,7 @@
 from pyspark.ml.param import Param, Params
 from pyspark.ml.util import Identifiable
 
-__all__ = ["Pipeline", "Transformer", "Estimator"]
+__all__ = ["Pipeline", "Transformer", "Estimator", "param", "feature", "classification"]
 
 
 def _jvm():
diff --git a/python/pyspark/ml/classification.py b/python/pyspark/ml/classification.py
index 2c9aaad03cedf..f3cea0958c897 100644
--- a/python/pyspark/ml/classification.py
+++ b/python/pyspark/ml/classification.py
@@ -15,11 +15,12 @@
 # limitations under the License.
 #
 
-from pyspark.sql import SchemaRDD
+from pyspark.sql import SchemaRDD, inherit_doc
 from pyspark.ml import Estimator, Transformer, _jvm
 from pyspark.ml.param import Param
 
 
+@inherit_doc
 class LogisticRegression(Estimator):
     """
     Logistic regression.
diff --git a/python/pyspark/ml/feature.py b/python/pyspark/ml/feature.py
index 35f32f513f7a5..9e4b6574574a4 100644
--- a/python/pyspark/ml/feature.py
+++ b/python/pyspark/ml/feature.py
@@ -15,10 +15,11 @@
 # limitations under the License.
 #
 
-from pyspark.sql import SchemaRDD, ArrayType, StringType
+from pyspark.sql import SchemaRDD, ArrayType, StringType, inherit_doc
 from pyspark.ml import Transformer, _jvm
 from pyspark.ml.param import Param
 
 
+@inherit_doc
 class Tokenizer(Transformer):
     def __init__(self):
@@ -61,6 +62,7 @@ def transform(self, dataset, params={}):
             raise ValueError("The input params must be either a dict or a list.")
 
 
+@inherit_doc
 class HashingTF(Transformer):
 
     def __init__(self):