
Commit: replace other occurrences

tedyu committed Aug 26, 2024
1 parent f5ae14d commit c88a4a2
Showing 2 changed files with 28 additions and 28 deletions.
54 changes: 27 additions & 27 deletions python/pyspark/sql/functions/builtin.py
@@ -3674,7 +3674,7 @@ def regr_avgx(y: "ColumnOrName", x: "ColumnOrName") -> Column:

Examples
--------
- Example 1: All paris are non-null
+ Example 1: All pairs are non-null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, 2), (2, 2), (2, 3), (2, 4) AS tab(y, x)")
@@ -3685,7 +3685,7 @@ def regr_avgx(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|           2.75|  2.75|
+---------------+------+

- Example 2: All paris's x values are null
+ Example 2: All pairs's x values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, null) AS tab(y, x)")
@@ -3696,7 +3696,7 @@ def regr_avgx(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|           NULL|  NULL|
+---------------+------+

- Example 3: All paris's y values are null
+ Example 3: All pairs's y values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (null, 1) AS tab(y, x)")
@@ -3754,7 +3754,7 @@ def regr_avgy(y: "ColumnOrName", x: "ColumnOrName") -> Column:

Examples
--------
- Example 1: All paris are non-null
+ Example 1: All pairs are non-null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, 2), (2, 2), (2, 3), (2, 4) AS tab(y, x)")
@@ -3765,7 +3765,7 @@ def regr_avgy(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|           1.75|  1.75|
+---------------+------+

- Example 2: All paris's x values are null
+ Example 2: All pairs's x values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, null) AS tab(y, x)")
@@ -3776,7 +3776,7 @@ def regr_avgy(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|           NULL|   1.0|
+---------------+------+

- Example 3: All paris's y values are null
+ Example 3: All pairs's y values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (null, 1) AS tab(y, x)")
@@ -3834,7 +3834,7 @@ def regr_count(y: "ColumnOrName", x: "ColumnOrName") -> Column:

Examples
--------
- Example 1: All paris are non-null
+ Example 1: All pairs are non-null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, 2), (2, 2), (2, 3), (2, 4) AS tab(y, x)")
@@ -3845,7 +3845,7 @@ def regr_count(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|               4|       4|
+----------------+--------+

- Example 2: All paris's x values are null
+ Example 2: All pairs's x values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, null) AS tab(y, x)")
@@ -3856,7 +3856,7 @@ def regr_count(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|               0|       1|
+----------------+--------+

- Example 3: All paris's y values are null
+ Example 3: All pairs's y values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (null, 1) AS tab(y, x)")
@@ -3915,7 +3915,7 @@ def regr_intercept(y: "ColumnOrName", x: "ColumnOrName") -> Column:

Examples
--------
- Example 1: All paris are non-null
+ Example 1: All pairs are non-null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, 1), (2, 2), (3, 3), (4, 4) AS tab(y, x)")
@@ -3926,7 +3926,7 @@ def regr_intercept(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|                 0.0|
+--------------------+

- Example 2: All paris's x values are null
+ Example 2: All pairs's x values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, null) AS tab(y, x)")
@@ -3937,7 +3937,7 @@ def regr_intercept(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|                NULL|
+--------------------+

- Example 3: All paris's y values are null
+ Example 3: All pairs's y values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (null, 1) AS tab(y, x)")
@@ -3995,7 +3995,7 @@ def regr_r2(y: "ColumnOrName", x: "ColumnOrName") -> Column:

Examples
--------
- Example 1: All paris are non-null
+ Example 1: All pairs are non-null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, 1), (2, 2), (3, 3), (4, 4) AS tab(y, x)")
@@ -4006,7 +4006,7 @@ def regr_r2(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|          1.0|
+-------------+

- Example 2: All paris's x values are null
+ Example 2: All pairs's x values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, null) AS tab(y, x)")
@@ -4017,7 +4017,7 @@ def regr_r2(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|         NULL|
+-------------+

- Example 3: All paris's y values are null
+ Example 3: All pairs's y values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (null, 1) AS tab(y, x)")
@@ -4075,7 +4075,7 @@ def regr_slope(y: "ColumnOrName", x: "ColumnOrName") -> Column:

Examples
--------
- Example 1: All paris are non-null
+ Example 1: All pairs are non-null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, 1), (2, 2), (3, 3), (4, 4) AS tab(y, x)")
@@ -4086,7 +4086,7 @@ def regr_slope(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|             1.0|
+----------------+

- Example 2: All paris's x values are null
+ Example 2: All pairs's x values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, null) AS tab(y, x)")
@@ -4097,7 +4097,7 @@ def regr_slope(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|            NULL|
+----------------+

- Example 3: All paris's y values are null
+ Example 3: All pairs's y values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (null, 1) AS tab(y, x)")
@@ -4155,7 +4155,7 @@ def regr_sxx(y: "ColumnOrName", x: "ColumnOrName") -> Column:

Examples
--------
- Example 1: All paris are non-null
+ Example 1: All pairs are non-null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, 1), (2, 2), (3, 3), (4, 4) AS tab(y, x)")
@@ -4166,7 +4166,7 @@ def regr_sxx(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|           5.0|
+--------------+

- Example 2: All paris's x values are null
+ Example 2: All pairs's x values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, null) AS tab(y, x)")
@@ -4177,7 +4177,7 @@ def regr_sxx(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|          NULL|
+--------------+

- Example 3: All paris's y values are null
+ Example 3: All pairs's y values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (null, 1) AS tab(y, x)")
@@ -4235,7 +4235,7 @@ def regr_sxy(y: "ColumnOrName", x: "ColumnOrName") -> Column:

Examples
--------
- Example 1: All paris are non-null
+ Example 1: All pairs are non-null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, 1), (2, 2), (3, 3), (4, 4) AS tab(y, x)")
@@ -4246,7 +4246,7 @@ def regr_sxy(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|           5.0|
+--------------+

- Example 2: All paris's x values are null
+ Example 2: All pairs's x values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, null) AS tab(y, x)")
@@ -4257,7 +4257,7 @@ def regr_sxy(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|          NULL|
+--------------+

- Example 3: All paris's y values are null
+ Example 3: All pairs's y values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (null, 1) AS tab(y, x)")
@@ -4315,7 +4315,7 @@ def regr_syy(y: "ColumnOrName", x: "ColumnOrName") -> Column:

Examples
--------
- Example 1: All paris are non-null
+ Example 1: All pairs are non-null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, 1), (2, 2), (3, 3), (4, 4) AS tab(y, x)")
@@ -4326,7 +4326,7 @@ def regr_syy(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|           5.0|
+--------------+

- Example 2: All paris's x values are null
+ Example 2: All pairs's x values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, null) AS tab(y, x)")
@@ -4337,7 +4337,7 @@ def regr_syy(y: "ColumnOrName", x: "ColumnOrName") -> Column:
|          NULL|
+--------------+

- Example 3: All paris's y values are null
+ Example 3: All pairs's y values are null

>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (null, 1) AS tab(y, x)")
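For reference, a minimal doctest-style sketch (not part of this commit) that exercises a few of the aggregates whose docstrings are fixed above. Like the docstring examples themselves, it assumes an active SparkSession bound to `spark`; the values shown come straight from the per-function examples in the diff (four non-null pairs, mean x of 2.75, mean y of 1.75):

>>> # assumes an active SparkSession bound to `spark`, as in the docstring examples
>>> import pyspark.sql.functions as sf
>>> df = spark.sql("SELECT * FROM VALUES (1, 2), (2, 2), (2, 3), (2, 4) AS tab(y, x)")
>>> # the regr_* aggregates only consider pairs where both y and x are non-null
>>> df.select(sf.regr_count("y", "x"), sf.regr_avgx("y", "x"), sf.regr_avgy("y", "x")).show()
+----------------+---------------+---------------+
|regr_count(y, x)|regr_avgx(y, x)|regr_avgy(y, x)|
+----------------+---------------+---------------+
|               4|           2.75|           1.75|
+----------------+---------------+---------------+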
2 changes: 1 addition & 1 deletion sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -4771,7 +4771,7 @@ object SQLConf {
buildConf("spark.sql.pyspark.legacy.inferMapTypeFromFirstPair.enabled")
.internal()
.doc("PySpark's SparkSession.createDataFrame infers the key/value types of a map from all " +
"paris in the map by default. If this config is set to true, it restores the legacy " +
"pairs in the map by default. If this config is set to true, it restores the legacy " +
"behavior of only inferring the type from the first non-null pair.")
.version("4.0.0")
.booleanConf
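To illustrate what the config above toggles, here is a hedged sketch (not part of this commit). It assumes an active SparkSession bound to `spark` on a build where this 4.0.0 config exists; the comments restate the documented behavior rather than verified output:

# Default: createDataFrame infers a map column's key/value types from ALL pairs.
df = spark.createDataFrame([({"a": 1, "b": None},)], ["m"])
df.printSchema()  # value type is merged across every pair, including the null one

# Legacy behavior: infer the types from the first non-null pair only.
spark.conf.set("spark.sql.pyspark.legacy.inferMapTypeFromFirstPair.enabled", "true")
spark.createDataFrame([({"a": 1, "b": None},)], ["m"]).printSchema()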
