
Commit

Merge remote-tracking branch 'upstream/master' into pyspark-update-cloudpickle-42-SPARK-23159
BryanCutler committed Mar 5, 2018
2 parents 2d19f0a + 4586ead commit 7d265f5
Showing 281 changed files with 6,819 additions and 4,447 deletions.
92 changes: 0 additions & 92 deletions R/pkg/R/DataFrame.R

Large diffs are not rendered by default.

16 changes: 0 additions & 16 deletions R/pkg/R/SQLContext.R
@@ -123,7 +123,6 @@ infer_type <- function(x) {
#' @return a list of config values with keys as their names
#' @rdname sparkR.conf
#' @name sparkR.conf
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
@@ -163,7 +162,6 @@ sparkR.conf <- function(key, defaultValue) {
#' @return a character string of the Spark version
#' @rdname sparkR.version
#' @name sparkR.version
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
@@ -191,7 +189,6 @@ getDefaultSqlSource <- function() {
#' limited by length of the list or number of rows of the data.frame
#' @return A SparkDataFrame.
#' @rdname createDataFrame
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
@@ -294,7 +291,6 @@ createDataFrame <- function(x, ...) {

#' @rdname createDataFrame
#' @aliases createDataFrame
#' @export
#' @method as.DataFrame default
#' @note as.DataFrame since 1.6.0
as.DataFrame.default <- function(data, schema = NULL, samplingRatio = 1.0, numPartitions = NULL) {
@@ -304,7 +300,6 @@ as.DataFrame.default <- function(data, schema = NULL, samplingRatio = 1.0, numPartitions = NULL) {
#' @param ... additional argument(s).
#' @rdname createDataFrame
#' @aliases as.DataFrame
#' @export
as.DataFrame <- function(data, ...) {
dispatchFunc("as.DataFrame(data, schema = NULL)", data, ...)
}
@@ -342,7 +337,6 @@ setMethod("toDF", signature(x = "RDD"),
#' @param ... additional external data source specific named properties.
#' @return SparkDataFrame
#' @rdname read.json
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
@@ -371,7 +365,6 @@ read.json <- function(x, ...) {

#' @rdname read.json
#' @name jsonFile
#' @export
#' @method jsonFile default
#' @note jsonFile since 1.4.0
jsonFile.default <- function(path) {
@@ -423,7 +416,6 @@ jsonRDD <- function(sqlContext, rdd, schema = NULL, samplingRatio = 1.0) {
#' @param ... additional external data source specific named properties.
#' @return SparkDataFrame
#' @rdname read.orc
#' @export
#' @name read.orc
#' @note read.orc since 2.0.0
read.orc <- function(path, ...) {
@@ -444,7 +436,6 @@ read.orc <- function(path, ...) {
#' @param path path of file to read. A vector of multiple paths is allowed.
#' @return SparkDataFrame
#' @rdname read.parquet
#' @export
#' @name read.parquet
#' @method read.parquet default
#' @note read.parquet since 1.6.0
@@ -466,7 +457,6 @@ read.parquet <- function(x, ...) {
#' @param ... argument(s) passed to the method.
#' @rdname read.parquet
#' @name parquetFile
#' @export
#' @method parquetFile default
#' @note parquetFile since 1.4.0
parquetFile.default <- function(...) {
@@ -490,7 +480,6 @@ parquetFile <- function(x, ...) {
#' @param ... additional external data source specific named properties.
#' @return SparkDataFrame
#' @rdname read.text
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
@@ -522,7 +511,6 @@ read.text <- function(x, ...) {
#' @param sqlQuery A character vector containing the SQL query
#' @return SparkDataFrame
#' @rdname sql
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
@@ -556,7 +544,6 @@ sql <- function(x, ...) {
#' @return SparkDataFrame
#' @rdname tableToDF
#' @name tableToDF
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
@@ -591,7 +578,6 @@ tableToDF <- function(tableName) {
#' @rdname read.df
#' @name read.df
#' @seealso \link{read.json}
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
@@ -681,7 +667,6 @@ loadDF <- function(x = NULL, ...) {
#' @return SparkDataFrame
#' @rdname read.jdbc
#' @name read.jdbc
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
@@ -734,7 +719,6 @@ read.jdbc <- function(url, tableName,
#' @rdname read.stream
#' @name read.stream
#' @seealso \link{write.stream}
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
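Every hunk in this commit's R changes removes a roxygen "#' @export" tag. As background, a hedged sketch with a hypothetical function (not code from this diff) of what the tag does when roxygen2 generates a package's NAMESPACE; SparkR maintains R/pkg/NAMESPACE by hand, so the tags were not what determined the exported API.

#' Add two numbers (hypothetical example, not part of SparkR)
#'
#' @param x,y numeric values
#' @return the sum of x and y
#' @export
add_two <- function(x, y) {
  x + y
}
# roxygen2::roxygenise() would write "export(add_two)" into NAMESPACE for this
# block; without the @export tag no directive is emitted and the function stays
# internal unless NAMESPACE is edited manually.
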
8 changes: 0 additions & 8 deletions R/pkg/R/WindowSpec.R
@@ -28,7 +28,6 @@ NULL
#' @seealso \link{windowPartitionBy}, \link{windowOrderBy}
#'
#' @param sws A Java object reference to the backing Scala WindowSpec
#' @export
#' @note WindowSpec since 2.0.0
setClass("WindowSpec",
slots = list(sws = "jobj"))
@@ -44,7 +43,6 @@ windowSpec <- function(sws) {
}

#' @rdname show
#' @export
#' @note show(WindowSpec) since 2.0.0
setMethod("show", "WindowSpec",
function(object) {
@@ -63,7 +61,6 @@ setMethod("show", "WindowSpec",
#' @name partitionBy
#' @aliases partitionBy,WindowSpec-method
#' @family windowspec_method
#' @export
#' @examples
#' \dontrun{
#' partitionBy(ws, "col1", "col2")
@@ -97,7 +94,6 @@ setMethod("partitionBy",
#' @aliases orderBy,WindowSpec,character-method
#' @family windowspec_method
#' @seealso See \link{arrange} for use in sorting a SparkDataFrame
#' @export
#' @examples
#' \dontrun{
#' orderBy(ws, "col1", "col2")
@@ -113,7 +109,6 @@ setMethod("orderBy",
#' @rdname orderBy
#' @name orderBy
#' @aliases orderBy,WindowSpec,Column-method
#' @export
#' @note orderBy(WindowSpec, Column) since 2.0.0
setMethod("orderBy",
signature(x = "WindowSpec", col = "Column"),
@@ -142,7 +137,6 @@ setMethod("orderBy",
#' @aliases rowsBetween,WindowSpec,numeric,numeric-method
#' @name rowsBetween
#' @family windowspec_method
#' @export
#' @examples
#' \dontrun{
#' rowsBetween(ws, 0, 3)
@@ -174,7 +168,6 @@ setMethod("rowsBetween",
#' @aliases rangeBetween,WindowSpec,numeric,numeric-method
#' @name rangeBetween
#' @family windowspec_method
#' @export
#' @examples
#' \dontrun{
#' rangeBetween(ws, 0, 3)
@@ -202,7 +195,6 @@ setMethod("rangeBetween",
#' @name over
#' @aliases over,Column,WindowSpec-method
#' @family colum_func
#' @export
#' @examples
#' \dontrun{
#' df <- createDataFrame(mtcars)
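The WindowSpec methods in the hunks above (partitionBy/orderBy, rowsBetween, rangeBetween, over) are normally chained into a single window definition. A hedged sketch, reusing the mtcars example from the docs with illustrative column choices:

library(SparkR)
sparkR.session()
df <- createDataFrame(mtcars)
# partition rows by cylinder count, order each partition by horsepower
ws <- orderBy(windowPartitionBy("cyl"), "hp")
# over() evaluates the lead() column within each window partition
out <- select(df, df$cyl, df$hp, over(lead(df$hp, 1L), ws))
head(out)
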
3 changes: 0 additions & 3 deletions R/pkg/R/broadcast.R
@@ -32,14 +32,12 @@
# @seealso broadcast
#
# @param id Id of the backing Spark broadcast variable
# @export
setClass("Broadcast", slots = list(id = "character"))

# @rdname broadcast-class
# @param value Value of the broadcast variable
# @param jBroadcastRef reference to the backing Java broadcast object
# @param objName name of broadcasted object
# @export
Broadcast <- function(id, value, jBroadcastRef, objName) {
.broadcastValues[[id]] <- value
.broadcastNames[[as.character(objName)]] <- jBroadcastRef
@@ -73,7 +71,6 @@ setMethod("value",

# @param bcastId The id of broadcast variable to set
# @param value The value to be set
# @export
setBroadcastValue <- function(bcastId, value) {
bcastIdStr <- as.character(bcastId)
.broadcastValues[[bcastIdStr]] <- value
18 changes: 0 additions & 18 deletions R/pkg/R/catalog.R
@@ -34,7 +34,6 @@
#' @return A SparkDataFrame.
#' @rdname createExternalTable-deprecated
#' @seealso \link{createTable}
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
@@ -71,7 +70,6 @@ createExternalTable <- function(x, ...) {
#' @return A SparkDataFrame.
#' @rdname createTable
#' @seealso \link{createExternalTable}
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
@@ -110,7 +108,6 @@ createTable <- function(tableName, path = NULL, source = NULL, schema = NULL, ..
#' identifier is provided, it refers to a table in the current database.
#' @return SparkDataFrame
#' @rdname cacheTable
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
@@ -140,7 +137,6 @@ cacheTable <- function(x, ...) {
#' identifier is provided, it refers to a table in the current database.
#' @return SparkDataFrame
#' @rdname uncacheTable
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
@@ -167,7 +163,6 @@ uncacheTable <- function(x, ...) {
#' Removes all cached tables from the in-memory cache.
#'
#' @rdname clearCache
#' @export
#' @examples
#' \dontrun{
#' clearCache()
@@ -193,7 +188,6 @@ clearCache <- function() {
#' @param tableName The name of the SparkSQL table to be dropped.
#' @seealso \link{dropTempView}
#' @rdname dropTempTable-deprecated
#' @export
#' @examples
#' \dontrun{
#' sparkR.session()
@@ -225,7 +219,6 @@ dropTempTable <- function(x, ...) {
#' @return TRUE if the view is dropped successfully, FALSE otherwise.
#' @rdname dropTempView
#' @name dropTempView
#' @export
#' @examples
#' \dontrun{
#' sparkR.session()
@@ -251,7 +244,6 @@ dropTempView <- function(viewName) {
#' @return a SparkDataFrame
#' @rdname tables
#' @seealso \link{listTables}
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
@@ -276,7 +268,6 @@ tables <- function(x, ...) {
#' @param databaseName (optional) name of the database
#' @return a list of table names
#' @rdname tableNames
#' @export
#' @examples
#'\dontrun{
#' sparkR.session()
@@ -304,7 +295,6 @@ tableNames <- function(x, ...) {
#' @return name of the current default database.
#' @rdname currentDatabase
#' @name currentDatabase
#' @export
#' @examples
#' \dontrun{
#' sparkR.session()
@@ -324,7 +314,6 @@ currentDatabase <- function() {
#' @param databaseName name of the database
#' @rdname setCurrentDatabase
#' @name setCurrentDatabase
#' @export
#' @examples
#' \dontrun{
#' sparkR.session()
@@ -347,7 +336,6 @@ setCurrentDatabase <- function(databaseName) {
#' @return a SparkDataFrame of the list of databases.
#' @rdname listDatabases
#' @name listDatabases
#' @export
#' @examples
#' \dontrun{
#' sparkR.session()
@@ -370,7 +358,6 @@ listDatabases <- function() {
#' @rdname listTables
#' @name listTables
#' @seealso \link{tables}
#' @export
#' @examples
#' \dontrun{
#' sparkR.session()
@@ -403,7 +390,6 @@ listTables <- function(databaseName = NULL) {
#' @return a SparkDataFrame of the list of column descriptions.
#' @rdname listColumns
#' @name listColumns
#' @export
#' @examples
#' \dontrun{
#' sparkR.session()
@@ -433,7 +419,6 @@ listColumns <- function(tableName, databaseName = NULL) {
#' @return a SparkDataFrame of the list of function descriptions.
#' @rdname listFunctions
#' @name listFunctions
#' @export
#' @examples
#' \dontrun{
#' sparkR.session()
@@ -463,7 +448,6 @@ listFunctions <- function(databaseName = NULL) {
#' identifier is provided, it refers to a table in the current database.
#' @rdname recoverPartitions
#' @name recoverPartitions
#' @export
#' @examples
#' \dontrun{
#' sparkR.session()
@@ -490,7 +474,6 @@ recoverPartitions <- function(tableName) {
#' identifier is provided, it refers to a table in the current database.
#' @rdname refreshTable
#' @name refreshTable
#' @export
#' @examples
#' \dontrun{
#' sparkR.session()
@@ -512,7 +495,6 @@ refreshTable <- function(tableName) {
#' @param path the path of the data source.
#' @rdname refreshByPath
#' @name refreshByPath
#' @export
#' @examples
#' \dontrun{
#' sparkR.session()
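The catalog helpers documented above usually revolve around a temporary view. A hedged sketch with an illustrative view name (faithful is a base R dataset):

library(SparkR)
sparkR.session()
df <- createDataFrame(faithful)
createOrReplaceTempView(df, "faithful_view")   # illustrative view name
cacheTable("faithful_view")                    # cache the view in memory
print(listTables())                            # the view appears in the catalog
uncacheTable("faithful_view")
dropTempView("faithful_view")
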
Diffs for the remaining changed files are not rendered.
