Commit

Couple minor fixes.
rxin committed Jan 27, 2015
1 parent ea98ea1 commit 2ca74db
Showing 1 changed file with 12 additions and 6 deletions.
18 changes: 12 additions & 6 deletions sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
@@ -118,8 +118,14 @@ class DataFrame protected[sql](
   }

+  /** Left here for compatibility reasons. */
+  @deprecated("use toDataFrame", "1.3.0")
   def toSchemaRDD: DataFrame = this

+  /**
+   * Return the object itself. Used to force an implicit conversion from RDD to DataFrame in Scala.
+   */
+  def toDF: DataFrame = this

   /** Return the schema of this [[DataFrame]]. */
   override def schema: StructType = queryExecution.analyzed.schema
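
Aside: a minimal sketch of what the new toDF is for. An RDD has no DataFrame methods, so calling .toDF triggers the implicit RDD-to-DataFrame conversion and gives the result the static type DataFrame. The Person case class, the people RDD, and the implicits import are assumptions about the 1.3-era API, not part of this commit.

  import org.apache.spark.{SparkConf, SparkContext}
  import org.apache.spark.sql.SQLContext

  // Hypothetical case class; the DataFrame schema is inferred from its fields.
  case class Person(name: String, age: Int)

  object ToDFSketch {
    def main(args: Array[String]): Unit = {
      val sc = new SparkContext(new SparkConf().setAppName("toDF-sketch").setMaster("local"))
      val sqlContext = new SQLContext(sc)
      // Assumed 1.3-style import that brings the implicit RDD-to-DataFrame
      // conversion into scope; earlier snapshots exposed it differently.
      import sqlContext.implicits._

      val people = sc.parallelize(Seq(Person("Alice", 30), Person("Bob", 25)))

      // people is an RDD; .toDF forces the implicit conversion, so df is
      // statically a DataFrame.
      val df = people.toDF
      println(df.schema)
    }
  }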

@@ -501,7 +507,7 @@ class DataFrame protected[sql](

   /**
    * Registers this RDD as a temporary table using the given name. The lifetime of this temporary
-   * table is tied to the [[SQLContext]] that was used to create this SchemaRDD.
+   * table is tied to the [[SQLContext]] that was used to create this DataFrame.
    *
    * @group schema
    */
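
Aside: the lifetime rule above means the temporary table is visible only through the SQLContext that registered it. Continuing the sketch from the previous aside (the table name "people" and the query are illustrative):

  df.registerTempTable("people")

  // Works: the query goes through the same SQLContext that created df.
  val adults = sqlContext.sql("SELECT name FROM people WHERE age >= 18")

  // A separate SQLContext has its own catalog of temporary tables and
  // will not see "people".
  val other = new SQLContext(sc)
  // other.sql("SELECT * FROM people")  // would fail: table not found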
@@ -510,20 +516,20 @@ class DataFrame protected[sql](
   }

   /**
-   * Saves the contents of this [[DataFrame]] as a parquet file, preserving the schema. Files that
-   * are written out using this method can be read back in as a [[DataFrame]] using the `parquetFile`
-   * function.
+   * Saves the contents of this [[DataFrame]] as a parquet file, preserving the schema.
+   * Files that are written out using this method can be read back in as a [[DataFrame]]
+   * using the `parquetFile` function in [[SQLContext]].
   */
   override def saveAsParquetFile(path: String): Unit = {
     sqlContext.executePlan(WriteToFile(path, logicalPlan)).toRdd
   }
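
Aside: the round trip described in the comment, sketched with an illustrative path and the df from the earlier asides:

  df.saveAsParquetFile("/tmp/people.parquet")

  // Read the file back through the SQLContext; the schema survives the trip.
  val restored = sqlContext.parquetFile("/tmp/people.parquet")
  assert(restored.schema == df.schema)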

   /**
    * :: Experimental ::
-   * Creates a table from the the contents of this SchemaRDD. This will fail if the table already
+   * Creates a table from the contents of this DataFrame. This will fail if the table already
    * exists.
    *
-   * Note that this currently only works with SchemaRDDs that are created from a HiveContext as
+   * Note that this currently only works with DataFrames that are created from a HiveContext as
    * there is no notion of a persisted catalog in a standard SQL context. Instead you can write
    * an RDD out to a parquet file, and then register that file as a table. This "table" can then
    * be the target of an `insertInto`.
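
Aside: the workaround the note describes, sketched for a plain SQLContext (paths and table names are illustrative): write the DataFrame out as parquet, register the file as a temporary table, then make that table the target of insertInto.

  df.saveAsParquetFile("/tmp/people_table.parquet")
  sqlContext.parquetFile("/tmp/people_table.parquet").registerTempTable("people_table")

  // Any DataFrame with a matching schema can now be inserted into the "table".
  val more = sc.parallelize(Seq(Person("Carol", 41))).toDF
  more.insertInto("people_table")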