From 5b1ad7d51d7d67e2c02c0322271787f26f5a0a45 Mon Sep 17 00:00:00 2001
From: CodingCat
Date: Mon, 24 Mar 2014 14:35:53 -0400
Subject: [PATCH] move SparkHiveHadoopWriter to org.apache.spark package

---
 .../apache/{hadoop/mapred => spark}/SparkHadoopWriter.scala | 6 ++----
 .../scala/org/apache/spark/sql/hive/hiveOperators.scala     | 2 +-
 2 files changed, 3 insertions(+), 5 deletions(-)
 rename sql/hive/src/main/scala/org/apache/{hadoop/mapred => spark}/SparkHadoopWriter.scala (98%)

diff --git a/sql/hive/src/main/scala/org/apache/hadoop/mapred/SparkHadoopWriter.scala b/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
similarity index 98%
rename from sql/hive/src/main/scala/org/apache/hadoop/mapred/SparkHadoopWriter.scala
rename to sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
index 0b3873191985b..bc6c9a8ef7925 100644
--- a/sql/hive/src/main/scala/org/apache/hadoop/mapred/SparkHadoopWriter.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/SparkHadoopWriter.scala
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.mapred
+package org.apache.spark
 
 import java.io.IOException
 import java.text.NumberFormat
@@ -25,11 +25,9 @@ import org.apache.hadoop.fs.Path
 import org.apache.hadoop.hive.ql.exec.{FileSinkOperator, Utilities}
 import org.apache.hadoop.hive.ql.io.{HiveFileFormatUtils, HiveOutputFormat}
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc
+import org.apache.hadoop.mapred._
 import org.apache.hadoop.io.Writable
 
-import org.apache.spark.Logging
-import org.apache.spark.SerializableWritable
-
 /**
  * Internal helper class that saves an RDD using a Hive OutputFormat.
  * It is based on [[SparkHadoopWriter]].
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveOperators.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveOperators.scala
index 9aa9e173a8367..78f69e7ff5731 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveOperators.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveOperators.scala
@@ -35,7 +35,7 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.types.{BooleanType, DataType}
 import org.apache.spark.sql.execution._
-import org.apache.spark.{TaskContext, SparkException}
+import org.apache.spark.{SparkHiveHadoopWriter, TaskContext, SparkException}
 
 /* Implicits */
 import scala.collection.JavaConversions._