From a32fef7b4905ef098be9e4f73e15ebdfea6a545b Mon Sep 17 00:00:00 2001 From: Kevin Mader Date: Thu, 2 Oct 2014 14:41:19 +0200 Subject: [PATCH] removed unneeded classes; added DeveloperApi note to portabledatastreams since the implementation might change --- core/src/main/scala/org/apache/spark/input/RawFileInput.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/input/RawFileInput.scala b/core/src/main/scala/org/apache/spark/input/RawFileInput.scala index e1caf9c44ec16..ed353445bf486 100644 --- a/core/src/main/scala/org/apache/spark/input/RawFileInput.scala +++ b/core/src/main/scala/org/apache/spark/input/RawFileInput.scala @@ -35,7 +35,7 @@ import java.io.{ ByteArrayInputStream, ByteArrayOutputStream, DataOutputStream, * A general format for reading whole files in as streams, byte arrays, * or other functions to be added */ -abstract class StreamFileInputFormat[T] +private[spark] abstract class StreamFileInputFormat[T] extends CombineFileInputFormat[String, T] { override protected def isSplitable(context: JobContext, file: Path): Boolean = false /** @@ -152,7 +152,7 @@ class PortableDataStream(@transient isplit: CombineFileSplit, * An abstract class of [[org.apache.hadoop.mapreduce.RecordReader RecordReader]] * to reading files out as streams */ -abstract class StreamBasedRecordReader[T]( +private[spark] abstract class StreamBasedRecordReader[T]( split: CombineFileSplit, context: TaskAttemptContext, index: Integer)