[SPARK-4397][Core] Cleanup 'import SparkContext._' in core
This PR cleans up `import SparkContext._` in core for SPARK-4397 (apache#3262), to prove that the change really works.

Author: zsxwing <zsxwing@gmail.com>

Closes apache#3530 from zsxwing/SPARK-4397-cleanup and squashes the following commits:

04e2273 [zsxwing] Cleanup 'import SparkContext._' in core
zsxwing authored and rxin committed Dec 2, 2014
1 parent 64f3175 commit 6dfe38a
Showing 36 changed files with 8 additions and 44 deletions.
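For context on what makes this cleanup possible: since SPARK-4397 the RDD implicit conversions also live on the `RDD` companion object, so pair-RDD methods resolve without the wildcard import. A minimal sketch of the pattern (assuming Spark 1.3+; the object name and master URL are illustrative, not part of this commit):

```scala
import org.apache.spark.{SparkConf, SparkContext}
// Note: no `import org.apache.spark.SparkContext._` anywhere in this file.

object NoWildcardImportDemo {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("demo").setMaster("local[2]"))
    val pairs = sc.parallelize(Seq(("a", 1), ("b", 2), ("a", 3)))

    // reduceByKey comes from PairRDDFunctions, reached through the
    // implicit conversions on the RDD companion object (SPARK-4397),
    // so this compiles without the old wildcard import.
    pairs.reduceByKey(_ + _).collect().foreach(println)
    sc.stop()
  }
}
```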
@@ -28,7 +28,6 @@ import com.google.common.base.Optional
 import org.apache.hadoop.io.compress.CompressionCodec

 import org.apache.spark._
-import org.apache.spark.SparkContext._
 import org.apache.spark.annotation.Experimental
 import org.apache.spark.api.java.JavaPairRDD._
 import org.apache.spark.api.java.JavaSparkContext.fakeClassTag
@@ -34,7 +34,6 @@ import org.apache.hadoop.io.compress.CompressionCodec
 import org.apache.hadoop.mapred.{InputFormat, OutputFormat, JobConf}
 import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, OutputFormat => NewOutputFormat}
 import org.apache.spark._
-import org.apache.spark.SparkContext._
 import org.apache.spark.api.java.{JavaSparkContext, JavaPairRDD, JavaRDD}
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.rdd.RDD
4 changes: 2 additions & 2 deletions core/src/main/scala/org/apache/spark/package.scala
@@ -27,8 +27,8 @@ package org.apache
  * contains operations available only on RDDs of Doubles; and
  * [[org.apache.spark.rdd.SequenceFileRDDFunctions]] contains operations available on RDDs that can
  * be saved as SequenceFiles. These operations are automatically available on any RDD of the right
- * type (e.g. RDD[(Int, Int)] through implicit conversions when you
- * `import org.apache.spark.SparkContext._`.
+ * type (e.g. RDD[(Int, Int)] through implicit conversions except `saveAsSequenceFile`. You need to
+ * `import org.apache.spark.SparkContext._` to make `saveAsSequenceFile` work.
  *
  * Java programmers should reference the [[org.apache.spark.api.java]] package
  * for Spark programming APIs in Java.
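The exception named in the new wording is worth spelling out: `saveAsSequenceFile` still comes through the implicits in `SparkContext._` at this point, so that import remains necessary for exactly one operation. A hedged sketch (assuming a live `sc: SparkContext`; the output path is illustrative):

```scala
import org.apache.spark.SparkContext._  // still required, but only for saveAsSequenceFile

val pairs = sc.parallelize(Seq(("k1", 1), ("k2", 2)))
pairs.saveAsSequenceFile("/tmp/seqfile-demo")  // writes a Hadoop SequenceFile
```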
@@ -27,7 +27,6 @@ import org.apache.spark.{ComplexFutureAction, FutureAction, Logging}

 /**
  * A set of asynchronous RDD actions available through an implicit conversion.
- * Import `org.apache.spark.SparkContext._` at the top of your program to use these functions.
  */
 class AsyncRDDActions[T: ClassTag](self: RDD[T]) extends Serializable with Logging {

@@ -27,7 +27,6 @@ import org.apache.spark.util.StatCounter

 /**
  * Extra functions available on RDDs of Doubles through an implicit conversion.
- * Import `org.apache.spark.SparkContext._` at the top of your program to use these functions.
  */
 class DoubleRDDFunctions(self: RDD[Double]) extends Logging with Serializable {
   /** Add up the elements in this RDD. */
@@ -24,10 +24,9 @@ import org.apache.spark.annotation.DeveloperApi

 /**
  * Extra functions available on RDDs of (key, value) pairs where the key is sortable through
- * an implicit conversion. Import `org.apache.spark.SparkContext._` at the top of your program to
- * use these functions. They will work with any key type `K` that has an implicit `Ordering[K]` in
- * scope. Ordering objects already exist for all of the standard primitive types. Users can also
- * define their own orderings for custom types, or to override the default ordering. The implicit
+ * an implicit conversion. They will work with any key type `K` that has an implicit `Ordering[K]`
+ * in scope. Ordering objects already exist for all of the standard primitive types. Users can also
+ * define their own orderings for custom types, or to override the default ordering. The implicit
  * ordering that is in the closest scope will be used.
  *
  * {{{
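To illustrate the "closest implicit ordering wins" rule described in this Scaladoc, a small sketch (assuming a live `sc`; the custom ordering is a hypothetical example, not from this commit):

```scala
// A case-insensitive ordering overrides the default Ordering[String]
// because it sits in closer implicit scope at the sortByKey call site.
implicit val caseInsensitive: Ordering[String] = Ordering.by(_.toLowerCase)

val sorted = sc.parallelize(Seq(("b", 1), ("A", 2), ("a", 3)))
  .sortByKey()  // resolves its Ordering[String] to caseInsensitive
sorted.collect() // keys compare case-insensitively: "A"/"a" sort before "b"
```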
@@ -37,7 +37,6 @@ RecordWriter => NewRecordWriter}

 import org.apache.spark._
 import org.apache.spark.Partitioner.defaultPartitioner
-import org.apache.spark.SparkContext._
 import org.apache.spark.annotation.Experimental
 import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.executor.{DataWriteMethod, OutputMetrics}
@@ -50,7 +49,6 @@ import org.apache.spark.util.random.StratifiedSamplingUtils

 /**
  * Extra functions available on RDDs of (key, value) pairs through an implicit conversion.
- * Import `org.apache.spark.SparkContext._` at the top of your program to use these functions.
  */
 class PairRDDFunctions[K, V](self: RDD[(K, V)])
     (implicit kt: ClassTag[K], vt: ClassTag[V], ord: Ordering[K] = null)
6 changes: 3 additions & 3 deletions core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -34,7 +34,6 @@ import org.apache.hadoop.mapred.TextOutputFormat

 import org.apache.spark._
 import org.apache.spark.Partitioner._
-import org.apache.spark.SparkContext._
 import org.apache.spark.annotation.{DeveloperApi, Experimental}
 import org.apache.spark.api.java.JavaRDD
 import org.apache.spark.broadcast.Broadcast
@@ -58,8 +57,9 @@ import org.apache.spark.util.random.{BernoulliSampler, PoissonSampler, Bernoulli
  * Doubles; and
  * [[org.apache.spark.rdd.SequenceFileRDDFunctions]] contains operations available on RDDs that
  * can be saved as SequenceFiles.
- * These operations are automatically available on any RDD of the right type (e.g. RDD[(Int, Int)]
- * through implicit conversions when you `import org.apache.spark.SparkContext._`.
+ * All operations are automatically available on any RDD of the right type (e.g. RDD[(Int, Int)]
+ * through implicit conversions except `saveAsSequenceFile`. You need to
+ * `import org.apache.spark.SparkContext._` to make `saveAsSequenceFile` work.
  *
  * Internally, each RDD is characterized by five main properties:
  *
@@ -24,7 +24,6 @@ import org.apache.hadoop.mapred.JobConf
 import org.apache.hadoop.mapred.SequenceFileOutputFormat

 import org.apache.spark.Logging
-import org.apache.spark.SparkContext._

 /**
  * Extra functions available on RDDs of (key, value) pairs to create a Hadoop SequenceFile,
@@ -20,7 +20,6 @@ package org.apache.spark.ui
 import scala.util.Random

 import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.SparkContext._
 import org.apache.spark.scheduler.SchedulingMode

 /**
@@ -25,7 +25,6 @@ import scala.reflect.ClassTag
 import org.apache.commons.math3.distribution.PoissonDistribution

 import org.apache.spark.Logging
-import org.apache.spark.SparkContext._
 import org.apache.spark.rdd.RDD

 /**
@@ -22,7 +22,6 @@ import scala.collection.mutable
 import org.scalatest.FunSuite
 import org.scalatest.Matchers

-import org.apache.spark.SparkContext._

 class AccumulatorSuite extends FunSuite with Matchers with LocalSparkContext {

1 change: 0 additions & 1 deletion core/src/test/scala/org/apache/spark/CheckpointSuite.scala
@@ -23,7 +23,6 @@ import scala.reflect.ClassTag

 import org.scalatest.FunSuite

-import org.apache.spark.SparkContext._
 import org.apache.spark.rdd._
 import org.apache.spark.storage.{BlockId, StorageLevel, TestBlockId}
 import org.apache.spark.util.Utils
@@ -28,7 +28,6 @@ import org.scalatest.concurrent.{PatienceConfiguration, Eventually}
 import org.scalatest.concurrent.Eventually._
 import org.scalatest.time.SpanSugar._

-import org.apache.spark.SparkContext._
 import org.apache.spark.rdd.RDD
 import org.apache.spark.storage._
 import org.apache.spark.shuffle.hash.HashShuffleManager
@@ -23,7 +23,6 @@ import org.scalatest.concurrent.Timeouts._
 import org.scalatest.Matchers
 import org.scalatest.time.{Millis, Span}

-import org.apache.spark.SparkContext._
 import org.apache.spark.storage.{RDDBlockId, StorageLevel}

 class NotSerializableClass
@@ -21,7 +21,6 @@ import java.util.concurrent.atomic.AtomicInteger

 import org.scalatest.BeforeAndAfterAll

-import org.apache.spark.SparkContext._
 import org.apache.spark.network.TransportContext
 import org.apache.spark.network.netty.SparkTransportConf
 import org.apache.spark.network.server.TransportServer
1 change: 0 additions & 1 deletion core/src/test/scala/org/apache/spark/FailureSuite.scala
@@ -19,7 +19,6 @@ package org.apache.spark

 import org.scalatest.FunSuite

-import org.apache.spark.SparkContext._
 import org.apache.spark.util.NonSerializable

 import java.io.NotSerializableException
1 change: 0 additions & 1 deletion core/src/test/scala/org/apache/spark/FileServerSuite.scala
@@ -23,7 +23,6 @@ import java.util.jar.{JarEntry, JarOutputStream}
 import com.google.common.io.ByteStreams
 import org.scalatest.FunSuite

-import org.apache.spark.SparkContext._
 import org.apache.spark.util.Utils

 class FileServerSuite extends FunSuite with LocalSparkContext {
@@ -22,7 +22,6 @@ import scala.concurrent.duration.Duration

 import org.scalatest.{BeforeAndAfter, FunSuite, Matchers}

-import org.apache.spark.SparkContext._

 class FutureActionSuite extends FunSuite with BeforeAndAfter with Matchers with LocalSparkContext {

@@ -20,7 +20,6 @@ package org.apache.spark
 import org.scalatest.FunSuite

 import org.apache.spark.rdd.RDD
-import org.apache.spark.SparkContext._

 class ImplicitOrderingSuite extends FunSuite with LocalSparkContext {
   // Tests that PairRDDFunctions grabs an implicit Ordering in various cases where it should.
@@ -27,7 +27,6 @@ import scala.concurrent.future
 import org.scalatest.{BeforeAndAfter, FunSuite}
 import org.scalatest.Matchers

-import org.apache.spark.SparkContext._
 import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskStart}

 /**
@@ -22,7 +22,6 @@ import scala.math.abs

 import org.scalatest.{FunSuite, PrivateMethodTester}

-import org.apache.spark.SparkContext._
 import org.apache.spark.rdd.RDD
 import org.apache.spark.util.StatCounter

1 change: 0 additions & 1 deletion core/src/test/scala/org/apache/spark/ShuffleSuite.scala
@@ -20,7 +20,6 @@ package org.apache.spark
 import org.scalatest.FunSuite
 import org.scalatest.Matchers

-import org.apache.spark.SparkContext._
 import org.apache.spark.ShuffleSuite.NonJavaSerializableClass
 import org.apache.spark.rdd.{CoGroupedRDD, OrderedRDDFunctions, RDD, ShuffledRDD, SubtractedRDD}
 import org.apache.spark.serializer.KryoSerializer
@@ -25,7 +25,6 @@ import org.scalatest.{Matchers, FunSuite}
 import org.scalatest.concurrent.Eventually._

 import org.apache.spark.JobExecutionStatus._
-import org.apache.spark.SparkContext._

 class StatusTrackerSuite extends FunSuite with Matchers with LocalSparkContext {

@@ -27,7 +27,6 @@ import org.scalatest.{BeforeAndAfterAll, FunSuite}
 import org.scalatest.concurrent.Timeouts
 import org.scalatest.time.SpanSugar._

-import org.apache.spark.SparkContext._
 import org.apache.spark.{SparkContext, SparkException, LocalSparkContext}

 class AsyncRDDActionsSuite extends FunSuite with BeforeAndAfterAll with Timeouts {
@@ -20,7 +20,6 @@ package org.apache.spark.rdd
 import org.scalatest.FunSuite

 import org.apache.spark._
-import org.apache.spark.SparkContext._

 class DoubleRDDSuite extends FunSuite with SharedSparkContext {
   // Verify tests on the histogram functionality. We test with both evenly
@@ -29,7 +29,6 @@ import org.apache.hadoop.mapreduce.{JobContext => NewJobContext, OutputCommitter
   OutputFormat => NewOutputFormat, RecordWriter => NewRecordWriter,
   TaskAttemptContext => NewTaskAttempContext}
 import org.apache.spark.{Partitioner, SharedSparkContext}
-import org.apache.spark.SparkContext._
 import org.apache.spark.util.Utils

 import org.scalatest.FunSuite
2 changes: 0 additions & 2 deletions core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala
@@ -24,7 +24,6 @@ import scala.reflect.ClassTag
 import org.scalatest.FunSuite

 import org.apache.spark._
-import org.apache.spark.SparkContext._
 import org.apache.spark.util.Utils

 import org.apache.spark.api.java.{JavaRDD, JavaSparkContext}
@@ -97,7 +96,6 @@ class RDDSuite extends FunSuite with SharedSparkContext {
   }

   test("partitioner aware union") {
-    import SparkContext._
     def makeRDDWithPartitioner(seq: Seq[Int]) = {
       sc.makeRDD(seq, 1)
         .map(x => (x, null))
@@ -21,7 +21,6 @@ import org.scalatest.FunSuite
 import org.scalatest.Matchers

 import org.apache.spark.{Logging, SharedSparkContext}
-import org.apache.spark.SparkContext._

 class SortingSuite extends FunSuite with SharedSparkContext with Matchers with Logging {

@@ -27,7 +27,6 @@ import org.scalatest.concurrent.Timeouts
 import org.scalatest.time.SpanSugar._

 import org.apache.spark._
-import org.apache.spark.SparkContext._
 import org.apache.spark.rdd.RDD
 import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
 import org.apache.spark.storage.{BlockId, BlockManagerId, BlockManagerMaster}
@@ -22,7 +22,6 @@ import java.io.{File, PrintWriter}

 import org.json4s.jackson.JsonMethods._
 import org.scalatest.{BeforeAndAfter, FunSuite}
-import org.apache.spark.SparkContext._
 import org.apache.spark.{SparkConf, SparkContext}
 import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.io.CompressionCodec
@@ -25,7 +25,6 @@ import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FunSuite}
 import org.scalatest.Matchers

 import org.apache.spark.{LocalSparkContext, SparkContext}
-import org.apache.spark.SparkContext._
 import org.apache.spark.executor.TaskMetrics

 class SparkListenerSuite extends FunSuite with LocalSparkContext with Matchers
@@ -23,7 +23,6 @@ import com.esotericsoftware.kryo.Kryo
 import org.scalatest.FunSuite

 import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkEnv, TestUtils}
-import org.apache.spark.SparkContext._
 import org.apache.spark.serializer.KryoDistributedTest._

 class KryoSerializerDistributedSuite extends FunSuite {
@@ -27,7 +27,6 @@ import org.scalatest.selenium.WebBrowser
 import org.scalatest.time.SpanSugar._

 import org.apache.spark._
-import org.apache.spark.SparkContext._
 import org.apache.spark.LocalSparkContext._
 import org.apache.spark.api.java.StorageLevels
 import org.apache.spark.shuffle.FetchFailedException
@@ -22,7 +22,6 @@ import scala.collection.mutable.ArrayBuffer
 import org.scalatest.FunSuite

 import org.apache.spark._
-import org.apache.spark.SparkContext._
 import org.apache.spark.io.CompressionCodec

 class ExternalAppendOnlyMapSuite extends FunSuite with LocalSparkContext {
@@ -22,7 +22,6 @@ import scala.collection.mutable.ArrayBuffer
 import org.scalatest.{PrivateMethodTester, FunSuite}

 import org.apache.spark._
-import org.apache.spark.SparkContext._

 import scala.util.Random

