Add comments for _jobProgressListener and remove postfixOps
zsxwing committed May 6, 2015
1 parent 1009ef1 commit 783cb7b
Showing 2 changed files with 3 additions and 2 deletions.
2 changes: 2 additions & 0 deletions core/src/main/scala/org/apache/spark/SparkContext.scala
@@ -407,6 +407,8 @@ class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationClient

if (master == "yarn-client") System.setProperty("SPARK_YARN_MODE", "true")

// "_jobProgressListener" should be set up before creating SparkEnv because when creating
// "SparkEnv", some messages will be posted to "listenerBus" and we should not miss them.
_jobProgressListener = new JobProgressListener(_conf)
listenerBus.addListener(jobProgressListener)

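The new comment above captures an ordering constraint: a listener only receives events posted after it has been registered, so _jobProgressListener must be added to listenerBus before SparkEnv construction starts posting. Below is a minimal, self-contained Scala sketch illustrating why the registration order matters; it is not Spark's actual ListenerBus API, and SimpleBus, CountingListener, and ListenerOrderingSketch are hypothetical names used only for this example.

import scala.collection.mutable

object ListenerOrderingSketch {
  trait Listener { def onEvent(event: String): Unit }

  // A toy event bus: posting only notifies listeners that are already registered.
  class SimpleBus {
    private val listeners = mutable.ArrayBuffer.empty[Listener]
    def addListener(l: Listener): Unit = listeners += l
    def post(event: String): Unit = listeners.foreach(_.onEvent(event))
  }

  // Counts how many events it has been notified about.
  class CountingListener extends Listener {
    var seen = 0
    def onEvent(event: String): Unit = seen += 1
  }

  def main(args: Array[String]): Unit = {
    val bus = new SimpleBus
    val listener = new CountingListener

    bus.post("env-startup-event")  // no listener registered yet, so this event is lost
    bus.addListener(listener)
    bus.post("later-event")        // delivered, because the listener is now registered

    println(s"listener saw ${listener.seen} of 2 events")  // prints: listener saw 1 of 2 events
  }
}

Registering the listener first, as the commit's comment describes, is what guarantees it observes the events emitted while SparkEnv is being created.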
3 changes: 1 addition & 2 deletions core/src/test/scala/org/apache/spark/broadcast/BroadcastSuite.scala
@@ -18,7 +18,6 @@
package org.apache.spark.broadcast

import scala.concurrent.duration._
- import scala.language.postfixOps
import scala.util.Random

import org.scalatest.{Assertions, FunSuite}
@@ -313,7 +312,7 @@ class BroadcastSuite extends FunSuite with LocalSparkContext {
val _sc =
new SparkContext("local-cluster[%d, 1, 512]".format(numSlaves), "test", broadcastConf)
// Wait until all slaves are up
- eventually(timeout(10 seconds), interval(10 milliseconds)) {
+ eventually(timeout(10.seconds), interval(10.milliseconds)) {
_sc.jobProgressListener.synchronized {
val numBlockManagers = _sc.jobProgressListener.blockManagerIds.size
assert(numBlockManagers == numSlaves + 1,
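The test change above replaces the postfix form 10 seconds with the ordinary method-call form 10.seconds, which is why the scala.language.postfixOps import can be dropped. The following is a small standalone sketch of the two syntaxes, outside of ScalaTest's eventually; DurationSyntaxSketch is just an illustrative name.

import scala.concurrent.duration._

object DurationSyntaxSketch {
  def main(args: Array[String]): Unit = {
    // Preferred: dot syntax via the DurationInt implicit; needs no extra language import.
    val timeout: FiniteDuration = 10.seconds
    val interval: FiniteDuration = 10.milliseconds
    println(s"timeout = $timeout, interval = $interval")  // timeout = 10 seconds, interval = 10 milliseconds

    // The postfix form `10 seconds` expresses the same value, but it only compiles
    // cleanly with `import scala.language.postfixOps` (or -language:postfixOps),
    // which is the import this commit removes.
  }
}

Both forms produce identical FiniteDuration values; the commit simply standardizes on the syntax that avoids the optional postfix-operators language feature.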
