Merge branch 'master' into vector
mengxr committed Apr 2, 2014
2 parents f7da54b + 8b3045c commit 11999c7
Showing 44 changed files with 2,510 additions and 70 deletions.
2 changes: 1 addition & 1 deletion core/pom.xml
@@ -150,7 +150,7 @@
<artifactId>json4s-jackson_${scala.binary.version}</artifactId>
<version>3.2.6</version>
<!-- see also exclusion for lift-json; this is necessary since it depends on
- scala-library and scalap 2.10.0, but we use 2.10.3, and only override
+ scala-library and scalap 2.10.0, but we use 2.10.4, and only override
scala-library -->
<exclusions>
<exclusion>
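The comment above refers to an exclusion whose coordinates are cut off in this hunk. As a rough illustration for sbt users, the equivalent might look like the following — a hedged sketch that assumes the excluded transitive artifact is scalap, which the truncated `<exclusion>` element does not actually confirm:

```scala
// Hypothetical build.sbt equivalent of the Maven exclusion above:
// take json4s-jackson 3.2.6 but drop its transitive scalap 2.10.0,
// since the build overrides only scala-library (to 2.10.4), not scalap.
libraryDependencies += ("org.json4s" %% "json4s-jackson" % "3.2.6")
  .exclude("org.scala-lang", "scalap")
```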
3 changes: 2 additions & 1 deletion core/src/main/scala/org/apache/spark/ui/JettyUtils.scala
@@ -18,13 +18,14 @@
package org.apache.spark.ui

import java.net.{InetSocketAddress, URL}
+ import javax.servlet.DispatcherType
import javax.servlet.http.{HttpServlet, HttpServletRequest, HttpServletResponse}

import scala.annotation.tailrec
import scala.util.{Failure, Success, Try}
import scala.xml.Node

- import org.eclipse.jetty.server.{DispatcherType, Server}
+ import org.eclipse.jetty.server.Server
import org.eclipse.jetty.server.handler._
import org.eclipse.jetty.servlet._
import org.eclipse.jetty.util.thread.QueuedThreadPool
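The import change above tracks Jetty 8's adoption of the Servlet 3.0 API: `DispatcherType` moved from `org.eclipse.jetty.server` to `javax.servlet`. A minimal sketch of how the relocated enum is used when attaching a filter to a Jetty handler (the method and filter here are illustrative, not Spark's actual code):

```scala
import java.util.EnumSet
import javax.servlet.{DispatcherType, Filter}

import org.eclipse.jetty.servlet.{FilterHolder, ServletContextHandler}

// With Jetty 8, the dispatch types for a filter registration are built
// from javax.servlet.DispatcherType rather than Jetty's own enum.
def attachFilter(handler: ServletContextHandler, filter: Filter): Unit = {
  handler.addFilter(new FilterHolder(filter), "/*",
    EnumSet.of(DispatcherType.REQUEST))
}
```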
@@ -84,7 +84,7 @@ private[ui] class BlockManagerListener(storageStatusListener: StorageStatusListener

override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted) = synchronized {
val rddInfo = stageSubmitted.stageInfo.rddInfo
- _rddInfoMap(rddInfo.id) = rddInfo
+ _rddInfoMap.getOrElseUpdate(rddInfo.id, rddInfo)
}

override def onStageCompleted(stageCompleted: SparkListenerStageCompleted) = synchronized {
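The one-line change above swaps an unconditional write for `getOrElseUpdate`, so a stage submission no longer replaces an RDDInfo entry the listener is already tracking. A self-contained sketch of the behavioral difference (plain strings stand in for RDDInfo):

```scala
import scala.collection.mutable

val rddInfoMap = mutable.HashMap[Int, String]()
rddInfoMap(1) = "existing info, already being tracked"

// Old behavior: a plain update clobbers the existing entry.
// rddInfoMap(1) = "fresh info"

// New behavior: keep what is there; insert only when the key is absent.
rddInfoMap.getOrElseUpdate(1, "fresh info") // returns the existing value
rddInfoMap.getOrElseUpdate(2, "fresh info") // inserts under the new key

assert(rddInfoMap(1) == "existing info, already being tracked")
assert(rddInfoMap(2) == "fresh info")
```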
2 changes: 1 addition & 1 deletion dev/audit-release/README.md
@@ -4,7 +4,7 @@ run them locally by setting appropriate environment variables.

```
$ cd sbt_app_core
- $ SCALA_VERSION=2.10.3 \
+ $ SCALA_VERSION=2.10.4 \
SPARK_VERSION=1.0.0-SNAPSHOT \
SPARK_RELEASE_REPOSITORY=file:///home/patrick/.ivy2/local \
sbt run
```
2 changes: 1 addition & 1 deletion dev/audit-release/audit_release.py
@@ -35,7 +35,7 @@
RELEASE_KEY = "9E4FE3AF"
RELEASE_REPOSITORY = "https://repository.apache.org/content/repositories/orgapachespark-1006/"
RELEASE_VERSION = "1.0.0"
SCALA_VERSION = "2.10.3"
SCALA_VERSION = "2.10.4"
SCALA_BINARY_VERSION = "2.10"
##

2 changes: 1 addition & 1 deletion docker/spark-test/base/Dockerfile
@@ -25,7 +25,7 @@ RUN apt-get update
# install a few other useful packages plus Open Jdk 7
RUN apt-get install -y less openjdk-7-jre-headless net-tools vim-tiny sudo openssh-server

- ENV SCALA_VERSION 2.10.3
+ ENV SCALA_VERSION 2.10.4
ENV CDH_VERSION cdh4
ENV SCALA_HOME /opt/scala-$SCALA_VERSION
ENV SPARK_HOME /opt/spark
2 changes: 1 addition & 1 deletion docs/_config.yml
@@ -6,7 +6,7 @@ markdown: kramdown
SPARK_VERSION: 1.0.0-SNAPSHOT
SPARK_VERSION_SHORT: 1.0.0
SCALA_BINARY_VERSION: "2.10"
SCALA_VERSION: "2.10.3"
SCALA_VERSION: "2.10.4"
MESOS_VERSION: 0.13.0
SPARK_ISSUE_TRACKER_URL: https://spark-project.atlassian.net
SPARK_GITHUB_URL: https://github.com/apache/spark
7 changes: 4 additions & 3 deletions docs/running-on-yarn.md
@@ -61,7 +61,7 @@ The command to launch the Spark application on the cluster is as follows:
SPARK_JAR=<SPARK_ASSEMBLY_JAR_FILE> ./bin/spark-class org.apache.spark.deploy.yarn.Client \
--jar <YOUR_APP_JAR_FILE> \
--class <APP_MAIN_CLASS> \
--args <APP_MAIN_ARGUMENTS> \
--arg <APP_MAIN_ARGUMENT> \
--num-executors <NUMBER_OF_EXECUTOR_PROCESSES> \
--driver-memory <MEMORY_FOR_ApplicationMaster> \
--executor-memory <MEMORY_PER_EXECUTOR> \
@@ -72,7 +72,7 @@ The command to launch the Spark application on the cluster is as follows:
--files <files_for_distributed_cache> \
--archives <archives_for_distributed_cache>

- For example:
+ To pass multiple arguments the "arg" option can be specified multiple times. For example:

# Build the Spark assembly JAR and the Spark examples JAR
$ SPARK_HADOOP_VERSION=2.0.5-alpha SPARK_YARN=true sbt/sbt assembly
@@ -85,7 +85,8 @@ For example:
./bin/spark-class org.apache.spark.deploy.yarn.Client \
--jar examples/target/scala-{{site.SCALA_BINARY_VERSION}}/spark-examples-assembly-{{site.SPARK_VERSION}}.jar \
--class org.apache.spark.examples.SparkPi \
- --args yarn-cluster \
+ --arg yarn-cluster \
+ --arg 5 \
--num-executors 3 \
--driver-memory 4g \
--executor-memory 2g \
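The renamed option is repeatable: each occurrence of `--arg` contributes one user argument, in order. A minimal sketch of how such a flag can be collected (illustrative only; not Spark's actual ClientArguments parser):

```scala
import scala.collection.mutable.ArrayBuffer

// Gather every value passed via a repeatable --arg flag, preserving order.
def collectUserArgs(args: List[String]): Seq[String] = {
  val userArgs = ArrayBuffer.empty[String]
  @annotation.tailrec
  def loop(rest: List[String]): Unit = rest match {
    case "--arg" :: value :: tail => userArgs += value; loop(tail)
    case _ :: tail                => loop(tail) // skip options this sketch ignores
    case Nil                      => ()
  }
  loop(args)
  userArgs.toSeq
}

// collectUserArgs(List("--arg", "yarn-cluster", "--arg", "5"))
// returns Seq("yarn-cluster", "5")
```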