
update the comments and permission of SparkHadoopWriter
CodingCat committed Mar 24, 2014
1 parent 9bd1fe3 commit af88939
Showing 2 changed files with 5 additions and 16 deletions.
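
The "permission" change in this commit narrows a Scala package-qualified access modifier from private[apache] to private[spark]. A minimal sketch of how these modifiers scope visibility; the class names below are hypothetical and chosen only to mirror Spark's package layout:

    package org.apache.spark.rdd

    // Visible from any code under the org.apache.spark package (including
    // this rdd subpackage), but not from user code outside it.
    private[spark] class SparkOnlyHelper

    // Visible from any code under org.apache, a wider scope than an internal
    // helper needs; the commit tightens SparkHadoopWriter from this form to
    // the narrower one above.
    private[apache] class ApacheWideHelper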
core/src/main/scala/org/apache/spark/SparkHadoopWriter.scala (3 additions & 6 deletions)
@@ -28,16 +28,13 @@ import org.apache.hadoop.fs.Path
 
 import org.apache.spark.rdd.HadoopRDD
 
-
 /**
- * Internal helper class that saves an RDD using a Hadoop OutputFormat. This is only public
- * because we need to access this class from the `spark` package to use some package-private Hadoop
- * functions, but this class should not be used directly by users.
+ * Internal helper class that saves an RDD using a Hadoop OutputFormat.
  *
  * Saves the RDD using a JobConf, which should contain an output key class, an output value class,
  * a filename to write to, etc, exactly like in a Hadoop MapReduce job.
  */
-private[apache]
+private[spark]
 class SparkHadoopWriter(@transient jobConf: JobConf)
   extends Logging
   with SparkHadoopMapRedUtil
@@ -171,7 +168,7 @@ class SparkHadoopWriter(@transient jobConf: JobConf)
   }
 }
 
-private[apache]
+private[spark]
 object SparkHadoopWriter {
   def createJobID(time: Date, id: Int): JobID = {
     val formatter = new SimpleDateFormat("yyyyMMddHHmm")
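
For context, a sketch of how the createJobID helper shown above can be used. Its body is truncated in the diff, so the construction below is an assumption based on the visible signature and the "yyyyMMddHHmm" formatter, not the commit's exact code:

    import java.text.SimpleDateFormat
    import java.util.Date
    import org.apache.hadoop.mapred.JobID

    object CreateJobIdSketch extends App {
      // Plausible shape of SparkHadoopWriter.createJobID: stamp the time
      // with the pattern seen in the diff and wrap it in a Hadoop JobID.
      def createJobID(time: Date, id: Int): JobID = {
        val formatter = new SimpleDateFormat("yyyyMMddHHmm")
        new JobID(formatter.format(time), id)
      }

      println(createJobID(new Date(), 3)) // e.g. job_201403240930_0003
    }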
core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala (2 additions & 10 deletions)
@@ -233,19 +233,11 @@ private[spark] object HadoopRDD {
   def putCachedMetadata(key: String, value: Any) =
     SparkEnv.get.hadoopJobMetadata.put(key, value)
 
-  /**
-   *
-   * @param jtId
-   * @param jobId
-   * @param splitId
-   * @param attemptId
-   * @param conf
-   */
-  def addLocalConfiguration(jtId: String, jobId: Int, splitId: Int, attemptId: Int,
+  def addLocalConfiguration(jobTrackerId: String, jobId: Int, splitId: Int, attemptId: Int,
                             conf: JobConf) {
     // generate job id
     //val stageId = context.stageId
-    val jobID = new JobID(jtId, jobId)
+    val jobID = new JobID(jobTrackerId, jobId)
     //val attemptId = (attemptId % Int.MaxValue).toInt
     val taId = new TaskAttemptID(new TaskID(jobID, true, splitId), attemptId)
 
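
The identifiers built in addLocalConfiguration nest in the usual old-API Hadoop (org.apache.hadoop.mapred) fashion: a TaskAttemptID wraps a TaskID, which wraps a JobID. A small runnable sketch with illustrative values only:

    import org.apache.hadoop.mapred.{JobID, TaskAttemptID, TaskID}

    object TaskIdSketch extends App {
      // Mirrors the construction in addLocalConfiguration above.
      val jobID   = new JobID("201403240930", 1) // job_201403240930_0001
      val taskID  = new TaskID(jobID, true, 7)   // map task for split 7
      val attempt = new TaskAttemptID(taskID, 0) // first attempt of that task
      println(attempt) // attempt_201403240930_0001_m_000007_0
    }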
