
Commit

Fix the code style
zsxwing committed Dec 20, 2014
1 parent e4ad8b5 commit 39d9df2
Showing 2 changed files with 2 additions and 4 deletions.
@@ -34,8 +34,7 @@ private[spark] object FixedLengthBinaryInputFormat {

   /** Retrieves the record length property from a Hadoop configuration */
   def getRecordLength(context: JobContext): Int = {
-    SparkHadoopUtil.get.getConfigurationFromJobContext(context).
-      get(RECORD_LENGTH_PROPERTY).toInt
+    SparkHadoopUtil.get.getConfigurationFromJobContext(context).get(RECORD_LENGTH_PROPERTY).toInt
   }
 }

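For context (not part of the diff): a minimal sketch of the round trip that getRecordLength relies on, where the caller stores the fixed record size in a Hadoop Configuration and the input format reads it back as an Int. The object name and the literal property key below are illustrative assumptions, not taken from this commit; only Configuration.setInt and Configuration.get are standard Hadoop calls.

import org.apache.hadoop.conf.Configuration

object RecordLengthSketch {
  // Hypothetical stand-in for the key behind RECORD_LENGTH_PROPERTY
  val recordLengthKey = "org.apache.spark.input.FixedLengthBinaryInputFormat.recordLength"

  def main(args: Array[String]): Unit = {
    val conf = new Configuration()
    // Caller side: declare that every binary record is 16 bytes long
    conf.setInt(recordLengthKey, 16)
    // Reader side: the same lookup getRecordLength performs against the job's configuration
    val recordLength = conf.get(recordLengthKey).toInt
    require(recordLength > 0, s"record length must be positive, got $recordLength")
    println(s"record length = $recordLength bytes")
  }
}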
@@ -19,8 +19,6 @@ package org.apache.spark.input

 import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataInputStream, DataOutputStream}

-import org.apache.spark.deploy.SparkHadoopUtil
-
 import scala.collection.JavaConversions._

 import com.google.common.io.ByteStreams
@@ -30,6 +28,7 @@ import org.apache.hadoop.mapreduce.{InputSplit, JobContext, RecordReader, TaskAt
 import org.apache.hadoop.mapreduce.lib.input.{CombineFileInputFormat, CombineFileRecordReader, CombineFileSplit}

 import org.apache.spark.annotation.Experimental
+import org.apache.spark.deploy.SparkHadoopUtil

 /**
  * A general format for reading whole files in as streams, byte arrays,
