Guard against negative offsets and lengths in FileSegment
JoshRosen committed May 29, 2015
1 parent 03f35a4 commit 8b216c4
Showing 3 changed files with 13 additions and 0 deletions.
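Taken together, the diff below makes two related changes: FileSegment now rejects negative offsets and lengths at construction time via require(...), and DiskBlockObjectWriter.commitAndClose() now sets finalPosition even when the writer was never opened, so a fresh writer reports a zero-length file segment instead of a negative one. A regression test for the unopened-writer case is added to BlockObjectWriterSuite.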
@@ -171,6 +171,8 @@ private[spark] class DiskBlockObjectWriter(
       finalPosition = file.length()
       // In certain compression codecs, more bytes are written after close() is called
       writeMetrics.incShuffleBytesWritten(finalPosition - reportedPosition)
+    } else {
+      finalPosition = file.length()
     }
     commitAndCloseHasBeenCalled = true
   }
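The new else branch matters because fileSegment() derives the segment's length from finalPosition: when a writer was never opened, finalPosition previously kept its sentinel value, so the computed length went negative and would now trip the FileSegment guard added below. A minimal standalone sketch of that arithmetic (the field names and the -1 sentinel are assumptions based on the surrounding code, not part of this patch):

object NegativeSegmentSketch {
  def main(args: Array[String]): Unit = {
    val initialPosition = 0L   // file.length() for a file that was never written to
    var finalPosition = -1L    // assumed sentinel value before commitAndClose() runs

    // Old behaviour: commitAndClose() on an unopened writer never updated finalPosition,
    // so the length handed to FileSegment came out negative.
    println(s"old segment length: ${finalPosition - initialPosition}")  // -1

    // New behaviour: the else branch sets finalPosition from the (empty) file's length.
    finalPosition = 0L
    println(s"new segment length: ${finalPosition - initialPosition}")  // 0
  }
}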
@@ -24,6 +24,8 @@ import java.io.File
  * based off an offset and a length.
  */
 private[spark] class FileSegment(val file: File, val offset: Long, val length: Long) {
+  require(offset >= 0, s"File segment offset cannot be negative (got $offset)")
+  require(length >= 0, s"File segment length cannot be negative (got $length)")
   override def toString: String = {
     "(name=%s, offset=%d, length=%d)".format(file.getName, offset, length)
   }
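As an illustration of what the new guards buy (this snippet is not part of the commit, and since FileSegment is private[spark] it would have to live somewhere under the org.apache.spark package), constructing a segment with a negative length now fails immediately instead of propagating a bogus segment:

import java.io.File

// require(...) rejects the negative length at construction time.
val bad = new FileSegment(new File("/tmp/example"), offset = 0L, length = -1L)
// => java.lang.IllegalArgumentException: requirement failed:
//    File segment length cannot be negative (got -1)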
@@ -160,4 +160,13 @@ class BlockObjectWriterSuite extends FunSuite with BeforeAndAfterEach {
     }
     writer.close()
   }
+
+  test("commitAndClose() without ever opening or writing") {
+    val file = new File(tempDir, "somefile")
+    val writeMetrics = new ShuffleWriteMetrics()
+    val writer = new DiskBlockObjectWriter(new TestBlockId("0"), file,
+      new JavaSerializer(new SparkConf()).newInstance(), 1024, os => os, true, writeMetrics)
+    writer.commitAndClose()
+    assert(writer.fileSegment().length === 0)
+  }
 }
