Add Default Row Commit Version to AddFile and RemoveFile #1781

Closed
@@ -622,7 +622,8 @@ object Checkpoints extends DeltaLogging {
col("add.dataChange"), // actually not really useful here
col("add.tags"),
col("add.deletionVector"),
col("add.baseRowId")) ++
col("add.baseRowId"),
col("add.defaultRowCommitVersion")) ++
additionalCols: _*
))
)
@@ -0,0 +1,36 @@
/*
 * Copyright (2021) The Delta Lake Project Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.delta

import org.apache.spark.sql.delta.actions.{Action, AddFile, Protocol}

object DefaultRowCommitVersion {
  def assignIfMissing(
      protocol: Protocol,
      actions: Iterator[Action],
      version: Long): Iterator[Action] = {
    if (!RowTracking.isSupported(protocol)) {
      return actions
    }
    actions.map {
      case a: AddFile if a.defaultRowCommitVersion.isEmpty =>
        a.copy(defaultRowCommitVersion = Some(version))
      case a =>
        a
    }
  }
}
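
A usage sketch (not part of this PR's diff): assignIfMissing stamps only the AddFile actions that do not already carry a defaultRowCommitVersion, and leaves the iterator untouched when the protocol does not support row tracking. The example wrapper, file names, sizes, and version numbers below are placeholders.

// Usage sketch with placeholder values; assumes `protocol` is a table
// protocol for which RowTracking.isSupported returns true.
def example(protocol: Protocol): Unit = {
  val actions: Iterator[Action] = Iterator(
    // Not stamped yet: receives the commit version passed in below.
    AddFile("part-0000", Map.empty, size = 1L, modificationTime = 1L, dataChange = true),
    // Already stamped (e.g. carried over from an earlier commit): kept as-is.
    AddFile("part-0001", Map.empty, size = 1L, modificationTime = 1L, dataChange = true,
      defaultRowCommitVersion = Some(3L)))

  val stamped = DefaultRowCommitVersion.assignIfMissing(protocol, actions, version = 7L)
  // Yields "part-0000" -> Some(7L) and "part-0001" -> Some(3L).
  stamped.foreach(println)
}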
@@ -1037,7 +1037,8 @@ trait OptimisticTransactionImpl extends TransactionalWrite
       }

       val (commitVersion, postCommitSnapshot, updatedCurrentTransactionInfo) =
-        doCommitRetryIteratively(snapshot.version + 1, currentTransactionInfo, isolationLevelToUse)
+        doCommitRetryIteratively(
+          getFirstAttemptVersion, currentTransactionInfo, isolationLevelToUse)
       logInfo(s"Committed delta #$commitVersion to ${deltaLog.logPath}")
       (commitVersion, postCommitSnapshot, updatedCurrentTransactionInfo.actions)
     } catch {
@@ -1084,7 +1085,7 @@ trait OptimisticTransactionImpl extends TransactionalWrite
       context: Map[String, String],
       metrics: Map[String, String]): (Long, Snapshot) = {
     commitStartNano = System.nanoTime()
-    val attemptVersion = readVersion + 1
+    val attemptVersion = getFirstAttemptVersion
     try {
       val commitInfo = CommitInfo(
         time = clock.getTimeMillis(),
@@ -1134,6 +1135,8 @@ trait OptimisticTransactionImpl extends TransactionalWrite
     }

     allActions = RowId.assignFreshRowIds(spark, protocol, snapshot, allActions)
+    allActions = DefaultRowCommitVersion
+      .assignIfMissing(protocol, allActions, getFirstAttemptVersion)

     if (readVersion < 0) {
       deltaLog.createLogDirectory()
@@ -1354,6 +1357,8 @@ trait OptimisticTransactionImpl extends TransactionalWrite

     finalActions =
       RowId.assignFreshRowIds(spark, protocol, snapshot, finalActions.toIterator).toList
+    finalActions = DefaultRowCommitVersion
+      .assignIfMissing(protocol, finalActions.toIterator, getFirstAttemptVersion).toList

     // We make sure that this isn't an appendOnly table as we check if we need to delete
     // files.
@@ -1710,6 +1715,9 @@ trait OptimisticTransactionImpl extends TransactionalWrite
       conflictChecker.checkConflicts()
     }

+  /** Returns the version that the first attempt will try to commit at. */
+  protected def getFirstAttemptVersion: Long = readVersion + 1L
Collaborator:
This is the first attempted commit version?
(not sure if name change or just doc comment is better)

Collaborator (Author):
The name was chosen to match getNextAttemptVersion. I will add a doc comment to make it clearer what it is.
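
For intuition, a sketch of how the two helpers relate in the retry sequence; tryCommit is a made-up stand-in, and the real loop lives in doCommitRetryIteratively.

// Hypothetical driver, for illustration only.
var attemptVersion = getFirstAttemptVersion   // readVersion + 1
while (!tryCommit(attemptVersion)) {          // made-up helper
  // On a conflict, re-read the log and try the version after the
  // latest snapshot.
  attemptVersion = getNextAttemptVersion(attemptVersion)
}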


   /** Returns the next attempt version given the last attempted version */
   protected def getNextAttemptVersion(previousAttemptVersion: Long): Long = {
     val latestSnapshot = deltaLog.update()
@@ -253,7 +253,8 @@ class Snapshot(
           col(ADD_STATS_TO_USE_COL_NAME).as("stats"),
           col("add.tags"),
           col("add.deletionVector"),
-          col("add.baseRowId")
+          col("add.baseRowId"),
+          col("add.defaultRowCommitVersion")
         )))
       .withColumn("remove", when(
         col("remove.path").isNotNull,
@@ -562,7 +562,9 @@ case class AddFile(
     override val tags: Map[String, String] = null,
     deletionVector: DeletionVectorDescriptor = null,
     @JsonDeserialize(contentAs = classOf[java.lang.Long])
-    baseRowId: Option[Long] = None
+    baseRowId: Option[Long] = None,
+    @JsonDeserialize(contentAs = classOf[java.lang.Long])
+    defaultRowCommitVersion: Option[Long] = None
 ) extends FileAction {
   require(path.nonEmpty)

@@ -580,7 +582,8 @@
       path, Some(timestamp), dataChange,
       extendedFileMetadata = Some(true), partitionValues, Some(size), newTags,
       deletionVector = deletionVector,
-      baseRowId = baseRowId
+      baseRowId = baseRowId,
+      defaultRowCommitVersion = defaultRowCommitVersion
     )
     removedFile.numLogicalRecords = numLogicalRecords
     removedFile.estLogicalFileSize = estLogicalFileSize
@@ -802,7 +805,9 @@ case class RemoveFile(
     override val tags: Map[String, String] = null,
     deletionVector: DeletionVectorDescriptor = null,
     @JsonDeserialize(contentAs = classOf[java.lang.Long])
-    baseRowId: Option[Long] = None
+    baseRowId: Option[Long] = None,
+    @JsonDeserialize(contentAs = classOf[java.lang.Long])
+    defaultRowCommitVersion: Option[Long] = None
 ) extends FileAction {
   override def wrap: SingleAction = SingleAction(remove = this)

@@ -278,8 +278,11 @@ abstract class CloneTableBase(
       val copiedFile = fileToCopy.copy(dataChange = true)
       opName match {
         case CloneTableCommand.OP_NAME =>
+          // CLONE does not preserve Row IDs and Commit Versions
Collaborator:
why not?

Collaborator (Author):
It is explained in the design document. The fundamental problem is that there is no way to remove a file and add it back with a different baseRowId in the same commit. This causes issues with incremental clones.
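
To make that concrete, a simplified sketch with stand-in classes (not the actual Delta actions; paths and IDs are invented): preserving row IDs across an incremental clone would require a commit of the following shape for a file that already exists in the target, and the log cannot express this for a single path.

// Stand-ins for the Delta file actions, for illustration only.
case class Remove(path: String, baseRowId: Option[Long])
case class Add(path: String, baseRowId: Option[Long])

// A single commit would have to both drop and re-add the same path with a
// different baseRowId:
val impossibleCommit = Seq(
  Remove("part-0001.parquet", baseRowId = Some(5L)), // file as it exists in the target
  Add("part-0001.parquet", baseRowId = Some(10L))    // same file, re-assigned IDs
)
// Since this cannot be expressed, CLONE clears baseRowId and
// defaultRowCommitVersion and lets the target commit assign fresh values.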

+          copiedFile.copy(baseRowId = None, defaultRowCommitVersion = None)
-        case RestoreTableCommand.OP_NAME => copiedFile
+        case RestoreTableCommand.OP_NAME =>
+          // RESTORE preserves Row IDs and Commit Versions
+          copiedFile
       }
     }
     val sourceName = sourceTable.name
@@ -226,7 +226,8 @@ class CheckpointsSuite extends QueryTest
"partitionValues",
"size",
"deletionVector",
"baseRowId")
"baseRowId",
"defaultRowCommitVersion")

val tablePath = tempDir.getAbsolutePath
// Append rows [0, 9] to table and merge tablePath.
@@ -313,7 +313,7 @@ class DeltaLogSuite extends QueryTest

     assert(log.update().allFiles.collect().find(_.path == "foo")
       // `dataChange` is set to `false` after replaying logs.
-      === Some(add2.copy(dataChange = false)))
+      === Some(add2.copy(dataChange = false, defaultRowCommitVersion = Some(2))))
   }
 }

@@ -0,0 +1,155 @@
/*
 * Copyright (2021) The Delta Lake Project Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.delta.rowtracking

import scala.collection.mutable

import org.apache.spark.sql.delta.DeltaLog
import org.apache.spark.sql.delta.actions.{AddFile, RemoveFile}
import org.apache.spark.sql.delta.rowid.RowIdTestUtils

import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.test.SharedSparkSession

class DefaultRowCommitVersionSuite extends QueryTest with SharedSparkSession with RowIdTestUtils {

  def expectedCommitVersionsForAllFiles(deltaLog: DeltaLog): Map[String, Long] = {
    val commitVersionForFiles = mutable.Map.empty[String, Long]
    deltaLog.getChanges(startVersion = 0).foreach { case (commitVersion, actions) =>
      actions.foreach {
        case a: AddFile if !commitVersionForFiles.contains(a.path) =>
          commitVersionForFiles += a.path -> commitVersion
        case r: RemoveFile if commitVersionForFiles.contains(r.path) =>
          assert(r.defaultRowCommitVersion.contains(commitVersionForFiles(r.path)))
        case _ =>
          // Do nothing
      }
    }
    commitVersionForFiles.toMap
  }

test("defaultRowCommitVersion is not set when feature is disabled") {
withRowTrackingEnabled(enabled = false) {
withTempDir { tempDir =>
spark.range(start = 0, end = 100, step = 1, numPartitions = 1)
.write.format("delta").mode("overwrite").save(tempDir.getAbsolutePath)
spark.range(start = 100, end = 200, step = 1, numPartitions = 1)
.write.format("delta").mode("append").save(tempDir.getAbsolutePath)

val deltaLog = DeltaLog.forTable(spark, tempDir)
deltaLog.update().allFiles.collect().foreach { f =>
assert(f.defaultRowCommitVersion.isEmpty)
}
}
}
}

test("checkpoint preserves defaultRowCommitVersion") {
withRowTrackingEnabled(enabled = true) {
withTempDir { tempDir =>
spark.range(start = 0, end = 100, step = 1, numPartitions = 1)
.write.format("delta").mode("append").save(tempDir.getAbsolutePath)
spark.range(start = 100, end = 200, step = 1, numPartitions = 1)
.write.format("delta").mode("append").save(tempDir.getAbsolutePath)
spark.range(start = 200, end = 300, step = 1, numPartitions = 1)
.write.format("delta").mode("append").save(tempDir.getAbsolutePath)

val deltaLog = DeltaLog.forTable(spark, tempDir)
val commitVersionForFiles = expectedCommitVersionsForAllFiles(deltaLog)

deltaLog.update().allFiles.collect().foreach { f =>
assert(f.defaultRowCommitVersion.contains(commitVersionForFiles(f.path)))
}

deltaLog.checkpoint(deltaLog.update())

deltaLog.update().allFiles.collect().foreach { f =>
assert(f.defaultRowCommitVersion.contains(commitVersionForFiles(f.path)))
}
}
}
}

test("data skipping reads defaultRowCommitVersion") {
withRowTrackingEnabled(enabled = true) {
withTempDir { tempDir =>
spark.range(start = 0, end = 100, step = 1, numPartitions = 1)
.write.format("delta").mode("append").save(tempDir.getAbsolutePath)
spark.range(start = 100, end = 200, step = 1, numPartitions = 1)
.write.format("delta").mode("append").save(tempDir.getAbsolutePath)
spark.range(start = 200, end = 300, step = 1, numPartitions = 1)
.write.format("delta").mode("append").save(tempDir.getAbsolutePath)

val deltaLog = DeltaLog.forTable(spark, tempDir)
val commitVersionForFiles = expectedCommitVersionsForAllFiles(deltaLog)

val filters = Seq(col("id = 150").expr)
val scan = deltaLog.update().filesForScan(filters)

scan.files.foreach { f =>
assert(f.defaultRowCommitVersion.contains(commitVersionForFiles(f.path)))
}
}
}
}

test("clone does not preserve default row commit versions") {
withRowTrackingEnabled(enabled = true) {
withTempDir { sourceDir =>
spark.range(start = 0, end = 100, step = 1, numPartitions = 1)
.write.format("delta").mode("append").save(sourceDir.getAbsolutePath)
spark.range(start = 100, end = 200, step = 1, numPartitions = 1)
.write.format("delta").mode("append").save(sourceDir.getAbsolutePath)
spark.range(start = 200, end = 300, step = 1, numPartitions = 1)
.write.format("delta").mode("append").save(sourceDir.getAbsolutePath)

withTable("target") {
spark.sql(s"CREATE TABLE target SHALLOW CLONE delta.`${sourceDir.getAbsolutePath}`")

val targetLog = DeltaLog.forTable(spark, TableIdentifier("target"))
targetLog.update().allFiles.collect().foreach { f =>
assert(f.defaultRowCommitVersion.contains(0L))
}
}
}
}
}

test("restore does preserve default row commit versions") {
withRowTrackingEnabled(enabled = true) {
withTempDir { tempDir =>
spark.range(start = 0, end = 100, step = 1, numPartitions = 1)
.write.format("delta").mode("append").save(tempDir.getAbsolutePath)
spark.range(start = 100, end = 200, step = 1, numPartitions = 1)
.write.format("delta").mode("append").save(tempDir.getAbsolutePath)
spark.range(start = 200, end = 300, step = 1, numPartitions = 1)
.write.format("delta").mode("append").save(tempDir.getAbsolutePath)

val deltaLog = DeltaLog.forTable(spark, tempDir)
val commitVersionForFiles = expectedCommitVersionsForAllFiles(deltaLog)

spark.sql(s"RESTORE delta.`${tempDir.getAbsolutePath}` TO VERSION AS OF 1")

deltaLog.update().allFiles.collect().foreach { f =>
assert(f.defaultRowCommitVersion.contains(commitVersionForFiles(f.path)))
}
}
}
}
}