Skip to content

Commit

Permalink
MINOR: Call logSegments.toBuffer only when required (#9971)
Browse files · Browse the repository at this point in the history
Reviewers: Ismael Juma <ismael@juma.me.uk>, Satish Duggana <satishd@apache.org>, Chia-Ping Tsai <chia7712@gmail.com>
  • Branch information: kowshik authored Jan 27, 2021
1 parent c830bce commit 4075a5c
Showing 1 changed file with 3 additions and 3 deletions.
6 changes: 3 additions & 3 deletions core/src/main/scala/kafka/log/Log.scala
Original file line number Diff line number Diff line change
Expand Up @@ -1648,9 +1648,6 @@ class Log(@volatile private var _dir: File,
s"for partition $topicPartition is ${config.messageFormatVersion} which is earlier than the minimum " +
s"required version $KAFKA_0_10_0_IV0")

// Cache to avoid race conditions. `toBuffer` is faster than most alternatives and provides
// constant time access while being safe to use with concurrent collections unlike `toArray`.
val segmentsCopy = logSegments.toBuffer
// For the earliest and latest, we do not need to return the timestamp.
if (targetTimestamp == ListOffsetsRequest.EARLIEST_TIMESTAMP) {
// The first cached epoch usually corresponds to the log start offset, but we have to verify this since
Expand All @@ -1667,6 +1664,9 @@ class Log(@volatile private var _dir: File,
val epochOptional = Optional.ofNullable(latestEpochOpt.orNull)
Some(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, logEndOffset, epochOptional))
} else {
// Cache to avoid race conditions. `toBuffer` is faster than most alternatives and provides
// constant time access while being safe to use with concurrent collections unlike `toArray`.
val segmentsCopy = logSegments.toBuffer
// We need to search the first segment whose largest timestamp is >= the target timestamp if there is one.
val targetSeg = segmentsCopy.find(_.largestTimestamp >= targetTimestamp)
targetSeg.flatMap(_.findOffsetByTimestamp(targetTimestamp, logStartOffset))
Expand Down

0 comments on commit 4075a5c

Please sign in to comment.