Fix incorrect CRC combine in Kafka produce client #1214

Merged 6 commits on Sep 4, 2024
Changes from 4 commits
@@ -1810,7 +1810,7 @@ private void doEncodeRecordFin(
encodeSlotBuffer.putBytes(encodeSlotLimit, encodeBuffer, 0, encodeProgress);
encodeSlotLimit += encodeProgress;

- if (encodeableRecordBytesDeferred > 0)
+ if (encodeableRecordBytesDeferred > 0 && flushableRequestBytes > 0)
{
doNetworkData(traceId, budgetId, EMPTY_BUFFER, 0, 0);
}
@@ -1994,7 +1994,8 @@ private void encodeCrc()

final int crcOffset = recordBatch.offset() + RecordBatchFW.FIELD_OFFSET_CRC;
final int crcDataOffset = recordBatch.offset() + RecordBatchFW.FIELD_OFFSET_ATTRIBUTES;
- final int crcDataLimit = recordBatchLimit <= encodeLimit ? recordBatchLimit : recordBatch.limit();
+ final int recordHeaderLimit = encodeLimit - (valueCompleteSize - encodeableRecordBytesDeferred);
+ final int crcDataLimit = recordBatchLimit <= encodeLimit ? recordBatchLimit : recordHeaderLimit;

final ByteBuffer encodeSlotByteBuffer = encodePool.byteBuffer(encodeSlot);
final int encodePosition = encodeSlotByteBuffer.position();
@@ -2006,8 +2007,7 @@ private void encodeCrc()
crc.update(encodeSlotByteBuffer);

long checksum = crc.getValue();

- if (crcDataLimit == recordBatch.limit())
+ if (crcDataLimit == recordHeaderLimit && valueCompleteSize != 0)
{
checksum = combineCRC32C(checksum, valueChecksum, valueCompleteSize);
checksum = combineCRC32C(checksum, headersChecksum, headersSize);
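
A note on what `combineCRC32C` does here: when a record value is deferred across fragments, the bytes the batch CRC must cover are never contiguous in one buffer, so the client checksums the record-batch header, the value, and the trailing headers separately and stitches the results together. Two CRC-32C values can be merged without re-reading the data using the GF(2) matrix technique from zlib's `crc32_combine`. The sketch below illustrates that technique for the CRC-32C polynomial; it is an illustration under the assumption that Zilla's `combineCRC32C` behaves equivalently, not a copy of it.

```java
import java.util.zip.CRC32C;

public final class Crc32cCombine
{
    private static final long POLY = 0x82F63B78L; // CRC-32C polynomial, reflected form

    // Multiply a GF(2) 32x32 matrix by a 32-bit vector (both packed into longs).
    private static long gf2MatrixTimes(long[] mat, long vec)
    {
        long sum = 0L;
        for (int i = 0; vec != 0L; vec >>>= 1, i++)
        {
            if ((vec & 1L) != 0L)
            {
                sum ^= mat[i];
            }
        }
        return sum;
    }

    // square[] = mat[] applied twice
    private static void gf2MatrixSquare(long[] square, long[] mat)
    {
        for (int n = 0; n < 32; n++)
        {
            square[n] = gf2MatrixTimes(mat, mat[n]);
        }
    }

    // Given crc1 = crc32c(A), crc2 = crc32c(B) and len2 = B.length,
    // returns crc32c(A || B) without re-reading either block.
    public static long combineCRC32C(long crc1, long crc2, long len2)
    {
        if (len2 <= 0L)
        {
            return crc1; // appending an empty block changes nothing
        }

        long[] even = new long[32]; // operator for 2^n zero bits, n even
        long[] odd = new long[32];  // operator for 2^n zero bits, n odd

        odd[0] = POLY;              // operator for a single zero bit
        long row = 1L;
        for (int n = 1; n < 32; n++)
        {
            odd[n] = row;
            row <<= 1;
        }
        gf2MatrixSquare(even, odd); // two zero bits
        gf2MatrixSquare(odd, even); // four zero bits

        // Append len2 zero bytes to crc1, one bit of len2 at a time.
        do
        {
            gf2MatrixSquare(even, odd);
            if ((len2 & 1L) != 0L)
            {
                crc1 = gf2MatrixTimes(even, crc1);
            }
            len2 >>>= 1;
            if (len2 == 0L)
            {
                break;
            }
            gf2MatrixSquare(odd, even);
            if ((len2 & 1L) != 0L)
            {
                crc1 = gf2MatrixTimes(odd, crc1);
            }
            len2 >>>= 1;
        }
        while (len2 != 0L);

        return crc1 ^ crc2;
    }

    // Quick check: combining the parts equals checksumming the concatenation.
    public static void main(String[] args)
    {
        byte[] a = "record batch header".getBytes();
        byte[] b = "deferred record value".getBytes();
        CRC32C ab = new CRC32C();
        ab.update(a, 0, a.length);
        ab.update(b, 0, b.length);
        CRC32C ca = new CRC32C();
        ca.update(a, 0, a.length);
        CRC32C cb = new CRC32C();
        cb.update(b, 0, b.length);
        System.out.println(combineCRC32C(ca.getValue(), cb.getValue(), b.length) == ab.getValue());
    }
}
```

The zero-padding operator is squared once per bit of `len2`, so a combine costs O(log len2) matrix steps rather than a rescan of the value bytes, which is what makes checksumming fragmented encodes cheap.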
@@ -203,6 +203,26 @@ public void shouldSendMessageValue() throws Exception
k3po.finish();
}

@Test
@Configuration("client.when.topic.yaml")
@Specification({
"${app}/message.empty.crc/client",
"${net}/message.empty.crc/server"})
public void shouldSendMessageEmptyWithCrc() throws Exception
{
k3po.finish();
}

@Test
@Configuration("client.when.topic.yaml")
@Specification({
"${app}/messages.fragmented.crc/client",
"${net}/messages.fragmented.crc/server"})
public void shouldSendFragmentedMessagesWithCrc() throws Exception
{
k3po.finish();
}

@Test
@Configuration("client.when.topic.yaml")
@Specification({
@@ -327,6 +347,17 @@ public void shouldSendMessageValueRepeated() throws Exception
k3po.finish();
}

@Test
@Configuration("client.when.topic.yaml")
@Specification({
"${app}/message.value.repeated.fragmented/client",
"${net}/message.value.repeated/server"})
@Configure(name = KafkaConfigurationTest.KAFKA_CLIENT_PRODUCE_MAX_REQUEST_MILLIS_NAME, value = "200")
public void shouldSendMessageValueRepeatedWhenFragmented() throws Exception
{
k3po.finish();
}

@Test
@Configuration("client.when.topic.yaml")
@Specification({
@@ -2460,6 +2460,13 @@ public KafkaProduceDataExBuilder sequence(
return this;
}

public KafkaProduceDataExBuilder crc32c(
long crc32c)
{
produceDataExRW.crc32c(crc32c);
return this;
}
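
For reference, the value handed to `crc32c(...)` is a standard CRC-32C checksum; in the Kafka record-batch format it covers the batch from the attributes field to the end, which is why the client code above starts its CRC data at `FIELD_OFFSET_ATTRIBUTES`. The JDK's `java.util.zip.CRC32C` (Java 9+) computes one directly; the payload below is purely illustrative:

```java
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32C;

public class Crc32cValue
{
    public static void main(String[] args)
    {
        byte[] payload = "illustrative payload".getBytes(StandardCharsets.UTF_8);
        CRC32C crc = new CRC32C();
        crc.update(payload, 0, payload.length);
        // getValue() zero-extends the unsigned 32-bit checksum into a long,
        // matching the long parameter accepted by crc32c(...) above.
        System.out.printf("crc32c = 0x%08x%n", crc.getValue());
    }
}
```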

public KafkaProduceDataExBuilder ackMode(
String ackMode)
{
@@ -3586,6 +3593,7 @@ public final class KafkaProduceDataExMatcherBuilder
private Long timestamp;
private Long producerId;
private Short producerEpoch;
private Integer crc32c;
private Integer sequence;
private KafkaAckMode ackMode;
private KafkaKeyFW.Builder keyRW;
@@ -3609,6 +3617,13 @@ public KafkaProduceDataExMatcherBuilder timestamp(
return this;
}

public KafkaProduceDataExMatcherBuilder crc32c(
int crc32c)
{
this.crc32c = crc32c;
return this;
}

public KafkaProduceDataExMatcherBuilder producerId(
long producerId)
{
@@ -3702,6 +3717,7 @@ private boolean match(
return matchDeferred(produceDataEx) &&
matchTimestamp(produceDataEx) &&
matchSequence(produceDataEx) &&
matchCrc32c(produceDataEx) &&
matchAckMode(produceDataEx) &&
matchKey(produceDataEx) &&
matchHeaders(produceDataEx);
@@ -3731,6 +3747,12 @@ private boolean matchProducerEpoch(
return producerEpoch == null || producerEpoch == produceDataEx.producerEpoch();
}

private boolean matchCrc32c(
final KafkaProduceDataExFW produceDataEx)
{
return crc32c == null || crc32c == produceDataEx.crc32c();
}

private boolean matchSequence(
final KafkaProduceDataExFW produceDataEx)
{
@@ -0,0 +1,84 @@
#
# Copyright 2021-2023 Aklivity Inc.
#
# Aklivity licenses this file to you under the Apache License,
# version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

property deltaMillis 0L

connect "zilla://streams/app0"
option zilla:window 8192
option zilla:transmission "half-duplex"

write zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.meta()
.topic("test")
.build()
.build()}

connected

read zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.meta()
.topic("test")
.build()
.build()}

read zilla:data.ext ${kafka:dataEx()
.typeId(zilla:id("kafka"))
.meta()
.partition(0, 177)
.build()
.build()}

read notify ROUTED_BROKER_CLIENT

connect await ROUTED_BROKER_CLIENT
"zilla://streams/app0"
option zilla:window 8192
option zilla:transmission "half-duplex"
option zilla:affinity 0xb1

write zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.produce()
.topic("test")
.partition(2)
.build()
.build()}

connected


read zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.produce()
.topic("test")
.partition(2)
.build()
.build()}


write zilla:data.ext ${kafka:dataEx()
.typeId(zilla:id("kafka"))
.produce()
.timestamp(1724846895864)
.ackMode("LEADER_ONLY")
.key("bench_pub_682636139_8#migrate")
.header("sender-id", "zilla-5e105508-c47e-4e41-add1-1b214c2883f6")
.build()
.build()}
write zilla:data.empty
write flush
@@ -0,0 +1,80 @@
#
# Copyright 2021-2023 Aklivity Inc.
#
# Aklivity licenses this file to you under the Apache License,
# version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

property serverAddress "zilla://streams/app0"

accept ${serverAddress}
option zilla:window 8192
option zilla:transmission "half-duplex"

accepted

read zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.meta()
.topic("test")
.build()
.build()}

connected

write zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.meta()
.topic("test")
.build()
.build()}
write flush

write zilla:data.ext ${kafka:dataEx()
.typeId(zilla:id("kafka"))
.meta()
.partition(0, 177)
.build()
.build()}
write flush

accepted

read zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.produce()
.topic("test")
.partition(2)
.build()
.build()}

connected

write zilla:begin.ext ${kafka:beginEx()
.typeId(zilla:id("kafka"))
.produce()
.topic("test")
.partition(2)
.build()
.build()}

read zilla:data.ext ${kafka:matchDataEx()
.typeId(zilla:id("kafka"))
.produce()
.timestamp(1724846895864)
.ackMode("LEADER_ONLY")
.key("bench_pub_682636139_8#migrate")
.header("sender-id", "zilla-5e105508-c47e-4e41-add1-1b214c2883f6")
.build()
.build()}
read zilla:data.empty
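
This scenario writes a record with an empty value (`zilla:data.empty`), one of the shapes this PR adds CRC coverage for. The CRC-32C of an empty block is 0 and appending zero bytes leaves a checksum unchanged, so when `valueCompleteSize == 0` skipping the combine, as the patched condition does, is equivalent to performing it. A quick check of that identity, reusing the `combineCRC32C` sketch above:

```java
import java.util.zip.CRC32C;

public class EmptyValueCrcCheck
{
    public static void main(String[] args)
    {
        CRC32C crc = new CRC32C();
        System.out.println(crc.getValue() == 0L); // true: crc32c of no bytes is 0

        crc.update(new byte[] {0x01, 0x02, 0x03}, 0, 3); // stand-in for record-header bytes
        long base = crc.getValue();

        // Appending a zero-length block is the identity, so skipping the
        // combine for an empty value yields the same checksum as doing it.
        System.out.println(Crc32cCombine.combineCRC32C(base, 0L, 0L) == base); // true
    }
}
```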
