fix(server): Fix multi tx cleanup #723

Merged 2 commits on Jan 24, 2023
src/server/dragonfly_test.cc (40 additions, 0 deletions)
@@ -41,6 +41,11 @@ const char kKey2[] = "b";
const char kKey3[] = "c";
const char kKey4[] = "y";

const char kKeySid0[] = "x";
const char kKeySid1[] = "c";
const char kKeySid2[] = "b";
const char kKey2Sid0[] = "y";

} // namespace

// This test is responsible for server and main service
@@ -88,6 +93,15 @@ TEST_F(DflyEngineTest, Sds) {
}

TEST_F(DflyEngineTest, MultiAndEval) {
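// Verify which shard each test key maps to; the kKeySid* constants above encode this mapping.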
ShardId sid1 = Shard(kKey1, num_threads_ - 1);
ShardId sid2 = Shard(kKey2, num_threads_ - 1);
ShardId sid3 = Shard(kKey3, num_threads_ - 1);
ShardId sid4 = Shard(kKey4, num_threads_ - 1);
EXPECT_EQ(0, sid1);
EXPECT_EQ(2, sid2);
EXPECT_EQ(1, sid3);
EXPECT_EQ(0, sid4);

RespExpr resp = Run({"multi"});
ASSERT_EQ(resp, "OK");

@@ -412,6 +426,32 @@ return {offset, epoch}
EXPECT_THAT(resp.GetVec(), ElementsAre(IntArg(1), "6"));
}

// Scenario: 1. A lua call A schedules itself on shards 0, 1, 2.
// 2. Another lua call B schedules itself on shards 1, 2, but on shard 1 (or 2) it
// schedules itself before A.
// The resulting scheduling order: shard 0: A; shard 1: B, A; shard 2: B, A.
// 3. A executes its first command, which coincidentally runs only on shard 0,
// hence A finishes before B and then tries to clean up.
// 4. The multi-transaction cleanup was incorrect and broke on shard 1 (or 2)
// because it assumed that A is at the front of the queue.
TEST_F(DflyEngineTest, EvalBug713) {
const char* script = "return redis.call('get', KEYS[1])";

// A
auto fb0 = pp_->at(1)->LaunchFiber([&] {
fibers_ext::Yield();
for (unsigned i = 0; i < 50; ++i) {
Run({"eval", script, "3", kKeySid0, kKeySid1, kKeySid2});
}
});

// B
for (unsigned j = 0; j < 50; ++j) {
Run({"eval", script, "2", kKeySid1, kKeySid2});
}
fb0.Join();
}
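
The scheduling pattern this test exercises can be pictured with a tiny standalone model. The sketch below is illustrative only: the names (shard_queue, the labels "A" and "B") are made up, and plain std::deque containers stand in for Dragonfly's actual TxQueue. It shows why the transaction that finishes first cannot assume it sits at the front of every shard queue.

#include <algorithm>
#include <cassert>
#include <deque>
#include <string>
#include <vector>

int main() {
  // Queues after scheduling: A is armed on shards 0, 1, 2; B is armed on
  // shards 1 and 2 ahead of A, matching the scenario in the test comment.
  std::vector<std::deque<std::string>> shard_queue = {
      {"A"},       // shard 0
      {"B", "A"},  // shard 1
      {"B", "A"},  // shard 2
  };

  // A's first command touches only shard 0, so A finishes first and must
  // clean itself up on every shard it scheduled on. A front-of-queue
  // assumption holds only on shard 0:
  assert(shard_queue[0].front() == "A");
  assert(shard_queue[1].front() != "A");

  // A positional removal works on every shard and leaves B untouched.
  for (auto& q : shard_queue) {
    auto it = std::find(q.begin(), q.end(), "A");
    assert(it != q.end());
    q.erase(it);
  }
  assert(shard_queue[1].front() == "B");
  return 0;
}

The fix in transaction.cc below follows the same idea: remove the transaction at its recorded queue position instead of popping the front.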

TEST_F(DflyEngineTest, EvalSha) {
auto resp = Run({"script", "load", "return 5"});
EXPECT_THAT(resp, ArgType(RespExpr::STRING));
src/server/transaction.cc (5 additions, 5 deletions)
@@ -1103,15 +1103,15 @@ void Transaction::UnlockMultiShardCb(const std::vector<KeyList>& sharded_keys, E

// It does not have to be that all shards in a multi-transaction execute this tx.
// Hence it could stay in the tx queue. We perform the necessary cleanup and remove it from
// there.
// there. The transaction is not guaranteed to be at the front of the queue.
if (sd.pq_pos != TxQueue::kEnd) {
DVLOG(1) << "unlockmulti: TxPopFront " << DebugId();
DVLOG(1) << "unlockmulti: TxRemove " << DebugId();

TxQueue* txq = shard->txq();
DCHECK(!txq->Empty());
Transaction* trans = absl::get<Transaction*>(txq->Front());
DCHECK(trans == this);
txq->PopFront();
DCHECK_EQ(absl::get<Transaction*>(txq->At(sd.pq_pos)), this);

txq->Remove(sd.pq_pos);
sd.pq_pos = TxQueue::kEnd;
}

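To summarize the shape of the change, the sketch below contrasts the removed PopFront()-based cleanup with the positional Remove(sd.pq_pos) the diff switches to. MiniTxQueue is an assumption for illustration only, not Dragonfly's real TxQueue; only the Empty/Front/PopFront/At/Remove call shapes are taken from the diff.

#include <cassert>
#include <list>

struct Tx {};  // stand-in for Transaction in this sketch

// Illustrative queue with stable positions, loosely mirroring the
// Empty/Front/PopFront/At/Remove calls visible in the diff.
struct MiniTxQueue {
  using Pos = std::list<Tx*>::iterator;
  std::list<Tx*> items;

  Pos Insert(Tx* t) { return items.insert(items.end(), t); }
  bool Empty() const { return items.empty(); }
  Tx* Front() const { return items.front(); }
  void PopFront() { items.pop_front(); }
  Tx* At(Pos p) const { return *p; }
  void Remove(Pos p) { items.erase(p); }
};

int main() {
  MiniTxQueue q;
  Tx a, b;
  q.Insert(&b);                           // B was scheduled first on this shard
  MiniTxQueue::Pos pos_a = q.Insert(&a);  // A sits behind B

  // Old cleanup for A assumed it was at the front:
  //   DCHECK(q.Front() == &a);  // would fire here, like the removed DCHECK
  //   q.PopFront();             // and would have dropped B, not A
  //
  // New cleanup removes A at its recorded position and leaves B queued:
  assert(q.At(pos_a) == &a);
  q.Remove(pos_a);
  assert(!q.Empty() && q.Front() == &b);
  return 0;
}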