From f0b39c7f1baa0d58be47c3d38ef764d5e21e1267 Mon Sep 17 00:00:00 2001 From: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> Date: Thu, 10 Aug 2023 10:48:50 +0300 Subject: [PATCH 1/8] WIP: fixing backup_pitr flaky tests Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> --- .../endtoend/backup/pitr/backup_pitr_test.go | 4 +- .../backup/vtctlbackup/backup_utils.go | 12 ++- .../backup/vtctlbackup/pitr_test_framework.go | 86 ++++++++++++------- 3 files changed, 66 insertions(+), 36 deletions(-) diff --git a/go/test/endtoend/backup/pitr/backup_pitr_test.go b/go/test/endtoend/backup/pitr/backup_pitr_test.go index a1b29ef47dd..5cad0dc5ba7 100644 --- a/go/test/endtoend/backup/pitr/backup_pitr_test.go +++ b/go/test/endtoend/backup/pitr/backup_pitr_test.go @@ -47,7 +47,7 @@ func TestIncrementalBackupAndRestoreToPos(t *testing.T) { // (of course we only delete backups that still leave us with valid restore paths). // // All of the above is done for BuiltinBackup, XtraBackup, Mysqlctld (which is technically builtin) -func TestIncrementalBackupAndRestoreToTimestamp(t *testing.T) { +func _TestIncrementalBackupAndRestoreToTimestamp(t *testing.T) { tcase := &backup.PITRTestCase{ Name: "BuiltinBackup", SetupType: backup.BuiltinBackup, @@ -59,7 +59,7 @@ func TestIncrementalBackupAndRestoreToTimestamp(t *testing.T) { // TestIncrementalBackupOnTwoTablets runs a series of interleaved backups on two different replicas: full and incremental. // Specifically, it's designed to test how incremental backups are taken by interleaved replicas, so that they successfully build on // one another. -func TestIncrementalBackupOnTwoTablets(t *testing.T) { +func _TestIncrementalBackupOnTwoTablets(t *testing.T) { tcase := &backup.PITRTestCase{ Name: "BuiltinBackup", SetupType: backup.BuiltinBackup, diff --git a/go/test/endtoend/backup/vtctlbackup/backup_utils.go b/go/test/endtoend/backup/vtctlbackup/backup_utils.go index 7ff9d6b860f..a38e828df5e 100644 --- a/go/test/endtoend/backup/vtctlbackup/backup_utils.go +++ b/go/test/endtoend/backup/vtctlbackup/backup_utils.go @@ -1089,10 +1089,18 @@ func vtctlBackupReplicaNoDestroyNoWrites(t *testing.T, replicaIndex int) (backup return backups } +func GetTabletPosition(t *testing.T, tablet *cluster.Vttablet) string { + pos, _ := cluster.GetPrimaryPosition(t, *tablet, hostname) + return pos +} + func GetReplicaPosition(t *testing.T, replicaIndex int) string { replica := getReplica(t, replicaIndex) - pos, _ := cluster.GetPrimaryPosition(t, *replica, hostname) - return pos + return GetTabletPosition(t, replica) +} + +func GetPrimaryPosition(t *testing.T) string { + return GetTabletPosition(t, primary) } func GetReplicaGtidPurged(t *testing.T, replicaIndex int) string { diff --git a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go index ddeb43c7dd7..1fc0acdfc01 100644 --- a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go +++ b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go @@ -61,10 +61,10 @@ type testedBackupTimestampInfo struct { func waitForReplica(t *testing.T, replicaIndex int) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - pMsgs := ReadRowsFromPrimary(t) + primaryPos := GetPrimaryPosition(t) for { - rMsgs := ReadRowsFromReplica(t, replicaIndex) - if len(pMsgs) == len(rMsgs) { + replicaPos := GetReplicaPosition(t, replicaIndex) + if replicaPos == primaryPos { // success return } @@ -78,6 +78,26 @@ func 
waitForReplica(t *testing.T, replicaIndex int) { } } +// func waitForReplica(t *testing.T, replicaIndex int) { +// ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) +// defer cancel() +// pMsgs := ReadRowsFromPrimary(t) +// for { +// rMsgs := ReadRowsFromReplica(t, replicaIndex) +// if len(pMsgs) == len(rMsgs) { +// // success +// return +// } +// select { +// case <-ctx.Done(): +// assert.FailNow(t, "timeout waiting for replica to catch up") +// return +// case <-time.After(time.Second): +// // +// } +// } +// } + // ExecTestIncrementalBackupAndRestoreToPos runs a series of backups: a full backup and multiple incremental backups. // in between, it makes writes to the database, and takes notes: what data was available in what backup. // It then restores each and every one of those backups, in random order, and expects to find the specific data associated with the backup. @@ -170,10 +190,10 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) if tc.writeBeforeBackup { InsertRowOnPrimary(t, "") } - // we wait for 1 second because backups are written to a directory named after the current timestamp, + // we wait for >1 second because backups are written to a directory named after the current timestamp, // in 1 second resolution. We want to avoid two backups that have the same pathname. Realistically this // is only ever a problem in this end-to-end test, not in production. - // Also, we gie the replica a chance to catch up. + // Also, we give the replica a chance to catch up. time.Sleep(postWriteSleepDuration) // randomly flush binary logs 0, 1 or 2 times FlushBinaryLogsOnReplica(t, 0, rand.Intn(3)) @@ -213,37 +233,39 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) if !incrementalFromPos.IsZero() { expectFromPosition = incrementalFromPos.GTIDSet.Union(gtidPurgedPos.GTIDSet) } + t.Logf("======== manifest.Position: %v", manifest.Position.GTIDSet) + t.Logf("======== lastBackupPos.GTIDSet: %v", lastBackupPos.GTIDSet) require.Equalf(t, expectFromPosition, fromPositionIncludingPurged, "expected: %v, found: %v, gtid_purged: %v, manifest.Position: %v", expectFromPosition, fromPositionIncludingPurged, gtidPurgedPos, manifest.Position) }) } - testRestores := func(t *testing.T) { - for _, r := range rand.Perm(len(backupPositions)) { - pos := backupPositions[r] - testName := fmt.Sprintf("%s, %d records", pos, rowsPerPosition[pos]) - t.Run(testName, func(t *testing.T) { - restoreToPos, err := replication.DecodePosition(pos) - require.NoError(t, err) - TestReplicaRestoreToPos(t, 0, restoreToPos, "") - msgs := ReadRowsFromReplica(t, 0) - count, ok := rowsPerPosition[pos] - require.True(t, ok) - assert.Equalf(t, count, len(msgs), "messages: %v", msgs) - }) - } - } - t.Run("PITR", func(t *testing.T) { - testRestores(t) - }) - t.Run("remove full position backups", func(t *testing.T) { - // Delete the fromFullPosition backup(s), which leaves us with less restore options. Try again. 
- for _, backupName := range fromFullPositionBackups { - RemoveBackup(t, backupName) - } - }) - t.Run("PITR-2", func(t *testing.T) { - testRestores(t) - }) + // testRestores := func(t *testing.T) { + // for _, r := range rand.Perm(len(backupPositions)) { + // pos := backupPositions[r] + // testName := fmt.Sprintf("%s, %d records", pos, rowsPerPosition[pos]) + // t.Run(testName, func(t *testing.T) { + // restoreToPos, err := replication.DecodePosition(pos) + // require.NoError(t, err) + // TestReplicaRestoreToPos(t, 0, restoreToPos, "") + // msgs := ReadRowsFromReplica(t, 0) + // count, ok := rowsPerPosition[pos] + // require.True(t, ok) + // assert.Equalf(t, count, len(msgs), "messages: %v", msgs) + // }) + // } + // } + // t.Run("PITR", func(t *testing.T) { + // testRestores(t) + // }) + // t.Run("remove full position backups", func(t *testing.T) { + // // Delete the fromFullPosition backup(s), which leaves us with less restore options. Try again. + // for _, backupName := range fromFullPositionBackups { + // RemoveBackup(t, backupName) + // } + // }) + // t.Run("PITR-2", func(t *testing.T) { + // testRestores(t) + // }) }) } From 185df22322ad8127fed0ceaa45cb3f1c4e1a3712 Mon Sep 17 00:00:00 2001 From: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> Date: Thu, 10 Aug 2023 11:38:15 +0300 Subject: [PATCH 2/8] re-evaluate primary pos Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> --- go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go index 1fc0acdfc01..c41bcd62085 100644 --- a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go +++ b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go @@ -61,8 +61,8 @@ type testedBackupTimestampInfo struct { func waitForReplica(t *testing.T, replicaIndex int) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - primaryPos := GetPrimaryPosition(t) for { + primaryPos := GetPrimaryPosition(t) replicaPos := GetReplicaPosition(t, replicaIndex) if replicaPos == primaryPos { // success From e9abf2f1896abb6ecf160ea4932934b1cad12fca Mon Sep 17 00:00:00 2001 From: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> Date: Thu, 10 Aug 2023 12:01:06 +0300 Subject: [PATCH 3/8] debug Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> --- go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go index c41bcd62085..f50bd5734af 100644 --- a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go +++ b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go @@ -189,6 +189,7 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) t.Run(tc.name, func(t *testing.T) { if tc.writeBeforeBackup { InsertRowOnPrimary(t, "") + t.Logf("========= wrote on primary. Now <<<< %d >>>> rows", len(ReadRowsFromPrimary(t))) } // we wait for >1 second because backups are written to a directory named after the current timestamp, // in 1 second resolution. We want to avoid two backups that have the same pathname. 
Realistically this @@ -198,6 +199,8 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) // randomly flush binary logs 0, 1 or 2 times FlushBinaryLogsOnReplica(t, 0, rand.Intn(3)) waitForReplica(t, 0) + t.Logf("========= primary <<<< %d >>>> rows", len(ReadRowsFromPrimary(t))) + t.Logf("========= replica <<<< %d >>>> rows", len(ReadRowsFromReplica(t, 0))) recordRowsPerPosition(t) // configure --incremental-from-pos to either: // - auto From 417154969cf7cf9e9efb0dcd7ae41ad40094ead4 Mon Sep 17 00:00:00 2001 From: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> Date: Thu, 10 Aug 2023 12:29:33 +0300 Subject: [PATCH 4/8] empty commit to kick CI Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> From 4af6fce0e520829be6196468802b8a46ffde5a87 Mon Sep 17 00:00:00 2001 From: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> Date: Thu, 10 Aug 2023 12:43:12 +0300 Subject: [PATCH 5/8] more debug Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> --- go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go index f50bd5734af..8770e2ca0ec 100644 --- a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go +++ b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go @@ -187,6 +187,7 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) var fromFullPositionBackups []string for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { + t.Logf("========== primary position at start of test: %v", GetPrimaryPosition(t)) if tc.writeBeforeBackup { InsertRowOnPrimary(t, "") t.Logf("========= wrote on primary. 
Now <<<< %d >>>> rows", len(ReadRowsFromPrimary(t))) @@ -201,6 +202,8 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) waitForReplica(t, 0) t.Logf("========= primary <<<< %d >>>> rows", len(ReadRowsFromPrimary(t))) t.Logf("========= replica <<<< %d >>>> rows", len(ReadRowsFromReplica(t, 0))) + t.Logf("========== primary position at recording: %v", GetPrimaryPosition(t)) + t.Logf("========== replica position at recording: %v", GetReplicaPosition(t, 0)) recordRowsPerPosition(t) // configure --incremental-from-pos to either: // - auto From 255136733485a6a723647c2d84c670046bad95de Mon Sep 17 00:00:00 2001 From: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> Date: Thu, 10 Aug 2023 12:56:16 +0300 Subject: [PATCH 6/8] excessive flushing Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> --- .../backup/vtctlbackup/backup_utils.go | 18 ++++++++++++++---- .../backup/vtctlbackup/pitr_test_framework.go | 3 ++- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/go/test/endtoend/backup/vtctlbackup/backup_utils.go b/go/test/endtoend/backup/vtctlbackup/backup_utils.go index a38e828df5e..1a333af253b 100644 --- a/go/test/endtoend/backup/vtctlbackup/backup_utils.go +++ b/go/test/endtoend/backup/vtctlbackup/backup_utils.go @@ -1155,16 +1155,26 @@ func ReadRowsFromReplica(t *testing.T, replicaIndex int) (msgs []string) { return ReadRowsFromTablet(t, getReplica(t, replicaIndex)) } -// FlushBinaryLogsOnReplica issues `FLUSH BINARY LOGS` times -func FlushBinaryLogsOnReplica(t *testing.T, replicaIndex int, count int) { - replica := getReplica(t, replicaIndex) +// FlushBinaryLogsOnTablet issues `FLUSH BINARY LOGS` times +func FlushBinaryLogsOnTablet(t *testing.T, tablet *cluster.Vttablet, count int) { query := "flush binary logs" for i := 0; i < count; i++ { - _, err := replica.VttabletProcess.QueryTablet(query, keyspaceName, true) + _, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true) require.NoError(t, err) } } +// FlushBinaryLogsOnReplica issues `FLUSH BINARY LOGS` times +func FlushBinaryLogsOnReplica(t *testing.T, replicaIndex int, count int) { + replica := getReplica(t, replicaIndex) + FlushBinaryLogsOnTablet(t, replica, count) +} + +// FlushBinaryLogsOnPrimary issues `FLUSH BINARY LOGS` times +func FlushBinaryLogsOnPrimary(t *testing.T, count int) { + FlushBinaryLogsOnTablet(t, primary, count) +} + // FlushAndPurgeBinaryLogsOnReplica intentionally loses all existing binary logs. It flushes into a new binary log // and immediately purges all previous logs. // This is used to lose information. diff --git a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go index 8770e2ca0ec..770650a2f2d 100644 --- a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go +++ b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go @@ -198,7 +198,8 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) // Also, we give the replica a chance to catch up. 
time.Sleep(postWriteSleepDuration) // randomly flush binary logs 0, 1 or 2 times - FlushBinaryLogsOnReplica(t, 0, rand.Intn(3)) + FlushBinaryLogsOnPrimary(t, 3) + FlushBinaryLogsOnReplica(t, 0, 4) waitForReplica(t, 0) t.Logf("========= primary <<<< %d >>>> rows", len(ReadRowsFromPrimary(t))) t.Logf("========= replica <<<< %d >>>> rows", len(ReadRowsFromReplica(t, 0))) From 76de04b90273189c60e5aeb86c84e7ae4383cf52 Mon Sep 17 00:00:00 2001 From: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> Date: Thu, 10 Aug 2023 14:01:31 +0300 Subject: [PATCH 7/8] restore random flushes Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> --- go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go index 770650a2f2d..8770e2ca0ec 100644 --- a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go +++ b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go @@ -198,8 +198,7 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) // Also, we give the replica a chance to catch up. time.Sleep(postWriteSleepDuration) // randomly flush binary logs 0, 1 or 2 times - FlushBinaryLogsOnPrimary(t, 3) - FlushBinaryLogsOnReplica(t, 0, 4) + FlushBinaryLogsOnReplica(t, 0, rand.Intn(3)) waitForReplica(t, 0) t.Logf("========= primary <<<< %d >>>> rows", len(ReadRowsFromPrimary(t))) t.Logf("========= replica <<<< %d >>>> rows", len(ReadRowsFromReplica(t, 0))) From a175ac25400eaaf1526859f82d23c3f144b8bcbe Mon Sep 17 00:00:00 2001 From: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> Date: Thu, 10 Aug 2023 15:06:42 +0300 Subject: [PATCH 8/8] cleanup Signed-off-by: Shlomi Noach <2607934+shlomi-noach@users.noreply.github.com> --- .../endtoend/backup/pitr/backup_pitr_test.go | 4 +- .../backup/vtctlbackup/pitr_test_framework.go | 83 ++++++------------- 2 files changed, 29 insertions(+), 58 deletions(-) diff --git a/go/test/endtoend/backup/pitr/backup_pitr_test.go b/go/test/endtoend/backup/pitr/backup_pitr_test.go index 5cad0dc5ba7..a1b29ef47dd 100644 --- a/go/test/endtoend/backup/pitr/backup_pitr_test.go +++ b/go/test/endtoend/backup/pitr/backup_pitr_test.go @@ -47,7 +47,7 @@ func TestIncrementalBackupAndRestoreToPos(t *testing.T) { // (of course we only delete backups that still leave us with valid restore paths). // // All of the above is done for BuiltinBackup, XtraBackup, Mysqlctld (which is technically builtin) -func _TestIncrementalBackupAndRestoreToTimestamp(t *testing.T) { +func TestIncrementalBackupAndRestoreToTimestamp(t *testing.T) { tcase := &backup.PITRTestCase{ Name: "BuiltinBackup", SetupType: backup.BuiltinBackup, @@ -59,7 +59,7 @@ func _TestIncrementalBackupAndRestoreToTimestamp(t *testing.T) { // TestIncrementalBackupOnTwoTablets runs a series of interleaved backups on two different replicas: full and incremental. // Specifically, it's designed to test how incremental backups are taken by interleaved replicas, so that they successfully build on // one another. 
-func _TestIncrementalBackupOnTwoTablets(t *testing.T) { +func TestIncrementalBackupOnTwoTablets(t *testing.T) { tcase := &backup.PITRTestCase{ Name: "BuiltinBackup", SetupType: backup.BuiltinBackup, diff --git a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go index 8770e2ca0ec..860d0b54c7a 100644 --- a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go +++ b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go @@ -27,7 +27,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/mysqlctl" ) @@ -78,26 +77,6 @@ func waitForReplica(t *testing.T, replicaIndex int) { } } -// func waitForReplica(t *testing.T, replicaIndex int) { -// ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) -// defer cancel() -// pMsgs := ReadRowsFromPrimary(t) -// for { -// rMsgs := ReadRowsFromReplica(t, replicaIndex) -// if len(pMsgs) == len(rMsgs) { -// // success -// return -// } -// select { -// case <-ctx.Done(): -// assert.FailNow(t, "timeout waiting for replica to catch up") -// return -// case <-time.After(time.Second): -// // -// } -// } -// } - // ExecTestIncrementalBackupAndRestoreToPos runs a series of backups: a full backup and multiple incremental backups. // in between, it makes writes to the database, and takes notes: what data was available in what backup. // It then restores each and every one of those backups, in random order, and expects to find the specific data associated with the backup. @@ -187,10 +166,8 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) var fromFullPositionBackups []string for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { - t.Logf("========== primary position at start of test: %v", GetPrimaryPosition(t)) if tc.writeBeforeBackup { InsertRowOnPrimary(t, "") - t.Logf("========= wrote on primary. Now <<<< %d >>>> rows", len(ReadRowsFromPrimary(t))) } // we wait for >1 second because backups are written to a directory named after the current timestamp, // in 1 second resolution. We want to avoid two backups that have the same pathname. 
Realistically this @@ -200,10 +177,6 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) // randomly flush binary logs 0, 1 or 2 times FlushBinaryLogsOnReplica(t, 0, rand.Intn(3)) waitForReplica(t, 0) - t.Logf("========= primary <<<< %d >>>> rows", len(ReadRowsFromPrimary(t))) - t.Logf("========= replica <<<< %d >>>> rows", len(ReadRowsFromReplica(t, 0))) - t.Logf("========== primary position at recording: %v", GetPrimaryPosition(t)) - t.Logf("========== replica position at recording: %v", GetReplicaPosition(t, 0)) recordRowsPerPosition(t) // configure --incremental-from-pos to either: // - auto @@ -239,39 +212,37 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) if !incrementalFromPos.IsZero() { expectFromPosition = incrementalFromPos.GTIDSet.Union(gtidPurgedPos.GTIDSet) } - t.Logf("======== manifest.Position: %v", manifest.Position.GTIDSet) - t.Logf("======== lastBackupPos.GTIDSet: %v", lastBackupPos.GTIDSet) require.Equalf(t, expectFromPosition, fromPositionIncludingPurged, "expected: %v, found: %v, gtid_purged: %v, manifest.Position: %v", expectFromPosition, fromPositionIncludingPurged, gtidPurgedPos, manifest.Position) }) } - // testRestores := func(t *testing.T) { - // for _, r := range rand.Perm(len(backupPositions)) { - // pos := backupPositions[r] - // testName := fmt.Sprintf("%s, %d records", pos, rowsPerPosition[pos]) - // t.Run(testName, func(t *testing.T) { - // restoreToPos, err := replication.DecodePosition(pos) - // require.NoError(t, err) - // TestReplicaRestoreToPos(t, 0, restoreToPos, "") - // msgs := ReadRowsFromReplica(t, 0) - // count, ok := rowsPerPosition[pos] - // require.True(t, ok) - // assert.Equalf(t, count, len(msgs), "messages: %v", msgs) - // }) - // } - // } - // t.Run("PITR", func(t *testing.T) { - // testRestores(t) - // }) - // t.Run("remove full position backups", func(t *testing.T) { - // // Delete the fromFullPosition backup(s), which leaves us with less restore options. Try again. - // for _, backupName := range fromFullPositionBackups { - // RemoveBackup(t, backupName) - // } - // }) - // t.Run("PITR-2", func(t *testing.T) { - // testRestores(t) - // }) + testRestores := func(t *testing.T) { + for _, r := range rand.Perm(len(backupPositions)) { + pos := backupPositions[r] + testName := fmt.Sprintf("%s, %d records", pos, rowsPerPosition[pos]) + t.Run(testName, func(t *testing.T) { + restoreToPos, err := replication.DecodePosition(pos) + require.NoError(t, err) + TestReplicaRestoreToPos(t, 0, restoreToPos, "") + msgs := ReadRowsFromReplica(t, 0) + count, ok := rowsPerPosition[pos] + require.True(t, ok) + assert.Equalf(t, count, len(msgs), "messages: %v", msgs) + }) + } + } + t.Run("PITR", func(t *testing.T) { + testRestores(t) + }) + t.Run("remove full position backups", func(t *testing.T) { + // Delete the fromFullPosition backup(s), which leaves us with less restore options. Try again. + for _, backupName := range fromFullPositionBackups { + RemoveBackup(t, backupName) + } + }) + t.Run("PITR-2", func(t *testing.T) { + testRestores(t) + }) }) }
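
A note on the core change in this series: waitForReplica used to compare row counts between primary and replica, which can report "caught up" while the replica is still behind (equal row counts do not imply equal replication positions, and counts are blind to non-row events such as binlog rotation). The fix compares GTID positions instead, re-reading the primary position on every loop iteration since the primary may keep advancing. Comparing the two position strings with == is safe here because both sides are rendered by the same Vitess code path.

Where a direct MySQL connection to the replica is available, the server can do the waiting itself with WAIT_FOR_EXECUTED_GTID_SET, avoiding the polling loop entirely. Below is a minimal sketch, assuming MySQL 5.7+ and a database/sql handle to the replica; WaitForGTIDSet, replicaDB, and primaryGTIDSet are illustrative names, not part of this series:

package replwait

import (
	"context"
	"database/sql"
	"fmt"
	"time"
)

// WaitForGTIDSet blocks until the replica has applied primaryGTIDSet, or until
// the timeout elapses. WAIT_FOR_EXECUTED_GTID_SET returns 0 once the given GTID
// set is contained in the replica's gtid_executed, and 1 if the timeout hits first.
func WaitForGTIDSet(ctx context.Context, replicaDB *sql.DB, primaryGTIDSet string, timeout time.Duration) error {
	var timedOut int
	err := replicaDB.QueryRowContext(ctx,
		"SELECT WAIT_FOR_EXECUTED_GTID_SET(?, ?)",
		primaryGTIDSet, int(timeout.Seconds()),
	).Scan(&timedOut)
	if err != nil {
		return err
	}
	if timedOut != 0 {
		return fmt.Errorf("timed out waiting for GTID set %s", primaryGTIDSet)
	}
	return nil
}

The polling loop in the patches remains the right tool for this test framework, where positions are fetched through vttablet rather than through a raw SQL connection to the replica.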