feat: make all durability policy constants
Signed-off-by: Manan Gupta <manan@planetscale.com>
GuptaManan100 committed Dec 30, 2024
1 parent dce8d00 commit 76f7e08
Showing 32 changed files with 244 additions and 209 deletions.
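The diff below replaces hard-coded durability policy strings ("none", "semi_sync", "cross_cell") with named constants exported by the reparentutil package, so callers can no longer drift from the canonical spellings. As a reading aid, here is a minimal sketch of what those declarations presumably look like: the constant names are taken from the diff, the values are inferred from the string literals they replace, and the actual file and grouping inside go/vt/vtctl/reparentutil may differ.

    // Presumed declarations in package reparentutil, reconstructed from
    // this diff rather than copied from the Vitess source tree.
    package reparentutil

    const (
        // DurabilityNone is the default policy: no semi-sync durability
        // is enforced on writes.
        DurabilityNone = "none"
        // DurabilitySemiSync requires a write to be ACKed by a semi-sync
        // replica before it commits on the primary.
        DurabilitySemiSync = "semi_sync"
        // DurabilityCrossCell requires the semi-sync ACK to come from a
        // replica in a different cell than the primary.
        DurabilityCrossCell = "cross_cell"
    )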
7 changes: 4 additions & 3 deletions go/cmd/vtctldclient/command/keyspaces.go
@@ -26,6 +26,7 @@ import (

"vitess.io/vitess/go/mysql/sqlerror"
"vitess.io/vitess/go/protoutil"
"vitess.io/vitess/go/vt/vtctl/reparentutil"

"vitess.io/vitess/go/cmd/vtctldclient/cli"
"vitess.io/vitess/go/constants/sidecar"
@@ -153,7 +154,7 @@ func commandCreateKeyspace(cmd *cobra.Command, args []string) error {

var snapshotTime *vttime.Time
if topodatapb.KeyspaceType(createKeyspaceOptions.KeyspaceType) == topodatapb.KeyspaceType_SNAPSHOT {
if createKeyspaceOptions.DurabilityPolicy != "none" {
if createKeyspaceOptions.DurabilityPolicy != reparentutil.DurabilityNone {
return errors.New("--durability-policy cannot be specified while creating a snapshot keyspace")
}

@@ -409,7 +410,7 @@ func init() {
CreateKeyspace.Flags().Var(&createKeyspaceOptions.KeyspaceType, "type", "The type of the keyspace.")
CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.BaseKeyspace, "base-keyspace", "", "The base keyspace for a snapshot keyspace.")
CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SnapshotTimestamp, "snapshot-timestamp", "", "The snapshot time for a snapshot keyspace, as a timestamp in RFC3339 format.")
CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.DurabilityPolicy, "durability-policy", "none", "Type of durability to enforce for this keyspace. Default is none. Possible values include 'semi_sync' and others as dictated by registered plugins.")
CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.DurabilityPolicy, "durability-policy", reparentutil.DurabilityNone, "Type of durability to enforce for this keyspace. Default is none. Possible values include 'semi_sync' and others as dictated by registered plugins.")
CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SidecarDBName, "sidecar-db-name", sidecar.DefaultName, "(Experimental) Name of the Vitess sidecar database that tablets in this keyspace will use for internal metadata.")
Root.AddCommand(CreateKeyspace)

@@ -425,7 +426,7 @@ func init() {
RemoveKeyspaceCell.Flags().BoolVarP(&removeKeyspaceCellOptions.Recursive, "recursive", "r", false, "Also delete all tablets in that cell belonging to the specified keyspace.")
Root.AddCommand(RemoveKeyspaceCell)

SetKeyspaceDurabilityPolicy.Flags().StringVar(&setKeyspaceDurabilityPolicyOptions.DurabilityPolicy, "durability-policy", "none", "Type of durability to enforce for this keyspace. Default is none. Other values include 'semi_sync' and others as dictated by registered plugins.")
SetKeyspaceDurabilityPolicy.Flags().StringVar(&setKeyspaceDurabilityPolicyOptions.DurabilityPolicy, "durability-policy", reparentutil.DurabilityNone, "Type of durability to enforce for this keyspace. Default is none. Other values include 'semi_sync' and others as dictated by registered plugins.")
Root.AddCommand(SetKeyspaceDurabilityPolicy)

ValidateSchemaKeyspace.Flags().BoolVar(&validateSchemaKeyspaceOptions.IncludeViews, "include-views", false, "Includes views in compared schemas.")
7 changes: 4 additions & 3 deletions go/test/endtoend/keyspace/keyspace_test.go
@@ -29,6 +29,7 @@ import (
"vitess.io/vitess/go/json2"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/vt/key"
"vitess.io/vitess/go/vt/vtctl/reparentutil"

topodatapb "vitess.io/vitess/go/vt/proto/topodata"
vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
@@ -142,18 +143,18 @@ func TestDurabilityPolicyField(t *testing.T) {

out, err := vtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", "ks_durability", "--durability-policy=semi_sync")
require.NoError(t, err, out)
checkDurabilityPolicy(t, "semi_sync")
checkDurabilityPolicy(t, reparentutil.DurabilitySemiSync)

out, err = vtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", "ks_durability", "--durability-policy=none")
require.NoError(t, err, out)
checkDurabilityPolicy(t, "none")
checkDurabilityPolicy(t, reparentutil.DurabilityNone)

out, err = vtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability")
require.NoError(t, err, out)

out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", "--durability-policy=semi_sync", "ks_durability")
require.NoError(t, err, out)
checkDurabilityPolicy(t, "semi_sync")
checkDurabilityPolicy(t, reparentutil.DurabilitySemiSync)

out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability")
require.NoError(t, err, out)
27 changes: 14 additions & 13 deletions go/test/endtoend/reparent/emergencyreparent/ers_test.go
@@ -28,10 +28,11 @@ import (
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/test/endtoend/reparent/utils"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/vtctl/reparentutil"
)

func TestTrivialERS(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

@@ -55,7 +56,7 @@ func TestTrivialERS(t *testing.T) {
}

func TestReparentIgnoreReplicas(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
var err error
@@ -96,7 +97,7 @@ func TestReparentIgnoreReplicas(t *testing.T) {
}

func TestReparentDownPrimary(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

@@ -131,7 +132,7 @@ func TestReparentDownPrimary(t *testing.T) {
}

func TestReparentNoChoiceDownPrimary(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
var err error
@@ -166,7 +167,7 @@ func TestReparentNoChoiceDownPrimary(t *testing.T) {

func TestSemiSyncSetupCorrectly(t *testing.T) {
t.Run("semi-sync enabled", func(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

@@ -193,7 +194,7 @@ func TestSemiSyncSetupCorrectly(t *testing.T) {
})

t.Run("semi-sync disabled", func(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "none")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilityNone)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

@@ -222,7 +223,7 @@ func TestSemiSyncSetupCorrectly(t *testing.T) {

// TestERSPromoteRdonly tests that we never end up promoting a rdonly instance as the primary
func TestERSPromoteRdonly(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
var err error
@@ -249,7 +250,7 @@ func TestERSPromoteRdonly(t *testing.T) {

// TestERSPreventCrossCellPromotion tests that we promote a replica in the same cell as the previous primary if the prevent cross cell promotion flag is set
func TestERSPreventCrossCellPromotion(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
var err error
@@ -271,7 +272,7 @@ func TestERSPreventCrossCellPromotion(t *testing.T) {
// TestPullFromRdonly tests that if a rdonly tablet is the most advanced, then our promoted primary should have
// caught up to it by pulling transactions from it
func TestPullFromRdonly(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
var err error
@@ -342,7 +343,7 @@ func TestPullFromRdonly(t *testing.T) {
// replicas which do not have any replication status and also succeeds if the io thread
// is stopped on the primary elect.
func TestNoReplicationStatusAndIOThreadStopped(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
@@ -441,7 +442,7 @@ func TestERSForInitialization(t *testing.T) {
}

func TestRecoverWithMultipleFailures(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
@@ -468,7 +469,7 @@ func TestRecoverWithMultipleFailures(t *testing.T) {
// TestERSFailFast tests that ERS will fail fast if it cannot find any tablet which can be safely promoted instead of promoting
// a tablet and hanging while inserting a row in the reparent journal on getting semi-sync ACKs
func TestERSFailFast(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
@@ -507,7 +508,7 @@ func TestERSFailFast(t *testing.T) {
// TestReplicationStopped checks that ERS ignores the tablets that have sql thread stopped.
// If there are more than 1, we also fail.
func TestReplicationStopped(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
13 changes: 7 additions & 6 deletions go/test/endtoend/reparent/newfeaturetest/reparent_test.go
@@ -28,6 +28,7 @@ import (
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/test/endtoend/reparent/utils"
"vitess.io/vitess/go/vt/vtctl/reparentutil"
)

// TestRecoverWithMultipleVttabletFailures tests that ERS succeeds with the default values
@@ -36,7 +37,7 @@ import (
// The test takes down the vttablets of the primary and a rdonly tablet and runs ERS with the
// default values of remote_operation_timeout, lock-timeout flags and wait_replicas_timeout subflag.
func TestRecoverWithMultipleVttabletFailures(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
@@ -67,7 +68,7 @@ func TestRecoverWithMultipleVttabletFailures(t *testing.T) {
// and ERS succeeds.
func TestSingleReplicaERS(t *testing.T) {
// Set up a cluster with none durability policy
clusterInstance := utils.SetupReparentCluster(t, "none")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilityNone)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
// Confirm that the replication is setup correctly in the beginning.
@@ -102,7 +103,7 @@ func TestSingleReplicaERS(t *testing.T) {

// TestTabletRestart tests that a running tablet can be restarted and everything is still fine
func TestTabletRestart(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

@@ -114,7 +115,7 @@ func TestTabletRestart(t *testing.T) {

// TestChangeTypeWithoutSemiSync ensures that ChangeTabletType works even when semi-sync plugins are not loaded.
func TestChangeTypeWithoutSemiSync(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "none")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilityNone)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

@@ -159,7 +160,7 @@ func TestChangeTypeWithoutSemiSync(t *testing.T) {
// TestERSWithWriteInPromoteReplica tests that ERS doesn't fail even if there is a
// write that happens when PromoteReplica is called.
func TestERSWithWriteInPromoteReplica(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
@@ -176,7 +177,7 @@ func TestERSWithWriteInPromoteReplica(t *testing.T) {
}

func TestBufferingWithMultipleDisruptions(t *testing.T) {
clusterInstance := utils.SetupShardedReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupShardedReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)

// Stop all VTOrc instances, so that they don't interfere with the test.
25 changes: 13 additions & 12 deletions go/test/endtoend/reparent/plannedreparent/reparent_test.go
@@ -33,10 +33,11 @@ import (
"vitess.io/vitess/go/test/endtoend/reparent/utils"
"vitess.io/vitess/go/vt/log"
replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata"
"vitess.io/vitess/go/vt/vtctl/reparentutil"
)

func TestPrimaryToSpareStateChangeImpossible(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

@@ -47,7 +48,7 @@ func TestPrimaryToSpareStateChangeImpossible(t *testing.T) {
}

func TestReparentCrossCell(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

@@ -60,7 +61,7 @@ func TestReparentCrossCell(t *testing.T) {
}

func TestReparentGraceful(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

@@ -82,7 +83,7 @@ func TestReparentGraceful(t *testing.T) {

// TestPRSWithDrainedLaggingTablet tests that PRS succeeds even if we have a lagging drained tablet
func TestPRSWithDrainedLaggingTablet(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

@@ -108,7 +109,7 @@ func TestPRSWithDrainedLaggingTablet(t *testing.T) {
}

func TestReparentReplicaOffline(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

@@ -125,7 +126,7 @@ func TestReparentReplicaOffline(t *testing.T) {
}

func TestReparentAvoid(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
utils.DeleteTablet(t, clusterInstance, tablets[2])
@@ -172,13 +173,13 @@ func TestReparentAvoid(t *testing.T) {
}

func TestReparentFromOutside(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
reparentFromOutside(t, clusterInstance, false)
}

func TestReparentFromOutsideWithNoPrimary(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

@@ -277,7 +278,7 @@ func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessClus
}

func TestReparentWithDownReplica(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

@@ -323,7 +324,7 @@ func TestReparentWithDownReplica(t *testing.T) {
}

func TestChangeTypeSemiSync(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

@@ -389,7 +390,7 @@ func TestChangeTypeSemiSync(t *testing.T) {
// 1. When PRS is run with the cross_cell durability policy setup, then the semi-sync settings on all the tablets are as expected
// 2. Bringing up a new vttablet should have its replication and semi-sync setup correctly without any manual intervention
func TestCrossCellDurability(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "cross_cell")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilityCrossCell)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets

@@ -428,7 +429,7 @@ func TestCrossCellDurability(t *testing.T) {

// TestFullStatus tests that the RPC FullStatus works as intended.
func TestFullStatus(t *testing.T) {
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]})
3 changes: 2 additions & 1 deletion go/test/endtoend/reparent/semisync/semi_sync_test.go
@@ -25,6 +25,7 @@ import (
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/test/endtoend/reparent/utils"
"vitess.io/vitess/go/vt/vtctl/reparentutil"
)

func TestSemiSyncUpgradeDowngrade(t *testing.T) {
@@ -33,7 +34,7 @@ func TestSemiSyncUpgradeDowngrade(t *testing.T) {
if ver != 21 {
t.Skip("We only want to run this test for v21 release")
}
clusterInstance := utils.SetupReparentCluster(t, "semi_sync")
clusterInstance := utils.SetupReparentCluster(t, reparentutil.DurabilitySemiSync)
defer utils.TeardownCluster(clusterInstance)
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
