diff --git a/ddl/cancel_ddl_test.go b/ddl/cancel_ddl_test.go new file mode 100644 index 0000000000000..695d1dea30b34 --- /dev/null +++ b/ddl/cancel_ddl_test.go @@ -0,0 +1,911 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ddl + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/table" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/admin" + "github.com/pingcap/tidb/util/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type testDDLSerialSuiteToVerify struct { + suite.Suite +} + +func TestDDLSerialSuite(t *testing.T) { + suite.Run(t, new(testDDLSerialSuiteToVerify)) +} + +func (s *testDDLSerialSuiteToVerify) SetupSuite() { + SetWaitTimeWhenErrorOccurred(time.Microsecond) +} + +func checkCancelState(txn kv.Transaction, job *model.Job, test *testCancelJob) error { + var checkErr error + addIndexFirstReorg := (test.act == model.ActionAddIndex || test.act == model.ActionAddPrimaryKey) && + job.SchemaState == model.StateWriteReorganization && job.SnapshotVer == 0 + // If the action is adding 
index and the state is writing reorganization, it wants to test the case of cancelling the job when backfilling indexes. + // When the job satisfies this case of addIndexFirstReorg, the worker hasn't started to backfill indexes. + if test.cancelState == job.SchemaState && !addIndexFirstReorg && !job.IsRollingback() { + errs, err := admin.CancelJobs(txn, test.jobIDs) + if err != nil { + checkErr = errors.Trace(err) + return checkErr + } + // It only tests cancel one DDL job. + if !terror.ErrorEqual(errs[0], test.cancelRetErrs[0]) { + checkErr = errors.Trace(errs[0]) + return checkErr + } + } + return checkErr +} + +type testCancelJob struct { + jobIDs []int64 + cancelRetErrs []error // cancelRetErrs is the first return value of CancelJobs. + act model.ActionType // act is the job action. + cancelState model.SchemaState +} + +func buildCancelJobTests(firstID int64) []testCancelJob { + noErrs := []error{nil} + tests := []testCancelJob{ + {act: model.ActionAddIndex, jobIDs: []int64{firstID + 1}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly}, + {act: model.ActionAddIndex, jobIDs: []int64{firstID + 2}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, + {act: model.ActionAddIndex, jobIDs: []int64{firstID + 3}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization}, + {act: model.ActionAddIndex, jobIDs: []int64{firstID + 4}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 4)}, cancelState: model.StatePublic}, + + // Test cancel drop index job , see TestCancelDropIndex. 
+ {act: model.ActionAddColumn, jobIDs: []int64{firstID + 5}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly}, + {act: model.ActionAddColumn, jobIDs: []int64{firstID + 6}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, + {act: model.ActionAddColumn, jobIDs: []int64{firstID + 7}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization}, + {act: model.ActionAddColumn, jobIDs: []int64{firstID + 8}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 8)}, cancelState: model.StatePublic}, + + // Test create table, watch out, table id will alloc a globalID. + {act: model.ActionCreateTable, jobIDs: []int64{firstID + 10}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + // Test create database, watch out, database id will alloc a globalID. + {act: model.ActionCreateSchema, jobIDs: []int64{firstID + 12}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + + {act: model.ActionDropColumn, jobIDs: []int64{firstID + 13}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 13)}, cancelState: model.StateDeleteOnly}, + {act: model.ActionDropColumn, jobIDs: []int64{firstID + 14}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 14)}, cancelState: model.StateWriteOnly}, + {act: model.ActionDropColumn, jobIDs: []int64{firstID + 15}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 15)}, cancelState: model.StateWriteReorganization}, + {act: model.ActionRebaseAutoID, jobIDs: []int64{firstID + 16}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionShardRowID, jobIDs: []int64{firstID + 17}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + + {act: model.ActionModifyColumn, jobIDs: []int64{firstID + 18}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionModifyColumn, jobIDs: []int64{firstID + 19}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly}, + + {act: 
model.ActionAddForeignKey, jobIDs: []int64{firstID + 20}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionAddForeignKey, jobIDs: []int64{firstID + 21}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 21)}, cancelState: model.StatePublic}, + {act: model.ActionDropForeignKey, jobIDs: []int64{firstID + 22}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionDropForeignKey, jobIDs: []int64{firstID + 23}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 23)}, cancelState: model.StatePublic}, + + {act: model.ActionRenameTable, jobIDs: []int64{firstID + 24}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionRenameTable, jobIDs: []int64{firstID + 25}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 25)}, cancelState: model.StatePublic}, + + {act: model.ActionModifyTableCharsetAndCollate, jobIDs: []int64{firstID + 26}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionModifyTableCharsetAndCollate, jobIDs: []int64{firstID + 27}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 27)}, cancelState: model.StatePublic}, + {act: model.ActionTruncateTablePartition, jobIDs: []int64{firstID + 28}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionTruncateTablePartition, jobIDs: []int64{firstID + 29}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 29)}, cancelState: model.StatePublic}, + {act: model.ActionModifySchemaCharsetAndCollate, jobIDs: []int64{firstID + 31}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionModifySchemaCharsetAndCollate, jobIDs: []int64{firstID + 32}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 32)}, cancelState: model.StatePublic}, + + {act: model.ActionAddPrimaryKey, jobIDs: []int64{firstID + 33}, cancelRetErrs: 
noErrs, cancelState: model.StateDeleteOnly}, + {act: model.ActionAddPrimaryKey, jobIDs: []int64{firstID + 34}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, + {act: model.ActionAddPrimaryKey, jobIDs: []int64{firstID + 35}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization}, + {act: model.ActionAddPrimaryKey, jobIDs: []int64{firstID + 36}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 36)}, cancelState: model.StatePublic}, + {act: model.ActionDropPrimaryKey, jobIDs: []int64{firstID + 37}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, + {act: model.ActionDropPrimaryKey, jobIDs: []int64{firstID + 38}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 38)}, cancelState: model.StateDeleteOnly}, + + {act: model.ActionAddColumns, jobIDs: []int64{firstID + 39}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly}, + {act: model.ActionAddColumns, jobIDs: []int64{firstID + 40}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, + {act: model.ActionAddColumns, jobIDs: []int64{firstID + 41}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization}, + {act: model.ActionAddColumns, jobIDs: []int64{firstID + 42}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 42)}, cancelState: model.StatePublic}, + + {act: model.ActionDropColumns, jobIDs: []int64{firstID + 43}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 43)}, cancelState: model.StateDeleteOnly}, + {act: model.ActionDropColumns, jobIDs: []int64{firstID + 44}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 44)}, cancelState: model.StateWriteOnly}, + {act: model.ActionDropColumns, jobIDs: []int64{firstID + 45}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 45)}, cancelState: model.StateWriteReorganization}, + + {act: model.ActionAlterIndexVisibility, jobIDs: 
[]int64{firstID + 47}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionAlterIndexVisibility, jobIDs: []int64{firstID + 48}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 48)}, cancelState: model.StatePublic}, + + {act: model.ActionExchangeTablePartition, jobIDs: []int64{firstID + 54}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionExchangeTablePartition, jobIDs: []int64{firstID + 55}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 55)}, cancelState: model.StatePublic}, + + {act: model.ActionAddTablePartition, jobIDs: []int64{firstID + 60}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionAddTablePartition, jobIDs: []int64{firstID + 61}, cancelRetErrs: noErrs, cancelState: model.StateReplicaOnly}, + {act: model.ActionAddTablePartition, jobIDs: []int64{firstID + 62}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob}, cancelState: model.StatePublic}, + + // modify column has two different types, normal-type and reorg-type. The latter has 5 states and it can be cancelled in any of them except the public state. 
+ {act: model.ActionModifyColumn, jobIDs: []int64{firstID + 65}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionModifyColumn, jobIDs: []int64{firstID + 66}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly}, + {act: model.ActionModifyColumn, jobIDs: []int64{firstID + 67}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, + {act: model.ActionModifyColumn, jobIDs: []int64{firstID + 68}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization}, + {act: model.ActionModifyColumn, jobIDs: []int64{firstID + 69}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob}, cancelState: model.StatePublic}, + + // for drop indexes + {act: model.ActionDropIndexes, jobIDs: []int64{firstID + 72}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 72)}, cancelState: model.StateWriteOnly}, + {act: model.ActionDropIndexes, jobIDs: []int64{firstID + 73}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 73)}, cancelState: model.StateDeleteOnly}, + {act: model.ActionDropIndexes, jobIDs: []int64{firstID + 74}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 74)}, cancelState: model.StateWriteReorganization}, + + // for alter db placement + {act: model.ActionModifySchemaDefaultPlacement, jobIDs: []int64{firstID + 75}, cancelRetErrs: noErrs, cancelState: model.StateNone}, + {act: model.ActionModifySchemaDefaultPlacement, jobIDs: []int64{firstID + 76}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 76)}, cancelState: model.StatePublic}, + } + + return tests +} + +func (s *testDDLSerialSuiteToVerify) checkDropIdx(t *testing.T, d *ddl, schemaID int64, tableID int64, idxName string, success bool) { + checkIdxExist(t, d, schemaID, tableID, idxName, !success) +} + +func (s *testDDLSerialSuiteToVerify) checkAddIdx(t *testing.T, d *ddl, schemaID int64, tableID int64, idxName string, success bool) { + checkIdxExist(t, d, 
schemaID, tableID, idxName, success) +} + +func checkIdxExist(t *testing.T, d *ddl, schemaID int64, tableID int64, idxName string, expectedExist bool) { + changedTable := testGetTable(t, d, schemaID, tableID) + var found bool + for _, idxInfo := range changedTable.Meta().Indices { + if idxInfo.Name.O == idxName { + found = true + break + } + } + require.Equal(t, found, expectedExist) +} + +func (s *testDDLSerialSuiteToVerify) checkAddColumns(d *ddl, schemaID int64, tableID int64, colNames []string, success bool) { + changedTable := testGetTable(s.T(), d, schemaID, tableID) + found := !checkColumnsNotFound(changedTable, colNames) + require.Equal(s.T(), found, success) +} + +func (s *testDDLSerialSuiteToVerify) checkCancelDropColumns(d *ddl, schemaID int64, tableID int64, colNames []string, success bool) { + changedTable := testGetTable(s.T(), d, schemaID, tableID) + notFound := checkColumnsNotFound(changedTable, colNames) + require.Equal(s.T(), notFound, success) +} + +func checkColumnsNotFound(t table.Table, colNames []string) bool { + notFound := true + for _, colName := range colNames { + for _, colInfo := range t.Meta().Columns { + if colInfo.Name.O == colName { + notFound = false + } + } + } + return notFound +} + +func checkIdxVisibility(changedTable table.Table, idxName string, expected bool) bool { + for _, idxInfo := range changedTable.Meta().Indices { + if idxInfo.Name.O == idxName && idxInfo.Invisible == expected { + return true + } + } + return false +} + +func (s *testDDLSerialSuiteToVerify) TestCancelJob() { + store := createMockStore(s.T()) + defer func() { + require.NoError(s.T(), store.Close()) + }() + d, err := testNewDDLAndStart( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + require.NoError(s.T(), err) + defer func() { + require.NoError(s.T(), d.Stop()) + }() + dbInfo, err := testSchemaInfo(d, "test_cancel_job") + require.NoError(s.T(), err) + testCreateSchema(s.T(), testNewContext(d), d, dbInfo) + // create a partition 
table. + partitionTblInfo := testTableInfoWithPartition(s.T(), d, "t_partition", 5) + // Skip using sessPool. Make sure adding primary key can be successful. + partitionTblInfo.Columns[0].Flag |= mysql.NotNullFlag + // create table t (c1 int, c2 int, c3 int, c4 int, c5 int); + tblInfo, err := testTableInfo(d, "t", 5) + require.NoError(s.T(), err) + ctx := testNewContext(d) + err = ctx.NewTxn(context.Background()) + require.NoError(s.T(), err) + err = ctx.GetSessionVars().SetSystemVar("tidb_enable_exchange_partition", "1") + require.NoError(s.T(), err) + defer func() { + err := ctx.GetSessionVars().SetSystemVar("tidb_enable_exchange_partition", "0") + require.NoError(s.T(), err) + }() + testCreateTable(s.T(), ctx, d, dbInfo, partitionTblInfo) + tableAutoID := int64(100) + shardRowIDBits := uint64(5) + tblInfo.AutoIncID = tableAutoID + tblInfo.ShardRowIDBits = shardRowIDBits + job := testCreateTable(s.T(), ctx, d, dbInfo, tblInfo) + // insert t values (1, 2, 3, 4, 5); + originTable := testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) + row := types.MakeDatums(1, 2, 3, 4, 5) + _, err = originTable.AddRecord(ctx, row) + require.NoError(s.T(), err) + txn, err := ctx.Txn(true) + require.NoError(s.T(), err) + err = txn.Commit(context.Background()) + require.NoError(s.T(), err) + + tc := &TestDDLCallback{} + // set up hook + firstJobID := job.ID + tests := buildCancelJobTests(firstJobID) + var checkErr error + var mu sync.Mutex + var test *testCancelJob + updateTest := func(t *testCancelJob) { + mu.Lock() + test = t + mu.Unlock() + } + hookCancelFunc := func(job *model.Job) { + if job.State == model.JobStateSynced || job.State == model.JobStateCancelled || job.State == model.JobStateCancelling { + return + } + // This hook only valid for the related test job. + // This is use to avoid parallel test fail. 
+ mu.Lock() + if len(test.jobIDs) > 0 && test.jobIDs[0] != job.ID { + mu.Unlock() + return + } + mu.Unlock() + if checkErr != nil { + return + } + + hookCtx := mock.NewContext() + hookCtx.Store = store + err1 := hookCtx.NewTxn(context.Background()) + if err1 != nil { + checkErr = errors.Trace(err1) + return + } + txn, err1 = hookCtx.Txn(true) + if err1 != nil { + checkErr = errors.Trace(err1) + return + } + mu.Lock() + checkErr = checkCancelState(txn, job, test) + mu.Unlock() + if checkErr != nil { + return + } + err1 = txn.Commit(context.Background()) + if err1 != nil { + checkErr = errors.Trace(err1) + return + } + } + tc.onJobUpdated = hookCancelFunc + tc.onJobRunBefore = hookCancelFunc + d.SetHook(tc) + + // for adding index + updateTest(&tests[0]) + idxOrigName := "idx" + validArgs := []interface{}{false, model.NewCIStr(idxOrigName), + []*ast.IndexPartSpecification{{ + Column: &ast.ColumnName{Name: model.NewCIStr("c1")}, + Length: -1, + }}, nil} + + // When the job satisfies this test case, the option will be rollback, so the job's schema state is none. 
+ cancelState := model.StateNone + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddIndex, validArgs, &cancelState) + require.NoError(s.T(), checkErr) + s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, false) + updateTest(&tests[1]) + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddIndex, validArgs, &cancelState) + require.NoError(s.T(), checkErr) + s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, false) + updateTest(&tests[2]) + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddIndex, validArgs, &cancelState) + require.NoError(s.T(), checkErr) + s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, false) + updateTest(&tests[3]) + testCreateIndex(s.T(), ctx, d, dbInfo, tblInfo, false, "idx", "c2") + require.NoError(s.T(), checkErr) + txn, err = ctx.Txn(true) + require.NoError(s.T(), err) + require.Nil(s.T(), txn.Commit(context.Background())) + s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, true) + + // for add column + updateTest(&tests[4]) + addingColName := "colA" + newColumnDef := &ast.ColumnDef{ + Name: &ast.ColumnName{Name: model.NewCIStr(addingColName)}, + Tp: &types.FieldType{Tp: mysql.TypeLonglong}, + Options: []*ast.ColumnOption{}, + } + chs, coll := charset.GetDefaultCharsetAndCollate() + col, _, err := buildColumnAndConstraint(ctx, 2, newColumnDef, nil, chs, coll) + require.NoError(s.T(), err) + + addColumnArgs := []interface{}{col, &ast.ColumnPosition{Tp: ast.ColumnPositionNone}, 0} + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddColumn, addColumnArgs, &cancelState) + require.NoError(s.T(), checkErr) + s.checkAddColumns(d, dbInfo.ID, tblInfo.ID, []string{addingColName}, false) + + updateTest(&tests[5]) + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddColumn, addColumnArgs, &cancelState) + require.NoError(s.T(), checkErr) + s.checkAddColumns(d, dbInfo.ID, 
tblInfo.ID, []string{addingColName}, false) + + updateTest(&tests[6]) + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddColumn, addColumnArgs, &cancelState) + require.NoError(s.T(), checkErr) + s.checkAddColumns(d, dbInfo.ID, tblInfo.ID, []string{addingColName}, false) + + updateTest(&tests[7]) + testAddColumn(s.T(), ctx, d, dbInfo, tblInfo, addColumnArgs) + require.NoError(s.T(), checkErr) + s.checkAddColumns(d, dbInfo.ID, tblInfo.ID, []string{addingColName}, true) + + // for create table + tblInfo1, err := testTableInfo(d, "t1", 2) + require.NoError(s.T(), err) + updateTest(&tests[8]) + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo1.ID, model.ActionCreateTable, []interface{}{tblInfo1}, &cancelState) + require.NoError(s.T(), checkErr) + testCheckTableState(s.T(), d, dbInfo, tblInfo1, model.StateNone) + + // for create database + dbInfo1, err := testSchemaInfo(d, "test_cancel_job1") + require.NoError(s.T(), err) + updateTest(&tests[9]) + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo1.ID, 0, model.ActionCreateSchema, []interface{}{dbInfo1}, &cancelState) + require.NoError(s.T(), checkErr) + testCheckSchemaState(s.T(), d, dbInfo1, model.StateNone) + + // for drop column. 
+ updateTest(&tests[10]) + dropColName := "c3" + s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, []string{dropColName}, false) + testDropColumn(s.T(), ctx, d, dbInfo, tblInfo, dropColName, false) + require.NoError(s.T(), checkErr) + s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, []string{dropColName}, true) + + updateTest(&tests[11]) + dropColName = "c4" + s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, []string{dropColName}, false) + testDropColumn(s.T(), ctx, d, dbInfo, tblInfo, dropColName, false) + require.NoError(s.T(), checkErr) + s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, []string{dropColName}, true) + + updateTest(&tests[12]) + dropColName = "c5" + s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, []string{dropColName}, false) + testDropColumn(s.T(), ctx, d, dbInfo, tblInfo, dropColName, false) + require.NoError(s.T(), checkErr) + s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, []string{dropColName}, true) + + // cancel rebase auto id + updateTest(&tests[13]) + rebaseIDArgs := []interface{}{int64(200)} + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionRebaseAutoID, rebaseIDArgs, &cancelState) + require.NoError(s.T(), checkErr) + changedTable := testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) + require.Equal(s.T(), changedTable.Meta().AutoIncID, tableAutoID) + + // cancel shard bits + updateTest(&tests[14]) + shardRowIDArgs := []interface{}{uint64(7)} + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionShardRowID, shardRowIDArgs, &cancelState) + require.NoError(s.T(), checkErr) + changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) + require.Equal(s.T(), changedTable.Meta().ShardRowIDBits, shardRowIDBits) + + // modify none-state column + col.DefaultValue = "1" + updateTest(&tests[15]) + modifyColumnArgs := []interface{}{col, col.Name, &ast.ColumnPosition{}, byte(0), uint64(0)} + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, modifyColumnArgs, 
&test.cancelState) + require.NoError(s.T(), checkErr) + changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) + changedCol := model.FindColumnInfo(changedTable.Meta().Columns, col.Name.L) + require.Nil(s.T(), changedCol.DefaultValue) + + // modify delete-only-state column, + col.FieldType.Tp = mysql.TypeTiny + col.FieldType.Flen-- + updateTest(&tests[16]) + modifyColumnArgs = []interface{}{col, col.Name, &ast.ColumnPosition{}, byte(0), uint64(0)} + cancelState = model.StateNone + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, modifyColumnArgs, &cancelState) + require.NoError(s.T(), checkErr) + changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) + changedCol = model.FindColumnInfo(changedTable.Meta().Columns, col.Name.L) + require.Equal(s.T(), changedCol.FieldType.Tp, mysql.TypeLonglong) + require.Equal(s.T(), changedCol.FieldType.Flen, col.FieldType.Flen+1) + col.FieldType.Flen++ + + // Test add foreign key failed cause by canceled. + updateTest(&tests[17]) + addForeignKeyArgs := []interface{}{model.FKInfo{Name: model.NewCIStr("fk1")}} + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, addForeignKeyArgs, &test.cancelState) + require.NoError(s.T(), checkErr) + changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) + require.Equal(s.T(), len(changedTable.Meta().ForeignKeys), 0) + + // Test add foreign key successful. + updateTest(&tests[18]) + doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, addForeignKeyArgs) + require.NoError(s.T(), checkErr) + changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) + require.Equal(s.T(), len(changedTable.Meta().ForeignKeys), 1) + require.Equal(s.T(), changedTable.Meta().ForeignKeys[0].Name, addForeignKeyArgs[0].(model.FKInfo).Name) + + // Test drop foreign key failed cause by canceled. 
+ updateTest(&tests[19]) + dropForeignKeyArgs := []interface{}{addForeignKeyArgs[0].(model.FKInfo).Name} + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, dropForeignKeyArgs, &test.cancelState) + require.NoError(s.T(), checkErr) + changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) + require.Equal(s.T(), len(changedTable.Meta().ForeignKeys), 1) + require.Equal(s.T(), changedTable.Meta().ForeignKeys[0].Name, dropForeignKeyArgs[0].(model.CIStr)) + + // Test drop foreign key successful. + updateTest(&tests[20]) + doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, dropForeignKeyArgs) + require.NoError(s.T(), checkErr) + changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) + require.Equal(s.T(), len(changedTable.Meta().ForeignKeys), 0) + + // test rename table failed caused by canceled. + test = &tests[21] + renameTableArgs := []interface{}{dbInfo.ID, model.NewCIStr("t2"), dbInfo.Name} + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, renameTableArgs, &test.cancelState) + require.NoError(s.T(), checkErr) + changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) + require.Equal(s.T(), changedTable.Meta().Name.L, "t") + + // test rename table successful. + test = &tests[22] + doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, renameTableArgs) + require.NoError(s.T(), checkErr) + changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) + require.Equal(s.T(), changedTable.Meta().Name.L, "t2") + + // test modify table charset failed caused by canceled. 
+ test = &tests[23] + modifyTableCharsetArgs := []interface{}{"utf8mb4", "utf8mb4_bin"} + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, modifyTableCharsetArgs, &test.cancelState) + require.NoError(s.T(), checkErr) + changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) + require.Equal(s.T(), changedTable.Meta().Charset, "utf8") + require.Equal(s.T(), changedTable.Meta().Collate, "utf8_bin") + + // test modify table charset successfully. + test = &tests[24] + doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, modifyTableCharsetArgs) + require.NoError(s.T(), checkErr) + changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) + require.Equal(s.T(), changedTable.Meta().Charset, "utf8mb4") + require.Equal(s.T(), changedTable.Meta().Collate, "utf8mb4_bin") + + // test truncate table partition failed caused by canceled. + test = &tests[25] + truncateTblPartitionArgs := []interface{}{[]int64{partitionTblInfo.Partition.Definitions[0].ID}} + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, partitionTblInfo.ID, test.act, truncateTblPartitionArgs, &test.cancelState) + require.NoError(s.T(), checkErr) + changedTable = testGetTable(s.T(), d, dbInfo.ID, partitionTblInfo.ID) + require.True(s.T(), changedTable.Meta().Partition.Definitions[0].ID == partitionTblInfo.Partition.Definitions[0].ID) + + // test truncate table partition successfully. + test = &tests[26] + doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, partitionTblInfo.ID, test.act, truncateTblPartitionArgs) + require.NoError(s.T(), checkErr) + changedTable = testGetTable(s.T(), d, dbInfo.ID, partitionTblInfo.ID) + require.False(s.T(), changedTable.Meta().Partition.Definitions[0].ID == partitionTblInfo.Partition.Definitions[0].ID) + + // test modify schema charset failed caused by canceled. 
+ test = &tests[27] + charsetAndCollate := []interface{}{"utf8mb4", "utf8mb4_bin"} + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, charsetAndCollate, &test.cancelState) + require.NoError(s.T(), checkErr) + dbInfo, err = testGetSchemaInfoWithError(d, dbInfo.ID) + require.NoError(s.T(), err) + require.Equal(s.T(), dbInfo.Charset, "") + require.Equal(s.T(), dbInfo.Collate, "") + + // test modify schema charset successfully. + test = &tests[28] + doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, charsetAndCollate) + require.NoError(s.T(), checkErr) + dbInfo, err = testGetSchemaInfoWithError(d, dbInfo.ID) + require.NoError(s.T(), err) + require.Equal(s.T(), dbInfo.Charset, "utf8mb4") + require.Equal(s.T(), dbInfo.Collate, "utf8mb4_bin") + + // for adding primary key + tblInfo = changedTable.Meta() + updateTest(&tests[29]) + idxOrigName = "primary" + validArgs = []interface{}{false, model.NewCIStr(idxOrigName), + []*ast.IndexPartSpecification{{ + Column: &ast.ColumnName{Name: model.NewCIStr("c1")}, + Length: -1, + }}, nil} + cancelState = model.StateNone + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddPrimaryKey, validArgs, &cancelState) + require.NoError(s.T(), checkErr) + s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, false) + updateTest(&tests[30]) + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddPrimaryKey, validArgs, &cancelState) + require.NoError(s.T(), checkErr) + s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, false) + updateTest(&tests[31]) + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddPrimaryKey, validArgs, &cancelState) + require.NoError(s.T(), checkErr) + s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, false) + updateTest(&tests[32]) + testCreatePrimaryKey(s.T(), ctx, d, dbInfo, tblInfo, "c1") + require.NoError(s.T(), checkErr) + txn, err = ctx.Txn(true) + require.NoError(s.T(), 
err) + require.Nil(s.T(), txn.Commit(context.Background())) + s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, true) + + // for dropping primary key + updateTest(&tests[33]) + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionDropPrimaryKey, validArgs, &cancelState) + require.NoError(s.T(), checkErr) + s.checkDropIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, false) + updateTest(&tests[34]) + testDropIndex(s.T(), ctx, d, dbInfo, tblInfo, idxOrigName) + require.NoError(s.T(), checkErr) + s.checkDropIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, true) + + // for add columns + updateTest(&tests[35]) + addingColNames := []string{"colA", "colB", "colC", "colD", "colE", "colF"} + cols := make([]*table.Column, len(addingColNames)) + for i, addingColName := range addingColNames { + newColumnDef := &ast.ColumnDef{ + Name: &ast.ColumnName{Name: model.NewCIStr(addingColName)}, + Tp: &types.FieldType{Tp: mysql.TypeLonglong}, + Options: []*ast.ColumnOption{}, + } + col, _, err := buildColumnAndConstraint(ctx, 0, newColumnDef, nil, mysql.DefaultCharset, "") + require.NoError(s.T(), err) + cols[i] = col + } + offsets := make([]int, len(cols)) + positions := make([]*ast.ColumnPosition, len(cols)) + for i := range positions { + positions[i] = &ast.ColumnPosition{Tp: ast.ColumnPositionNone} + } + ifNotExists := make([]bool, len(cols)) + + addColumnArgs = []interface{}{cols, positions, offsets, ifNotExists} + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddColumns, addColumnArgs, &cancelState) + require.NoError(s.T(), checkErr) + s.checkAddColumns(d, dbInfo.ID, tblInfo.ID, addingColNames, false) + + updateTest(&tests[36]) + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddColumns, addColumnArgs, &cancelState) + require.NoError(s.T(), checkErr) + s.checkAddColumns(d, dbInfo.ID, tblInfo.ID, addingColNames, false) + + updateTest(&tests[37]) + doDDLJobErrWithSchemaState(ctx, d, 
s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddColumns, addColumnArgs, &cancelState) + require.NoError(s.T(), checkErr) + s.checkAddColumns(d, dbInfo.ID, tblInfo.ID, addingColNames, false) + + updateTest(&tests[38]) + testAddColumns(s.T(), ctx, d, dbInfo, tblInfo, addColumnArgs) + require.NoError(s.T(), checkErr) + s.checkAddColumns(d, dbInfo.ID, tblInfo.ID, addingColNames, true) + + // for drop columns + updateTest(&tests[39]) + dropColNames := []string{"colA", "colB"} + s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, dropColNames, false) + testDropColumns(s.T(), ctx, d, dbInfo, tblInfo, dropColNames, false) + require.NoError(s.T(), checkErr) + s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, dropColNames, true) + + updateTest(&tests[40]) + dropColNames = []string{"colC", "colD"} + s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, dropColNames, false) + testDropColumns(s.T(), ctx, d, dbInfo, tblInfo, dropColNames, false) + require.NoError(s.T(), checkErr) + s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, dropColNames, true) + + updateTest(&tests[41]) + dropColNames = []string{"colE", "colF"} + s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, dropColNames, false) + testDropColumns(s.T(), ctx, d, dbInfo, tblInfo, dropColNames, false) + require.NoError(s.T(), checkErr) + s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, dropColNames, true) + + // test alter index visibility failed caused by canceled. 
+ indexName := "idx_c3" + testCreateIndex(s.T(), ctx, d, dbInfo, tblInfo, false, indexName, "c3") + require.NoError(s.T(), checkErr) + txn, err = ctx.Txn(true) + require.NoError(s.T(), err) + require.Nil(s.T(), txn.Commit(context.Background())) + s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, indexName, true) + + updateTest(&tests[42]) + alterIndexVisibility := []interface{}{model.NewCIStr(indexName), true} + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, alterIndexVisibility, &test.cancelState) + require.NoError(s.T(), checkErr) + changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) + require.True(s.T(), checkIdxVisibility(changedTable, indexName, false)) + + // cancel alter index visibility successfully + updateTest(&tests[43]) + alterIndexVisibility = []interface{}{model.NewCIStr(indexName), true} + doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, alterIndexVisibility) + require.NoError(s.T(), checkErr) + changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) + require.True(s.T(), checkIdxVisibility(changedTable, indexName, true)) + + // test exchange partition failed caused by canceled + pt := testTableInfoWithPartition(s.T(), d, "pt", 5) + nt, err := testTableInfo(d, "nt", 5) + require.NoError(s.T(), err) + testCreateTable(s.T(), ctx, d, dbInfo, pt) + testCreateTable(s.T(), ctx, d, dbInfo, nt) + + updateTest(&tests[44]) + defID := pt.Partition.Definitions[0].ID + exchangeTablePartition := []interface{}{defID, dbInfo.ID, pt.ID, "p0", true} + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, nt.ID, test.act, exchangeTablePartition, &test.cancelState) + require.NoError(s.T(), checkErr) + changedNtTable := testGetTable(s.T(), d, dbInfo.ID, nt.ID) + changedPtTable := testGetTable(s.T(), d, dbInfo.ID, pt.ID) + require.True(s.T(), changedNtTable.Meta().ID == nt.ID) + require.True(s.T(), changedPtTable.Meta().Partition.Definitions[0].ID == pt.Partition.Definitions[0].ID) + + // cancel exchange partition 
successfully + updateTest(&tests[45]) + doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, nt.ID, test.act, exchangeTablePartition) + require.NoError(s.T(), checkErr) + changedNtTable = testGetTable(s.T(), d, dbInfo.ID, pt.Partition.Definitions[0].ID) + changedPtTable = testGetTable(s.T(), d, dbInfo.ID, pt.ID) + require.False(s.T(), changedNtTable.Meta().ID == nt.ID) + require.True(s.T(), changedPtTable.Meta().Partition.Definitions[0].ID == nt.ID) + + // Cancel add table partition. + baseTableInfo := testTableInfoWithPartitionLessThan(s.T(), d, "empty_table", 5, "1000") + testCreateTable(s.T(), ctx, d, dbInfo, baseTableInfo) + + cancelState = model.StateNone + updateTest(&tests[46]) + addedPartInfo := testAddedNewTablePartitionInfo(s.T(), d, baseTableInfo, "p1", "maxvalue") + addPartitionArgs := []interface{}{addedPartInfo} + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, addPartitionArgs, &cancelState) + require.NoError(s.T(), checkErr) + baseTable := testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) + require.Equal(s.T(), len(baseTable.Meta().Partition.Definitions), 1) + + updateTest(&tests[47]) + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, addPartitionArgs, &cancelState) + require.NoError(s.T(), checkErr) + baseTable = testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) + require.Equal(s.T(), len(baseTable.Meta().Partition.Definitions), 1) + + updateTest(&tests[48]) + doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, addPartitionArgs) + require.NoError(s.T(), checkErr) + baseTable = testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) + require.Equal(s.T(), len(baseTable.Meta().Partition.Definitions), 2) + require.Equal(s.T(), baseTable.Meta().Partition.Definitions[1].ID, addedPartInfo.Definitions[0].ID) + require.Equal(s.T(), baseTable.Meta().Partition.Definitions[1].LessThan[0], addedPartInfo.Definitions[0].LessThan[0]) + + // Cancel modify column which should reorg the data. 
+ require.Nil(s.T(), failpoint.Enable("github.com/pingcap/tidb/ddl/skipMockContextDoExec", `return(true)`)) + baseTableInfo = testTableInfoWith2IndexOnFirstColumn(s.T(), d, "modify-table", 2) + // This will cost 2 global id, one for table id, the other for the job id. + testCreateTable(s.T(), ctx, d, dbInfo, baseTableInfo) + + cancelState = model.StateNone + newCol := baseTableInfo.Columns[0].Clone() + // change type from long to tinyint. + newCol.FieldType = *types.NewFieldType(mysql.TypeTiny) + // change from null to not null + newCol.FieldType.Flag |= mysql.NotNullFlag + newCol.FieldType.Flen = 2 + + originColName := baseTableInfo.Columns[0].Name + pos := &ast.ColumnPosition{Tp: ast.ColumnPositionNone} + + updateTest(&tests[49]) + modifyColumnArgs = []interface{}{&newCol, originColName, pos, mysql.TypeNull, 0} + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, modifyColumnArgs, &cancelState) + require.NoError(s.T(), checkErr) + baseTable = testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) + require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Tp, mysql.TypeLong) + require.Equal(s.T(), mysql.HasNotNullFlag(baseTable.Meta().Columns[0].FieldType.Flag), false) + + updateTest(&tests[50]) + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, modifyColumnArgs, &cancelState) + require.NoError(s.T(), checkErr) + baseTable = testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) + require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Tp, mysql.TypeLong) + require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Flag&mysql.NotNullFlag, uint(0)) + + updateTest(&tests[51]) + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, modifyColumnArgs, &cancelState) + require.NoError(s.T(), checkErr) + baseTable = testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) + require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Tp, mysql.TypeLong) + require.Equal(s.T(), 
baseTable.Meta().Columns[0].FieldType.Flag&mysql.NotNullFlag, uint(0)) + + updateTest(&tests[52]) + doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, modifyColumnArgs, &cancelState) + require.NoError(s.T(), checkErr) + baseTable = testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) + require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Tp, mysql.TypeLong) + require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Flag&mysql.NotNullFlag, uint(0)) + + updateTest(&tests[53]) + doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, modifyColumnArgs) + require.NoError(s.T(), checkErr) + baseTable = testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) + require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Tp, mysql.TypeTiny) + require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Flag&mysql.NotNullFlag, uint(1)) + require.Nil(s.T(), failpoint.Disable("github.com/pingcap/tidb/ddl/skipMockContextDoExec")) + + // for drop indexes + updateTest(&tests[54]) + ifExists := make([]bool, 2) + idxNames := []model.CIStr{model.NewCIStr("i1"), model.NewCIStr("i2")} + dropIndexesArgs := []interface{}{idxNames, ifExists} + tableInfo := createTestTableForDropIndexes(s.T(), ctx, d, dbInfo, "test-drop-indexes", 6) + doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tableInfo.ID, test.act, dropIndexesArgs) + s.checkDropIndexes(d, dbInfo.ID, tableInfo.ID, idxNames, true) + + updateTest(&tests[55]) + idxNames = []model.CIStr{model.NewCIStr("i3"), model.NewCIStr("i4")} + dropIndexesArgs = []interface{}{idxNames, ifExists} + doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tableInfo.ID, test.act, dropIndexesArgs) + s.checkDropIndexes(d, dbInfo.ID, tableInfo.ID, idxNames, true) + + updateTest(&tests[56]) + idxNames = []model.CIStr{model.NewCIStr("i5"), model.NewCIStr("i6")} + dropIndexesArgs = []interface{}{idxNames, ifExists} + doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tableInfo.ID, test.act, dropIndexesArgs) + s.checkDropIndexes(d, dbInfo.ID, 
tableInfo.ID, idxNames, true) +} + +func (s *testDDLSerialSuiteToVerify) checkDropIndexes(d *ddl, schemaID int64, tableID int64, idxNames []model.CIStr, success bool) { + for _, idxName := range idxNames { + checkIdxExist(s.T(), d, schemaID, tableID, idxName.O, !success) + } +} + +func doDDLJobErrWithSchemaState(ctx sessionctx.Context, ddl *ddl, t *testing.T, schemaID, tableID int64, tp model.ActionType, + args []interface{}, state *model.SchemaState) *model.Job { + job := &model.Job{ + SchemaID: schemaID, + TableID: tableID, + Type: tp, + Args: args, + BinlogInfo: &model.HistoryInfo{}, + } + // TODO: check error detail + ctx.SetValue(sessionctx.QueryString, "skip") + require.Error(t, ddl.DoDDLJob(ctx, job)) + testCheckJobCancelled(t, ddl.store, job, state) + + return job +} + +func doDDLJobSuccess(ctx sessionctx.Context, ddl DDL, t *testing.T, schemaID, tableID int64, tp model.ActionType, + args []interface{}) { + job := &model.Job{ + SchemaID: schemaID, + TableID: tableID, + Type: tp, + Args: args, + BinlogInfo: &model.HistoryInfo{}, + } + ctx.SetValue(sessionctx.QueryString, "skip") + err := ddl.DoDDLJob(ctx, job) + require.NoError(t, err) +} + +func testDropColumns(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNames []string, isError bool) *model.Job { + job := buildDropColumnsJob(dbInfo, tblInfo, colNames) + ctx.SetValue(sessionctx.QueryString, "skip") + err := d.DoDDLJob(ctx, job) + if isError { + require.Error(t, err) + return nil + } + require.NoError(t, err) + v := getSchemaVer(t, ctx) + checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) + return job +} + +func buildDropColumnsJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNames []string) *model.Job { + columnNames := make([]model.CIStr, len(colNames)) + ifExists := make([]bool, len(colNames)) + for i, colName := range colNames { + columnNames[i] = model.NewCIStr(colName) + } + job := &model.Job{ + SchemaID: dbInfo.ID, + 
TableID: tblInfo.ID, + Type: model.ActionDropColumns, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{columnNames, ifExists}, + } + return job +} + +func buildDropColumnJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName string) *model.Job { + return &model.Job{ + SchemaID: dbInfo.ID, + TableID: tblInfo.ID, + Type: model.ActionDropColumn, + BinlogInfo: &model.HistoryInfo{}, + MultiSchemaInfo: &model.MultiSchemaInfo{}, + Args: []interface{}{model.NewCIStr(colName)}, + } +} + +func testDropColumn(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName string, isError bool) *model.Job { + job := buildDropColumnJob(dbInfo, tblInfo, colName) + ctx.SetValue(sessionctx.QueryString, "skip") + err := d.DoDDLJob(ctx, job) + if isError { + require.Error(t, err) + return nil + } + require.NoError(t, err) + v := getSchemaVer(t, ctx) + checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) + return job +} diff --git a/ddl/column.go b/ddl/column.go index 6602e55bb7585..ea2b05e57b6e3 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -147,7 +147,7 @@ func createColumnInfo(tblInfo *model.TableInfo, colInfo *model.ColumnInfo, pos * func checkAddColumn(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.ColumnInfo, *model.ColumnInfo, *ast.ColumnPosition, int, error) { schemaID := job.SchemaID - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return nil, nil, nil, nil, 0, errors.Trace(err) } @@ -258,7 +258,7 @@ func onAddColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) func checkAddColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.ColumnInfo, []*model.ColumnInfo, []*ast.ColumnPosition, []int, []bool, error) { schemaID := job.SchemaID - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if 
err != nil { return nil, nil, nil, nil, nil, nil, errors.Trace(err) } @@ -497,7 +497,7 @@ func onDropColumns(t *meta.Meta, job *model.Job) (ver int64, _ error) { func checkDropColumns(t *meta.Meta, job *model.Job) (*model.TableInfo, []*model.ColumnInfo, int, []*model.IndexInfo, error) { schemaID := job.SchemaID - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return nil, nil, 0, nil, errors.Trace(err) } @@ -639,7 +639,7 @@ func onDropColumn(t *meta.Meta, job *model.Job) (ver int64, _ error) { func checkDropColumn(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.ColumnInfo, []*model.IndexInfo, error) { schemaID := job.SchemaID - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return nil, nil, nil, errors.Trace(err) } @@ -783,7 +783,7 @@ func getModifyColumnInfo(t *meta.Meta, job *model.Job) (*model.DBInfo, *model.Ta return nil, nil, nil, jobParam, errors.Trace(err) } - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return nil, nil, nil, jobParam, errors.Trace(err) } @@ -1711,7 +1711,7 @@ func checkForNullValue(ctx context.Context, sctx sessionctx.Context, isDataTrunc } func updateColumnDefaultValue(t *meta.Meta, job *model.Job, newCol *model.ColumnInfo, oldColName *model.CIStr) (ver int64, _ error) { - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return ver, errors.Trace(err) } diff --git a/ddl/column_test.go b/ddl/column_test.go index 13d16ce1e910f..ae743d36ba7bb 100644 --- a/ddl/column_test.go +++ b/ddl/column_test.go @@ -12,350 +12,302 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package ddl +package ddl_test import ( "context" "fmt" "reflect" + "strconv" "sync" "testing" "github.com/pingcap/errors" - "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/parser" - "github.com/pingcap/tidb/parser/ast" - "github.com/pingcap/tidb/parser/charset" "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/sessionctx" - "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/types" - "github.com/pingcap/tidb/util/dbterror" "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" ) -type testColumnSuiteToVerify struct { - suite.Suite - store kv.Storage - dbInfo *model.DBInfo -} - -func TestColumnSuite(t *testing.T) { - suite.Run(t, new(testColumnSuiteToVerify)) -} - -func (s *testColumnSuiteToVerify) SetupSuite() { - s.store = createMockStore(s.T()) - d, err := testNewDDLAndStart( - context.Background(), - WithStore(s.store), - WithLease(testLease), - ) - require.NoError(s.T(), err) - - s.dbInfo, err = testSchemaInfo(d, "test_column") - require.NoError(s.T(), err) - testCreateSchema(s.T(), testNewContext(d), d, s.dbInfo) - require.Nil(s.T(), d.Stop()) -} - -func (s *testColumnSuiteToVerify) TearDownSuite() { - err := s.store.Close() - require.NoError(s.T(), err) -} - -func buildCreateColumnJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName string, - pos *ast.ColumnPosition, defaultValue interface{}) *model.Job { - col := &model.ColumnInfo{ - Name: model.NewCIStr(colName), - Offset: len(tblInfo.Columns), - DefaultValue: defaultValue, - OriginDefaultValue: defaultValue, - } - col.ID = allocateColumnID(tblInfo) - col.FieldType = *types.NewFieldType(mysql.TypeLong) - - job := 
&model.Job{ - SchemaID: dbInfo.ID, - TableID: tblInfo.ID, - Type: model.ActionAddColumn, - BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{col, pos, 0}, +func testCreateColumn(tk *testkit.TestKit, t *testing.T, ctx sessionctx.Context, tblID int64, + colName string, pos string, defaultValue interface{}, dom *domain.Domain) int64 { + sql := fmt.Sprintf("alter table t1 add column %s int ", colName) + if defaultValue != nil { + sql += fmt.Sprintf("default %v ", defaultValue) } - return job -} - -func testCreateColumn(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, - colName string, pos *ast.ColumnPosition, defaultValue interface{}) *model.Job { - job := buildCreateColumnJob(dbInfo, tblInfo, colName, pos, defaultValue) - ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) - require.NoError(t, err) + sql += pos + tk.MustExec(sql) + idi, _ := strconv.Atoi(tk.MustQuery("admin show ddl jobs 1;").Rows()[0][0].(string)) + id := int64(idi) v := getSchemaVer(t, ctx) - checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) - return job + require.NoError(t, dom.Reload()) + tblInfo, exist := dom.InfoSchema().TableByID(tblID) + require.True(t, exist) + checkHistoryJobArgs(t, ctx, id, &historyJobArgs{ver: v, tbl: tblInfo.Meta()}) + return id } -func buildCreateColumnsJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNames []string, - positions []*ast.ColumnPosition, defaultValue interface{}) *model.Job { - colInfos := make([]*model.ColumnInfo, len(colNames)) - offsets := make([]int, len(colNames)) - ifNotExists := make([]bool, len(colNames)) +func testCreateColumns(tk *testkit.TestKit, t *testing.T, ctx sessionctx.Context, tblID int64, + colNames []string, positions []string, defaultValue interface{}, dom *domain.Domain) int64 { + sql := "alter table t1 add column " for i, colName := range colNames { - col := &model.ColumnInfo{ - Name: model.NewCIStr(colName), - Offset: 
len(tblInfo.Columns), - DefaultValue: defaultValue, - OriginDefaultValue: defaultValue, + if i != 0 { + sql += ", add column " + } + sql += fmt.Sprintf("%s int %s", colName, positions[i]) + if defaultValue != nil { + sql += fmt.Sprintf(" default %v", defaultValue) } - col.ID = allocateColumnID(tblInfo) - col.FieldType = *types.NewFieldType(mysql.TypeLong) - colInfos[i] = col } + tk.MustExec(sql) + idi, _ := strconv.Atoi(tk.MustQuery("admin show ddl jobs 1;").Rows()[0][0].(string)) + id := int64(idi) + v := getSchemaVer(t, ctx) + require.NoError(t, dom.Reload()) + tblInfo, exist := dom.InfoSchema().TableByID(tblID) + require.True(t, exist) + checkHistoryJobArgs(t, ctx, id, &historyJobArgs{ver: v, tbl: tblInfo.Meta()}) + return id +} - job := &model.Job{ - SchemaID: dbInfo.ID, - TableID: tblInfo.ID, - Type: model.ActionAddColumns, - BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{colInfos, positions, offsets, ifNotExists}, +func testDropColumnInternal(tk *testkit.TestKit, t *testing.T, ctx sessionctx.Context, tblID int64, colName string, isError bool, dom *domain.Domain) int64 { + sql := fmt.Sprintf("alter table t1 drop column %s ", colName) + _, err := tk.Exec(sql) + if isError { + require.Error(t, err) + } else { + require.NoError(t, err) } - return job -} -func testCreateColumns(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, - colNames []string, positions []*ast.ColumnPosition, defaultValue interface{}) *model.Job { - job := buildCreateColumnsJob(dbInfo, tblInfo, colNames, positions, defaultValue) - ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) - require.NoError(t, err) + idi, _ := strconv.Atoi(tk.MustQuery("admin show ddl jobs 1;").Rows()[0][0].(string)) + id := int64(idi) v := getSchemaVer(t, ctx) - checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) - return job + require.NoError(t, dom.Reload()) + tblInfo, exist := dom.InfoSchema().TableByID(tblID) + 
require.True(t, exist) + checkHistoryJobArgs(t, ctx, id, &historyJobArgs{ver: v, tbl: tblInfo.Meta()}) + return id } -func buildDropColumnJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName string) *model.Job { - return &model.Job{ - SchemaID: dbInfo.ID, - TableID: tblInfo.ID, - Type: model.ActionDropColumn, - BinlogInfo: &model.HistoryInfo{}, - MultiSchemaInfo: &model.MultiSchemaInfo{}, - Args: []interface{}{model.NewCIStr(colName)}, - } +func testDropTable(tk *testkit.TestKit, t *testing.T, tblName string, dom *domain.Domain) int64 { + sql := fmt.Sprintf("drop table %s ", tblName) + tk.MustExec(sql) + + idi, _ := strconv.Atoi(tk.MustQuery("admin show ddl jobs 1;").Rows()[0][0].(string)) + id := int64(idi) + require.NoError(t, dom.Reload()) + _, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr(tblName)) + require.Error(t, err) + return id } -func testDropColumn(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName string, isError bool) *model.Job { - job := buildDropColumnJob(dbInfo, tblInfo, colName) - ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) - if isError { - require.Error(t, err) - return nil +func testCreateIndex(tk *testkit.TestKit, t *testing.T, ctx sessionctx.Context, tblID int64, unique bool, indexName string, colName string, dom *domain.Domain) int64 { + un := "" + if unique { + un = "unique" } - require.NoError(t, err) + sql := fmt.Sprintf("alter table t1 add %s index %s(%s)", un, indexName, colName) + tk.MustExec(sql) + + idi, _ := strconv.Atoi(tk.MustQuery("admin show ddl jobs 1;").Rows()[0][0].(string)) + id := int64(idi) v := getSchemaVer(t, ctx) - checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) - return job + require.NoError(t, dom.Reload()) + tblInfo, exist := dom.InfoSchema().TableByID(tblID) + require.True(t, exist) + checkHistoryJobArgs(t, ctx, id, &historyJobArgs{ver: v, tbl: tblInfo.Meta()}) + return id 
} -func buildDropColumnsJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNames []string) *model.Job { - columnNames := make([]model.CIStr, len(colNames)) - ifExists := make([]bool, len(colNames)) - for i, colName := range colNames { - columnNames[i] = model.NewCIStr(colName) - } - job := &model.Job{ - SchemaID: dbInfo.ID, - TableID: tblInfo.ID, - Type: model.ActionDropColumns, - BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{columnNames, ifExists}, +func testDropColumns(tk *testkit.TestKit, t *testing.T, ctx sessionctx.Context, tblID int64, colName []string, isError bool, dom *domain.Domain) int64 { + sql := "alter table t1 drop column " + for i, name := range colName { + if i != 0 { + sql += ", drop column " + } + sql += name } - return job -} - -func testDropColumns(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, colNames []string, isError bool) *model.Job { - job := buildDropColumnsJob(dbInfo, tblInfo, colNames) - ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) + _, err := tk.Exec(sql) if isError { require.Error(t, err) - return nil + } else { + require.NoError(t, err) } - require.NoError(t, err) + + idi, _ := strconv.Atoi(tk.MustQuery("admin show ddl jobs 1;").Rows()[0][0].(string)) + id := int64(idi) v := getSchemaVer(t, ctx) - checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) - return job + require.NoError(t, dom.Reload()) + tblInfo, exist := dom.InfoSchema().TableByID(tblID) + require.True(t, exist) + checkHistoryJobArgs(t, ctx, id, &historyJobArgs{ver: v, tbl: tblInfo.Meta()}) + return id } -func (s *testColumnSuiteToVerify) TestColumnBasic() { - d, err := testNewDDLAndStart( - context.Background(), - WithStore(s.store), - WithLease(testLease), - ) - require.NoError(s.T(), err) - defer func() { - err := d.Stop() - require.NoError(s.T(), err) - }() - - tblInfo, err := testTableInfo(d, "t1", 3) - require.NoError(s.T(), err) - ctx := 
testNewContext(d) - - testCreateTable(s.T(), ctx, d, s.dbInfo, tblInfo) - t := testGetTable(s.T(), d, s.dbInfo.ID, tblInfo.ID) +func TestColumnBasic(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t1 (c1 int, c2 int, c3 int);") num := 10 for i := 0; i < num; i++ { - _, err := t.AddRecord(ctx, types.MakeDatums(i, 10*i, 100*i)) - require.NoError(s.T(), err) + tk.MustExec(fmt.Sprintf("insert into t1 values(%d, %d, %d)", i, 10*i, 100*i)) } - err = ctx.NewTxn(context.Background()) - require.NoError(s.T(), err) - - i := int64(0) - err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { - require.Len(s.T(), data, 3) - require.Equal(s.T(), data[0].GetInt64(), i) - require.Equal(s.T(), data[1].GetInt64(), 10*i) - require.Equal(s.T(), data[2].GetInt64(), 100*i) + ctx := testNewContext(store) + err := ctx.NewTxn(context.Background()) + require.NoError(t, err) + + var tableID int64 + rs := tk.MustQuery("select TIDB_TABLE_ID from information_schema.tables where table_name='t1' and table_schema='test';") + tableIDi, _ := strconv.Atoi(rs.Rows()[0][0].(string)) + tableID = int64(tableIDi) + + tbl := testGetTable(t, dom, tableID) + + i := 0 + err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { + require.Len(t, data, 3) + require.Equal(t, data[0].GetInt64(), int64(i)) + require.Equal(t, data[1].GetInt64(), int64(10*i)) + require.Equal(t, data[2].GetInt64(), int64(100*i)) i++ return true, nil }) - require.NoError(s.T(), err) - require.Equal(s.T(), i, int64(num)) + require.NoError(t, err) + require.Equal(t, i, num) - require.Nil(s.T(), table.FindCol(t.Cols(), "c4")) + require.Nil(t, table.FindCol(tbl.Cols(), "c4")) - job := testCreateColumn(s.T(), ctx, d, s.dbInfo, tblInfo, "c4", &ast.ColumnPosition{Tp: 
ast.ColumnPositionAfter, RelativeColumn: &ast.ColumnName{Name: model.NewCIStr("c3")}}, 100) - testCheckJobDone(s.T(), d, job, true) + jobID := testCreateColumn(tk, t, testNewContext(store), tableID, "c4", "after c3", 100, dom) + testCheckJobDone(t, store, jobID, true) - t = testGetTable(s.T(), d, s.dbInfo.ID, tblInfo.ID) - require.NotNil(s.T(), table.FindCol(t.Cols(), "c4")) + tbl = testGetTable(t, dom, tableID) + require.NotNil(t, table.FindCol(tbl.Cols(), "c4")) - i = int64(0) - err = tables.IterRecords(t, ctx, t.Cols(), + i = 0 + err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { - require.Len(s.T(), data, 4) - require.Equal(s.T(), data[0].GetInt64(), i) - require.Equal(s.T(), data[1].GetInt64(), 10*i) - require.Equal(s.T(), data[2].GetInt64(), 100*i) - require.Equal(s.T(), data[3].GetInt64(), int64(100)) + require.Len(t, data, 4) + require.Equal(t, data[0].GetInt64(), int64(i)) + require.Equal(t, data[1].GetInt64(), int64(10*i)) + require.Equal(t, data[2].GetInt64(), int64(100*i)) + require.Equal(t, data[3].GetInt64(), int64(100)) i++ return true, nil }) - require.NoError(s.T(), err) - require.Equal(s.T(), i, int64(num)) + require.NoError(t, err) + require.Equal(t, i, num) - h, err := t.AddRecord(ctx, types.MakeDatums(11, 12, 13, 14)) - require.NoError(s.T(), err) + h, err := tbl.AddRecord(ctx, types.MakeDatums(11, 12, 13, 14)) + require.NoError(t, err) err = ctx.NewTxn(context.Background()) - require.NoError(s.T(), err) - values, err := tables.RowWithCols(t, ctx, h, t.Cols()) - require.NoError(s.T(), err) - - require.Len(s.T(), values, 4) - require.Equal(s.T(), values[3].GetInt64(), int64(14)) - - job = testDropColumn(s.T(), ctx, d, s.dbInfo, tblInfo, "c4", false) - testCheckJobDone(s.T(), d, job, false) - - t = testGetTable(s.T(), d, s.dbInfo.ID, tblInfo.ID) - values, err = tables.RowWithCols(t, ctx, h, t.Cols()) - require.NoError(s.T(), err) + require.NoError(t, err) + values, err := 
tables.RowWithCols(tbl, ctx, h, tbl.Cols()) + require.NoError(t, err) - require.Len(s.T(), values, 3) - require.Equal(s.T(), values[2].GetInt64(), int64(13)) + require.Len(t, values, 4) + require.Equal(t, values[3].GetInt64(), int64(14)) - job = testCreateColumn(s.T(), ctx, d, s.dbInfo, tblInfo, "c4", &ast.ColumnPosition{Tp: ast.ColumnPositionNone}, 111) - testCheckJobDone(s.T(), d, job, true) + jobID = testDropColumnInternal(tk, t, testNewContext(store), tableID, "c4", false, dom) + testCheckJobDone(t, store, jobID, false) - t = testGetTable(s.T(), d, s.dbInfo.ID, tblInfo.ID) - values, err = tables.RowWithCols(t, ctx, h, t.Cols()) - require.NoError(s.T(), err) + tbl = testGetTable(t, dom, tableID) + values, err = tables.RowWithCols(tbl, ctx, h, tbl.Cols()) + require.NoError(t, err) - require.Len(s.T(), values, 4) - require.Equal(s.T(), values[3].GetInt64(), int64(111)) + require.Len(t, values, 3) + require.Equal(t, values[2].GetInt64(), int64(13)) - job = testCreateColumn(s.T(), ctx, d, s.dbInfo, tblInfo, "c5", &ast.ColumnPosition{Tp: ast.ColumnPositionNone}, 101) - testCheckJobDone(s.T(), d, job, true) + jobID = testCreateColumn(tk, t, testNewContext(store), tableID, "c4", "", 111, dom) + testCheckJobDone(t, store, jobID, true) - t = testGetTable(s.T(), d, s.dbInfo.ID, tblInfo.ID) - values, err = tables.RowWithCols(t, ctx, h, t.Cols()) - require.NoError(s.T(), err) + tbl = testGetTable(t, dom, tableID) + values, err = tables.RowWithCols(tbl, ctx, h, tbl.Cols()) + require.NoError(t, err) - require.Len(s.T(), values, 5) - require.Equal(s.T(), values[4].GetInt64(), int64(101)) + require.Len(t, values, 4) + require.Equal(t, values[3].GetInt64(), int64(111)) - job = testCreateColumn(s.T(), ctx, d, s.dbInfo, tblInfo, "c6", &ast.ColumnPosition{Tp: ast.ColumnPositionFirst}, 202) - testCheckJobDone(s.T(), d, job, true) + jobID = testCreateColumn(tk, t, testNewContext(store), tableID, "c5", "", 101, dom) + testCheckJobDone(t, store, jobID, true) - t = testGetTable(s.T(), 
d, s.dbInfo.ID, tblInfo.ID) - cols := t.Cols() - require.Len(s.T(), cols, 6) - require.Equal(s.T(), cols[0].Offset, 0) - require.Equal(s.T(), cols[0].Name.L, "c6") - require.Equal(s.T(), cols[1].Offset, 1) - require.Equal(s.T(), cols[1].Name.L, "c1") - require.Equal(s.T(), cols[2].Offset, 2) - require.Equal(s.T(), cols[2].Name.L, "c2") - require.Equal(s.T(), cols[3].Offset, 3) - require.Equal(s.T(), cols[3].Name.L, "c3") - require.Equal(s.T(), cols[4].Offset, 4) - require.Equal(s.T(), cols[4].Name.L, "c4") - require.Equal(s.T(), cols[5].Offset, 5) - require.Equal(s.T(), cols[5].Name.L, "c5") + tbl = testGetTable(t, dom, tableID) + values, err = tables.RowWithCols(tbl, ctx, h, tbl.Cols()) + require.NoError(t, err) - values, err = tables.RowWithCols(t, ctx, h, cols) - require.NoError(s.T(), err) + require.Len(t, values, 5) + require.Equal(t, values[4].GetInt64(), int64(101)) + + jobID = testCreateColumn(tk, t, testNewContext(store), tableID, "c6", "first", 202, dom) + testCheckJobDone(t, store, jobID, true) + + tbl = testGetTable(t, dom, tableID) + cols := tbl.Cols() + require.Len(t, cols, 6) + require.Equal(t, cols[0].Offset, 0) + require.Equal(t, cols[0].Name.L, "c6") + require.Equal(t, cols[1].Offset, 1) + require.Equal(t, cols[1].Name.L, "c1") + require.Equal(t, cols[2].Offset, 2) + require.Equal(t, cols[2].Name.L, "c2") + require.Equal(t, cols[3].Offset, 3) + require.Equal(t, cols[3].Name.L, "c3") + require.Equal(t, cols[4].Offset, 4) + require.Equal(t, cols[4].Name.L, "c4") + require.Equal(t, cols[5].Offset, 5) + require.Equal(t, cols[5].Name.L, "c5") + + values, err = tables.RowWithCols(tbl, ctx, h, cols) + require.NoError(t, err) - require.Len(s.T(), values, 6) - require.Equal(s.T(), values[0].GetInt64(), int64(202)) - require.Equal(s.T(), values[5].GetInt64(), int64(101)) + require.Len(t, values, 6) + require.Equal(t, values[0].GetInt64(), int64(202)) + require.Equal(t, values[5].GetInt64(), int64(101)) - job = testDropColumn(s.T(), ctx, d, s.dbInfo, 
tblInfo, "c2", false) - testCheckJobDone(s.T(), d, job, false) + jobID = testDropColumnInternal(tk, t, testNewContext(store), tableID, "c2", false, dom) + testCheckJobDone(t, store, jobID, false) - t = testGetTable(s.T(), d, s.dbInfo.ID, tblInfo.ID) + tbl = testGetTable(t, dom, tableID) - values, err = tables.RowWithCols(t, ctx, h, t.Cols()) - require.NoError(s.T(), err) - require.Len(s.T(), values, 5) - require.Equal(s.T(), values[0].GetInt64(), int64(202)) - require.Equal(s.T(), values[4].GetInt64(), int64(101)) + values, err = tables.RowWithCols(tbl, ctx, h, tbl.Cols()) + require.NoError(t, err) + require.Len(t, values, 5) + require.Equal(t, values[0].GetInt64(), int64(202)) + require.Equal(t, values[4].GetInt64(), int64(101)) - job = testDropColumn(s.T(), ctx, d, s.dbInfo, tblInfo, "c1", false) - testCheckJobDone(s.T(), d, job, false) + jobID = testDropColumnInternal(tk, t, testNewContext(store), tableID, "c1", false, dom) + testCheckJobDone(t, store, jobID, false) - job = testDropColumn(s.T(), ctx, d, s.dbInfo, tblInfo, "c3", false) - testCheckJobDone(s.T(), d, job, false) + jobID = testDropColumnInternal(tk, t, testNewContext(store), tableID, "c3", false, dom) + testCheckJobDone(t, store, jobID, false) - job = testDropColumn(s.T(), ctx, d, s.dbInfo, tblInfo, "c4", false) - testCheckJobDone(s.T(), d, job, false) + jobID = testDropColumnInternal(tk, t, testNewContext(store), tableID, "c4", false, dom) + testCheckJobDone(t, store, jobID, false) - job = testCreateIndex(s.T(), ctx, d, s.dbInfo, tblInfo, false, "c5_idx", "c5") - testCheckJobDone(s.T(), d, job, true) + jobID = testCreateIndex(tk, t, testNewContext(store), tableID, false, "c5_idx", "c5", dom) + testCheckJobDone(t, store, jobID, true) - job = testDropColumn(s.T(), ctx, d, s.dbInfo, tblInfo, "c5", false) - testCheckJobDone(s.T(), d, job, false) + jobID = testDropColumnInternal(tk, t, testNewContext(store), tableID, "c5", false, dom) + testCheckJobDone(t, store, jobID, false) - testDropColumn(s.T(), 
ctx, d, s.dbInfo, tblInfo, "c6", true) + jobID = testDropColumnInternal(tk, t, testNewContext(store), tableID, "c6", true, dom) + testCheckJobDone(t, store, jobID, false) - testDropTable(s.T(), ctx, d, s.dbInfo, tblInfo) + testDropTable(tk, t, "t1", dom) } -func (s *testColumnSuiteToVerify) checkColumnKVExist(ctx sessionctx.Context, t table.Table, handle kv.Handle, col *table.Column, columnValue interface{}, isExist bool) error { +func checkColumnKVExist(ctx sessionctx.Context, t table.Table, handle kv.Handle, col *table.Column, columnValue interface{}, isExist bool) error { err := ctx.NewTxn(context.Background()) if err != nil { return errors.Trace(err) @@ -401,440 +353,277 @@ func (s *testColumnSuiteToVerify) checkColumnKVExist(ctx sessionctx.Context, t t return nil } -func (s *testColumnSuiteToVerify) checkNoneColumn(ctx sessionctx.Context, d *ddl, tblInfo *model.TableInfo, handle kv.Handle, col *table.Column, columnValue interface{}) error { - t, err := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) - if err != nil { - return errors.Trace(err) - } - err = s.checkColumnKVExist(ctx, t, handle, col, columnValue, false) - if err != nil { - return errors.Trace(err) - } - err = s.testGetColumn(t, col.Name.L, false) - if err != nil { - return errors.Trace(err) - } - return nil +func checkNoneColumn(t *testing.T, ctx sessionctx.Context, tableID int64, handle kv.Handle, col *table.Column, columnValue interface{}, dom *domain.Domain) { + tbl := testGetTable(t, dom, tableID) + err := checkColumnKVExist(ctx, tbl, handle, col, columnValue, false) + require.NoError(t, err) + err = testGetColumn(tbl, col.Name.L, false) + require.NoError(t, err) } -func (s *testColumnSuiteToVerify) checkDeleteOnlyColumn(ctx sessionctx.Context, d *ddl, tblInfo *model.TableInfo, handle kv.Handle, col *table.Column, row []types.Datum, columnValue interface{}) error { - t, err := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) - if err != nil { - return errors.Trace(err) - } - err = 
ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } - i := int64(0) - err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { - if !reflect.DeepEqual(data, row) { - return false, errors.Errorf("%v not equal to %v", data, row) - } +func checkDeleteOnlyColumn(t *testing.T, ctx sessionctx.Context, tableID int64, handle kv.Handle, col *table.Column, row []types.Datum, columnValue interface{}, dom *domain.Domain) { + tbl := testGetTable(t, dom, tableID) + err := ctx.NewTxn(context.Background()) + require.NoError(t, err) + i := 0 + err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { + require.Truef(t, reflect.DeepEqual(data, row), "%v not equal to %v", data, row) i++ return true, nil }) - if err != nil { - return errors.Trace(err) - } - if i != 1 { - return errors.Errorf("expect 1, got %v", i) - } - err = s.checkColumnKVExist(ctx, t, handle, col, columnValue, false) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) + require.Equalf(t, 1, i, "expect 1, got %v", i) + err = checkColumnKVExist(ctx, tbl, handle, col, columnValue, false) + require.NoError(t, err) // Test add a new row. 
err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) newRow := types.MakeDatums(int64(11), int64(22), int64(33)) - newHandle, err := t.AddRecord(ctx, newRow) - if err != nil { - return errors.Trace(err) - } + newHandle, err := tbl.AddRecord(ctx, newRow) + require.NoError(t, err) err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) rows := [][]types.Datum{row, newRow} - i = int64(0) - err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { - if !reflect.DeepEqual(data, rows[i]) { - return false, errors.Errorf("%v not equal to %v", data, rows[i]) - } + i = 0 + err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { + require.Truef(t, reflect.DeepEqual(data, rows[i]), "%v not equal to %v", data, rows[i]) i++ return true, nil }) - if err != nil { - return errors.Trace(err) - } - if i != 2 { - return errors.Errorf("expect 2, got %v", i) - } + require.NoError(t, err) + require.Equalf(t, 2, i, "expect 2, got %v", i) - err = s.checkColumnKVExist(ctx, t, handle, col, columnValue, false) - if err != nil { - return errors.Trace(err) - } + err = checkColumnKVExist(ctx, tbl, handle, col, columnValue, false) + require.NoError(t, err) // Test remove a row. 
err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) - err = t.RemoveRecord(ctx, newHandle, newRow) - if err != nil { - return errors.Trace(err) - } + err = tbl.RemoveRecord(ctx, newHandle, newRow) + require.NoError(t, err) err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } - i = int64(0) - err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { + require.NoError(t, err) + i = 0 + err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { i++ return true, nil }) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) - if i != 1 { - return errors.Errorf("expect 1, got %v", i) - } - err = s.checkColumnKVExist(ctx, t, newHandle, col, columnValue, false) - if err != nil { - return errors.Trace(err) - } - err = s.testGetColumn(t, col.Name.L, false) - if err != nil { - return errors.Trace(err) - } - return nil + require.Equalf(t, 1, i, "expect 1, got %v", i) + err = checkColumnKVExist(ctx, tbl, newHandle, col, columnValue, false) + require.NoError(t, err) + err = testGetColumn(tbl, col.Name.L, false) + require.NoError(t, err) } -func (s *testColumnSuiteToVerify) checkWriteOnlyColumn(ctx sessionctx.Context, d *ddl, tblInfo *model.TableInfo, handle kv.Handle, col *table.Column, row []types.Datum, columnValue interface{}) error { - t, err := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) - if err != nil { - return errors.Trace(err) - } - err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } +func checkWriteOnlyColumn(t *testing.T, ctx sessionctx.Context, tableID int64, handle kv.Handle, col *table.Column, row []types.Datum, columnValue interface{}, dom *domain.Domain) { + tbl := testGetTable(t, dom, tableID) + err := ctx.NewTxn(context.Background()) + require.NoError(t, err) - i := int64(0) - err 
= tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { - if !reflect.DeepEqual(data, row) { - return false, errors.Errorf("%v not equal to %v", data, row) - } + i := 0 + err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { + require.Truef(t, reflect.DeepEqual(data, row), "%v not equal to %v", data, row) i++ return true, nil }) - if err != nil { - return errors.Trace(err) - } - if i != 1 { - return errors.Errorf("expect 1, got %v", i) - } + require.NoError(t, err) + require.Equalf(t, 1, i, "expect 1, got %v", i) - err = s.checkColumnKVExist(ctx, t, handle, col, columnValue, false) - if err != nil { - return errors.Trace(err) - } + err = checkColumnKVExist(ctx, tbl, handle, col, columnValue, false) + require.NoError(t, err) // Test add a new row. err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) newRow := types.MakeDatums(int64(11), int64(22), int64(33)) - newHandle, err := t.AddRecord(ctx, newRow) - if err != nil { - return errors.Trace(err) - } + newHandle, err := tbl.AddRecord(ctx, newRow) + require.NoError(t, err) err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) rows := [][]types.Datum{row, newRow} - i = int64(0) - err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { - if !reflect.DeepEqual(data, rows[i]) { - return false, errors.Errorf("%v not equal to %v", data, rows[i]) - } + i = 0 + err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { + require.Truef(t, reflect.DeepEqual(data, rows[i]), "%v not equal to %v", data, rows[i]) i++ return true, nil }) - if err != nil { - return errors.Trace(err) - } - if i != 2 { - return errors.Errorf("expect 2, got %v", i) - } + 
require.NoError(t, err) + require.Equalf(t, 2, i, "expect 2, got %v", i) - err = s.checkColumnKVExist(ctx, t, newHandle, col, columnValue, true) - if err != nil { - return errors.Trace(err) - } + err = checkColumnKVExist(ctx, tbl, newHandle, col, columnValue, true) + require.NoError(t, err) // Test remove a row. err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) - err = t.RemoveRecord(ctx, newHandle, newRow) - if err != nil { - return errors.Trace(err) - } + err = tbl.RemoveRecord(ctx, newHandle, newRow) + require.NoError(t, err) err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) - i = int64(0) - err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { + i = 0 + err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { i++ return true, nil }) - if err != nil { - return errors.Trace(err) - } - if i != 1 { - return errors.Errorf("expect 1, got %v", i) - } + require.NoError(t, err) + require.Equalf(t, 1, i, "expect 1, got %v", i) - err = s.checkColumnKVExist(ctx, t, newHandle, col, columnValue, false) - if err != nil { - return errors.Trace(err) - } - err = s.testGetColumn(t, col.Name.L, false) - if err != nil { - return errors.Trace(err) - } - return nil + err = checkColumnKVExist(ctx, tbl, newHandle, col, columnValue, false) + require.NoError(t, err) + err = testGetColumn(tbl, col.Name.L, false) + require.NoError(t, err) } -func (s *testColumnSuiteToVerify) checkReorganizationColumn(ctx sessionctx.Context, d *ddl, tblInfo *model.TableInfo, col *table.Column, row []types.Datum, columnValue interface{}) error { - t, err := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) - if err != nil { - return errors.Trace(err) - } - err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } +func 
checkReorganizationColumn(t *testing.T, ctx sessionctx.Context, tableID int64, col *table.Column, row []types.Datum, columnValue interface{}, dom *domain.Domain) { + tbl := testGetTable(t, dom, tableID) + err := ctx.NewTxn(context.Background()) + require.NoError(t, err) - i := int64(0) - err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { - if !reflect.DeepEqual(data, row) { - return false, errors.Errorf("%v not equal to %v", data, row) - } + i := 0 + err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { + require.Truef(t, reflect.DeepEqual(data, row), "%v not equal to %v", data, row) i++ return true, nil }) - if err != nil { - return errors.Trace(err) - } - if i != 1 { - return errors.Errorf("expect 1 got %v", i) - } + require.NoError(t, err) + require.Equalf(t, 1, i, "expect 1, got %v", i) // Test add a new row. err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) newRow := types.MakeDatums(int64(11), int64(22), int64(33)) - newHandle, err := t.AddRecord(ctx, newRow) - if err != nil { - return errors.Trace(err) - } + newHandle, err := tbl.AddRecord(ctx, newRow) + require.NoError(t, err) err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) rows := [][]types.Datum{row, newRow} - i = int64(0) - err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { - if !reflect.DeepEqual(data, rows[i]) { - return false, errors.Errorf("%v not equal to %v", data, rows[i]) - } + i = 0 + err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { + require.Truef(t, reflect.DeepEqual(data, rows[i]), "%v not equal to %v", data, rows[i]) i++ return true, nil }) - if err != nil { - return errors.Trace(err) - } - 
if i != 2 { - return errors.Errorf("expect 2, got %v", i) - } + require.NoError(t, err) + require.Equalf(t, 2, i, "expect 2, got %v", i) - err = s.checkColumnKVExist(ctx, t, newHandle, col, columnValue, true) - if err != nil { - return errors.Trace(err) - } + err = checkColumnKVExist(ctx, tbl, newHandle, col, columnValue, true) + require.NoError(t, err) // Test remove a row. err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) - err = t.RemoveRecord(ctx, newHandle, newRow) - if err != nil { - return errors.Trace(err) - } + err = tbl.RemoveRecord(ctx, newHandle, newRow) + require.NoError(t, err) err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) - i = int64(0) - err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { + i = 0 + err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { i++ return true, nil }) - if err != nil { - return errors.Trace(err) - } - if i != 1 { - return errors.Errorf("expect 1, got %v", i) - } - err = s.testGetColumn(t, col.Name.L, false) - if err != nil { - return errors.Trace(err) - } - return nil + require.NoError(t, err) + require.Equalf(t, 1, i, "expect 1, got %v", i) + err = testGetColumn(tbl, col.Name.L, false) + require.NoError(t, err) } -func (s *testColumnSuiteToVerify) checkPublicColumn(ctx sessionctx.Context, d *ddl, tblInfo *model.TableInfo, newCol *table.Column, oldRow []types.Datum, columnValue interface{}) error { - t, err := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) - if err != nil { - return errors.Trace(err) - } - err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } +func checkPublicColumn(t *testing.T, ctx sessionctx.Context, tableID int64, newCol *table.Column, oldRow []types.Datum, columnValue interface{}, dom *domain.Domain, 
columnCnt int) { + tbl := testGetTable(t, dom, tableID) + err := ctx.NewTxn(context.Background()) + require.NoError(t, err) - i := int64(0) - updatedRow := append(oldRow, types.NewDatum(columnValue)) - err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { - if !reflect.DeepEqual(data, updatedRow) { - return false, errors.Errorf("%v not equal to %v", data, updatedRow) - } + i := 0 + var updatedRow []types.Datum + updatedRow = append(updatedRow, oldRow...) + for j := 0; j < columnCnt; j++ { + updatedRow = append(updatedRow, types.NewDatum(columnValue)) + } + err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { + require.Truef(t, reflect.DeepEqual(data, updatedRow), "%v not equal to %v", data, updatedRow) i++ return true, nil }) - if err != nil { - return errors.Trace(err) - } - if i != 1 { - return errors.Errorf("expect 1, got %v", i) - } + require.NoError(t, err) + require.Equalf(t, 1, i, "expect 1, got %v", i) // Test add a new row. 
err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) newRow := types.MakeDatums(int64(11), int64(22), int64(33), int64(44)) - handle, err := t.AddRecord(ctx, newRow) - if err != nil { - return errors.Trace(err) + for j := 1; j < columnCnt; j++ { + newRow = append(newRow, types.NewDatum(int64(44))) } + handle, err := tbl.AddRecord(ctx, newRow) + require.NoError(t, err) err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) rows := [][]types.Datum{updatedRow, newRow} - i = int64(0) - err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { - if !reflect.DeepEqual(data, rows[i]) { - return false, errors.Errorf("%v not equal to %v", data, rows[i]) - } + i = 0 + err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { + require.Truef(t, reflect.DeepEqual(data, rows[i]), "%v not equal to %v", data, rows[i]) i++ return true, nil }) - if err != nil { - return errors.Trace(err) - } - if i != 2 { - return errors.Errorf("expect 2, got %v", i) - } + require.NoError(t, err) + require.Equalf(t, 2, i, "expect 2, got %v", i) // Test remove a row. 
err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) - err = t.RemoveRecord(ctx, handle, newRow) - if err != nil { - return errors.Trace(err) - } + err = tbl.RemoveRecord(ctx, handle, newRow) + require.NoError(t, err) err = ctx.NewTxn(context.Background()) - if err != nil { - return errors.Trace(err) - } + require.NoError(t, err) - i = int64(0) - err = tables.IterRecords(t, ctx, t.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { - if !reflect.DeepEqual(data, updatedRow) { - return false, errors.Errorf("%v not equal to %v", data, updatedRow) - } + i = 0 + err = tables.IterRecords(tbl, ctx, tbl.Cols(), func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { + require.Truef(t, reflect.DeepEqual(data, rows[i]), "%v not equal to %v", data, rows[i]) i++ return true, nil }) - if err != nil { - return errors.Trace(err) - } - if i != 1 { - return errors.Errorf("expect 1, got %v", i) - } + require.NoError(t, err) + require.Equalf(t, 1, i, "expect 1, got %v", i) - err = s.testGetColumn(t, newCol.Name.L, true) - if err != nil { - return errors.Trace(err) - } - return nil + err = testGetColumn(tbl, newCol.Name.L, true) + require.NoError(t, err) } -func (s *testColumnSuiteToVerify) checkAddColumn(state model.SchemaState, d *ddl, tblInfo *model.TableInfo, handle kv.Handle, newCol *table.Column, oldRow []types.Datum, columnValue interface{}) error { - ctx := testNewContext(d) - var err error +func checkAddColumn(t *testing.T, state model.SchemaState, tableID int64, handle kv.Handle, newCol *table.Column, oldRow []types.Datum, columnValue interface{}, dom *domain.Domain, store kv.Storage, columnCnt int) { + ctx := testNewContext(store) switch state { case model.StateNone: - err = errors.Trace(s.checkNoneColumn(ctx, d, tblInfo, handle, newCol, columnValue)) + checkNoneColumn(t, ctx, tableID, handle, newCol, columnValue, dom) case model.StateDeleteOnly: - err = 
errors.Trace(s.checkDeleteOnlyColumn(ctx, d, tblInfo, handle, newCol, oldRow, columnValue)) + checkDeleteOnlyColumn(t, ctx, tableID, handle, newCol, oldRow, columnValue, dom) case model.StateWriteOnly: - err = errors.Trace(s.checkWriteOnlyColumn(ctx, d, tblInfo, handle, newCol, oldRow, columnValue)) + checkWriteOnlyColumn(t, ctx, tableID, handle, newCol, oldRow, columnValue, dom) case model.StateWriteReorganization, model.StateDeleteReorganization: - err = errors.Trace(s.checkReorganizationColumn(ctx, d, tblInfo, newCol, oldRow, columnValue)) + checkReorganizationColumn(t, ctx, tableID, newCol, oldRow, columnValue, dom) case model.StatePublic: - err = errors.Trace(s.checkPublicColumn(ctx, d, tblInfo, newCol, oldRow, columnValue)) + checkPublicColumn(t, ctx, tableID, newCol, oldRow, columnValue, dom, columnCnt) } - return err } -func (s *testColumnSuiteToVerify) testGetColumn(t table.Table, name string, isExist bool) error { +func testGetColumn(t table.Table, name string, isExist bool) error { col := table.FindCol(t.Cols(), name) if isExist { if col == nil { @@ -848,62 +637,50 @@ func (s *testColumnSuiteToVerify) testGetColumn(t table.Table, name string, isEx return nil } -func (s *testColumnSuiteToVerify) TestAddColumn() { - d, err := testNewDDLAndStart( - context.Background(), - WithStore(s.store), - WithLease(testLease), - ) - require.NoError(s.T(), err) - tblInfo, err := testTableInfo(d, "t", 3) - require.NoError(s.T(), err) - ctx := testNewContext(d) +func TestAddColumn(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t1 (c1 int, c2 int, c3 int);") - err = ctx.NewTxn(context.Background()) - require.NoError(s.T(), err) - - testCreateTable(s.T(), ctx, d, s.dbInfo, tblInfo) - t := testGetTable(s.T(), d, s.dbInfo.ID, tblInfo.ID) + var tableID int64 + rs := tk.MustQuery("select TIDB_TABLE_ID from information_schema.tables where 
table_name='t1' and table_schema='test';") + tableIDi, _ := strconv.Atoi(rs.Rows()[0][0].(string)) + tableID = int64(tableIDi) + tbl := testGetTable(t, dom, tableID) + ctx := testNewContext(store) + err := ctx.NewTxn(context.Background()) + require.NoError(t, err) oldRow := types.MakeDatums(int64(1), int64(2), int64(3)) - handle, err := t.AddRecord(ctx, oldRow) - require.NoError(s.T(), err) + handle, err := tbl.AddRecord(ctx, oldRow) + require.NoError(t, err) txn, err := ctx.Txn(true) - require.NoError(s.T(), err) + require.NoError(t, err) err = txn.Commit(context.Background()) - require.NoError(s.T(), err) + require.NoError(t, err) newColName := "c4" defaultColValue := int64(4) - var mu sync.Mutex - var hookErr error checkOK := false - tc := &TestDDLCallback{} - tc.onJobUpdated = func(job *model.Job) { - mu.Lock() - defer mu.Unlock() + d := dom.DDL() + tc := &ddl.TestDDLCallback{Do: dom} + tc.OnJobUpdatedExported = func(job *model.Job) { if checkOK { return } - t, err1 := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) - if err1 != nil { - hookErr = errors.Trace(err1) - return - } - newCol := table.FindCol(t.(*tables.TableCommon).Columns, newColName) + tbl := testGetTable(t, dom, tableID) + newCol := table.FindCol(tbl.(*tables.TableCommon).Columns, newColName) if newCol == nil { return } - err1 = s.checkAddColumn(newCol.State, d, tblInfo, handle, newCol, oldRow, defaultColValue) - if err1 != nil { - hookErr = errors.Trace(err1) - return - } + checkAddColumn(t, newCol.State, tableID, handle, newCol, oldRow, defaultColValue, dom, store, 1) if newCol.State == model.StatePublic { checkOK = true @@ -912,61 +689,26 @@ func (s *testColumnSuiteToVerify) TestAddColumn() { d.SetHook(tc) - job := testCreateColumn(s.T(), ctx, d, s.dbInfo, tblInfo, newColName, &ast.ColumnPosition{Tp: ast.ColumnPositionNone}, defaultColValue) - - testCheckJobDone(s.T(), d, job, true) - mu.Lock() - hErr := hookErr - ok := checkOK - mu.Unlock() - require.NoError(s.T(), hErr) - 
require.True(s.T(), ok) - - err = ctx.NewTxn(context.Background()) - require.NoError(s.T(), err) - - job = testDropTable(s.T(), ctx, d, s.dbInfo, tblInfo) - testCheckJobDone(s.T(), d, job, false) + jobID := testCreateColumn(tk, t, testNewContext(store), tableID, newColName, "", defaultColValue, dom) + testCheckJobDone(t, store, jobID, true) - txn, err = ctx.Txn(true) - require.NoError(s.T(), err) - err = txn.Commit(context.Background()) - require.NoError(s.T(), err) + require.True(t, checkOK) - err = d.Stop() - require.NoError(s.T(), err) + jobID = testDropTable(tk, t, "t1", dom) + testCheckJobDone(t, store, jobID, false) } -func (s *testColumnSuiteToVerify) TestAddColumns() { - d, err := testNewDDLAndStart( - context.Background(), - WithStore(s.store), - WithLease(testLease), - ) - require.NoError(s.T(), err) - tblInfo, err := testTableInfo(d, "t", 3) - require.NoError(s.T(), err) - ctx := testNewContext(d) - - err = ctx.NewTxn(context.Background()) - require.NoError(s.T(), err) - - testCreateTable(s.T(), ctx, d, s.dbInfo, tblInfo) - t := testGetTable(s.T(), d, s.dbInfo.ID, tblInfo.ID) +func TestAddColumns(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t1 (c1 int, c2 int, c3 int);") - oldRow := types.MakeDatums(int64(1), int64(2), int64(3)) - handle, err := t.AddRecord(ctx, oldRow) - require.NoError(s.T(), err) - - txn, err := ctx.Txn(true) - require.NoError(s.T(), err) - err = txn.Commit(context.Background()) - require.NoError(s.T(), err) - - newColNames := []string{"c4,c5,c6"} - positions := make([]*ast.ColumnPosition, 3) + newColNames := []string{"c4", "c5", "c6"} + positions := make([]string, 3) for i := range positions { - positions[i] = &ast.ColumnPosition{Tp: ast.ColumnPositionNone} + positions[i] = "" } defaultColValue := int64(4) @@ -974,30 +716,41 @@ func (s *testColumnSuiteToVerify) TestAddColumns() { var hookErr error 
checkOK := false - tc := &TestDDLCallback{} - tc.onJobUpdated = func(job *model.Job) { + var tableID int64 + rs := tk.MustQuery("select TIDB_TABLE_ID from information_schema.tables where table_name='t1' and table_schema='test';") + tableIDi, _ := strconv.Atoi(rs.Rows()[0][0].(string)) + tableID = int64(tableIDi) + tbl := testGetTable(t, dom, tableID) + + ctx := testNewContext(store) + err := ctx.NewTxn(context.Background()) + require.NoError(t, err) + oldRow := types.MakeDatums(int64(1), int64(2), int64(3)) + handle, err := tbl.AddRecord(ctx, oldRow) + require.NoError(t, err) + + txn, err := ctx.Txn(true) + require.NoError(t, err) + err = txn.Commit(context.Background()) + require.NoError(t, err) + + d := dom.DDL() + tc := &ddl.TestDDLCallback{Do: dom} + tc.OnJobUpdatedExported = func(job *model.Job) { mu.Lock() defer mu.Unlock() if checkOK { return } - t, err1 := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) - if err1 != nil { - hookErr = errors.Trace(err1) - return - } + tbl := testGetTable(t, dom, tableID) for _, newColName := range newColNames { - newCol := table.FindCol(t.(*tables.TableCommon).Columns, newColName) + newCol := table.FindCol(tbl.(*tables.TableCommon).Columns, newColName) if newCol == nil { return } - err1 = s.checkAddColumn(newCol.State, d, tblInfo, handle, newCol, oldRow, defaultColValue) - if err1 != nil { - hookErr = errors.Trace(err1) - return - } + checkAddColumn(t, newCol.State, tableID, handle, newCol, oldRow, defaultColValue, dom, store, 3) if newCol.State == model.StatePublic { checkOK = true @@ -1007,67 +760,61 @@ func (s *testColumnSuiteToVerify) TestAddColumns() { d.SetHook(tc) - job := testCreateColumns(s.T(), ctx, d, s.dbInfo, tblInfo, newColNames, positions, defaultColValue) + jobID := testCreateColumns(tk, t, testNewContext(store), tableID, newColNames, positions, defaultColValue, dom) - testCheckJobDone(s.T(), d, job, true) + testCheckJobDone(t, store, jobID, true) mu.Lock() hErr := hookErr ok := checkOK mu.Unlock() - 
require.NoError(s.T(), hErr) - require.True(s.T(), ok) + require.NoError(t, hErr) + require.True(t, ok) - job = testDropTable(s.T(), ctx, d, s.dbInfo, tblInfo) - testCheckJobDone(s.T(), d, job, false) - err = d.Stop() - require.NoError(s.T(), err) + jobID = testDropTable(tk, t, "t1", dom) + testCheckJobDone(t, store, jobID, false) } -func (s *testColumnSuiteToVerify) TestDropColumn() { - d, err := testNewDDLAndStart( - context.Background(), - WithStore(s.store), - WithLease(testLease), - ) - require.NoError(s.T(), err) - tblInfo, err := testTableInfo(d, "t2", 4) - require.NoError(s.T(), err) - ctx := testNewContext(d) - - err = ctx.NewTxn(context.Background()) - require.NoError(s.T(), err) +func TestDropColumnInColumnTest(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t1 (c1 int, c2 int, c3 int, c4 int);") - testCreateTable(s.T(), ctx, d, s.dbInfo, tblInfo) - t := testGetTable(s.T(), d, s.dbInfo.ID, tblInfo.ID) + var tableID int64 + rs := tk.MustQuery("select TIDB_TABLE_ID from information_schema.tables where table_name='t1' and table_schema='test';") + tableIDi, _ := strconv.Atoi(rs.Rows()[0][0].(string)) + tableID = int64(tableIDi) + tbl := testGetTable(t, dom, tableID) + ctx := testNewContext(store) colName := "c4" defaultColValue := int64(4) row := types.MakeDatums(int64(1), int64(2), int64(3)) - _, err = t.AddRecord(ctx, append(row, types.NewDatum(defaultColValue))) - require.NoError(s.T(), err) + err := ctx.NewTxn(context.Background()) + require.NoError(t, err) + _, err = tbl.AddRecord(ctx, append(row, types.NewDatum(defaultColValue))) + require.NoError(t, err) txn, err := ctx.Txn(true) - require.NoError(s.T(), err) + require.NoError(t, err) err = txn.Commit(context.Background()) - require.NoError(s.T(), err) + require.NoError(t, err) checkOK := false var hookErr error var mu sync.Mutex - tc := &TestDDLCallback{} - 
tc.onJobUpdated = func(job *model.Job) { + d := dom.DDL() + tc := &ddl.TestDDLCallback{Do: dom} + tc.OnJobUpdatedExported = func(job *model.Job) { mu.Lock() defer mu.Unlock() if checkOK { return } - t, err1 := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) - if err1 != nil { - hookErr = errors.Trace(err1) - return - } - col := table.FindCol(t.(*tables.TableCommon).Columns, colName) + tbl := testGetTable(t, dom, tableID) + col := table.FindCol(tbl.(*tables.TableCommon).Columns, colName) if col == nil { checkOK = true return @@ -1076,76 +823,62 @@ func (s *testColumnSuiteToVerify) TestDropColumn() { d.SetHook(tc) - job := testDropColumn(s.T(), ctx, d, s.dbInfo, tblInfo, colName, false) - testCheckJobDone(s.T(), d, job, false) + jobID := testDropColumnInternal(tk, t, testNewContext(store), tableID, colName, false, dom) + testCheckJobDone(t, store, jobID, false) mu.Lock() hErr := hookErr ok := checkOK mu.Unlock() - require.NoError(s.T(), hErr) - require.True(s.T(), ok) - - err = ctx.NewTxn(context.Background()) - require.NoError(s.T(), err) - - job = testDropTable(s.T(), ctx, d, s.dbInfo, tblInfo) - testCheckJobDone(s.T(), d, job, false) + require.NoError(t, hErr) + require.True(t, ok) - txn, err = ctx.Txn(true) - require.NoError(s.T(), err) - err = txn.Commit(context.Background()) - require.NoError(s.T(), err) - - err = d.Stop() - require.NoError(s.T(), err) + jobID = testDropTable(tk, t, "t1", dom) + testCheckJobDone(t, store, jobID, false) } -func (s *testColumnSuiteToVerify) TestDropColumns() { - d, err := testNewDDLAndStart( - context.Background(), - WithStore(s.store), - WithLease(testLease), - ) - require.NoError(s.T(), err) - tblInfo, err := testTableInfo(d, "t2", 4) - require.NoError(s.T(), err) - ctx := testNewContext(d) +func TestDropColumns(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t1 (c1 int, c2 int, c3 int, c4 
int);") - err = ctx.NewTxn(context.Background()) - require.NoError(s.T(), err) + var tableID int64 + rs := tk.MustQuery("select TIDB_TABLE_ID from information_schema.tables where table_name='t1' and table_schema='test';") + tableIDi, _ := strconv.Atoi(rs.Rows()[0][0].(string)) + tableID = int64(tableIDi) + tbl := testGetTable(t, dom, tableID) - testCreateTable(s.T(), ctx, d, s.dbInfo, tblInfo) - t := testGetTable(s.T(), d, s.dbInfo.ID, tblInfo.ID) + ctx := testNewContext(store) + err := ctx.NewTxn(context.Background()) + require.NoError(t, err) colNames := []string{"c3", "c4"} defaultColValue := int64(4) row := types.MakeDatums(int64(1), int64(2), int64(3)) - _, err = t.AddRecord(ctx, append(row, types.NewDatum(defaultColValue))) - require.NoError(s.T(), err) + _, err = tbl.AddRecord(ctx, append(row, types.NewDatum(defaultColValue))) + require.NoError(t, err) txn, err := ctx.Txn(true) - require.NoError(s.T(), err) + require.NoError(t, err) err = txn.Commit(context.Background()) - require.NoError(s.T(), err) + require.NoError(t, err) checkOK := false var hookErr error var mu sync.Mutex - tc := &TestDDLCallback{} - tc.onJobUpdated = func(job *model.Job) { + d := dom.DDL() + tc := &ddl.TestDDLCallback{Do: dom} + tc.OnJobUpdatedExported = func(job *model.Job) { mu.Lock() defer mu.Unlock() if checkOK { return } - t, err1 := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) - if err1 != nil { - hookErr = errors.Trace(err1) - return - } + tbl := testGetTable(t, dom, tableID) for _, colName := range colNames { - col := table.FindCol(t.(*tables.TableCommon).Columns, colName) + col := table.FindCol(tbl.(*tables.TableCommon).Columns, colName) if col == nil { checkOK = true return @@ -1155,147 +888,22 @@ func (s *testColumnSuiteToVerify) TestDropColumns() { d.SetHook(tc) - job := testDropColumns(s.T(), ctx, d, s.dbInfo, tblInfo, colNames, false) - testCheckJobDone(s.T(), d, job, false) + jobID := testDropColumns(tk, t, testNewContext(store), tableID, colNames, false, dom) + 
testCheckJobDone(t, store, jobID, false) mu.Lock() hErr := hookErr ok := checkOK mu.Unlock() - require.NoError(s.T(), hErr) - require.True(s.T(), ok) - - job = testDropTable(s.T(), ctx, d, s.dbInfo, tblInfo) - testCheckJobDone(s.T(), d, job, false) - err = d.Stop() - require.NoError(s.T(), err) -} - -func TestModifyColumn(t *testing.T) { - store, err := mockstore.NewMockStore() - require.NoError(t, err) - d, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - - require.NoError(t, err) - ctx := testNewContext(d) - - defer func() { - err := d.Stop() - require.NoError(t, err) - err = store.Close() - require.NoError(t, err) - }() - - tests := []struct { - origin string - to string - err error - }{ - {"int", "bigint", nil}, - {"int", "int unsigned", nil}, - {"varchar(10)", "text", nil}, - {"varbinary(10)", "blob", nil}, - {"text", "blob", dbterror.ErrUnsupportedModifyCharset.GenWithStackByArgs("charset from utf8mb4 to binary")}, - {"varchar(10)", "varchar(8)", nil}, - {"varchar(10)", "varchar(11)", nil}, - {"varchar(10) character set utf8 collate utf8_bin", "varchar(10) character set utf8", nil}, - {"decimal(2,1)", "decimal(3,2)", nil}, - {"decimal(2,1)", "decimal(2,2)", nil}, - {"decimal(2,1)", "decimal(2,1)", nil}, - {"decimal(2,1)", "int", nil}, - {"decimal", "int", nil}, - {"decimal(2,1)", "bigint", nil}, - {"int", "varchar(10) character set gbk", dbterror.ErrUnsupportedModifyCharset.GenWithStackByArgs("charset from binary to gbk")}, - {"varchar(10) character set gbk", "int", dbterror.ErrUnsupportedModifyCharset.GenWithStackByArgs("charset from gbk to binary")}, - {"varchar(10) character set gbk", "varchar(10) character set utf8", dbterror.ErrUnsupportedModifyCharset.GenWithStackByArgs("charset from gbk to utf8")}, - {"varchar(10) character set gbk", "char(10) character set utf8", dbterror.ErrUnsupportedModifyCharset.GenWithStackByArgs("charset from gbk to utf8")}, - {"varchar(10) character set utf8", "char(10) 
character set gbk", dbterror.ErrUnsupportedModifyCharset.GenWithStackByArgs("charset from utf8 to gbk")}, - {"varchar(10) character set utf8", "varchar(10) character set gbk", dbterror.ErrUnsupportedModifyCharset.GenWithStackByArgs("charset from utf8 to gbk")}, - {"varchar(10) character set gbk", "varchar(255) character set gbk", nil}, - } - for _, tt := range tests { - ftA := colDefStrToFieldType(t, tt.origin, ctx) - ftB := colDefStrToFieldType(t, tt.to, ctx) - err := checkModifyTypes(ctx, ftA, ftB, false) - if err == nil { - require.NoErrorf(t, tt.err, "origin:%v, to:%v", tt.origin, tt.to) - } else { - require.EqualError(t, err, tt.err.Error()) - } - } -} + require.NoError(t, hErr) + require.True(t, ok) -func colDefStrToFieldType(t *testing.T, str string, ctx sessionctx.Context) *types.FieldType { - sqlA := "alter table t modify column a " + str - stmt, err := parser.New().ParseOneStmt(sqlA, "", "") - require.NoError(t, err) - colDef := stmt.(*ast.AlterTableStmt).Specs[0].NewColumns[0] - chs, coll := charset.GetDefaultCharsetAndCollate() - col, _, err := buildColumnAndConstraint(ctx, 0, colDef, nil, chs, coll) - require.NoError(t, err) - return &col.FieldType + jobID = testDropTable(tk, t, "t1", dom) + testCheckJobDone(t, store, jobID, false) } -func TestFieldCase(t *testing.T) { - var fields = []string{"field", "Field"} - colObjects := make([]*model.ColumnInfo, len(fields)) - for i, name := range fields { - colObjects[i] = &model.ColumnInfo{ - Name: model.NewCIStr(name), - } - } - err := checkDuplicateColumn(colObjects) - require.EqualError(t, err, infoschema.ErrColumnExists.GenWithStackByArgs("Field").Error()) -} - -func (s *testColumnSuiteToVerify) TestAutoConvertBlobTypeByLength() { - d, err := testNewDDLAndStart( - context.Background(), - WithStore(s.store), - WithLease(testLease), - ) - require.NoError(s.T(), err) - // Close the customized ddl(worker goroutine included) after the test is finished, otherwise, it will - // cause go routine in TiDB leak test. 
- defer func() { - err := d.Stop() - require.NoError(s.T(), err) - }() - - sql := fmt.Sprintf("create table t0(c0 Blob(%d), c1 Blob(%d), c2 Blob(%d), c3 Blob(%d))", - tinyBlobMaxLength-1, blobMaxLength-1, mediumBlobMaxLength-1, longBlobMaxLength-1) - stmt, err := parser.New().ParseOneStmt(sql, "", "") - require.NoError(s.T(), err) - tblInfo, err := BuildTableInfoFromAST(stmt.(*ast.CreateTableStmt)) - require.NoError(s.T(), err) - genIDs, err := d.genGlobalIDs(1) - require.NoError(s.T(), err) - tblInfo.ID = genIDs[0] - - ctx := testNewContext(d) - err = ctx.NewTxn(context.Background()) - require.NoError(s.T(), err) - testCreateTable(s.T(), ctx, d, s.dbInfo, tblInfo) - t := testGetTable(s.T(), d, s.dbInfo.ID, tblInfo.ID) - - require.Equal(s.T(), t.Cols()[0].Tp, mysql.TypeTinyBlob) - require.Equal(s.T(), t.Cols()[0].Flen, tinyBlobMaxLength) - require.Equal(s.T(), t.Cols()[1].Tp, mysql.TypeBlob) - require.Equal(s.T(), t.Cols()[1].Flen, blobMaxLength) - require.Equal(s.T(), t.Cols()[2].Tp, mysql.TypeMediumBlob) - require.Equal(s.T(), t.Cols()[2].Flen, mediumBlobMaxLength) - require.Equal(s.T(), t.Cols()[3].Tp, mysql.TypeLongBlob) - require.Equal(s.T(), t.Cols()[3].Flen, longBlobMaxLength) - - oldRow := types.MakeDatums([]byte("a"), []byte("a"), []byte("a"), []byte("a")) - _, err = t.AddRecord(ctx, oldRow) - require.NoError(s.T(), err) - - txn, err := ctx.Txn(true) - require.NoError(s.T(), err) - err = txn.Commit(context.Background()) - require.NoError(s.T(), err) +func testGetTable(t *testing.T, dom *domain.Domain, tableID int64) table.Table { + require.NoError(t, dom.Reload()) + tbl, exist := dom.InfoSchema().TableByID(tableID) + require.True(t, exist) + return tbl } diff --git a/ddl/db_test.go b/ddl/db_test.go index 2e4a2bf34e3f1..df2a296ee969c 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -17,29 +17,22 @@ package ddl_test import ( "context" "fmt" + "strconv" "strings" - "sync" "testing" "time" - "github.com/pingcap/errors" "github.com/pingcap/failpoint" 
"github.com/pingcap/tidb/ddl" ddlutil "github.com/pingcap/tidb/ddl/util" - "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" - "github.com/pingcap/tidb/executor" - "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" - "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/testkit/external" "github.com/pingcap/tidb/types" - "github.com/pingcap/tidb/util/admin" "github.com/pingcap/tidb/util/mock" - "github.com/pingcap/tidb/util/testutil" "github.com/stretchr/testify/require" ) @@ -533,638 +526,32 @@ func TestCreateTableIgnoreCheckConstraint(t *testing.T) { ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) } -func TestAlterLock(t *testing.T) { - store, clean := testkit.CreateMockStoreWithSchemaLease(t, dbTestLease) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t_index_lock (c1 int, c2 int, C3 int)") - tk.MustExec("alter table t_index_lock add index (c1, c2), lock=none") -} - -func TestComment(t *testing.T) { - store, clean := testkit.CreateMockStoreWithSchemaLease(t, dbTestLease) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - validComment := strings.Repeat("a", 1024) - invalidComment := strings.Repeat("b", 1025) - - tk.MustExec("create table ct (c int, d int, e int, key (c) comment '" + validComment + "')") - tk.MustExec("create index i on ct (d) comment '" + validComment + "'") - tk.MustExec("alter table ct add key (e) comment '" + validComment + "'") - - tk.MustGetErrCode("create table ct1 (c int, key (c) comment '"+invalidComment+"')", errno.ErrTooLongIndexComment) - tk.MustGetErrCode("create index i1 on ct (d) comment '"+invalidComment+"b"+"'", errno.ErrTooLongIndexComment) - tk.MustGetErrCode("alter table ct add key (e) comment '"+invalidComment+"'", errno.ErrTooLongIndexComment) - - tk.MustExec("set 
@@sql_mode=''") - tk.MustExec("create table ct1 (c int, d int, e int, key (c) comment '" + invalidComment + "')") - require.Equal(t, uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount()) - tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Warning|1688|Comment for index 'c' is too long (max = 1024)")) - tk.MustExec("create index i1 on ct1 (d) comment '" + invalidComment + "b" + "'") - require.Equal(t, uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount()) - tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Warning|1688|Comment for index 'i1' is too long (max = 1024)")) - tk.MustExec("alter table ct1 add key (e) comment '" + invalidComment + "'") - require.Equal(t, uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount()) - tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Warning|1688|Comment for index 'e' is too long (max = 1024)")) -} - -func TestIfNotExists(t *testing.T) { - store, clean := testkit.CreateMockStoreWithSchemaLease(t, dbTestLease) +func TestAutoConvertBlobTypeByLength(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) defer clean() tk := testkit.NewTestKit(t, store) tk.MustExec("use test") - tk.MustExec("create table t1 (a int key)") - - // ADD COLUMN - sql := "alter table t1 add column b int" - tk.MustExec(sql) - tk.MustGetErrCode(sql, errno.ErrDupFieldName) - tk.MustExec("alter table t1 add column if not exists b int") - require.Equal(t, uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount()) - tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Note|1060|Duplicate column name 'b'")) - - // ADD INDEX - sql = "alter table t1 add index idx_b (b)" - tk.MustExec(sql) - tk.MustGetErrCode(sql, errno.ErrDupKeyName) - tk.MustExec("alter table t1 add index if not exists idx_b (b)") - require.Equal(t, uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount()) - tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Note|1061|index already 
exist idx_b")) - - // CREATE INDEX - sql = "create index idx_b on t1 (b)" - tk.MustGetErrCode(sql, errno.ErrDupKeyName) - tk.MustExec("create index if not exists idx_b on t1 (b)") - require.Equal(t, uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount()) - tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Note|1061|index already exist idx_b")) - - // ADD PARTITION - tk.MustExec("drop table if exists t2") - tk.MustExec("create table t2 (a int key) partition by range(a) (partition p0 values less than (10), partition p1 values less than (20))") - sql = "alter table t2 add partition (partition p2 values less than (30))" - tk.MustExec(sql) - tk.MustGetErrCode(sql, errno.ErrSameNamePartition) - tk.MustExec("alter table t2 add partition if not exists (partition p2 values less than (30))") - require.Equal(t, uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount()) - tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Note|1517|Duplicate partition name p2")) -} - -func TestIfExists(t *testing.T) { - store, clean := testkit.CreateMockStoreWithSchemaLease(t, dbTestLease) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t1 (a int key, b int);") - - // DROP COLUMN - sql := "alter table t1 drop column b" + sql := fmt.Sprintf("create table t0(c0 Blob(%d), c1 Blob(%d), c2 Blob(%d), c3 Blob(%d))", + 255-1, 65535-1, 16777215-1, 4294967295-1) tk.MustExec(sql) - tk.MustGetErrCode(sql, errno.ErrCantDropFieldOrKey) - tk.MustExec("alter table t1 drop column if exists b") // only `a` exists now - require.Equal(t, uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount()) - tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Note|1091|Can't DROP 'b'; check that column/key exists")) - - // CHANGE COLUMN - sql = "alter table t1 change column b c int" - tk.MustGetErrCode(sql, errno.ErrBadField) - tk.MustExec("alter table t1 change column if exists b c int") - require.Equal(t, 
uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount()) - tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Note|1054|Unknown column 'b' in 't1'")) - tk.MustExec("alter table t1 change column if exists a c int") // only `c` exists now - // MODIFY COLUMN - sql = "alter table t1 modify column a bigint" - tk.MustGetErrCode(sql, errno.ErrBadField) - tk.MustExec("alter table t1 modify column if exists a bigint") - require.Equal(t, uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount()) - tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Note|1054|Unknown column 'a' in 't1'")) - tk.MustExec("alter table t1 modify column if exists c bigint") // only `c` exists now - - // DROP INDEX - tk.MustExec("alter table t1 add index idx_c (c)") - sql = "alter table t1 drop index idx_c" - tk.MustExec(sql) - tk.MustGetErrCode(sql, errno.ErrCantDropFieldOrKey) - tk.MustExec("alter table t1 drop index if exists idx_c") - require.Equal(t, uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount()) - tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Note|1091|index idx_c doesn't exist")) - - // DROP PARTITION - tk.MustExec("drop table if exists t2") - tk.MustExec("create table t2 (a int key) partition by range(a) (partition pNeg values less than (0), partition p0 values less than (10), partition p1 values less than (20))") - sql = "alter table t2 drop partition p1" - tk.MustExec(sql) - tk.MustGetErrCode(sql, errno.ErrDropPartitionNonExistent) - tk.MustExec("alter table t2 drop partition if exists p1") - require.Equal(t, uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount()) - tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Note|1507|Error in list of partitions to DROP")) -} - -func TestCheckTooBigFieldLength(t *testing.T) { - store, clean := testkit.CreateMockStoreWithSchemaLease(t, dbTestLease) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table tr_01 
(id int, name varchar(20000), purchased date ) default charset=utf8 collate=utf8_bin;") - - tk.MustExec("drop table if exists tr_02;") - tk.MustExec("create table tr_02 (id int, name varchar(16000), purchased date ) default charset=utf8mb4 collate=utf8mb4_bin;") - - tk.MustExec("drop table if exists tr_03;") - tk.MustExec("create table tr_03 (id int, name varchar(65534), purchased date ) default charset=latin1;") - - tk.MustExec("drop table if exists tr_04;") - tk.MustExec("create table tr_04 (a varchar(20000) ) default charset utf8;") - tk.MustGetErrCode("alter table tr_04 add column b varchar(20000) charset utf8mb4;", errno.ErrTooBigFieldlength) - tk.MustGetErrCode("alter table tr_04 convert to character set utf8mb4;", errno.ErrTooBigFieldlength) - tk.MustGetErrCode("create table tr (id int, name varchar(30000), purchased date ) default charset=utf8 collate=utf8_bin;", errno.ErrTooBigFieldlength) - tk.MustGetErrCode("create table tr (id int, name varchar(20000) charset utf8mb4, purchased date ) default charset=utf8 collate=utf8_bin;", errno.ErrTooBigFieldlength) - tk.MustGetErrCode("create table tr (id int, name varchar(65536), purchased date ) default charset=latin1;", errno.ErrTooBigFieldlength) - - tk.MustExec("drop table if exists tr_05;") - tk.MustExec("create table tr_05 (a varchar(16000) charset utf8);") - tk.MustExec("alter table tr_05 modify column a varchar(16000) charset utf8;") - tk.MustExec("alter table tr_05 modify column a varchar(16000) charset utf8mb4;") -} - -func TestGeneratedColumnWindowFunction(t *testing.T) { - store, clean := testkit.CreateMockStoreWithSchemaLease(t, dbTestLease) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustGetErrCode("CREATE TABLE t (a INT , b INT as (ROW_NUMBER() OVER (ORDER BY a)))", errno.ErrWindowInvalidWindowFuncUse) - tk.MustGetErrCode("CREATE TABLE t (a INT , index idx ((ROW_NUMBER() OVER (ORDER BY a))))", errno.ErrWindowInvalidWindowFuncUse) -} - -func 
TestCreateTableWithDecimalWithDoubleZero(t *testing.T) { - store, clean := testkit.CreateMockStoreWithSchemaLease(t, dbTestLease) - defer clean() - - tk := testkit.NewTestKit(t, store) - checkType := func(db, table, field string) { - ctx := tk.Session().(sessionctx.Context) - is := domain.GetDomain(ctx).InfoSchema() - tableInfo, err := is.TableByName(model.NewCIStr(db), model.NewCIStr(table)) - require.NoError(t, err) - tblInfo := tableInfo.Meta() - for _, col := range tblInfo.Columns { - if col.Name.L == field { - require.Equal(t, 10, col.Flen) - } - } - } - - tk.MustExec("use test") - tk.MustExec("drop table if exists tt") - tk.MustExec("create table tt(d decimal(0, 0))") - checkType("test", "tt", "d") - - tk.MustExec("drop table tt") - tk.MustExec("create table tt(a int)") - tk.MustExec("alter table tt add column d decimal(0, 0)") - checkType("test", "tt", "d") - - tk.MustExec("drop table tt") - tk.MustExec("create table tt(d int)") - tk.MustExec("alter table tt change column d d decimal(0, 0)") - checkType("test", "tt", "d") -} - -func TestAlterCheck(t *testing.T) { - store, clean := testkit.CreateMockStoreWithSchemaLease(t, dbTestLease) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table alter_check (pk int primary key)") - tk.MustExec("alter table alter_check alter check crcn ENFORCED") - require.Equal(t, uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount()) - tk.MustQuery("show warnings").Check(testkit.RowsWithSep("|", "Warning|8231|ALTER CHECK is not supported")) -} - -func TestDefaultSQLFunction(t *testing.T) { - store, clean := testkit.CreateMockStoreWithSchemaLease(t, dbTestLease) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - // For issue #13189 - // Use `DEFAULT()` in `INSERT` / `INSERT ON DUPLICATE KEY UPDATE` statement - tk.MustExec("create table t1 (a int primary key, b int default 20, c int default 30, d int default 40);") - tk.MustExec("SET 
@@time_zone = '+00:00'") - defer tk.MustExec("SET @@time_zone = DEFAULT") - tk.MustQuery("SELECT @@time_zone").Check(testkit.Rows("+00:00")) - tk.MustExec("create table t2 (a int primary key, b timestamp DEFAULT CURRENT_TIMESTAMP, c timestamp DEFAULT '2000-01-01 00:00:00')") - tk.MustExec("insert into t1 set a = 1, b = default(c);") - tk.MustQuery("select * from t1;").Check(testkit.Rows("1 30 30 40")) - tk.MustExec("insert into t1 set a = 2, b = default(c), c = default(d), d = default(b);") - tk.MustQuery("select * from t1;").Check(testkit.Rows("1 30 30 40", "2 30 40 20")) - tk.MustExec("insert into t1 values (2, 3, 4, 5) on duplicate key update b = default(d), c = default(b);") - tk.MustQuery("select * from t1;").Check(testkit.Rows("1 30 30 40", "2 40 20 20")) - tk.MustExec("delete from t1") - tk.MustExec("insert into t1 set a = default(b) + default(c) - default(d)") - tk.MustQuery("select * from t1;").Check(testkit.Rows("10 20 30 40")) - tk.MustExec("set @@timestamp = 1321009871") - defer tk.MustExec("set @@timestamp = DEFAULT") - tk.MustQuery("SELECT NOW()").Check(testkit.Rows("2011-11-11 11:11:11")) - tk.MustExec("insert into t2 set a = 1, b = default(c)") - tk.MustExec("insert into t2 set a = 2, c = default(b)") - tk.MustGetErrCode("insert into t2 set a = 3, b = default(a)", errno.ErrNoDefaultForField) - tk.MustExec("insert into t2 set a = 4, b = default(b), c = default(c)") - tk.MustExec("insert into t2 set a = 5, b = default, c = default") - tk.MustExec("insert into t2 set a = 6") - tk.MustQuery("select * from t2").Sort().Check(testkit.Rows( - "1 2000-01-01 00:00:00 2000-01-01 00:00:00", - "2 2011-11-11 11:11:11 2011-11-11 11:11:11", - "4 2011-11-11 11:11:11 2000-01-01 00:00:00", - "5 2011-11-11 11:11:11 2000-01-01 00:00:00", - "6 2011-11-11 11:11:11 2000-01-01 00:00:00")) - // Use `DEFAULT()` in `UPDATE` statement - tk.MustExec("delete from t1;") - tk.MustExec("insert into t1 value (1, 2, 3, 4);") - tk.MustExec("update t1 set a = 1, c = default(b);") - 
tk.MustQuery("select * from t1;").Check(testkit.Rows("1 2 20 4")) - tk.MustExec("insert into t1 value (2, 2, 3, 4);") - tk.MustExec("update t1 set c = default(b), b = default(c) where a = 2;") - tk.MustQuery("select * from t1;").Check(testkit.Rows("1 2 20 4", "2 30 20 4")) - tk.MustExec("delete from t1") - tk.MustExec("insert into t1 set a = 10") - tk.MustExec("update t1 set a = 10, b = default(c) + default(d)") - tk.MustQuery("select * from t1;").Check(testkit.Rows("10 70 30 40")) - tk.MustExec("set @@timestamp = 1671747742") - tk.MustExec("update t2 set b = default(c) WHERE a = 6") - tk.MustExec("update t2 set c = default(b) WHERE a = 5") - tk.MustGetErrCode("update t2 set b = default(a) WHERE a = 4", errno.ErrNoDefaultForField) - tk.MustExec("update t2 set b = default(b), c = default(c) WHERE a = 4") - // Non existing row! - tk.MustExec("update t2 set b = default(b), c = default(c) WHERE a = 3") - tk.MustExec("update t2 set b = default, c = default WHERE a = 2") - tk.MustExec("update t2 set b = default(b) WHERE a = 1") - tk.MustQuery("select * from t2;").Sort().Check(testkit.Rows( - "1 2022-12-22 22:22:22 2000-01-01 00:00:00", - "2 2022-12-22 22:22:22 2000-01-01 00:00:00", - "4 2022-12-22 22:22:22 2000-01-01 00:00:00", - "5 2011-11-11 11:11:11 2022-12-22 22:22:22", - "6 2000-01-01 00:00:00 2000-01-01 00:00:00")) - // Use `DEFAULT()` in `REPLACE` statement - tk.MustExec("delete from t1;") - tk.MustExec("insert into t1 value (1, 2, 3, 4);") - tk.MustExec("replace into t1 set a = 1, c = default(b);") - tk.MustQuery("select * from t1;").Check(testkit.Rows("1 20 20 40")) - tk.MustExec("insert into t1 value (2, 2, 3, 4);") - tk.MustExec("replace into t1 set a = 2, d = default(b), c = default(d);") - tk.MustQuery("select * from t1;").Check(testkit.Rows("1 20 20 40", "2 20 40 20")) - tk.MustExec("delete from t1") - tk.MustExec("insert into t1 set a = 10, c = 3") - tk.MustExec("replace into t1 set a = 10, b = default(c) + default(d)") - tk.MustQuery("select * from 
t1;").Check(testkit.Rows("10 70 30 40")) - tk.MustExec("replace into t1 set a = 20, d = default(c) + default(b)") - tk.MustQuery("select * from t1;").Check(testkit.Rows("10 70 30 40", "20 20 30 50")) - - // Use `DEFAULT()` in expression of generate columns, issue #12471 - tk.MustExec("DROP TABLE t2") - tk.MustExec("create table t2(a int default 9, b int as (1 + default(a)));") - tk.MustExec("insert into t2 values(1, default);") - tk.MustExec("insert into t2 values(2, default(b))") - tk.MustQuery("select * from t2").Sort().Check(testkit.Rows("1 10", "2 10")) - - // Use `DEFAULT()` with subquery, issue #13390 - tk.MustExec("create table t3(f1 int default 11);") - tk.MustExec("insert into t3 value ();") - tk.MustQuery("select default(f1) from (select * from t3) t1;").Check(testkit.Rows("11")) - tk.MustQuery("select default(f1) from (select * from (select * from t3) t1 ) t1;").Check(testkit.Rows("11")) - - tk.MustExec("create table t4(a int default 4);") - tk.MustExec("insert into t4 value (2);") - tk.MustQuery("select default(c) from (select b as c from (select a as b from t4) t3) t2;").Check(testkit.Rows("4")) - tk.MustGetErrCode("select default(a) from (select a from (select 1 as a) t4) t4;", errno.ErrNoDefaultForField) - - tk.MustExec("drop table t1, t2, t3, t4;") -} - -func TestCreateIndexType(t *testing.T) { - store, clean := testkit.CreateMockStoreWithSchemaLease(t, dbTestLease) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`CREATE TABLE test_index ( - price int(5) DEFAULT '0' NOT NULL, - area varchar(40) DEFAULT '' NOT NULL, - type varchar(40) DEFAULT '' NOT NULL, - transityes set('a','b'), - shopsyes enum('Y','N') DEFAULT 'Y' NOT NULL, - schoolsyes enum('Y','N') DEFAULT 'Y' NOT NULL, - petsyes enum('Y','N') DEFAULT 'Y' NOT NULL, - KEY price (price,area,type,transityes,shopsyes,schoolsyes,petsyes));`) -} - -func TestAlterPrimaryKey(t *testing.T) { - store, clean := testkit.CreateMockStoreWithSchemaLease(t, 
dbTestLease) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table test_add_pk(a int, b int unsigned , c varchar(255) default 'abc', d int as (a+b), e int as (a+1) stored, index idx(b))") - - // for generated columns - tk.MustGetErrCode("alter table test_add_pk add primary key(d);", errno.ErrUnsupportedOnGeneratedColumn) - // The primary key name is the same as the existing index name. - tk.MustExec("alter table test_add_pk add primary key idx(e)") - tk.MustExec("drop index `primary` on test_add_pk") - - // for describing table - tk.MustExec("create table test_add_pk1(a int, index idx(a))") - tk.MustQuery("desc test_add_pk1").Check(testutil.RowsWithSep(",", `a,int(11),YES,MUL,,`)) - tk.MustExec("alter table test_add_pk1 add primary key idx(a)") - tk.MustQuery("desc test_add_pk1").Check(testutil.RowsWithSep(",", `a,int(11),NO,PRI,,`)) - tk.MustExec("alter table test_add_pk1 drop primary key") - tk.MustQuery("desc test_add_pk1").Check(testutil.RowsWithSep(",", `a,int(11),NO,MUL,,`)) - tk.MustExec("create table test_add_pk2(a int, b int, index idx(a))") - tk.MustExec("alter table test_add_pk2 add primary key idx(a, b)") - tk.MustQuery("desc test_add_pk2").Check(testutil.RowsWithSep(",", ""+ - "a int(11) NO PRI ]\n"+ - "[b int(11) NO PRI ")) - tk.MustQuery("show create table test_add_pk2").Check(testutil.RowsWithSep("|", ""+ - "test_add_pk2 CREATE TABLE `test_add_pk2` (\n"+ - " `a` int(11) NOT NULL,\n"+ - " `b` int(11) NOT NULL,\n"+ - " KEY `idx` (`a`),\n"+ - " PRIMARY KEY (`a`,`b`) /*T![clustered_index] NONCLUSTERED */\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) - tk.MustExec("alter table test_add_pk2 drop primary key") - tk.MustQuery("desc test_add_pk2").Check(testutil.RowsWithSep(",", ""+ - "a int(11) NO MUL ]\n"+ - "[b int(11) NO ")) - - // Check if the primary key exists before checking the table's pkIsHandle. 
- tk.MustGetErrCode("alter table test_add_pk drop primary key", errno.ErrCantDropFieldOrKey) - - // for the limit of name - validName := strings.Repeat("a", mysql.MaxIndexIdentifierLen) - invalidName := strings.Repeat("b", mysql.MaxIndexIdentifierLen+1) - tk.MustGetErrCode("alter table test_add_pk add primary key "+invalidName+"(a)", errno.ErrTooLongIdent) - // for valid name - tk.MustExec("alter table test_add_pk add primary key " + validName + "(a)") - // for multiple primary key - tk.MustGetErrCode("alter table test_add_pk add primary key (a)", errno.ErrMultiplePriKey) - tk.MustExec("alter table test_add_pk drop primary key") - // for not existing primary key - tk.MustGetErrCode("alter table test_add_pk drop primary key", errno.ErrCantDropFieldOrKey) - tk.MustGetErrCode("drop index `primary` on test_add_pk", errno.ErrCantDropFieldOrKey) - - // for too many key parts specified - tk.MustGetErrCode("alter table test_add_pk add primary key idx_test(f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17);", - errno.ErrTooManyKeyParts) - - // for the limit of comment's length - validComment := "'" + strings.Repeat("a", ddl.MaxCommentLength) + "'" - invalidComment := "'" + strings.Repeat("b", ddl.MaxCommentLength+1) + "'" - tk.MustGetErrCode("alter table test_add_pk add primary key(a) comment "+invalidComment, errno.ErrTooLongIndexComment) - // for empty sql_mode - r := tk.MustQuery("select @@sql_mode") - sqlMode := r.Rows()[0][0].(string) - tk.MustExec("set @@sql_mode=''") - tk.MustExec("alter table test_add_pk add primary key(a) comment " + invalidComment) - require.Equal(t, uint16(1), tk.Session().GetSessionVars().StmtCtx.WarningCount()) - tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1688|Comment for index 'PRIMARY' is too long (max = 1024)")) - tk.MustExec("set @@sql_mode= '" + sqlMode + "'") - tk.MustExec("alter table test_add_pk drop primary key") - // for valid comment - tk.MustExec("alter table test_add_pk add primary key(a, b, c) 
comment " + validComment) - require.NoError(t, tk.Session().NewTxn(context.Background())) - tbl := external.GetTableByName(t, tk, "test", "test_add_pk") - col1Flag := tbl.Cols()[0].Flag - col2Flag := tbl.Cols()[1].Flag - col3Flag := tbl.Cols()[2].Flag - require.True(t, mysql.HasNotNullFlag(col1Flag) && !mysql.HasPreventNullInsertFlag(col1Flag)) - require.True(t, mysql.HasNotNullFlag(col2Flag) && !mysql.HasPreventNullInsertFlag(col2Flag) && mysql.HasUnsignedFlag(col2Flag)) - require.True(t, mysql.HasNotNullFlag(col3Flag) && !mysql.HasPreventNullInsertFlag(col3Flag) && !mysql.HasNoDefaultValueFlag(col3Flag)) - tk.MustExec("alter table test_add_pk drop primary key") - - // for null values in primary key - tk.MustExec("drop table test_add_pk") - tk.MustExec("create table test_add_pk(a int, b int unsigned , c varchar(255) default 'abc', index idx(b))") - tk.MustExec("insert into test_add_pk set a = 0, b = 0, c = 0") - tk.MustExec("insert into test_add_pk set a = 1") - tk.MustGetErrCode("alter table test_add_pk add primary key (b)", errno.ErrInvalidUseOfNull) - tk.MustExec("insert into test_add_pk set a = 2, b = 2") - tk.MustGetErrCode("alter table test_add_pk add primary key (a, b)", errno.ErrInvalidUseOfNull) - tk.MustExec("insert into test_add_pk set a = 3, c = 3") - tk.MustGetErrCode("alter table test_add_pk add primary key (c, b, a)", errno.ErrInvalidUseOfNull) -} - -func TestParallelDropSchemaAndDropTable(t *testing.T) { - store, dom, clean := testkit.CreateMockStoreAndDomainWithSchemaLease(t, dbTestLease) - defer clean() - - tk1 := testkit.NewTestKit(t, store) - tk1.MustExec("create database if not exists test_drop_schema_table") - tk1.MustExec("use test_drop_schema_table") - tk1.MustExec("create table t(c1 int, c2 int)") - var checkErr error - hook := &ddl.TestDDLCallback{Do: dom} - - dbInfo := external.GetSchemaByName(t, tk1, "test_drop_schema_table") - done := false - var wg sync.WaitGroup - tk2 := testkit.NewTestKit(t, store) - tk2.MustExec("use 
test_drop_schema_table") - hook.OnJobUpdatedExported = func(job *model.Job) { - if job.Type == model.ActionDropSchema && job.State == model.JobStateRunning && - job.SchemaState == model.StateWriteOnly && job.SchemaID == dbInfo.ID && done == false { - wg.Add(1) - done = true - go func() { - _, checkErr = tk2.Exec("drop table t") - wg.Done() - }() - time.Sleep(5 * time.Millisecond) - } - } - originalHook := dom.DDL().GetHook() - dom.DDL().SetHook(hook) - tk1.MustExec("drop database test_drop_schema_table") - dom.DDL().SetHook(originalHook) - wg.Wait() - require.True(t, done) - require.Error(t, checkErr) - // There are two possible assert result because: - // 1: If drop-database is finished before drop-table being put into the ddl job queue, it will return "unknown table" error directly in the previous check. - // 2: If drop-table has passed the previous check and been put into the ddl job queue, then drop-database finished, it will return schema change error. - assertRes := checkErr.Error() == "[domain:8028]Information schema is changed during the execution of the"+ - " statement(for example, table definition may be updated by other DDL ran in parallel). "+ - "If you see this error often, try increasing `tidb_max_delta_schema_count`. [try again later]" || - checkErr.Error() == "[schema:1051]Unknown table 'test_drop_schema_table.t'" - require.True(t, assertRes) - - // Below behaviour is use to mock query `curl "http://$IP:10080/tiflash/replica"` - fn := func(jobs []*model.Job) (bool, error) { - return executor.GetDropOrTruncateTableInfoFromJobs(jobs, 0, dom, func(job *model.Job, info *model.TableInfo) (bool, error) { - return false, nil - }) - } - require.NoError(t, tk1.Session().NewTxn(context.Background())) - txn, err := tk1.Session().Txn(true) - require.NoError(t, err) - require.NoError(t, admin.IterHistoryDDLJobs(txn, fn)) -} - -// TestCancelDropIndex tests cancel ddl job which type is drop primary key. 
-func TestCancelDropPrimaryKey(t *testing.T) { - store, dom, clean := testkit.CreateMockStoreAndDomainWithSchemaLease(t, dbTestLease) - defer clean() - idxName := "primary" - addIdxSQL := "alter table t add primary key idx_c2 (c2);" - dropIdxSQL := "alter table t drop primary key;" - testCancelDropIndex(t, store, dom.DDL(), idxName, addIdxSQL, dropIdxSQL, dom) -} - -// TestCancelDropIndex tests cancel ddl job which type is drop index. -func TestCancelDropIndex(t *testing.T) { - store, dom, clean := testkit.CreateMockStoreAndDomainWithSchemaLease(t, dbTestLease) - defer clean() - idxName := "idx_c2" - addIdxSQL := "alter table t add index idx_c2 (c2);" - dropIdxSQL := "alter table t drop index idx_c2;" - testCancelDropIndex(t, store, dom.DDL(), idxName, addIdxSQL, dropIdxSQL, dom) -} - -// testCancelDropIndex tests cancel ddl job which type is drop index. -func testCancelDropIndex(t *testing.T, store kv.Storage, d ddl.DDL, idxName, addIdxSQL, dropIdxSQL string, dom *domain.Domain) { - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(c1 int, c2 int)") - defer tk.MustExec("drop table t;") - for i := 0; i < 5; i++ { - tk.MustExec("insert into t values (?, ?)", i, i) - } - tests := []struct { - needAddIndex bool - jobState model.JobState - JobSchemaState model.SchemaState - cancelSucc bool - }{ - // model.JobStateNone means the jobs is canceled before the first run. - // if we cancel successfully, we need to set needAddIndex to false in the next test case. Otherwise, set needAddIndex to true. 
- {true, model.JobStateNone, model.StateNone, true}, - {false, model.JobStateRunning, model.StateWriteOnly, false}, - {true, model.JobStateRunning, model.StateDeleteOnly, false}, - {true, model.JobStateRunning, model.StateDeleteReorganization, false}, - } - var checkErr error - hook := &ddl.TestDDLCallback{Do: dom} - var jobID int64 - test := &tests[0] - hook.OnJobRunBeforeExported = func(job *model.Job) { - if (job.Type == model.ActionDropIndex || job.Type == model.ActionDropPrimaryKey) && - job.State == test.jobState && job.SchemaState == test.JobSchemaState { - jobID = job.ID - jobIDs := []int64{job.ID} - hookCtx := mock.NewContext() - hookCtx.Store = store - err := hookCtx.NewTxn(context.TODO()) - if err != nil { - checkErr = errors.Trace(err) - return - } - txn, err := hookCtx.Txn(true) - if err != nil { - checkErr = errors.Trace(err) - return - } - - errs, err := admin.CancelJobs(txn, jobIDs) - if err != nil { - checkErr = errors.Trace(err) - return - } - if errs[0] != nil { - checkErr = errors.Trace(errs[0]) - return - } - checkErr = txn.Commit(context.Background()) - } - } - originalHook := d.GetHook() - d.SetHook(hook) - for i := range tests { - test = &tests[i] - if test.needAddIndex { - tk.MustExec(addIdxSQL) - } - err := tk.ExecToErr(dropIdxSQL) - tbl := external.GetTableByName(t, tk, "test", "t") - indexInfo := tbl.Meta().FindIndexByName(idxName) - if test.cancelSucc { - require.NoError(t, checkErr) - require.EqualError(t, err, "[ddl:8214]Cancelled DDL job") - require.NotNil(t, indexInfo) - require.Equal(t, model.StatePublic, indexInfo.State) - } else { - err1 := admin.ErrCannotCancelDDLJob.GenWithStackByArgs(jobID) - require.NoError(t, err) - require.EqualError(t, checkErr, err1.Error()) - require.Nil(t, indexInfo) - } - } - d.SetHook(originalHook) - tk.MustExec(addIdxSQL) - tk.MustExec(dropIdxSQL) -} - -// TestCancelTruncateTable tests cancel ddl job which type is truncate table. 
-func TestCancelTruncateTable(t *testing.T) { - store, dom, clean := testkit.CreateMockStoreAndDomainWithSchemaLease(t, dbTestLease) - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t(c1 int, c2 int)") - defer tk.MustExec("drop table t;") - var checkErr error - hook := &ddl.TestDDLCallback{Do: dom} - hook.OnJobRunBeforeExported = func(job *model.Job) { - if job.Type == model.ActionTruncateTable && job.State == model.JobStateNone { - jobIDs := []int64{job.ID} - hookCtx := mock.NewContext() - hookCtx.Store = store - err := hookCtx.NewTxn(context.Background()) - if err != nil { - checkErr = errors.Trace(err) - return - } - txn, err := hookCtx.Txn(true) - if err != nil { - checkErr = errors.Trace(err) - return - } - errs, err := admin.CancelJobs(txn, jobIDs) - if err != nil { - checkErr = errors.Trace(err) - return - } - if errs[0] != nil { - checkErr = errors.Trace(errs[0]) - return - } - checkErr = txn.Commit(context.Background()) - } - } - dom.DDL().SetHook(hook) - err := tk.ExecToErr("truncate table t") - require.NoError(t, checkErr) - require.EqualError(t, err, "[ddl:8214]Cancelled DDL job") + var tableID int64 + rs := tk.MustQuery("select TIDB_TABLE_ID from information_schema.tables where table_name='t0' and table_schema='test';") + tableIDi, _ := strconv.Atoi(rs.Rows()[0][0].(string)) + tableID = int64(tableIDi) + + tbl, exist := dom.InfoSchema().TableByID(tableID) + require.True(t, exist) + + require.Equal(t, tbl.Cols()[0].Tp, mysql.TypeTinyBlob) + require.Equal(t, tbl.Cols()[0].Flen, 255) + require.Equal(t, tbl.Cols()[1].Tp, mysql.TypeBlob) + require.Equal(t, tbl.Cols()[1].Flen, 65535) + require.Equal(t, tbl.Cols()[2].Tp, mysql.TypeMediumBlob) + require.Equal(t, tbl.Cols()[2].Flen, 16777215) + require.Equal(t, tbl.Cols()[3].Tp, mysql.TypeLongBlob) + require.Equal(t, tbl.Cols()[3].Flen, 4294967295) } func TestAddExpressionIndexRollback(t *testing.T) { diff --git a/ddl/ddl.go b/ddl/ddl.go index 
5ee60425a52f4..bc81cebd6a383 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -181,6 +181,8 @@ type DDL interface { GetHook() Callback // SetHook sets the hook. SetHook(h Callback) + // DoDDLJob does the DDL job, it's exported for test. + DoDDLJob(ctx sessionctx.Context, job *model.Job) error } type limitJobTask struct { @@ -611,6 +613,7 @@ func recordLastDDLInfo(ctx sessionctx.Context, job *model.Job) { ctx.GetSessionVars().LastDDLInfo.SeqNum = job.SeqNum } +// DoDDLJob will return func checkHistoryJobInTest(ctx sessionctx.Context, historyJob *model.Job) { if !(flag.Lookup("test.v") != nil || flag.Lookup("check.v") != nil) { return @@ -682,11 +685,11 @@ func setDDLJobQuery(ctx sessionctx.Context, job *model.Job) { } } -// doDDLJob will return +// DoDDLJob will return // - nil: found in history DDL job and no job error // - context.Cancel: job has been sent to worker, but not found in history DDL job before cancel // - other: found in history DDL job and return that job error -func (d *ddl) doDDLJob(ctx sessionctx.Context, job *model.Job) error { +func (d *ddl) DoDDLJob(ctx sessionctx.Context, job *model.Job) error { // Get a global job ID and put the DDL job in the queue. 
setDDLJobQuery(ctx, job) task := &limitJobTask{job, make(chan error)} @@ -731,7 +734,7 @@ func (d *ddl) doDDLJob(ctx sessionctx.Context, job *model.Job) error { i++ ticker = updateTickerInterval(ticker, 10*d.lease, job, i) case <-d.ctx.Done(): - logutil.BgLogger().Info("[ddl] doDDLJob will quit because context done") + logutil.BgLogger().Info("[ddl] DoDDLJob will quit because context done") return context.Canceled } diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index b546dfc12c89b..43b0bb391d023 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -145,7 +145,7 @@ func (d *ddl) CreateSchemaWithInfo( Args: []interface{}{dbInfo}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -175,7 +175,7 @@ func (d *ddl) ModifySchemaCharsetAndCollate(ctx sessionctx.Context, stmt *ast.Al BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{toCharset, toCollate}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -205,7 +205,7 @@ func (d *ddl) ModifySchemaDefaultPlacement(ctx sessionctx.Context, stmt *ast.Alt BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{placementPolicyRef}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -363,7 +363,7 @@ func (d *ddl) ModifySchemaSetTiFlashReplica(sctx sessionctx.Context, stmt *ast.A BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{*tiflashReplica}, } - err := d.doDDLJob(sctx, job) + err := d.DoDDLJob(sctx, job) err = d.callHookOnChanged(err) if err != nil { oneFail = tbl.ID @@ -417,7 +417,7 @@ func (d *ddl) AlterTablePlacement(ctx sessionctx.Context, ident ast.Ident, place Args: []interface{}{placementPolicyRef}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -532,7 +532,7 @@ func (d *ddl) DropSchema(ctx sessionctx.Context, schema model.CIStr) (err 
error) BinlogInfo: &model.HistoryInfo{}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) if err != nil { return errors.Trace(err) @@ -2325,7 +2325,7 @@ func (d *ddl) CreateTableWithInfo( return nil } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) if err != nil { // table exists, but if_not_exists flags is true, so we ignore this error. if onExist == OnExistIgnore && infoschema.ErrTableExists.Equal(err) { @@ -2418,7 +2418,7 @@ func (d *ddl) BatchCreateTableWithInfo(ctx sessionctx.Context, } jobs.Args = append(jobs.Args, args) - err = d.doDDLJob(ctx, jobs) + err = d.DoDDLJob(ctx, jobs) if err != nil { // table exists, but if_not_exists flags is true, so we ignore this error. if onExist == OnExistIgnore && infoschema.ErrTableExists.Equal(err) { @@ -2476,7 +2476,7 @@ func (d *ddl) CreatePlacementPolicyWithInfo(ctx sessionctx.Context, policy *mode BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{policy, onExist == OnExistReplace}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -2539,7 +2539,7 @@ func (d *ddl) RecoverTable(ctx sessionctx.Context, recoverInfo *RecoverInfo) (er recoverInfo.SnapshotTS, recoverTableCheckFlagNone, recoverInfo.AutoIDs.RandomID, recoverInfo.OldSchemaName, recoverInfo.OldTableName}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -3260,7 +3260,7 @@ func (d *ddl) RebaseAutoID(ctx sessionctx.Context, ident ast.Ident, newBase int6 BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{newBase, force}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -3310,7 +3310,7 @@ func (d *ddl) ShardRowID(ctx sessionctx.Context, tableIdent ast.Ident, uVal uint BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{uVal}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = 
d.callHookOnChanged(err) return errors.Trace(err) } @@ -3476,7 +3476,7 @@ func (d *ddl) AddColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTab Args: []interface{}{col, spec.Position, 0}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) // column exists, but if_not_exists flags is true, so we ignore this error. if infoschema.ErrColumnExists.Equal(err) && spec.IfNotExists { ctx.GetSessionVars().StmtCtx.AppendNote(err) @@ -3552,7 +3552,7 @@ func (d *ddl) AddColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alte Args: []interface{}{columns, positions, offsets, ifNotExists}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) if err != nil { return errors.Trace(err) } @@ -3613,7 +3613,7 @@ func (d *ddl) AddTablePartitions(ctx sessionctx.Context, ident ast.Ident, spec * Args: []interface{}{partInfo}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) if dbterror.ErrSameNamePartition.Equal(err) && spec.IfNotExists { ctx.GetSessionVars().StmtCtx.AppendNote(err) return nil @@ -3708,7 +3708,7 @@ func (d *ddl) TruncateTablePartition(ctx sessionctx.Context, ident ast.Ident, sp Args: []interface{}{pids}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) if err != nil { return errors.Trace(err) } @@ -3753,7 +3753,7 @@ func (d *ddl) DropTablePartition(ctx sessionctx.Context, ident ast.Ident, spec * Args: []interface{}{partNames}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) if err != nil { if dbterror.ErrDropPartitionNonExistent.Equal(err) && spec.IfExists { ctx.GetSessionVars().StmtCtx.AppendNote(err) @@ -3948,7 +3948,7 @@ func (d *ddl) ExchangeTablePartition(ctx sessionctx.Context, ident ast.Ident, sp Args: []interface{}{defID, ptSchema.ID, ptMeta.ID, partName, spec.WithValidation}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) if err != nil { return errors.Trace(err) } @@ -3990,7 +3990,7 @@ func (d *ddl) DropColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTa Args: 
[]interface{}{colName}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) // column not exists, but if_exists flags is true, so we ignore this error. if dbterror.ErrCantDropFieldOrKey.Equal(err) && spec.IfExists { ctx.GetSessionVars().StmtCtx.AppendNote(err) @@ -4067,7 +4067,7 @@ func (d *ddl) DropColumns(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alt Args: []interface{}{colNames, ifExists}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) if err != nil { return errors.Trace(err) } @@ -4660,7 +4660,7 @@ func (d *ddl) ChangeColumn(ctx context.Context, sctx sessionctx.Context, ident a return errors.Trace(err) } - err = d.doDDLJob(sctx, job) + err = d.DoDDLJob(sctx, job) // column not exists, but if_exists flags is true, so we ignore this error. if infoschema.ErrColumnNotExists.Equal(err) && spec.IfExists { sctx.GetSessionVars().StmtCtx.AppendNote(err) @@ -4735,7 +4735,7 @@ func (d *ddl) RenameColumn(ctx sessionctx.Context, ident ast.Ident, spec *ast.Al }, Args: []interface{}{&newCol, oldColName, spec.Position, 0, 0}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -4761,7 +4761,7 @@ func (d *ddl) ModifyColumn(ctx context.Context, sctx sessionctx.Context, ident a return errors.Trace(err) } - err = d.doDDLJob(sctx, job) + err = d.DoDDLJob(sctx, job) // column not exists, but if_exists flags is true, so we ignore this error. 
if infoschema.ErrColumnNotExists.Equal(err) && spec.IfExists { sctx.GetSessionVars().StmtCtx.AppendNote(err) @@ -4821,7 +4821,7 @@ func (d *ddl) AlterColumn(ctx sessionctx.Context, ident ast.Ident, spec *ast.Alt Args: []interface{}{col}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -4848,7 +4848,7 @@ func (d *ddl) AlterTableComment(ctx sessionctx.Context, ident ast.Ident, spec *a Args: []interface{}{spec.Comment}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -4869,7 +4869,7 @@ func (d *ddl) AlterTableAutoIDCache(ctx sessionctx.Context, ident ast.Ident, new Args: []interface{}{newCache}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -4920,7 +4920,7 @@ func (d *ddl) AlterTableCharsetAndCollate(ctx sessionctx.Context, ident ast.Iden BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{toCharset, toCollate, needsOverwriteCols}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -4977,7 +4977,7 @@ func (d *ddl) AlterTableSetTiFlashReplica(ctx sessionctx.Context, ident ast.Iden BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{*replicaInfo}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -5086,7 +5086,7 @@ func (d *ddl) UpdateTableReplicaInfo(ctx sessionctx.Context, physicalID int64, a BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{available, physicalID}, } - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -5192,7 +5192,7 @@ func (d *ddl) RenameIndex(ctx sessionctx.Context, ident ast.Ident, spec *ast.Alt Args: []interface{}{spec.FromKey, spec.ToKey}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = 
d.callHookOnChanged(err) return errors.Trace(err) } @@ -5222,7 +5222,7 @@ func (d *ddl) DropTable(ctx sessionctx.Context, ti ast.Ident) (err error) { BinlogInfo: &model.HistoryInfo{}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) if err != nil { return errors.Trace(err) @@ -5255,7 +5255,7 @@ func (d *ddl) DropView(ctx sessionctx.Context, ti ast.Ident) (err error) { BinlogInfo: &model.HistoryInfo{}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -5292,7 +5292,7 @@ func (d *ddl) TruncateTable(ctx sessionctx.Context, ti ast.Ident) error { // but the session was killed before return. ctx.AddTableLock([]model.TableLockTpInfo{{SchemaID: schema.ID, TableID: newTableID, Tp: tb.Meta().Lock.Tp}}) } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) if err != nil { if config.TableLockEnabled() { @@ -5340,7 +5340,7 @@ func (d *ddl) RenameTable(ctx sessionctx.Context, oldIdent, newIdent ast.Ident, Args: []interface{}{schemas[0].ID, newIdent.Name, schemas[0].Name}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -5386,7 +5386,7 @@ func (d *ddl) RenameTables(ctx sessionctx.Context, oldIdents, newIdents []ast.Id Args: []interface{}{oldSchemaIDs, newSchemaIDs, tableNames, tableIDs, oldSchemaNames}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -5575,7 +5575,7 @@ func (d *ddl) CreatePrimaryKey(ctx sessionctx.Context, ti ast.Ident, indexName m Priority: ctx.GetSessionVars().DDLReorgPriority, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -5765,7 +5765,7 @@ func (d *ddl) CreateIndex(ctx sessionctx.Context, ti ast.Ident, keyType ast.Inde Priority: ctx.GetSessionVars().DDLReorgPriority, } - err = d.doDDLJob(ctx, 
job) + err = d.DoDDLJob(ctx, job) // key exists, but if_not_exists flags is true, so we ignore this error. if dbterror.ErrDupKeyName.Equal(err) && ifNotExists { ctx.GetSessionVars().StmtCtx.AppendNote(err) @@ -5885,7 +5885,7 @@ func (d *ddl) CreateForeignKey(ctx sessionctx.Context, ti ast.Ident, fkName mode Args: []interface{}{fkInfo}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -5911,7 +5911,7 @@ func (d *ddl) DropForeignKey(ctx sessionctx.Context, ti ast.Ident, fkName model. Args: []interface{}{fkName}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -5966,7 +5966,7 @@ func (d *ddl) DropIndex(ctx sessionctx.Context, ti ast.Ident, indexName model.CI Args: []interface{}{indexName}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) // index not exists, but if_exists flags is true, so we ignore this error. if dbterror.ErrCantDropFieldOrKey.Equal(err) && ifExists { ctx.GetSessionVars().StmtCtx.AppendNote(err) @@ -6019,7 +6019,7 @@ func (d *ddl) DropIndexes(ctx sessionctx.Context, ti ast.Ident, specs []*ast.Alt Args: []interface{}{indexNames, ifExists}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -6220,7 +6220,7 @@ func (d *ddl) LockTables(ctx sessionctx.Context, stmt *ast.LockTablesStmt) error } // AddTableLock here is avoiding this job was executed successfully but the session was killed before return. 
ctx.AddTableLock(lockTables) - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) if err == nil { ctx.ReleaseTableLocks(unlockTables) ctx.AddTableLock(lockTables) @@ -6249,7 +6249,7 @@ func (d *ddl) UnlockTables(ctx sessionctx.Context, unlockTables []model.TableLoc Args: []interface{}{arg}, } - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) if err == nil { ctx.ReleaseAllTableLocks() } @@ -6279,7 +6279,7 @@ func (d *ddl) CleanDeadTableLock(unlockTables []model.TableLockTpInfo, se model. return err } defer d.sessPool.put(ctx) - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -6340,7 +6340,7 @@ func (d *ddl) CleanupTableLock(ctx sessionctx.Context, tables []*ast.TableName) BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{arg}, } - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) if err == nil { ctx.ReleaseTableLocks(cleanupTables) } @@ -6425,7 +6425,7 @@ func (d *ddl) RepairTable(ctx sessionctx.Context, table *ast.TableName, createSt BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{newTableInfo}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) if err == nil { // Remove the old TableInfo from repairInfo before domain reload. 
domainutil.RepairInfo.RemoveFromRepairInfo(oldDBInfo.Name.L, oldTableInfo.Name.L) @@ -6504,7 +6504,7 @@ func (d *ddl) AlterSequence(ctx sessionctx.Context, stmt *ast.AlterSequenceStmt) Args: []interface{}{ident, stmt.SeqOptions}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -6532,7 +6532,7 @@ func (d *ddl) DropSequence(ctx sessionctx.Context, ti ast.Ident, ifExists bool) BinlogInfo: &model.HistoryInfo{}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -6565,7 +6565,7 @@ func (d *ddl) AlterIndexVisibility(ctx sessionctx.Context, ident ast.Ident, inde Args: []interface{}{indexName, invisible}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -6594,7 +6594,7 @@ func (d *ddl) AlterTableAttributes(ctx sessionctx.Context, ident ast.Ident, spec Args: []interface{}{rule}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) if err != nil { return errors.Trace(err) } @@ -6635,7 +6635,7 @@ func (d *ddl) AlterTablePartitionAttributes(ctx sessionctx.Context, ident ast.Id Args: []interface{}{partitionID, rule}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) if err != nil { return errors.Trace(err) } @@ -6703,7 +6703,7 @@ func (d *ddl) AlterTablePartitionPlacement(ctx sessionctx.Context, tableIdent as Args: []interface{}{partitionID, policyRefInfo}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -6882,7 +6882,7 @@ func (d *ddl) DropPlacementPolicy(ctx sessionctx.Context, stmt *ast.DropPlacemen BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{policyName}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -6916,7 +6916,7 @@ func (d *ddl) AlterPlacementPolicy(ctx sessionctx.Context, stmt 
*ast.AlterPlacem BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{newPolicyInfo}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(err) return errors.Trace(err) } @@ -6970,7 +6970,7 @@ func (d *ddl) AlterTableCache(ctx sessionctx.Context, ti ast.Ident) (err error) Args: []interface{}{}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) return d.callHookOnChanged(err) } @@ -7026,7 +7026,7 @@ func (d *ddl) AlterTableNoCache(ctx sessionctx.Context, ti ast.Ident) (err error Args: []interface{}{}, } - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) return d.callHookOnChanged(err) } diff --git a/ddl/ddl_error_test.go b/ddl/ddl_error_test.go new file mode 100644 index 0000000000000..b771ea273014d --- /dev/null +++ b/ddl/ddl_error_test.go @@ -0,0 +1,188 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package ddl_test + +import ( + "testing" + + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/errno" + "github.com/pingcap/tidb/testkit" + "github.com/stretchr/testify/require" +) + +// This test file contains tests that test the expected or unexpected DDL error. +// For expected error, we use SQL to check it. +// For unexpected error, we mock a SQL job to check it. 
+ +func TestTableError(t *testing.T) { + store, clean := testkit.CreateMockStoreWithSchemaLease(t, testLease) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + + tk.MustExec("create table testDrop(a int)") + // Schema ID is wrong, so dropping table is failed. + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockModifyJobSchemaId", `return(-1)`)) + _, err := tk.Exec("drop table testDrop") + require.Error(t, err) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockModifyJobSchemaId")) + + // Table ID is wrong, so dropping table is failed. + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/MockModifyJobTableId", `return(-1)`)) + _, err = tk.Exec("drop table testDrop") + require.Error(t, err) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/MockModifyJobTableId")) + + // Args is wrong, so creating table is failed. + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/MockModifyJobArg", `return(true)`)) + _, err = tk.Exec("create table test.t1(a int)") + require.Error(t, err) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/MockModifyJobArg")) + + // Table exists, so creating table is failed. + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockModifyJobSchemaId", `return(-1)`)) + _, err = tk.Exec("create table test.t1(a int)") + require.Error(t, err) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockModifyJobSchemaId")) + // Table exists, so creating table is failed. + tk.MustExec("create table test.t2(a int)") + tk.MustGetErrCode("create table test.t2(a int)", errno.ErrTableExists) +} + +func TestViewError(t *testing.T) { + store, clean := testkit.CreateMockStoreWithSchemaLease(t, testLease) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t (a int)") + + // Args is wrong, so creating view is failed. 
+ require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/MockModifyJobArg", `return(true)`)) + _, err := tk.Exec("create view v as select * from t") + require.Error(t, err) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/MockModifyJobArg")) +} + +func TestForeignKeyError(t *testing.T) { + store, clean := testkit.CreateMockStoreWithSchemaLease(t, testLease) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t (a int)") + tk.MustExec("create table t1 (a int, FOREIGN KEY fk(a) REFERENCES t(a))") + + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockModifyJobSchemaId", `return(-1)`)) + _, err := tk.Exec("alter table t1 add foreign key idx(a) REFERENCES t(a)") + require.Error(t, err) + _, err = tk.Exec("alter table t1 drop index fk") + require.Error(t, err) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockModifyJobSchemaId")) +} + +func TestIndexError(t *testing.T) { + store, clean := testkit.CreateMockStoreWithSchemaLease(t, testLease) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t (a int)") + tk.MustExec("alter table t add index a(a)") + + // Schema ID is wrong. 
+ require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockModifyJobSchemaId", `return(-1)`)) + _, err := tk.Exec("alter table t add index idx(a)") + require.Error(t, err) + _, err = tk.Exec("alter table t1 drop a") + require.Error(t, err) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockModifyJobSchemaId")) + + // for adding index + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/MockModifyJobArg", `return(true)`)) + _, err = tk.Exec("alter table t add index idx(a)") + require.Error(t, err) + _, err = tk.Exec("alter table t drop index a") + require.Error(t, err) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/MockModifyJobArg")) +} + +func TestColumnError(t *testing.T) { + store, clean := testkit.CreateMockStoreWithSchemaLease(t, testLease) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t (a int, aa int, ab int)") + tk.MustExec("alter table t add index a(a)") + + // Invalid schema ID. + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockModifyJobSchemaId", `return(-1)`)) + _, err := tk.Exec("alter table t add column ta int") + require.Error(t, err) + _, err = tk.Exec("alter table t drop column aa") + require.Error(t, err) + _, err = tk.Exec("alter table t drop column aa") + require.Error(t, err) + _, err = tk.Exec("alter table t add column ta int, add column tb int") + require.Error(t, err) + _, err = tk.Exec("alter table t drop column aa, drop column ab") + require.Error(t, err) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockModifyJobSchemaId")) + + // Invalid table ID. 
+ require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/MockModifyJobTableId", `return(-1)`)) + _, err = tk.Exec("alter table t add column ta int") + require.Error(t, err) + _, err = tk.Exec("alter table t drop column aa") + require.Error(t, err) + _, err = tk.Exec("alter table t drop column aa") + require.Error(t, err) + _, err = tk.Exec("alter table t add column ta int, add column tb int") + require.Error(t, err) + _, err = tk.Exec("alter table t drop column aa, drop column ab") + require.Error(t, err) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/MockModifyJobTableId")) + + // Invalid argument. + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/MockModifyJobArg", `return(true)`)) + _, err = tk.Exec("alter table t add column ta int") + require.Error(t, err) + _, err = tk.Exec("alter table t drop column aa") + require.Error(t, err) + _, err = tk.Exec("alter table t drop column aa") + require.Error(t, err) + _, err = tk.Exec("alter table t add column ta int, add column tb int") + require.Error(t, err) + _, err = tk.Exec("alter table t drop column aa, drop column ab") + require.Error(t, err) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/MockModifyJobArg")) + + tk.MustGetErrCode("alter table t add column c int after c5", errno.ErrBadField) + tk.MustGetErrCode("alter table t drop column c5", errno.ErrCantDropFieldOrKey) + tk.MustGetErrCode("alter table t add column c int after c5, add column d int", errno.ErrBadField) + tk.MustGetErrCode("alter table t drop column ab, drop column c5", errno.ErrCantDropFieldOrKey) +} + +func TestCreateDatabaseError(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockModifyJobSchemaId", `return(-1)`)) + tk.MustExec("create database db1;") + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockModifyJobSchemaId")) +} 
diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go index fe6791d4a8dd3..6389137b30109 100644 --- a/ddl/ddl_test.go +++ b/ddl/ddl_test.go @@ -22,12 +22,15 @@ import ( "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" + "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/parser/charset" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/dbterror" "github.com/pingcap/tidb/util/mock" "github.com/stretchr/testify/require" ) @@ -149,7 +152,7 @@ func testCreatePrimaryKey(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo * job := buildCreateIdxJob(dbInfo, tblInfo, true, "primary", colName) job.Type = model.ActionAddPrimaryKey ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) require.NoError(t, err) v := getSchemaVer(t, ctx) checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) @@ -159,7 +162,7 @@ func testCreatePrimaryKey(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo * func testCreateIndex(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, unique bool, indexName string, colName string) *model.Job { job := buildCreateIdxJob(dbInfo, tblInfo, unique, indexName, colName) ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) require.NoError(t, err) v := getSchemaVer(t, ctx) checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) @@ -174,8 +177,9 @@ func testAddColumn(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.D Args: args, BinlogInfo: &model.HistoryInfo{}, } + ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) require.NoError(t, err) v := getSchemaVer(t, ctx) 
checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) @@ -190,8 +194,9 @@ func testAddColumns(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model. Args: args, BinlogInfo: &model.HistoryInfo{}, } + ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) require.NoError(t, err) v := getSchemaVer(t, ctx) checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) @@ -214,24 +219,15 @@ func buildDropIdxJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, indexName s func testDropIndex(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, indexName string) *model.Job { job := buildDropIdxJob(dbInfo, tblInfo, indexName) + ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) require.NoError(t, err) v := getSchemaVer(t, ctx) checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) return job } -func buildRebaseAutoIDJobJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, newBaseID int64) *model.Job { - return &model.Job{ - SchemaID: dbInfo.ID, - TableID: tblInfo.ID, - Type: model.ActionRebaseAutoID, - BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{newBaseID}, - } -} - func TestGetIntervalFromPolicy(t *testing.T) { policy := []time.Duration{ 1 * time.Second, @@ -258,3 +254,257 @@ func TestGetIntervalFromPolicy(t *testing.T) { require.Equal(t, val, 2*time.Second) require.False(t, changed) } + +func colDefStrToFieldType(t *testing.T, str string, ctx sessionctx.Context) *types.FieldType { + sqlA := "alter table t modify column a " + str + stmt, err := parser.New().ParseOneStmt(sqlA, "", "") + require.NoError(t, err) + colDef := stmt.(*ast.AlterTableStmt).Specs[0].NewColumns[0] + chs, coll := charset.GetDefaultCharsetAndCollate() + col, _, err := buildColumnAndConstraint(ctx, 0, colDef, nil, chs, coll) + require.NoError(t, err) + return &col.FieldType +} + +func 
TestModifyColumn(t *testing.T) { + ctx := mock.NewContext() + tests := []struct { + origin string + to string + err error + }{ + {"int", "bigint", nil}, + {"int", "int unsigned", nil}, + {"varchar(10)", "text", nil}, + {"varbinary(10)", "blob", nil}, + {"text", "blob", dbterror.ErrUnsupportedModifyCharset.GenWithStackByArgs("charset from utf8mb4 to binary")}, + {"varchar(10)", "varchar(8)", nil}, + {"varchar(10)", "varchar(11)", nil}, + {"varchar(10) character set utf8 collate utf8_bin", "varchar(10) character set utf8", nil}, + {"decimal(2,1)", "decimal(3,2)", nil}, + {"decimal(2,1)", "decimal(2,2)", nil}, + {"decimal(2,1)", "decimal(2,1)", nil}, + {"decimal(2,1)", "int", nil}, + {"decimal", "int", nil}, + {"decimal(2,1)", "bigint", nil}, + {"int", "varchar(10) character set gbk", dbterror.ErrUnsupportedModifyCharset.GenWithStackByArgs("charset from binary to gbk")}, + {"varchar(10) character set gbk", "int", dbterror.ErrUnsupportedModifyCharset.GenWithStackByArgs("charset from gbk to binary")}, + {"varchar(10) character set gbk", "varchar(10) character set utf8", dbterror.ErrUnsupportedModifyCharset.GenWithStackByArgs("charset from gbk to utf8")}, + {"varchar(10) character set gbk", "char(10) character set utf8", dbterror.ErrUnsupportedModifyCharset.GenWithStackByArgs("charset from gbk to utf8")}, + {"varchar(10) character set utf8", "char(10) character set gbk", dbterror.ErrUnsupportedModifyCharset.GenWithStackByArgs("charset from utf8 to gbk")}, + {"varchar(10) character set utf8", "varchar(10) character set gbk", dbterror.ErrUnsupportedModifyCharset.GenWithStackByArgs("charset from utf8 to gbk")}, + {"varchar(10) character set gbk", "varchar(255) character set gbk", nil}, + } + for _, tt := range tests { + ftA := colDefStrToFieldType(t, tt.origin, ctx) + ftB := colDefStrToFieldType(t, tt.to, ctx) + err := checkModifyTypes(ctx, ftA, ftB, false) + if err == nil { + require.NoErrorf(t, tt.err, "origin:%v, to:%v", tt.origin, tt.to) + } else { + 
require.EqualError(t, err, tt.err.Error()) + } + } +} + +func TestFieldCase(t *testing.T) { + var fields = []string{"field", "Field"} + colObjects := make([]*model.ColumnInfo, len(fields)) + for i, name := range fields { + colObjects[i] = &model.ColumnInfo{ + Name: model.NewCIStr(name), + } + } + err := checkDuplicateColumn(colObjects) + require.EqualError(t, err, infoschema.ErrColumnExists.GenWithStackByArgs("Field").Error()) +} + +func TestIgnorableSpec(t *testing.T) { + specs := []ast.AlterTableType{ + ast.AlterTableOption, + ast.AlterTableAddColumns, + ast.AlterTableAddConstraint, + ast.AlterTableDropColumn, + ast.AlterTableDropPrimaryKey, + ast.AlterTableDropIndex, + ast.AlterTableDropForeignKey, + ast.AlterTableModifyColumn, + ast.AlterTableChangeColumn, + ast.AlterTableRenameTable, + ast.AlterTableAlterColumn, + } + for _, spec := range specs { + require.False(t, isIgnorableSpec(spec)) + } + + ignorableSpecs := []ast.AlterTableType{ + ast.AlterTableLock, + ast.AlterTableAlgorithm, + } + for _, spec := range ignorableSpecs { + require.True(t, isIgnorableSpec(spec)) + } +} + +func TestBuildJobDependence(t *testing.T) { + store := createMockStore(t) + defer func() { + require.NoError(t, store.Close()) + }() + // Add some non-add-index jobs. 
+ job1 := &model.Job{ID: 1, TableID: 1, Type: model.ActionAddColumn} + job2 := &model.Job{ID: 2, TableID: 1, Type: model.ActionCreateTable} + job3 := &model.Job{ID: 3, TableID: 2, Type: model.ActionDropColumn} + job6 := &model.Job{ID: 6, TableID: 1, Type: model.ActionDropTable} + job7 := &model.Job{ID: 7, TableID: 2, Type: model.ActionModifyColumn} + job9 := &model.Job{ID: 9, SchemaID: 111, Type: model.ActionDropSchema} + job11 := &model.Job{ID: 11, TableID: 2, Type: model.ActionRenameTable, Args: []interface{}{int64(111), "old db name"}} + err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + m := meta.NewMeta(txn) + require.NoError(t, m.EnQueueDDLJob(job1)) + require.NoError(t, m.EnQueueDDLJob(job2)) + require.NoError(t, m.EnQueueDDLJob(job3)) + require.NoError(t, m.EnQueueDDLJob(job6)) + require.NoError(t, m.EnQueueDDLJob(job7)) + require.NoError(t, m.EnQueueDDLJob(job9)) + require.NoError(t, m.EnQueueDDLJob(job11)) + return nil + }) + require.NoError(t, err) + job4 := &model.Job{ID: 4, TableID: 1, Type: model.ActionAddIndex} + err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + m := meta.NewMeta(txn) + err := buildJobDependence(m, job4) + require.NoError(t, err) + require.Equal(t, job4.DependencyID, int64(2)) + return nil + }) + require.NoError(t, err) + job5 := &model.Job{ID: 5, TableID: 2, Type: model.ActionAddIndex} + err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + m := meta.NewMeta(txn) + err := buildJobDependence(m, job5) + require.NoError(t, err) + require.Equal(t, job5.DependencyID, int64(3)) + return nil + }) + require.NoError(t, err) + job8 := &model.Job{ID: 8, TableID: 3, Type: model.ActionAddIndex} + err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + m := meta.NewMeta(txn) + err := buildJobDependence(m, 
job8) + require.NoError(t, err) + require.Equal(t, job8.DependencyID, int64(0)) + return nil + }) + require.NoError(t, err) + job10 := &model.Job{ID: 10, SchemaID: 111, TableID: 3, Type: model.ActionAddIndex} + err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + m := meta.NewMeta(txn) + err := buildJobDependence(m, job10) + require.NoError(t, err) + require.Equal(t, job10.DependencyID, int64(9)) + return nil + }) + require.NoError(t, err) + job12 := &model.Job{ID: 12, SchemaID: 112, TableID: 2, Type: model.ActionAddIndex} + err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + m := meta.NewMeta(txn) + err := buildJobDependence(m, job12) + require.NoError(t, err) + require.Equal(t, job12.DependencyID, int64(11)) + return nil + }) + require.NoError(t, err) +} + +func TestNotifyDDLJob(t *testing.T) { + store := createMockStore(t) + defer func() { + require.NoError(t, store.Close()) + }() + + getFirstNotificationAfterStartDDL := func(d *ddl) { + select { + case <-d.workers[addIdxWorker].ddlJobCh: + default: + // The notification may be received by the worker. + } + select { + case <-d.workers[generalWorker].ddlJobCh: + default: + // The notification may be received by the worker. + } + } + + d, err := testNewDDLAndStart( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + require.NoError(t, err) + defer func() { + require.NoError(t, d.Stop()) + }() + getFirstNotificationAfterStartDDL(d) + // Ensure that the notification is not handled in workers `start` function. + d.cancel() + for _, worker := range d.workers { + worker.close() + } + + job := &model.Job{ + SchemaID: 1, + TableID: 2, + Type: model.ActionCreateTable, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{}, + } + // Test the notification mechanism of the owner and the server receiving the DDL request on the same TiDB. + // This DDL request is a general DDL job. 
+ d.asyncNotifyWorker(job) + select { + case <-d.workers[generalWorker].ddlJobCh: + default: + require.FailNow(t, "do not get the general job notification") + } + // Test the notification mechanism of the owner and the server receiving the DDL request on the same TiDB. + // This DDL request is a add index DDL job. + job.Type = model.ActionAddIndex + d.asyncNotifyWorker(job) + select { + case <-d.workers[addIdxWorker].ddlJobCh: + default: + require.FailNow(t, "do not get the add index job notification") + } + + // Test the notification mechanism that the owner and the server receiving the DDL request are not on the same TiDB. + // And the etcd client is nil. + d1, err := testNewDDLAndStart( + context.Background(), + WithStore(store), + WithLease(testLease), + ) + require.NoError(t, err) + defer func() { + require.NoError(t, d1.Stop()) + }() + getFirstNotificationAfterStartDDL(d1) + // Ensure that the notification is not handled by worker's "start". + d1.cancel() + for _, worker := range d1.workers { + worker.close() + } + d1.ownerManager.RetireOwner() + d1.asyncNotifyWorker(job) + job.Type = model.ActionCreateTable + d1.asyncNotifyWorker(job) + testCheckOwner(t, d1, false) + select { + case <-d1.workers[addIdxWorker].ddlJobCh: + require.FailNow(t, "should not get the add index job notification") + case <-d1.workers[generalWorker].ddlJobCh: + require.FailNow(t, "should not get the general job notification") + default: + } +} diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index 0c9ff9a2befad..10c2d22d8105f 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -303,6 +303,13 @@ func (d *ddl) addBatchDDLJobs(tasks []*limitJobTask) { if mayNeedReorg(job) { jobListKey = meta.AddIndexJobListKey } + failpoint.Inject("MockModifyJobArg", func(val failpoint.Value) { + if val.(bool) { + if len(job.Args) > 0 { + job.Args[0] = 1 + } + } + }) if err = t.EnQueueDDLJob(job, jobListKey); err != nil { return errors.Trace(err) } @@ -342,9 +349,22 @@ func (d *ddl) 
getHistoryDDLJob(id int64) (*model.Job, error) { return job, errors.Trace(err) } +func injectFailPointForGetJob(job *model.Job) { + if job == nil { + return + } + failpoint.Inject("mockModifyJobSchemaId", func(val failpoint.Value) { + job.SchemaID = int64(val.(int)) + }) + failpoint.Inject("MockModifyJobTableId", func(val failpoint.Value) { + job.TableID = int64(val.(int)) + }) +} + // getFirstDDLJob gets the first DDL job form DDL queue. func (w *worker) getFirstDDLJob(t *meta.Meta) (*model.Job, error) { job, err := t.GetDDLJobByIdx(0) + injectFailPointForGetJob(job) return job, errors.Trace(err) } diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go index 8370345e12177..f233c7c8f6635 100644 --- a/ddl/ddl_worker_test.go +++ b/ddl/ddl_worker_test.go @@ -1,330 +1,52 @@ -// Copyright 2015 PingCAP, Inc. +//// Copyright 2015 PingCAP, Inc. +//// +//// Licensed under the Apache License, Version 2.0 (the "License"); +//// you may not use this file except in compliance with the License. +//// You may obtain a copy of the License at +//// +//// http://www.apache.org/licenses/LICENSE-2.0 +//// +//// Unless required by applicable law or agreed to in writing, software +//// distributed under the License is distributed on an "AS IS" BASIS, +//// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//// See the License for the specific language governing permissions and +//// limitations under the License. // -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package ddl +package ddl_test import ( "context" + "strconv" "sync" "testing" "time" "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" - "github.com/pingcap/tidb/parser/ast" - "github.com/pingcap/tidb/parser/charset" "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/parser/mysql" - "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/sessionctx" - "github.com/pingcap/tidb/table" - "github.com/pingcap/tidb/types" - "github.com/pingcap/tidb/util/admin" - "github.com/pingcap/tidb/util/mock" - "github.com/pingcap/tidb/util/sqlexec" + "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/util" "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" ) -type testDDLSerialSuiteToVerify struct { - suite.Suite -} - -func TestDDLSerialSuite(t *testing.T) { - suite.Run(t, new(testDDLSerialSuiteToVerify)) -} - const testLease = 5 * time.Millisecond -func (s *testDDLSerialSuiteToVerify) SetupSuite() { - SetWaitTimeWhenErrorOccurred(time.Microsecond) -} - func TestCheckOwner(t *testing.T) { - store := createMockStore(t) - defer func() { - require.NoError(t, store.Close()) - }() + _, dom, clean := testkit.CreateMockStoreAndDomainWithSchemaLease(t, testLease) + defer clean() - d1, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(t, err) - defer func() { - require.NoError(t, d1.Stop()) - }() time.Sleep(testLease) - testCheckOwner(t, d1, true) - - require.Equal(t, d1.GetLease(), testLease) -} - -func TestNotifyDDLJob(t *testing.T) { - store := createMockStore(t) - defer func() { - require.NoError(t, store.Close()) - }() - - getFirstNotificationAfterStartDDL := func(d *ddl) { - select { - case <-d.workers[addIdxWorker].ddlJobCh: - default: - // The 
notification may be received by the worker. - } - select { - case <-d.workers[generalWorker].ddlJobCh: - default: - // The notification may be received by the worker. - } - } - - d, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(t, err) - defer func() { - require.NoError(t, d.Stop()) - }() - getFirstNotificationAfterStartDDL(d) - // Ensure that the notification is not handled in workers `start` function. - d.cancel() - for _, worker := range d.workers { - worker.close() - } - - job := &model.Job{ - SchemaID: 1, - TableID: 2, - Type: model.ActionCreateTable, - BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{}, - } - // Test the notification mechanism of the owner and the server receiving the DDL request on the same TiDB. - // This DDL request is a general DDL job. - d.asyncNotifyWorker(job) - select { - case <-d.workers[generalWorker].ddlJobCh: - default: - require.FailNow(t, "do not get the general job notification") - } - // Test the notification mechanism of the owner and the server receiving the DDL request on the same TiDB. - // This DDL request is a add index DDL job. - job.Type = model.ActionAddIndex - d.asyncNotifyWorker(job) - select { - case <-d.workers[addIdxWorker].ddlJobCh: - default: - require.FailNow(t, "do not get the add index job notification") - } - - // Test the notification mechanism that the owner and the server receiving the DDL request are not on the same TiDB. - // And the etcd client is nil. - d1, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(t, err) - defer func() { - require.NoError(t, d1.Stop()) - }() - getFirstNotificationAfterStartDDL(d1) - // Ensure that the notification is not handled by worker's "start". 
- d1.cancel() - for _, worker := range d1.workers { - worker.close() - } - d1.ownerManager.RetireOwner() - d1.asyncNotifyWorker(job) - job.Type = model.ActionCreateTable - d1.asyncNotifyWorker(job) - testCheckOwner(t, d1, false) - select { - case <-d1.workers[addIdxWorker].ddlJobCh: - require.FailNow(t, "should not get the add index job notification") - case <-d1.workers[generalWorker].ddlJobCh: - require.FailNow(t, "should not get the general job notification") - default: - } -} - -// TestRunWorker tests no job is handled when the value of RunWorker is false. -func (s *testDDLSerialSuiteToVerify) TestRunWorker() { - store := createMockStore(s.T()) - defer func() { - require.NoError(s.T(), store.Close()) - }() - - RunWorker = false - d, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(s.T(), err) - testCheckOwner(s.T(), d, false) - defer func() { - require.NoError(s.T(), d.Stop()) - }() - - // Make sure the DDL worker is nil. - worker := d.generalWorker() - require.Nil(s.T(), worker) - // Make sure the DDL job can be done and exit that goroutine. 
- RunWorker = true - d1, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(s.T(), err) - testCheckOwner(s.T(), d1, true) - defer func() { - err := d1.Stop() - require.NoError(s.T(), err) - }() - worker = d1.generalWorker() - require.NotNil(s.T(), worker) -} - -func TestSchemaError(t *testing.T) { - store := createMockStore(t) - defer func() { - require.NoError(t, store.Close()) - }() - - d, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(t, err) - defer func() { - require.NoError(t, d.Stop()) - }() - ctx := testNewContext(d) - - doDDLJobErr(t, 1, 0, model.ActionCreateSchema, []interface{}{1}, ctx, d) -} - -func TestTableError(t *testing.T) { - store := createMockStore(t) - defer func() { - require.NoError(t, store.Close()) - }() - - d, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(t, err) - defer func() { - require.NoError(t, d.Stop()) - }() - ctx := testNewContext(d) - - // Schema ID is wrong, so dropping table is failed. - doDDLJobErr(t, -1, 1, model.ActionDropTable, nil, ctx, d) - // Table ID is wrong, so dropping table is failed. - dbInfo, err := testSchemaInfo(d, "test_ddl") - require.NoError(t, err) - testCreateSchema(t, testNewContext(d), d, dbInfo) - job := doDDLJobErr(t, dbInfo.ID, -1, model.ActionDropTable, nil, ctx, d) - - // Table ID or schema ID is wrong, so getting table is failed. 
- tblInfo, err := testTableInfo(d, "t", 3) - require.NoError(t, err) - testCreateTable(t, ctx, d, dbInfo, tblInfo) - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { - job.SchemaID = -1 - job.TableID = -1 - m := meta.NewMeta(txn) - _, err1 := getTableInfoAndCancelFaultJob(m, job, job.SchemaID) - require.Error(t, err1) - job.SchemaID = dbInfo.ID - _, err1 = getTableInfoAndCancelFaultJob(m, job, job.SchemaID) - require.Error(t, err1) - return nil - }) - require.NoError(t, err) - - // Args is wrong, so creating table is failed. - doDDLJobErr(t, 1, 1, model.ActionCreateTable, []interface{}{1}, ctx, d) - // Schema ID is wrong, so creating table is failed. - doDDLJobErr(t, -1, tblInfo.ID, model.ActionCreateTable, []interface{}{tblInfo}, ctx, d) - // Table exists, so creating table is failed. - tblInfo.ID++ - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionCreateTable, []interface{}{tblInfo}, ctx, d) - -} - -func TestViewError(t *testing.T) { - store := createMockStore(t) - defer func() { - require.NoError(t, store.Close()) - }() - - d, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(t, err) - defer func() { - require.NoError(t, d.Stop()) - }() - ctx := testNewContext(d) - dbInfo, err := testSchemaInfo(d, "test_ddl") - require.NoError(t, err) - testCreateSchema(t, testNewContext(d), d, dbInfo) - - // Table ID or schema ID is wrong, so getting table is failed. - tblInfo := testViewInfo(t, d, "t", 3) - testCreateView(t, ctx, d, dbInfo, tblInfo) - - // Args is wrong, so creating view is failed. - doDDLJobErr(t, 1, 1, model.ActionCreateView, []interface{}{1}, ctx, d) - // Schema ID is wrong and orReplace is false, so creating view is failed. - doDDLJobErr(t, -1, tblInfo.ID, model.ActionCreateView, []interface{}{tblInfo, false}, ctx, d) - // View exists and orReplace is false, so creating view is failed. 
- tblInfo.ID++ - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionCreateView, []interface{}{tblInfo, false}, ctx, d) - + require.Equal(t, dom.DDL().OwnerManager().IsOwner(), true) + require.Equal(t, dom.DDL().GetLease(), testLease) } func TestInvalidDDLJob(t *testing.T) { - store := createMockStore(t) - defer func() { - require.NoError(t, store.Close()) - }() - d, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(t, err) - defer func() { - require.NoError(t, d.Stop()) - }() - ctx := testNewContext(d) + store, dom, clean := testkit.CreateMockStoreAndDomainWithSchemaLease(t, testLease) + defer clean() job := &model.Job{ SchemaID: 0, @@ -333,1130 +55,34 @@ func TestInvalidDDLJob(t *testing.T) { BinlogInfo: &model.HistoryInfo{}, Args: []interface{}{}, } + ctx := testNewContext(store) ctx.SetValue(sessionctx.QueryString, "skip") - err = d.doDDLJob(ctx, job) + err := dom.DDL().DoDDLJob(ctx, job) require.Equal(t, err.Error(), "[ddl:8204]invalid ddl job type: none") } -func TestForeignKeyError(t *testing.T) { - store := createMockStore(t) - defer func() { - require.NoError(t, store.Close()) - }() - - d, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(t, err) - defer func() { - require.NoError(t, d.Stop()) - }() - ctx := testNewContext(d) - - doDDLJobErr(t, -1, 1, model.ActionAddForeignKey, nil, ctx, d) - doDDLJobErr(t, -1, 1, model.ActionDropForeignKey, nil, ctx, d) - - dbInfo, err := testSchemaInfo(d, "test_ddl") - require.NoError(t, err) - tblInfo, err := testTableInfo(d, "t", 3) - require.NoError(t, err) - testCreateSchema(t, ctx, d, dbInfo) - testCreateTable(t, ctx, d, dbInfo, tblInfo) - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionDropForeignKey, []interface{}{model.NewCIStr("c1_foreign_key")}, ctx, d) -} - -func TestIndexError(t *testing.T) { - store := createMockStore(t) - defer func() { - require.NoError(t, store.Close()) - 
}() - - d, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(t, err) - defer func() { - require.NoError(t, d.Stop()) - }() - ctx := testNewContext(d) - - // Schema ID is wrong. - doDDLJobErr(t, -1, 1, model.ActionAddIndex, nil, ctx, d) - doDDLJobErr(t, -1, 1, model.ActionDropIndex, nil, ctx, d) - - dbInfo, err := testSchemaInfo(d, "test_ddl") - require.NoError(t, err) - tblInfo, err := testTableInfo(d, "t", 3) - require.NoError(t, err) - testCreateSchema(t, ctx, d, dbInfo) - testCreateTable(t, ctx, d, dbInfo, tblInfo) - - // for adding index - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionAddIndex, []interface{}{1}, ctx, d) - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionAddIndex, - []interface{}{false, model.NewCIStr("t"), 1, - []*ast.IndexPartSpecification{{Column: &ast.ColumnName{Name: model.NewCIStr("c")}, Length: 256}}}, ctx, d) - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionAddIndex, - []interface{}{false, model.NewCIStr("c1_index"), 1, - []*ast.IndexPartSpecification{{Column: &ast.ColumnName{Name: model.NewCIStr("c")}, Length: 256}}}, ctx, d) - testCreateIndex(t, ctx, d, dbInfo, tblInfo, false, "c1_index", "c1") - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionAddIndex, - []interface{}{false, model.NewCIStr("c1_index"), 1, - []*ast.IndexPartSpecification{{Column: &ast.ColumnName{Name: model.NewCIStr("c1")}, Length: 256}}}, ctx, d) - - // for dropping index - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionDropIndex, []interface{}{1}, ctx, d) - testDropIndex(t, ctx, d, dbInfo, tblInfo, "c1_index") - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionDropIndex, []interface{}{model.NewCIStr("c1_index")}, ctx, d) -} - -func TestColumnError(t *testing.T) { - store := createMockStore(t) - defer func() { - require.NoError(t, store.Close()) - }() - d, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(t, err) - defer 
func() { - require.NoError(t, d.Stop()) - }() - ctx := testNewContext(d) - - dbInfo, err := testSchemaInfo(d, "test_ddl") - require.NoError(t, err) - tblInfo, err := testTableInfo(d, "t", 3) - require.NoError(t, err) - testCreateSchema(t, ctx, d, dbInfo) - testCreateTable(t, ctx, d, dbInfo, tblInfo) - col := &model.ColumnInfo{ - Name: model.NewCIStr("c4"), - Offset: len(tblInfo.Columns), - DefaultValue: 0, - } - col.ID = allocateColumnID(tblInfo) - col.FieldType = *types.NewFieldType(mysql.TypeLong) - pos := &ast.ColumnPosition{Tp: ast.ColumnPositionAfter, RelativeColumn: &ast.ColumnName{Name: model.NewCIStr("c5")}} +func TestAddBatchJobError(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomainWithSchemaLease(t, testLease) + defer clean() + ctx := testNewContext(store) - cols := &[]*model.ColumnInfo{col} - positions := &[]*ast.ColumnPosition{pos} - - // for adding column - doDDLJobErr(t, -1, tblInfo.ID, model.ActionAddColumn, []interface{}{col, pos, 0}, ctx, d) - doDDLJobErr(t, dbInfo.ID, -1, model.ActionAddColumn, []interface{}{col, pos, 0}, ctx, d) - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionAddColumn, []interface{}{0}, ctx, d) - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionAddColumn, []interface{}{col, pos, 0}, ctx, d) - - // for dropping column - doDDLJobErr(t, -1, tblInfo.ID, model.ActionDropColumn, []interface{}{col, pos, 0}, ctx, d) - doDDLJobErr(t, dbInfo.ID, -1, model.ActionDropColumn, []interface{}{col, pos, 0}, ctx, d) - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionDropColumn, []interface{}{0}, ctx, d) - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionDropColumn, []interface{}{model.NewCIStr("c5")}, ctx, d) - - // for adding columns - doDDLJobErr(t, -1, tblInfo.ID, model.ActionAddColumns, []interface{}{cols, positions, 0}, ctx, d) - doDDLJobErr(t, dbInfo.ID, -1, model.ActionAddColumns, []interface{}{cols, positions, 0}, ctx, d) - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionAddColumns, []interface{}{0}, ctx, d) 
- doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionAddColumns, []interface{}{cols, positions, 0}, ctx, d) - - // for dropping columns - doDDLJobErr(t, -1, tblInfo.ID, model.ActionDropColumns, []interface{}{col, pos, 0}, ctx, d) - doDDLJobErr(t, dbInfo.ID, -1, model.ActionDropColumns, []interface{}{col, pos, 0}, ctx, d) - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionDropColumns, []interface{}{0}, ctx, d) - doDDLJobErr(t, dbInfo.ID, tblInfo.ID, model.ActionDropColumns, []interface{}{[]model.CIStr{model.NewCIStr("c5"), model.NewCIStr("c6")}, make([]bool, 2)}, ctx, d) -} - -func (s *testDDLSerialSuiteToVerify) TestAddBatchJobError() { - store := createMockStore(s.T()) - defer func() { - require.NoError(s.T(), store.Close()) - }() - d, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(s.T(), err) - defer func() { - require.NoError(s.T(), d.Stop()) - }() - ctx := testNewContext(d) - require.Nil(s.T(), failpoint.Enable("github.com/pingcap/tidb/ddl/mockAddBatchDDLJobsErr", `return(true)`)) + require.Nil(t, failpoint.Enable("github.com/pingcap/tidb/ddl/mockAddBatchDDLJobsErr", `return(true)`)) // Test the job runner should not hang forever. 
job := &model.Job{SchemaID: 1, TableID: 1} ctx.SetValue(sessionctx.QueryString, "skip") - err = d.doDDLJob(ctx, job) - require.Error(s.T(), err) - require.Equal(s.T(), err.Error(), "mockAddBatchDDLJobsErr") - require.Nil(s.T(), failpoint.Disable("github.com/pingcap/tidb/ddl/mockAddBatchDDLJobsErr")) -} - -func testCheckOwner(t *testing.T, d *ddl, expectedVal bool) { - require.Equal(t, d.isOwner(), expectedVal) -} - -func testCheckJobDone(t *testing.T, d *ddl, job *model.Job, isAdd bool) { - require.NoError(t, kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error { - m := meta.NewMeta(txn) - historyJob, err := m.GetHistoryDDLJob(job.ID) - require.NoError(t, err) - checkHistoryJob(t, historyJob) - if isAdd { - require.Equal(t, historyJob.SchemaState, model.StatePublic) - } else { - require.Equal(t, historyJob.SchemaState, model.StateNone) - } - - return nil - })) -} - -func testCheckJobCancelled(t *testing.T, d *ddl, job *model.Job, state *model.SchemaState) { - require.NoError(t, kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error { - m := meta.NewMeta(txn) - historyJob, err := m.GetHistoryDDLJob(job.ID) - require.NoError(t, err) - require.True(t, historyJob.IsCancelled() || historyJob.IsRollbackDone(), "history job %s", historyJob) - if state != nil { - require.Equal(t, historyJob.SchemaState, *state) - } - return nil - })) -} - -func doDDLJobErrWithSchemaState(ctx sessionctx.Context, d *ddl, t *testing.T, schemaID, tableID int64, tp model.ActionType, - args []interface{}, state *model.SchemaState) *model.Job { - job := &model.Job{ - SchemaID: schemaID, - TableID: tableID, - Type: tp, - Args: args, - BinlogInfo: &model.HistoryInfo{}, - } - // TODO: check error detail - ctx.SetValue(sessionctx.QueryString, "skip") - require.Error(t, d.doDDLJob(ctx, job)) - testCheckJobCancelled(t, d, job, state) - - return job -} - -func doDDLJobSuccess(ctx sessionctx.Context, d 
*ddl, t *testing.T, schemaID, tableID int64, tp model.ActionType, - args []interface{}) { - job := &model.Job{ - SchemaID: schemaID, - TableID: tableID, - Type: tp, - Args: args, - BinlogInfo: &model.HistoryInfo{}, - } - ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) - require.NoError(t, err) -} - -func doDDLJobErr(t *testing.T, schemaID, tableID int64, tp model.ActionType, args []interface{}, ctx sessionctx.Context, d *ddl) *model.Job { - return doDDLJobErrWithSchemaState(ctx, d, t, schemaID, tableID, tp, args, nil) -} - -func checkCancelState(txn kv.Transaction, job *model.Job, test *testCancelJob) error { - var checkErr error - addIndexFirstReorg := (test.act == model.ActionAddIndex || test.act == model.ActionAddPrimaryKey) && - job.SchemaState == model.StateWriteReorganization && job.SnapshotVer == 0 - // If the action is adding index and the state is writing reorganization, it wants to test the case of cancelling the job when backfilling indexes. - // When the job satisfies this case of addIndexFirstReorg, the worker hasn't started to backfill indexes. - if test.cancelState == job.SchemaState && !addIndexFirstReorg && !job.IsRollingback() { - errs, err := admin.CancelJobs(txn, test.jobIDs) - if err != nil { - checkErr = errors.Trace(err) - return checkErr - } - // It only tests cancel one DDL job. - if !terror.ErrorEqual(errs[0], test.cancelRetErrs[0]) { - checkErr = errors.Trace(errs[0]) - return checkErr - } - } - return checkErr -} - -type testCancelJob struct { - jobIDs []int64 - cancelRetErrs []error // cancelRetErrs is the first return value of CancelJobs. - act model.ActionType // act is the job action. 
- cancelState model.SchemaState -} - -func buildCancelJobTests(firstID int64) []testCancelJob { - noErrs := []error{nil} - tests := []testCancelJob{ - {act: model.ActionAddIndex, jobIDs: []int64{firstID + 1}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly}, - {act: model.ActionAddIndex, jobIDs: []int64{firstID + 2}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, - {act: model.ActionAddIndex, jobIDs: []int64{firstID + 3}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization}, - {act: model.ActionAddIndex, jobIDs: []int64{firstID + 4}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 4)}, cancelState: model.StatePublic}, - - // Test cancel drop index job , see TestCancelDropIndex. - {act: model.ActionAddColumn, jobIDs: []int64{firstID + 5}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly}, - {act: model.ActionAddColumn, jobIDs: []int64{firstID + 6}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, - {act: model.ActionAddColumn, jobIDs: []int64{firstID + 7}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization}, - {act: model.ActionAddColumn, jobIDs: []int64{firstID + 8}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 8)}, cancelState: model.StatePublic}, - - // Test create table, watch out, table id will alloc a globalID. - {act: model.ActionCreateTable, jobIDs: []int64{firstID + 10}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - // Test create database, watch out, database id will alloc a globalID. 
- {act: model.ActionCreateSchema, jobIDs: []int64{firstID + 12}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - - {act: model.ActionDropColumn, jobIDs: []int64{firstID + 13}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 13)}, cancelState: model.StateDeleteOnly}, - {act: model.ActionDropColumn, jobIDs: []int64{firstID + 14}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 14)}, cancelState: model.StateWriteOnly}, - {act: model.ActionDropColumn, jobIDs: []int64{firstID + 15}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 15)}, cancelState: model.StateWriteReorganization}, - {act: model.ActionRebaseAutoID, jobIDs: []int64{firstID + 16}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - {act: model.ActionShardRowID, jobIDs: []int64{firstID + 17}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - - {act: model.ActionModifyColumn, jobIDs: []int64{firstID + 18}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - {act: model.ActionModifyColumn, jobIDs: []int64{firstID + 19}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly}, - - {act: model.ActionAddForeignKey, jobIDs: []int64{firstID + 20}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - {act: model.ActionAddForeignKey, jobIDs: []int64{firstID + 21}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 21)}, cancelState: model.StatePublic}, - {act: model.ActionDropForeignKey, jobIDs: []int64{firstID + 22}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - {act: model.ActionDropForeignKey, jobIDs: []int64{firstID + 23}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 23)}, cancelState: model.StatePublic}, - - {act: model.ActionRenameTable, jobIDs: []int64{firstID + 24}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - {act: model.ActionRenameTable, jobIDs: []int64{firstID + 25}, cancelRetErrs: 
[]error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 25)}, cancelState: model.StatePublic}, - - {act: model.ActionModifyTableCharsetAndCollate, jobIDs: []int64{firstID + 26}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - {act: model.ActionModifyTableCharsetAndCollate, jobIDs: []int64{firstID + 27}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 27)}, cancelState: model.StatePublic}, - {act: model.ActionTruncateTablePartition, jobIDs: []int64{firstID + 28}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - {act: model.ActionTruncateTablePartition, jobIDs: []int64{firstID + 29}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 29)}, cancelState: model.StatePublic}, - {act: model.ActionModifySchemaCharsetAndCollate, jobIDs: []int64{firstID + 31}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - {act: model.ActionModifySchemaCharsetAndCollate, jobIDs: []int64{firstID + 32}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 32)}, cancelState: model.StatePublic}, - - {act: model.ActionAddPrimaryKey, jobIDs: []int64{firstID + 33}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly}, - {act: model.ActionAddPrimaryKey, jobIDs: []int64{firstID + 34}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, - {act: model.ActionAddPrimaryKey, jobIDs: []int64{firstID + 35}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization}, - {act: model.ActionAddPrimaryKey, jobIDs: []int64{firstID + 36}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 36)}, cancelState: model.StatePublic}, - {act: model.ActionDropPrimaryKey, jobIDs: []int64{firstID + 37}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, - {act: model.ActionDropPrimaryKey, jobIDs: []int64{firstID + 38}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 38)}, cancelState: model.StateDeleteOnly}, 
- - {act: model.ActionAddColumns, jobIDs: []int64{firstID + 39}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly}, - {act: model.ActionAddColumns, jobIDs: []int64{firstID + 40}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, - {act: model.ActionAddColumns, jobIDs: []int64{firstID + 41}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization}, - {act: model.ActionAddColumns, jobIDs: []int64{firstID + 42}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 42)}, cancelState: model.StatePublic}, - - {act: model.ActionDropColumns, jobIDs: []int64{firstID + 43}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 43)}, cancelState: model.StateDeleteOnly}, - {act: model.ActionDropColumns, jobIDs: []int64{firstID + 44}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 44)}, cancelState: model.StateWriteOnly}, - {act: model.ActionDropColumns, jobIDs: []int64{firstID + 45}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 45)}, cancelState: model.StateWriteReorganization}, - - {act: model.ActionAlterIndexVisibility, jobIDs: []int64{firstID + 47}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - {act: model.ActionAlterIndexVisibility, jobIDs: []int64{firstID + 48}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 48)}, cancelState: model.StatePublic}, - - {act: model.ActionExchangeTablePartition, jobIDs: []int64{firstID + 54}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - {act: model.ActionExchangeTablePartition, jobIDs: []int64{firstID + 55}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 55)}, cancelState: model.StatePublic}, - - {act: model.ActionAddTablePartition, jobIDs: []int64{firstID + 60}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - {act: model.ActionAddTablePartition, jobIDs: []int64{firstID + 61}, cancelRetErrs: 
noErrs, cancelState: model.StateReplicaOnly}, - {act: model.ActionAddTablePartition, jobIDs: []int64{firstID + 62}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob}, cancelState: model.StatePublic}, - - // modify column has two different types, normal-type and reorg-type. The latter has 5 states and it can be cancelled except the public state. - {act: model.ActionModifyColumn, jobIDs: []int64{firstID + 65}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - {act: model.ActionModifyColumn, jobIDs: []int64{firstID + 66}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly}, - {act: model.ActionModifyColumn, jobIDs: []int64{firstID + 67}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly}, - {act: model.ActionModifyColumn, jobIDs: []int64{firstID + 68}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization}, - {act: model.ActionModifyColumn, jobIDs: []int64{firstID + 69}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob}, cancelState: model.StatePublic}, - - // for drop indexes - {act: model.ActionDropIndexes, jobIDs: []int64{firstID + 72}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 72)}, cancelState: model.StateWriteOnly}, - {act: model.ActionDropIndexes, jobIDs: []int64{firstID + 73}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 73)}, cancelState: model.StateDeleteOnly}, - {act: model.ActionDropIndexes, jobIDs: []int64{firstID + 74}, cancelRetErrs: []error{admin.ErrCannotCancelDDLJob.GenWithStackByArgs(firstID + 74)}, cancelState: model.StateWriteReorganization}, - - // for alter db placement - {act: model.ActionModifySchemaDefaultPlacement, jobIDs: []int64{firstID + 75}, cancelRetErrs: noErrs, cancelState: model.StateNone}, - {act: model.ActionModifySchemaDefaultPlacement, jobIDs: []int64{firstID + 76}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 76)}, cancelState: model.StatePublic}, - } - - return tests -} - -func 
(s *testDDLSerialSuiteToVerify) checkDropIdx(t *testing.T, d *ddl, schemaID int64, tableID int64, idxName string, success bool) { - checkIdxExist(t, d, schemaID, tableID, idxName, !success) -} - -func (s *testDDLSerialSuiteToVerify) checkAddIdx(t *testing.T, d *ddl, schemaID int64, tableID int64, idxName string, success bool) { - checkIdxExist(t, d, schemaID, tableID, idxName, success) -} - -func checkIdxExist(t *testing.T, d *ddl, schemaID int64, tableID int64, idxName string, expectedExist bool) { - changedTable := testGetTable(t, d, schemaID, tableID) - var found bool - for _, idxInfo := range changedTable.Meta().Indices { - if idxInfo.Name.O == idxName { - found = true - break - } - } - require.Equal(t, found, expectedExist) + err := dom.DDL().DoDDLJob(ctx, job) + require.Error(t, err) + require.Equal(t, err.Error(), "mockAddBatchDDLJobsErr") + require.Nil(t, failpoint.Disable("github.com/pingcap/tidb/ddl/mockAddBatchDDLJobsErr")) } -func (s *testDDLSerialSuiteToVerify) checkAddColumns(d *ddl, schemaID int64, tableID int64, colNames []string, success bool) { - changedTable := testGetTable(s.T(), d, schemaID, tableID) - found := !checkColumnsNotFound(changedTable, colNames) - require.Equal(s.T(), found, success) -} - -func (s *testDDLSerialSuiteToVerify) checkCancelDropColumns(d *ddl, schemaID int64, tableID int64, colNames []string, success bool) { - changedTable := testGetTable(s.T(), d, schemaID, tableID) - notFound := checkColumnsNotFound(changedTable, colNames) - require.Equal(s.T(), notFound, success) -} - -func checkColumnsNotFound(t table.Table, colNames []string) bool { - notFound := true - for _, colName := range colNames { - for _, colInfo := range t.Meta().Columns { - if colInfo.Name.O == colName { - notFound = false - } - } - } - return notFound -} - -func checkIdxVisibility(changedTable table.Table, idxName string, expected bool) bool { - for _, idxInfo := range changedTable.Meta().Indices { - if idxInfo.Name.O == idxName && idxInfo.Invisible == 
expected { - return true - } - } - return false -} - -func (s *testDDLSerialSuiteToVerify) TestCancelJob() { - store := createMockStore(s.T()) - defer func() { - require.NoError(s.T(), store.Close()) - }() - d, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(s.T(), err) - defer func() { - require.NoError(s.T(), d.Stop()) - }() - dbInfo, err := testSchemaInfo(d, "test_cancel_job") - require.NoError(s.T(), err) - testCreateSchema(s.T(), testNewContext(d), d, dbInfo) - // create a partition table. - partitionTblInfo := testTableInfoWithPartition(s.T(), d, "t_partition", 5) - // Skip using sessPool. Make sure adding primary key can be successful. - partitionTblInfo.Columns[0].Flag |= mysql.NotNullFlag - // create table t (c1 int, c2 int, c3 int, c4 int, c5 int); - tblInfo, err := testTableInfo(d, "t", 5) - require.NoError(s.T(), err) - ctx := testNewContext(d) - err = ctx.NewTxn(context.Background()) - require.NoError(s.T(), err) - err = ctx.GetSessionVars().SetSystemVar("tidb_enable_exchange_partition", "1") - require.NoError(s.T(), err) - defer func() { - err := ctx.GetSessionVars().SetSystemVar("tidb_enable_exchange_partition", "0") - require.NoError(s.T(), err) - }() - testCreateTable(s.T(), ctx, d, dbInfo, partitionTblInfo) - tableAutoID := int64(100) - shardRowIDBits := uint64(5) - tblInfo.AutoIncID = tableAutoID - tblInfo.ShardRowIDBits = shardRowIDBits - job := testCreateTable(s.T(), ctx, d, dbInfo, tblInfo) - // insert t values (1, 2, 3, 4, 5); - originTable := testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) - row := types.MakeDatums(1, 2, 3, 4, 5) - _, err = originTable.AddRecord(ctx, row) - require.NoError(s.T(), err) - txn, err := ctx.Txn(true) - require.NoError(s.T(), err) - err = txn.Commit(context.Background()) - require.NoError(s.T(), err) - - tc := &TestDDLCallback{} - // set up hook - firstJobID := job.ID - tests := buildCancelJobTests(firstJobID) - var checkErr error - var mu 
sync.Mutex - var test *testCancelJob - updateTest := func(t *testCancelJob) { - mu.Lock() - test = t - mu.Unlock() - } - hookCancelFunc := func(job *model.Job) { - if job.State == model.JobStateSynced || job.State == model.JobStateCancelled || job.State == model.JobStateCancelling { - return - } - // This hook only valid for the related test job. - // This is use to avoid parallel test fail. - mu.Lock() - if len(test.jobIDs) > 0 && test.jobIDs[0] != job.ID { - mu.Unlock() - return - } - mu.Unlock() - if checkErr != nil { - return - } - - hookCtx := mock.NewContext() - hookCtx.Store = store - err1 := hookCtx.NewTxn(context.Background()) - if err1 != nil { - checkErr = errors.Trace(err1) - return - } - txn, err1 = hookCtx.Txn(true) - if err1 != nil { - checkErr = errors.Trace(err1) - return - } - mu.Lock() - checkErr = checkCancelState(txn, job, test) - mu.Unlock() - if checkErr != nil { - return - } - err1 = txn.Commit(context.Background()) - if err1 != nil { - checkErr = errors.Trace(err1) - return - } - } - tc.onJobUpdated = hookCancelFunc - tc.onJobRunBefore = hookCancelFunc - d.SetHook(tc) - - // for adding index - updateTest(&tests[0]) - idxOrigName := "idx" - validArgs := []interface{}{false, model.NewCIStr(idxOrigName), - []*ast.IndexPartSpecification{{ - Column: &ast.ColumnName{Name: model.NewCIStr("c1")}, - Length: -1, - }}, nil} - - // When the job satisfies this test case, the option will be rollback, so the job's schema state is none. 
- cancelState := model.StateNone - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddIndex, validArgs, &cancelState) - require.NoError(s.T(), checkErr) - s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, false) - updateTest(&tests[1]) - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddIndex, validArgs, &cancelState) - require.NoError(s.T(), checkErr) - s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, false) - updateTest(&tests[2]) - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddIndex, validArgs, &cancelState) - require.NoError(s.T(), checkErr) - s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, false) - updateTest(&tests[3]) - testCreateIndex(s.T(), ctx, d, dbInfo, tblInfo, false, "idx", "c2") - require.NoError(s.T(), checkErr) - txn, err = ctx.Txn(true) - require.NoError(s.T(), err) - require.Nil(s.T(), txn.Commit(context.Background())) - s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, true) - - // for add column - updateTest(&tests[4]) - addingColName := "colA" - newColumnDef := &ast.ColumnDef{ - Name: &ast.ColumnName{Name: model.NewCIStr(addingColName)}, - Tp: &types.FieldType{Tp: mysql.TypeLonglong}, - Options: []*ast.ColumnOption{}, - } - chs, coll := charset.GetDefaultCharsetAndCollate() - col, _, err := buildColumnAndConstraint(ctx, 2, newColumnDef, nil, chs, coll) - require.NoError(s.T(), err) - - addColumnArgs := []interface{}{col, &ast.ColumnPosition{Tp: ast.ColumnPositionNone}, 0} - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddColumn, addColumnArgs, &cancelState) - require.NoError(s.T(), checkErr) - s.checkAddColumns(d, dbInfo.ID, tblInfo.ID, []string{addingColName}, false) - - updateTest(&tests[5]) - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddColumn, addColumnArgs, &cancelState) - require.NoError(s.T(), checkErr) - s.checkAddColumns(d, dbInfo.ID, 
tblInfo.ID, []string{addingColName}, false) - - updateTest(&tests[6]) - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddColumn, addColumnArgs, &cancelState) - require.NoError(s.T(), checkErr) - s.checkAddColumns(d, dbInfo.ID, tblInfo.ID, []string{addingColName}, false) - - updateTest(&tests[7]) - testAddColumn(s.T(), ctx, d, dbInfo, tblInfo, addColumnArgs) - require.NoError(s.T(), checkErr) - s.checkAddColumns(d, dbInfo.ID, tblInfo.ID, []string{addingColName}, true) - - // for create table - tblInfo1, err := testTableInfo(d, "t1", 2) - require.NoError(s.T(), err) - updateTest(&tests[8]) - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo1.ID, model.ActionCreateTable, []interface{}{tblInfo1}, &cancelState) - require.NoError(s.T(), checkErr) - testCheckTableState(s.T(), d, dbInfo, tblInfo1, model.StateNone) - - // for create database - dbInfo1, err := testSchemaInfo(d, "test_cancel_job1") - require.NoError(s.T(), err) - updateTest(&tests[9]) - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo1.ID, 0, model.ActionCreateSchema, []interface{}{dbInfo1}, &cancelState) - require.NoError(s.T(), checkErr) - testCheckSchemaState(s.T(), d, dbInfo1, model.StateNone) - - // for drop column. 
- updateTest(&tests[10]) - dropColName := "c3" - s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, []string{dropColName}, false) - testDropColumn(s.T(), ctx, d, dbInfo, tblInfo, dropColName, false) - require.NoError(s.T(), checkErr) - s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, []string{dropColName}, true) - - updateTest(&tests[11]) - dropColName = "c4" - s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, []string{dropColName}, false) - testDropColumn(s.T(), ctx, d, dbInfo, tblInfo, dropColName, false) - require.NoError(s.T(), checkErr) - s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, []string{dropColName}, true) - - updateTest(&tests[12]) - dropColName = "c5" - s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, []string{dropColName}, false) - testDropColumn(s.T(), ctx, d, dbInfo, tblInfo, dropColName, false) - require.NoError(s.T(), checkErr) - s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, []string{dropColName}, true) - - // cancel rebase auto id - updateTest(&tests[13]) - rebaseIDArgs := []interface{}{int64(200)} - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionRebaseAutoID, rebaseIDArgs, &cancelState) - require.NoError(s.T(), checkErr) - changedTable := testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) - require.Equal(s.T(), changedTable.Meta().AutoIncID, tableAutoID) - - // cancel shard bits - updateTest(&tests[14]) - shardRowIDArgs := []interface{}{uint64(7)} - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionShardRowID, shardRowIDArgs, &cancelState) - require.NoError(s.T(), checkErr) - changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) - require.Equal(s.T(), changedTable.Meta().ShardRowIDBits, shardRowIDBits) - - // modify none-state column - col.DefaultValue = "1" - updateTest(&tests[15]) - modifyColumnArgs := []interface{}{col, col.Name, &ast.ColumnPosition{}, byte(0), uint64(0)} - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, modifyColumnArgs, 
&test.cancelState) - require.NoError(s.T(), checkErr) - changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) - changedCol := model.FindColumnInfo(changedTable.Meta().Columns, col.Name.L) - require.Nil(s.T(), changedCol.DefaultValue) - - // modify delete-only-state column, - col.FieldType.Tp = mysql.TypeTiny - col.FieldType.Flen-- - updateTest(&tests[16]) - modifyColumnArgs = []interface{}{col, col.Name, &ast.ColumnPosition{}, byte(0), uint64(0)} - cancelState = model.StateNone - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, modifyColumnArgs, &cancelState) - require.NoError(s.T(), checkErr) - changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) - changedCol = model.FindColumnInfo(changedTable.Meta().Columns, col.Name.L) - require.Equal(s.T(), changedCol.FieldType.Tp, mysql.TypeLonglong) - require.Equal(s.T(), changedCol.FieldType.Flen, col.FieldType.Flen+1) - col.FieldType.Flen++ - - // Test add foreign key failed cause by canceled. - updateTest(&tests[17]) - addForeignKeyArgs := []interface{}{model.FKInfo{Name: model.NewCIStr("fk1")}} - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, addForeignKeyArgs, &test.cancelState) - require.NoError(s.T(), checkErr) - changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) - require.Equal(s.T(), len(changedTable.Meta().ForeignKeys), 0) - - // Test add foreign key successful. - updateTest(&tests[18]) - doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, addForeignKeyArgs) - require.NoError(s.T(), checkErr) - changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) - require.Equal(s.T(), len(changedTable.Meta().ForeignKeys), 1) - require.Equal(s.T(), changedTable.Meta().ForeignKeys[0].Name, addForeignKeyArgs[0].(model.FKInfo).Name) - - // Test drop foreign key failed cause by canceled. 
- updateTest(&tests[19]) - dropForeignKeyArgs := []interface{}{addForeignKeyArgs[0].(model.FKInfo).Name} - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, dropForeignKeyArgs, &test.cancelState) - require.NoError(s.T(), checkErr) - changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) - require.Equal(s.T(), len(changedTable.Meta().ForeignKeys), 1) - require.Equal(s.T(), changedTable.Meta().ForeignKeys[0].Name, dropForeignKeyArgs[0].(model.CIStr)) - - // Test drop foreign key successful. - updateTest(&tests[20]) - doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, dropForeignKeyArgs) - require.NoError(s.T(), checkErr) - changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) - require.Equal(s.T(), len(changedTable.Meta().ForeignKeys), 0) - - // test rename table failed caused by canceled. - test = &tests[21] - renameTableArgs := []interface{}{dbInfo.ID, model.NewCIStr("t2"), dbInfo.Name} - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, renameTableArgs, &test.cancelState) - require.NoError(s.T(), checkErr) - changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) - require.Equal(s.T(), changedTable.Meta().Name.L, "t") - - // test rename table successful. - test = &tests[22] - doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, renameTableArgs) - require.NoError(s.T(), checkErr) - changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) - require.Equal(s.T(), changedTable.Meta().Name.L, "t2") - - // test modify table charset failed caused by canceled. 
- test = &tests[23] - modifyTableCharsetArgs := []interface{}{"utf8mb4", "utf8mb4_bin"} - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, modifyTableCharsetArgs, &test.cancelState) - require.NoError(s.T(), checkErr) - changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) - require.Equal(s.T(), changedTable.Meta().Charset, "utf8") - require.Equal(s.T(), changedTable.Meta().Collate, "utf8_bin") - - // test modify table charset successfully. - test = &tests[24] - doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, modifyTableCharsetArgs) - require.NoError(s.T(), checkErr) - changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) - require.Equal(s.T(), changedTable.Meta().Charset, "utf8mb4") - require.Equal(s.T(), changedTable.Meta().Collate, "utf8mb4_bin") - - // test truncate table partition failed caused by canceled. - test = &tests[25] - truncateTblPartitionArgs := []interface{}{[]int64{partitionTblInfo.Partition.Definitions[0].ID}} - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, partitionTblInfo.ID, test.act, truncateTblPartitionArgs, &test.cancelState) - require.NoError(s.T(), checkErr) - changedTable = testGetTable(s.T(), d, dbInfo.ID, partitionTblInfo.ID) - require.True(s.T(), changedTable.Meta().Partition.Definitions[0].ID == partitionTblInfo.Partition.Definitions[0].ID) - - // test truncate table partition charset successfully. - test = &tests[26] - doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, partitionTblInfo.ID, test.act, truncateTblPartitionArgs) - require.NoError(s.T(), checkErr) - changedTable = testGetTable(s.T(), d, dbInfo.ID, partitionTblInfo.ID) - require.False(s.T(), changedTable.Meta().Partition.Definitions[0].ID == partitionTblInfo.Partition.Definitions[0].ID) - - // test modify schema charset failed caused by canceled. 
- test = &tests[27] - charsetAndCollate := []interface{}{"utf8mb4", "utf8mb4_bin"} - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, charsetAndCollate, &test.cancelState) - require.NoError(s.T(), checkErr) - dbInfo, err = testGetSchemaInfoWithError(d, dbInfo.ID) - require.NoError(s.T(), err) - require.Equal(s.T(), dbInfo.Charset, "") - require.Equal(s.T(), dbInfo.Collate, "") - - // test modify table charset successfully. - test = &tests[28] - doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, charsetAndCollate) - require.NoError(s.T(), checkErr) - dbInfo, err = testGetSchemaInfoWithError(d, dbInfo.ID) - require.NoError(s.T(), err) - require.Equal(s.T(), dbInfo.Charset, "utf8mb4") - require.Equal(s.T(), dbInfo.Collate, "utf8mb4_bin") - - // for adding primary key - tblInfo = changedTable.Meta() - updateTest(&tests[29]) - idxOrigName = "primary" - validArgs = []interface{}{false, model.NewCIStr(idxOrigName), - []*ast.IndexPartSpecification{{ - Column: &ast.ColumnName{Name: model.NewCIStr("c1")}, - Length: -1, - }}, nil} - cancelState = model.StateNone - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddPrimaryKey, validArgs, &cancelState) - require.NoError(s.T(), checkErr) - s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, false) - updateTest(&tests[30]) - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddPrimaryKey, validArgs, &cancelState) - require.NoError(s.T(), checkErr) - s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, false) - updateTest(&tests[31]) - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddPrimaryKey, validArgs, &cancelState) - require.NoError(s.T(), checkErr) - s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, false) - updateTest(&tests[32]) - testCreatePrimaryKey(s.T(), ctx, d, dbInfo, tblInfo, "c1") - require.NoError(s.T(), checkErr) - txn, err = ctx.Txn(true) - require.NoError(s.T(), 
err) - require.Nil(s.T(), txn.Commit(context.Background())) - s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, true) - - // for dropping primary key - updateTest(&tests[33]) - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionDropPrimaryKey, validArgs, &cancelState) - require.NoError(s.T(), checkErr) - s.checkDropIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, false) - updateTest(&tests[34]) - testDropIndex(s.T(), ctx, d, dbInfo, tblInfo, idxOrigName) - require.NoError(s.T(), checkErr) - s.checkDropIdx(s.T(), d, dbInfo.ID, tblInfo.ID, idxOrigName, true) - - // for add columns - updateTest(&tests[35]) - addingColNames := []string{"colA", "colB", "colC", "colD", "colE", "colF"} - cols := make([]*table.Column, len(addingColNames)) - for i, addingColName := range addingColNames { - newColumnDef := &ast.ColumnDef{ - Name: &ast.ColumnName{Name: model.NewCIStr(addingColName)}, - Tp: &types.FieldType{Tp: mysql.TypeLonglong}, - Options: []*ast.ColumnOption{}, - } - col, _, err := buildColumnAndConstraint(ctx, 0, newColumnDef, nil, mysql.DefaultCharset, "") - require.NoError(s.T(), err) - cols[i] = col - } - offsets := make([]int, len(cols)) - positions := make([]*ast.ColumnPosition, len(cols)) - for i := range positions { - positions[i] = &ast.ColumnPosition{Tp: ast.ColumnPositionNone} - } - ifNotExists := make([]bool, len(cols)) - - addColumnArgs = []interface{}{cols, positions, offsets, ifNotExists} - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddColumns, addColumnArgs, &cancelState) - require.NoError(s.T(), checkErr) - s.checkAddColumns(d, dbInfo.ID, tblInfo.ID, addingColNames, false) - - updateTest(&tests[36]) - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddColumns, addColumnArgs, &cancelState) - require.NoError(s.T(), checkErr) - s.checkAddColumns(d, dbInfo.ID, tblInfo.ID, addingColNames, false) - - updateTest(&tests[37]) - doDDLJobErrWithSchemaState(ctx, d, 
s.T(), dbInfo.ID, tblInfo.ID, model.ActionAddColumns, addColumnArgs, &cancelState) - require.NoError(s.T(), checkErr) - s.checkAddColumns(d, dbInfo.ID, tblInfo.ID, addingColNames, false) - - updateTest(&tests[38]) - testAddColumns(s.T(), ctx, d, dbInfo, tblInfo, addColumnArgs) - require.NoError(s.T(), checkErr) - s.checkAddColumns(d, dbInfo.ID, tblInfo.ID, addingColNames, true) - - // for drop columns - updateTest(&tests[39]) - dropColNames := []string{"colA", "colB"} - s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, dropColNames, false) - testDropColumns(s.T(), ctx, d, dbInfo, tblInfo, dropColNames, false) - require.NoError(s.T(), checkErr) - s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, dropColNames, true) - - updateTest(&tests[40]) - dropColNames = []string{"colC", "colD"} - s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, dropColNames, false) - testDropColumns(s.T(), ctx, d, dbInfo, tblInfo, dropColNames, false) - require.NoError(s.T(), checkErr) - s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, dropColNames, true) - - updateTest(&tests[41]) - dropColNames = []string{"colE", "colF"} - s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, dropColNames, false) - testDropColumns(s.T(), ctx, d, dbInfo, tblInfo, dropColNames, false) - require.NoError(s.T(), checkErr) - s.checkCancelDropColumns(d, dbInfo.ID, tblInfo.ID, dropColNames, true) - - // test alter index visibility failed caused by canceled. 
- indexName := "idx_c3" - testCreateIndex(s.T(), ctx, d, dbInfo, tblInfo, false, indexName, "c3") - require.NoError(s.T(), checkErr) - txn, err = ctx.Txn(true) - require.NoError(s.T(), err) - require.Nil(s.T(), txn.Commit(context.Background())) - s.checkAddIdx(s.T(), d, dbInfo.ID, tblInfo.ID, indexName, true) - - updateTest(&tests[42]) - alterIndexVisibility := []interface{}{model.NewCIStr(indexName), true} - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, alterIndexVisibility, &test.cancelState) - require.NoError(s.T(), checkErr) - changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) - require.True(s.T(), checkIdxVisibility(changedTable, indexName, false)) - - // cancel alter index visibility successfully - updateTest(&tests[43]) - alterIndexVisibility = []interface{}{model.NewCIStr(indexName), true} - doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tblInfo.ID, test.act, alterIndexVisibility) - require.NoError(s.T(), checkErr) - changedTable = testGetTable(s.T(), d, dbInfo.ID, tblInfo.ID) - require.True(s.T(), checkIdxVisibility(changedTable, indexName, true)) - - // test exchange partition failed caused by canceled - pt := testTableInfoWithPartition(s.T(), d, "pt", 5) - nt, err := testTableInfo(d, "nt", 5) - require.NoError(s.T(), err) - testCreateTable(s.T(), ctx, d, dbInfo, pt) - testCreateTable(s.T(), ctx, d, dbInfo, nt) - - updateTest(&tests[44]) - defID := pt.Partition.Definitions[0].ID - exchangeTablePartition := []interface{}{defID, dbInfo.ID, pt.ID, "p0", true} - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, nt.ID, test.act, exchangeTablePartition, &test.cancelState) - require.NoError(s.T(), checkErr) - changedNtTable := testGetTable(s.T(), d, dbInfo.ID, nt.ID) - changedPtTable := testGetTable(s.T(), d, dbInfo.ID, pt.ID) - require.True(s.T(), changedNtTable.Meta().ID == nt.ID) - require.True(s.T(), changedPtTable.Meta().Partition.Definitions[0].ID == pt.Partition.Definitions[0].ID) - - // cancel exchange partition 
successfully - updateTest(&tests[45]) - doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, nt.ID, test.act, exchangeTablePartition) - require.NoError(s.T(), checkErr) - changedNtTable = testGetTable(s.T(), d, dbInfo.ID, pt.Partition.Definitions[0].ID) - changedPtTable = testGetTable(s.T(), d, dbInfo.ID, pt.ID) - require.False(s.T(), changedNtTable.Meta().ID == nt.ID) - require.True(s.T(), changedPtTable.Meta().Partition.Definitions[0].ID == nt.ID) - - // Cancel add table partition. - baseTableInfo := testTableInfoWithPartitionLessThan(s.T(), d, "empty_table", 5, "1000") - testCreateTable(s.T(), ctx, d, dbInfo, baseTableInfo) - - cancelState = model.StateNone - updateTest(&tests[46]) - addedPartInfo := testAddedNewTablePartitionInfo(s.T(), d, baseTableInfo, "p1", "maxvalue") - addPartitionArgs := []interface{}{addedPartInfo} - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, addPartitionArgs, &cancelState) - require.NoError(s.T(), checkErr) - baseTable := testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) - require.Equal(s.T(), len(baseTable.Meta().Partition.Definitions), 1) - - updateTest(&tests[47]) - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, addPartitionArgs, &cancelState) - require.NoError(s.T(), checkErr) - baseTable = testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) - require.Equal(s.T(), len(baseTable.Meta().Partition.Definitions), 1) - - updateTest(&tests[48]) - doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, addPartitionArgs) - require.NoError(s.T(), checkErr) - baseTable = testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) - require.Equal(s.T(), len(baseTable.Meta().Partition.Definitions), 2) - require.Equal(s.T(), baseTable.Meta().Partition.Definitions[1].ID, addedPartInfo.Definitions[0].ID) - require.Equal(s.T(), baseTable.Meta().Partition.Definitions[1].LessThan[0], addedPartInfo.Definitions[0].LessThan[0]) - - // Cancel modify column which should reorg the data. 
- require.Nil(s.T(), failpoint.Enable("github.com/pingcap/tidb/ddl/skipMockContextDoExec", `return(true)`)) - baseTableInfo = testTableInfoWith2IndexOnFirstColumn(s.T(), d, "modify-table", 2) - // This will cost 2 global id, one for table id, the other for the job id. - testCreateTable(s.T(), ctx, d, dbInfo, baseTableInfo) - - cancelState = model.StateNone - newCol := baseTableInfo.Columns[0].Clone() - // change type from long to tinyint. - newCol.FieldType = *types.NewFieldType(mysql.TypeTiny) - // change from null to not null - newCol.FieldType.Flag |= mysql.NotNullFlag - newCol.FieldType.Flen = 2 - - originColName := baseTableInfo.Columns[0].Name - pos := &ast.ColumnPosition{Tp: ast.ColumnPositionNone} - - updateTest(&tests[49]) - modifyColumnArgs = []interface{}{&newCol, originColName, pos, mysql.TypeNull, 0} - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, modifyColumnArgs, &cancelState) - require.NoError(s.T(), checkErr) - baseTable = testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) - require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Tp, mysql.TypeLong) - require.Equal(s.T(), mysql.HasNotNullFlag(baseTable.Meta().Columns[0].FieldType.Flag), false) - - updateTest(&tests[50]) - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, modifyColumnArgs, &cancelState) - require.NoError(s.T(), checkErr) - baseTable = testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) - require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Tp, mysql.TypeLong) - require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Flag&mysql.NotNullFlag, uint(0)) - - updateTest(&tests[51]) - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, modifyColumnArgs, &cancelState) - require.NoError(s.T(), checkErr) - baseTable = testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) - require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Tp, mysql.TypeLong) - require.Equal(s.T(), 
baseTable.Meta().Columns[0].FieldType.Flag&mysql.NotNullFlag, uint(0)) - - updateTest(&tests[52]) - doDDLJobErrWithSchemaState(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, modifyColumnArgs, &cancelState) - require.NoError(s.T(), checkErr) - baseTable = testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) - require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Tp, mysql.TypeLong) - require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Flag&mysql.NotNullFlag, uint(0)) - - updateTest(&tests[53]) - doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, baseTableInfo.ID, test.act, modifyColumnArgs) - require.NoError(s.T(), checkErr) - baseTable = testGetTable(s.T(), d, dbInfo.ID, baseTableInfo.ID) - require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Tp, mysql.TypeTiny) - require.Equal(s.T(), baseTable.Meta().Columns[0].FieldType.Flag&mysql.NotNullFlag, uint(1)) - require.Nil(s.T(), failpoint.Disable("github.com/pingcap/tidb/ddl/skipMockContextDoExec")) - - // for drop indexes - updateTest(&tests[54]) - ifExists := make([]bool, 2) - idxNames := []model.CIStr{model.NewCIStr("i1"), model.NewCIStr("i2")} - dropIndexesArgs := []interface{}{idxNames, ifExists} - tableInfo := createTestTableForDropIndexes(s.T(), ctx, d, dbInfo, "test-drop-indexes", 6) - doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tableInfo.ID, test.act, dropIndexesArgs) - s.checkDropIndexes(d, dbInfo.ID, tableInfo.ID, idxNames, true) - - updateTest(&tests[55]) - idxNames = []model.CIStr{model.NewCIStr("i3"), model.NewCIStr("i4")} - dropIndexesArgs = []interface{}{idxNames, ifExists} - doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tableInfo.ID, test.act, dropIndexesArgs) - s.checkDropIndexes(d, dbInfo.ID, tableInfo.ID, idxNames, true) - - updateTest(&tests[56]) - idxNames = []model.CIStr{model.NewCIStr("i5"), model.NewCIStr("i6")} - dropIndexesArgs = []interface{}{idxNames, ifExists} - doDDLJobSuccess(ctx, d, s.T(), dbInfo.ID, tableInfo.ID, test.act, dropIndexesArgs) - s.checkDropIndexes(d, dbInfo.ID, 
tableInfo.ID, idxNames, true) -} - -func TestIgnorableSpec(t *testing.T) { - specs := []ast.AlterTableType{ - ast.AlterTableOption, - ast.AlterTableAddColumns, - ast.AlterTableAddConstraint, - ast.AlterTableDropColumn, - ast.AlterTableDropPrimaryKey, - ast.AlterTableDropIndex, - ast.AlterTableDropForeignKey, - ast.AlterTableModifyColumn, - ast.AlterTableChangeColumn, - ast.AlterTableRenameTable, - ast.AlterTableAlterColumn, - } - for _, spec := range specs { - require.False(t, isIgnorableSpec(spec)) - } - - ignorableSpecs := []ast.AlterTableType{ - ast.AlterTableLock, - ast.AlterTableAlgorithm, - } - for _, spec := range ignorableSpecs { - require.True(t, isIgnorableSpec(spec)) - } -} - -func TestBuildJobDependence(t *testing.T) { - store := createMockStore(t) - defer func() { - require.NoError(t, store.Close()) - }() - // Add some non-add-index jobs. - job1 := &model.Job{ID: 1, TableID: 1, Type: model.ActionAddColumn} - job2 := &model.Job{ID: 2, TableID: 1, Type: model.ActionCreateTable} - job3 := &model.Job{ID: 3, TableID: 2, Type: model.ActionDropColumn} - job6 := &model.Job{ID: 6, TableID: 1, Type: model.ActionDropTable} - job7 := &model.Job{ID: 7, TableID: 2, Type: model.ActionModifyColumn} - job9 := &model.Job{ID: 9, SchemaID: 111, Type: model.ActionDropSchema} - job11 := &model.Job{ID: 11, TableID: 2, Type: model.ActionRenameTable, Args: []interface{}{int64(111), "old db name"}} - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { - m := meta.NewMeta(txn) - require.NoError(t, m.EnQueueDDLJob(job1)) - require.NoError(t, m.EnQueueDDLJob(job2)) - require.NoError(t, m.EnQueueDDLJob(job3)) - require.NoError(t, m.EnQueueDDLJob(job6)) - require.NoError(t, m.EnQueueDDLJob(job7)) - require.NoError(t, m.EnQueueDDLJob(job9)) - require.NoError(t, m.EnQueueDDLJob(job11)) - return nil - }) - require.NoError(t, err) - job4 := &model.Job{ID: 4, TableID: 1, Type: model.ActionAddIndex} - err = 
kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { - m := meta.NewMeta(txn) - err := buildJobDependence(m, job4) - require.NoError(t, err) - require.Equal(t, job4.DependencyID, int64(2)) - return nil - }) - require.NoError(t, err) - job5 := &model.Job{ID: 5, TableID: 2, Type: model.ActionAddIndex} - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { - m := meta.NewMeta(txn) - err := buildJobDependence(m, job5) - require.NoError(t, err) - require.Equal(t, job5.DependencyID, int64(3)) - return nil - }) - require.NoError(t, err) - job8 := &model.Job{ID: 8, TableID: 3, Type: model.ActionAddIndex} - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { - m := meta.NewMeta(txn) - err := buildJobDependence(m, job8) - require.NoError(t, err) - require.Equal(t, job8.DependencyID, int64(0)) - return nil - }) - require.NoError(t, err) - job10 := &model.Job{ID: 10, SchemaID: 111, TableID: 3, Type: model.ActionAddIndex} - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { - m := meta.NewMeta(txn) - err := buildJobDependence(m, job10) - require.NoError(t, err) - require.Equal(t, job10.DependencyID, int64(9)) - return nil - }) - require.NoError(t, err) - job12 := &model.Job{ID: 12, SchemaID: 112, TableID: 2, Type: model.ActionAddIndex} - err = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { - m := meta.NewMeta(txn) - err := buildJobDependence(m, job12) - require.NoError(t, err) - require.Equal(t, job12.DependencyID, int64(11)) - return nil - }) - require.NoError(t, err) -} +func TestParallelDDL(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomainWithSchemaLease(t, testLease) + defer clean() -func addDDLJob(t *testing.T, d *ddl, job *model.Job) { - task := &limitJobTask{job, 
make(chan error)} - d.limitJobCh <- task - err := <-task.err - require.NoError(t, err) -} + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") -func TestParallelDDL(t *testing.T) { - store := createMockStore(t) - defer func() { - require.NoError(t, store.Close()) - }() - d, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(t, err) - defer func() { - require.NoError(t, d.Stop()) - }() - ctx := testNewContext(d) - err = ctx.NewTxn(context.Background()) - require.NoError(t, err) /* build structure: DBs -> { @@ -1468,63 +94,24 @@ func TestParallelDDL(t *testing.T) { db1.t2 (c1 int primary key, c2 int, c3 int) db2.t3 (c1 int, c2 int, c3 int, c4 int) } - Data -> { - t1: (10, 10), (20, 20) - t2: (1, 1, 1), (2, 2, 2), (3, 3, 3) - t3: (11, 22, 33, 44) - } */ - // create database test_parallel_ddl_1; - dbInfo1, err := testSchemaInfo(d, "test_parallel_ddl_1") - require.NoError(t, err) - testCreateSchema(t, ctx, d, dbInfo1) - // create table t1 (c1 int, c2 int); - tblInfo1, err := testTableInfo(d, "t1", 2) - require.NoError(t, err) - testCreateTable(t, ctx, d, dbInfo1, tblInfo1) - // insert t1 values (10, 10), (20, 20) - tbl1 := testGetTable(t, d, dbInfo1.ID, tblInfo1.ID) - _, err = tbl1.AddRecord(ctx, types.MakeDatums(1, 1)) - require.NoError(t, err) - _, err = tbl1.AddRecord(ctx, types.MakeDatums(2, 2)) - require.NoError(t, err) - // create table t2 (c1 int primary key, c2 int, c3 int); - tblInfo2, err := testTableInfo(d, "t2", 3) - require.NoError(t, err) - tblInfo2.Columns[0].Flag = mysql.PriKeyFlag | mysql.NotNullFlag - tblInfo2.PKIsHandle = true - testCreateTable(t, ctx, d, dbInfo1, tblInfo2) - // insert t2 values (1, 1), (2, 2), (3, 3) - tbl2 := testGetTable(t, d, dbInfo1.ID, tblInfo2.ID) - _, err = tbl2.AddRecord(ctx, types.MakeDatums(1, 1, 1)) - require.NoError(t, err) - _, err = tbl2.AddRecord(ctx, types.MakeDatums(2, 2, 2)) - require.NoError(t, err) - _, err = tbl2.AddRecord(ctx, 
types.MakeDatums(3, 3, 3)) - require.NoError(t, err) - // create database test_parallel_ddl_2; - dbInfo2, err := testSchemaInfo(d, "test_parallel_ddl_2") - require.NoError(t, err) - testCreateSchema(t, ctx, d, dbInfo2) - // create table t3 (c1 int, c2 int, c3 int, c4 int); - tblInfo3, err := testTableInfo(d, "t3", 4) - require.NoError(t, err) - testCreateTable(t, ctx, d, dbInfo2, tblInfo3) - // insert t3 values (11, 22, 33, 44) - tbl3 := testGetTable(t, d, dbInfo2.ID, tblInfo3.ID) - _, err = tbl3.AddRecord(ctx, types.MakeDatums(11, 22, 33, 44)) - require.NoError(t, err) + tk.MustExec("create database test_parallel_ddl_1") + tk.MustExec("create database test_parallel_ddl_2") + tk.MustExec("create table test_parallel_ddl_1.t1(c1 int, c2 int, key db1_idx2(c2))") + tk.MustExec("create table test_parallel_ddl_1.t2(c1 int primary key, c2 int, c3 int)") + tk.MustExec("create table test_parallel_ddl_2.t3(c1 int, c2 int, c3 int, c4 int)") // set hook to execute jobs after all jobs are in queue. jobCnt := int64(11) - tc := &TestDDLCallback{} + tc := &ddl.TestDDLCallback{Do: dom} once := sync.Once{} var checkErr error - tc.onJobRunBefore = func(job *model.Job) { + tc.OnJobRunBeforeExported = func(job *model.Job) { // TODO: extract a unified function for other tests. 
once.Do(func() { qLen1 := int64(0) qLen2 := int64(0) + var err error for { checkErr = kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { m := meta.NewMeta(txn) @@ -1551,7 +138,7 @@ func TestParallelDDL(t *testing.T) { } }) } - d.SetHook(tc) + dom.DDL().SetHook(tc) /* prepare jobs: @@ -1568,112 +155,113 @@ func TestParallelDDL(t *testing.T) { / 10 / 2 / null / drop schema / / 11 / 2 / 2 / add index / */ - job1 := buildCreateIdxJob(dbInfo1, tblInfo1, false, "db1_idx1", "c1") - addDDLJob(t, d, job1) - job2 := buildCreateColumnJob(dbInfo1, tblInfo1, "c3", &ast.ColumnPosition{Tp: ast.ColumnPositionNone}, nil) - addDDLJob(t, d, job2) - job3 := buildCreateIdxJob(dbInfo1, tblInfo1, false, "db1_idx2", "c3") - addDDLJob(t, d, job3) - job4 := buildDropColumnJob(dbInfo1, tblInfo2, "c3") - addDDLJob(t, d, job4) - job5 := buildDropIdxJob(dbInfo1, tblInfo1, "db1_idx1") - addDDLJob(t, d, job5) - job6 := buildCreateIdxJob(dbInfo1, tblInfo2, false, "db2_idx1", "c2") - addDDLJob(t, d, job6) - job7 := buildDropColumnJob(dbInfo2, tblInfo3, "c4") - addDDLJob(t, d, job7) - job8 := buildRebaseAutoIDJobJob(dbInfo2, tblInfo3, 1024) - addDDLJob(t, d, job8) - job9 := buildCreateIdxJob(dbInfo1, tblInfo1, false, "db1_idx3", "c2") - addDDLJob(t, d, job9) - job10 := buildDropSchemaJob(dbInfo2) - addDDLJob(t, d, job10) - job11 := buildCreateIdxJob(dbInfo2, tblInfo3, false, "db3_idx1", "c2") - addDDLJob(t, d, job11) - // TODO: add rename table job - - // check results. - isChecked := false - for !isChecked { - err := kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { - m := meta.NewMeta(txn) - lastJob, err := m.GetHistoryDDLJob(job11.ID) - require.NoError(t, err) - // all jobs are finished. - if lastJob != nil { - finishedJobs, err := m.GetAllHistoryDDLJobs() - require.NoError(t, err) - // get the last 12 jobs completed. 
- finishedJobs = finishedJobs[len(finishedJobs)-11:] - // check some jobs are ordered because of the dependence. - require.Equal(t, finishedJobs[0].ID, job1.ID, "%v", finishedJobs) - require.Equal(t, finishedJobs[1].ID, job2.ID, "%v", finishedJobs) - require.Equal(t, finishedJobs[2].ID, job3.ID, "%v", finishedJobs) - require.Equal(t, finishedJobs[4].ID, job5.ID, "%v", finishedJobs) - require.Equal(t, finishedJobs[10].ID, job11.ID, "%v", finishedJobs) - // check the jobs are ordered in the backfill-job queue or general-job queue. - backfillJobID := int64(0) - generalJobID := int64(0) - for _, job := range finishedJobs { - // check jobs' order. - if mayNeedReorg(job) { - require.Greater(t, job.ID, backfillJobID) - backfillJobID = job.ID - } else { - require.Greater(t, job.ID, generalJobID) - generalJobID = job.ID - } - // check jobs' state. - if job.ID == lastJob.ID { - require.Equal(t, job.State, model.JobStateCancelled, "job: %v", job) - } else { - require.Equal(t, job.State, model.JobStateSynced, "job: %v", job) - } - } - - isChecked = true - } - return nil - }) - require.NoError(t, err) - time.Sleep(10 * time.Millisecond) - } - - require.NoError(t, checkErr) - tc = &TestDDLCallback{} - d.SetHook(tc) -} - -func TestDDLPackageExecuteSQL(t *testing.T) { - store := createMockStore(t) - defer func() { - require.NoError(t, store.Close()) - }() + var wg util.WaitGroupWrapper - d, err := testNewDDLAndStart( - context.Background(), - WithStore(store), - WithLease(testLease), - ) - require.NoError(t, err) - testCheckOwner(t, d, true) - defer func() { - require.NoError(t, d.Stop()) - }() - worker := d.generalWorker() - require.NotNil(t, worker) + seqIDs := make([]int, 11) - // In test environment, worker.ctxPool will be nil, and get will return mock.Context. - // We just test that can use it to call sqlexec.SQLExecutor.Execute. 
- sess, err := worker.sessPool.get() - require.NoError(t, err) - defer worker.sessPool.put(sess) - se := sess.(sqlexec.SQLExecutor) - _, _ = se.Execute(context.Background(), "create table t(a int);") -} + wg.Run(func() { + tk := testkit.NewTestKit(t, store) + tk.MustExec("alter table test_parallel_ddl_1.t1 add index db1_idx1(c1)") + rs := tk.MustQuery("select json_extract(@@tidb_last_ddl_info, '$.seq_num')") + seqIDs[0], _ = strconv.Atoi(rs.Rows()[0][0].(string)) + }) + time.Sleep(5 * time.Millisecond) + wg.Run(func() { + tk := testkit.NewTestKit(t, store) + tk.MustExec("alter table test_parallel_ddl_1.t1 add column c3 int") + rs := tk.MustQuery("select json_extract(@@tidb_last_ddl_info, '$.seq_num')") + seqIDs[1], _ = strconv.Atoi(rs.Rows()[0][0].(string)) + }) + time.Sleep(5 * time.Millisecond) + wg.Run(func() { + tk := testkit.NewTestKit(t, store) + tk.MustExec("alter table test_parallel_ddl_1.t1 add index db1_idxx(c1)") + rs := tk.MustQuery("select json_extract(@@tidb_last_ddl_info, '$.seq_num')") + seqIDs[2], _ = strconv.Atoi(rs.Rows()[0][0].(string)) + }) + time.Sleep(5 * time.Millisecond) + wg.Run(func() { + tk := testkit.NewTestKit(t, store) + tk.MustExec("alter table test_parallel_ddl_1.t2 drop column c3") + rs := tk.MustQuery("select json_extract(@@tidb_last_ddl_info, '$.seq_num')") + seqIDs[3], _ = strconv.Atoi(rs.Rows()[0][0].(string)) + }) + time.Sleep(5 * time.Millisecond) + wg.Run(func() { + tk := testkit.NewTestKit(t, store) + tk.MustExec("alter table test_parallel_ddl_1.t1 drop index db1_idx2") + rs := tk.MustQuery("select json_extract(@@tidb_last_ddl_info, '$.seq_num')") + seqIDs[4], _ = strconv.Atoi(rs.Rows()[0][0].(string)) + }) + time.Sleep(5 * time.Millisecond) + wg.Run(func() { + tk := testkit.NewTestKit(t, store) + tk.MustExec("alter table test_parallel_ddl_1.t2 add index db1_idx2(c2)") + rs := tk.MustQuery("select json_extract(@@tidb_last_ddl_info, '$.seq_num')") + seqIDs[5], _ = strconv.Atoi(rs.Rows()[0][0].(string)) + }) + time.Sleep(5 * 
time.Millisecond) + wg.Run(func() { + tk := testkit.NewTestKit(t, store) + tk.MustExec("alter table test_parallel_ddl_2.t3 drop column c4") + rs := tk.MustQuery("select json_extract(@@tidb_last_ddl_info, '$.seq_num')") + seqIDs[6], _ = strconv.Atoi(rs.Rows()[0][0].(string)) + }) + time.Sleep(5 * time.Millisecond) + wg.Run(func() { + tk := testkit.NewTestKit(t, store) + tk.MustExec("alter table test_parallel_ddl_2.t3 auto_id_cache 1024") + rs := tk.MustQuery("select json_extract(@@tidb_last_ddl_info, '$.seq_num')") + seqIDs[7], _ = strconv.Atoi(rs.Rows()[0][0].(string)) + }) + time.Sleep(5 * time.Millisecond) + wg.Run(func() { + tk := testkit.NewTestKit(t, store) + tk.MustExec("alter table test_parallel_ddl_1.t1 add index db1_idx3(c2)") + rs := tk.MustQuery("select json_extract(@@tidb_last_ddl_info, '$.seq_num')") + seqIDs[8], _ = strconv.Atoi(rs.Rows()[0][0].(string)) + }) + time.Sleep(5 * time.Millisecond) + wg.Run(func() { + tk := testkit.NewTestKit(t, store) + tk.MustExec("drop database test_parallel_ddl_2") + rs := tk.MustQuery("select json_extract(@@tidb_last_ddl_info, '$.seq_num')") + seqIDs[9], _ = strconv.Atoi(rs.Rows()[0][0].(string)) + }) + time.Sleep(5 * time.Millisecond) + wg.Run(func() { + tk := testkit.NewTestKit(t, store) + _, err := tk.Exec("alter table test_parallel_ddl_2.t3 add index db3_idx1(c2)") + require.Error(t, err) + rs := tk.MustQuery("select json_extract(@@tidb_last_ddl_info, '$.seq_num')") + seqIDs[10], _ = strconv.Atoi(rs.Rows()[0][0].(string)) + }) -func (s *testDDLSerialSuiteToVerify) checkDropIndexes(d *ddl, schemaID int64, tableID int64, idxNames []model.CIStr, success bool) { - for _, idxName := range idxNames { - checkIdxExist(s.T(), d, schemaID, tableID, idxName.O, !success) - } + wg.Wait() + + // Table 1 order. + require.Less(t, seqIDs[0], seqIDs[1]) + require.Less(t, seqIDs[1], seqIDs[2]) + require.Less(t, seqIDs[2], seqIDs[4]) + require.Less(t, seqIDs[4], seqIDs[8]) + + // Table 2 order. 
+ require.Less(t, seqIDs[3], seqIDs[10]) + + // Table 3 order. + require.Less(t, seqIDs[6], seqIDs[7]) + require.Less(t, seqIDs[7], seqIDs[9]) + require.Less(t, seqIDs[9], seqIDs[10]) + + // General job order. + require.Less(t, seqIDs[1], seqIDs[3]) + require.Less(t, seqIDs[3], seqIDs[4]) + require.Less(t, seqIDs[4], seqIDs[6]) + require.Less(t, seqIDs[6], seqIDs[7]) + require.Less(t, seqIDs[7], seqIDs[9]) + + // Reorg job order. + require.Less(t, seqIDs[2], seqIDs[5]) + require.Less(t, seqIDs[5], seqIDs[8]) + require.Less(t, seqIDs[8], seqIDs[10]) } diff --git a/ddl/fail_test.go b/ddl/fail_test.go index 8463e5e2a2473..741fa739ca289 100644 --- a/ddl/fail_test.go +++ b/ddl/fail_test.go @@ -11,68 +11,57 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - -package ddl +// +package ddl_test import ( - "context" + "strconv" + "testing" "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/parser/ast" + "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/testkit" "github.com/stretchr/testify/require" ) -func (s *testColumnSuiteToVerify) TestFailBeforeDecodeArgs() { - d, err := testNewDDLAndStart( - context.Background(), - WithStore(s.store), - WithLease(testLease), - ) - require.NoError(s.T(), err) - defer func() { - err := d.Stop() - require.NoError(s.T(), err) - }() - // create table t_fail (c1 int, c2 int); - tblInfo, err := testTableInfo(d, "t_fail", 2) - require.NoError(s.T(), err) - ctx := testNewContext(d) - err = ctx.NewTxn(context.Background()) - require.NoError(s.T(), err) - testCreateTable(s.T(), ctx, d, s.dbInfo, tblInfo) - // insert t_fail values (1, 2); - originTable := testGetTable(s.T(), d, s.dbInfo.ID, tblInfo.ID) - row := types.MakeDatums(1, 2) - _, err = originTable.AddRecord(ctx, row) - require.NoError(s.T(), err) - txn, err := 
ctx.Txn(true) - require.NoError(s.T(), err) - err = txn.Commit(context.Background()) - require.NoError(s.T(), err) +func TestFailBeforeDecodeArgs(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomainWithSchemaLease(t, testLease) + defer clean() + tk := testkit.NewTestKit(t, store) + + tk.MustExec("use test") + tk.MustExec("create table t1 (c1 int, c2 int);") + tk.MustExec("insert t1 values (1, 2);") + + var tableID int64 + rs := tk.MustQuery("select TIDB_TABLE_ID from information_schema.tables where table_name='t1' and table_schema='test';") + tableIDi, _ := strconv.Atoi(rs.Rows()[0][0].(string)) + tableID = int64(tableIDi) + + d := dom.DDL() + tc := &ddl.TestDDLCallback{Do: dom} - tc := &TestDDLCallback{} first := true stateCnt := 0 - tc.onJobRunBefore = func(job *model.Job) { + tc.OnJobRunBeforeExported = func(job *model.Job) { // It can be other schema states except failed schema state. // This schema state can only appear once. if job.SchemaState == model.StateWriteOnly { stateCnt++ } else if job.SchemaState == model.StateWriteReorganization { if first { - require.NoError(s.T(), failpoint.Enable("github.com/pingcap/tidb/ddl/errorBeforeDecodeArgs", `return(true)`)) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/errorBeforeDecodeArgs", `return(true)`)) first = false } else { - require.NoError(s.T(), failpoint.Disable("github.com/pingcap/tidb/ddl/errorBeforeDecodeArgs")) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/errorBeforeDecodeArgs")) } } } d.SetHook(tc) defaultValue := int64(3) - job := testCreateColumn(s.T(), ctx, d, s.dbInfo, tblInfo, "c3", &ast.ColumnPosition{Tp: ast.ColumnPositionNone}, defaultValue) + jobID := testCreateColumn(tk, t, testNewContext(store), tableID, "c3", "", defaultValue, dom) // Make sure the schema state only appears once. 
- require.Equal(s.T(), 1, stateCnt) - testCheckJobDone(s.T(), d, job, true) + require.Equal(t, 1, stateCnt) + testCheckJobDone(t, store, jobID, true) } diff --git a/ddl/foreign_key.go b/ddl/foreign_key.go index d6991de8b18a2..f7b1ba5a1086b 100644 --- a/ddl/foreign_key.go +++ b/ddl/foreign_key.go @@ -24,7 +24,7 @@ import ( func onCreateForeignKey(t *meta.Meta, job *model.Job) (ver int64, _ error) { schemaID := job.SchemaID - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return ver, errors.Trace(err) } @@ -58,7 +58,7 @@ func onCreateForeignKey(t *meta.Meta, job *model.Job) (ver int64, _ error) { func onDropForeignKey(t *meta.Meta, job *model.Job) (ver int64, _ error) { schemaID := job.SchemaID - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return ver, errors.Trace(err) } diff --git a/ddl/foreign_key_test.go b/ddl/foreign_key_test.go index f7ea035de4a42..43f8b7d065429 100644 --- a/ddl/foreign_key_test.go +++ b/ddl/foreign_key_test.go @@ -19,8 +19,11 @@ import ( "strings" "sync" "testing" + "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" @@ -28,6 +31,8 @@ import ( "github.com/stretchr/testify/require" ) +const testLease = 5 * time.Millisecond + func testCreateForeignKey(t *testing.T, d *ddl, ctx sessionctx.Context, dbInfo *model.DBInfo, tblInfo *model.TableInfo, fkName string, keys []string, refTable string, refKeys []string, onDelete ast.ReferOptionType, onUpdate ast.ReferOptionType) *model.Job { FKName := model.NewCIStr(fkName) Keys := make([]model.CIStr, len(keys)) @@ -61,7 +66,7 @@ func testCreateForeignKey(t *testing.T, d *ddl, ctx sessionctx.Context, dbInfo * err := ctx.NewTxn(context.Background()) 
require.NoError(t, err) ctx.SetValue(sessionctx.QueryString, "skip") - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) require.NoError(t, err) return job } @@ -75,7 +80,7 @@ func testDropForeignKey(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *mo Args: []interface{}{model.NewCIStr(foreignKeyName)}, } ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) require.NoError(t, err) v := getSchemaVer(t, ctx) checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) @@ -219,3 +224,19 @@ func TestForeignKey(t *testing.T) { err = txn.Commit(context.Background()) require.NoError(t, err) } + +func testCheckJobDone(t *testing.T, d *ddl, job *model.Job, isAdd bool) { + require.NoError(t, kv.RunInNewTxn(context.Background(), d.store, false, func(ctx context.Context, txn kv.Transaction) error { + m := meta.NewMeta(txn) + historyJob, err := m.GetHistoryDDLJob(job.ID) + require.NoError(t, err) + checkHistoryJob(t, historyJob) + if isAdd { + require.Equal(t, historyJob.SchemaState, model.StatePublic) + } else { + require.Equal(t, historyJob.SchemaState, model.StateNone) + } + + return nil + })) +} diff --git a/ddl/index.go b/ddl/index.go index d2fb695eda780..376e4442ef89f 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -401,7 +401,7 @@ func (w *worker) onCreateIndex(d *ddlCtx, t *meta.Meta, job *model.Job, isPK boo // Handle normal job. 
schemaID := job.SchemaID - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return ver, errors.Trace(err) } @@ -704,7 +704,7 @@ func onDropIndex(t *meta.Meta, job *model.Job) (ver int64, _ error) { func checkDropIndex(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.IndexInfo, error) { schemaID := job.SchemaID - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return nil, nil, errors.Trace(err) } @@ -836,7 +836,7 @@ func onDropIndexes(t *meta.Meta, job *model.Job) (ver int64, _ error) { func getSchemaInfos(t *meta.Meta, job *model.Job) (*model.TableInfo, []model.CIStr, []bool, error) { schemaID := job.SchemaID - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return nil, nil, nil, errors.Trace(err) } @@ -949,7 +949,7 @@ func checkDropIndexOnAutoIncrementColumn(tblInfo *model.TableInfo, indexInfo *mo func checkRenameIndex(t *meta.Meta, job *model.Job) (*model.TableInfo, model.CIStr, model.CIStr, error) { var from, to model.CIStr schemaID := job.SchemaID - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return nil, from, to, errors.Trace(err) } @@ -978,7 +978,7 @@ func checkAlterIndexVisibility(t *meta.Meta, job *model.Job) (*model.TableInfo, ) schemaID := job.SchemaID - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return nil, indexName, invisible, errors.Trace(err) } diff --git a/ddl/partition.go b/ddl/partition.go index fc1887d9397ac..4634f021e2243 100644 --- a/ddl/partition.go +++ b/ddl/partition.go @@ -63,7 +63,7 @@ const ( func checkAddPartition(t *meta.Meta, job 
*model.Job) (*model.TableInfo, *model.PartitionInfo, []model.PartitionDefinition, error) { schemaID := job.SchemaID - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return nil, nil, nil, errors.Trace(err) } @@ -1052,7 +1052,7 @@ func (w *worker) onDropTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) ( job.State = model.JobStateCancelled return ver, errors.Trace(err) } - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return ver, errors.Trace(err) } @@ -1175,7 +1175,7 @@ func onTruncateTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (int64, e job.State = model.JobStateCancelled return ver, errors.Trace(err) } - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return ver, errors.Trace(err) } @@ -1314,7 +1314,7 @@ func (w *worker) onExchangeTablePartition(d *ddlCtx, t *meta.Meta, job *model.Jo return ver, errors.Trace(err) } - nt, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + nt, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return ver, errors.Trace(err) } diff --git a/ddl/partition_test.go b/ddl/partition_test.go index dd35c0d5f1cb9..4484dcb07dde8 100644 --- a/ddl/partition_test.go +++ b/ddl/partition_test.go @@ -121,7 +121,7 @@ func buildDropPartitionJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, partN func testDropPartition(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, partNames []string) *model.Job { job := buildDropPartitionJob(dbInfo, tblInfo, partNames) ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) require.NoError(t, err) v := getSchemaVer(t, ctx) checkHistoryJobArgs(t, ctx, job.ID, 
&historyJobArgs{ver: v, tbl: tblInfo}) @@ -141,7 +141,7 @@ func buildTruncatePartitionJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, p func testTruncatePartition(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, pids []int64) *model.Job { job := buildTruncatePartitionJob(dbInfo, tblInfo, pids) ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) require.NoError(t, err) v := getSchemaVer(t, ctx) checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) diff --git a/ddl/placement_policy_ddl_test.go b/ddl/placement_policy_ddl_test.go index 243427622942c..8f12a44a5bb90 100644 --- a/ddl/placement_policy_ddl_test.go +++ b/ddl/placement_policy_ddl_test.go @@ -45,7 +45,7 @@ func testCreatePlacementPolicy(t *testing.T, ctx sessionctx.Context, d *ddl, pol Args: []interface{}{policyInfo}, } ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) require.NoError(t, err) v := getSchemaVer(t, ctx) diff --git a/ddl/reorg_test.go b/ddl/reorg_test.go index 15fc4a9da09c1..a5355627ad0c8 100644 --- a/ddl/reorg_test.go +++ b/ddl/reorg_test.go @@ -283,3 +283,7 @@ func TestReorgOwner(t *testing.T) { }) require.NoError(t, err) } + +func testCheckOwner(t *testing.T, d *ddl, expectedVal bool) { + require.Equal(t, d.isOwner(), expectedVal) +} diff --git a/ddl/restart_test.go b/ddl/restart_test.go index c9290eab3ec30..cd1e330cd3fa7 100644 --- a/ddl/restart_test.go +++ b/ddl/restart_test.go @@ -72,11 +72,11 @@ func runInterruptedJob(d *ddl, job *model.Job, doneCh chan error) { ) ctx.SetValue(sessionctx.QueryString, "skip") - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) if errors.Is(err, context.Canceled) { endlessLoopTime := time.Now().Add(time.Minute) for history == nil { - // imitate doDDLJob's logic, quit only find history + // imitate DoDDLJob's logic, quit only find history history, _ = d.getHistoryDDLJob(job.ID) 
if history != nil { err = history.Error diff --git a/ddl/rollingback.go b/ddl/rollingback.go index 3eb67de2bb3b8..e99a7be9b90a4 100644 --- a/ddl/rollingback.go +++ b/ddl/rollingback.go @@ -81,7 +81,7 @@ func convertAddIdxJob2RollbackJob(t *meta.Meta, job *model.Job, tblInfo *model.T // to rollback add index operations. job.SnapshotVer == 0 indicates the workers are not started. func convertNotStartAddIdxJob2RollbackJob(t *meta.Meta, job *model.Job, occuredErr error) (ver int64, err error) { schemaID := job.SchemaID - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return ver, errors.Trace(err) } @@ -383,7 +383,7 @@ func rollingbackDropTableOrView(t *meta.Meta, job *model.Job) error { } func rollingbackDropTablePartition(t *meta.Meta, job *model.Job) (ver int64, err error) { - _, err = getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + _, err = GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return ver, errors.Trace(err) } @@ -433,7 +433,7 @@ func cancelOnlyNotHandledJob(job *model.Job) (ver int64, err error) { } func rollingbackTruncateTable(t *meta.Meta, job *model.Job) (ver int64, err error) { - _, err = getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + _, err = GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return ver, errors.Trace(err) } diff --git a/ddl/schema_test.go b/ddl/schema_test.go index fab0511b965b6..0b99510974589 100644 --- a/ddl/schema_test.go +++ b/ddl/schema_test.go @@ -50,7 +50,7 @@ func testCreateSchema(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *mode Args: []interface{}{dbInfo}, } ctx.SetValue(sessionctx.QueryString, "skip") - require.NoError(t, d.doDDLJob(ctx, job)) + require.NoError(t, d.DoDDLJob(ctx, job)) v := getSchemaVer(t, ctx) dbInfo.State = model.StatePublic @@ -70,7 +70,7 @@ func buildDropSchemaJob(dbInfo *model.DBInfo) *model.Job { func testDropSchema(t *testing.T, ctx 
sessionctx.Context, d *ddl, dbInfo *model.DBInfo) (*model.Job, int64) { job := buildDropSchemaJob(dbInfo) ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) require.NoError(t, err) ver := getSchemaVer(t, ctx) return job, ver @@ -179,7 +179,7 @@ func ExportTestSchema(t *testing.T) { BinlogInfo: &model.HistoryInfo{}, } ctx.SetValue(sessionctx.QueryString, "skip") - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) require.True(t, terror.ErrorEqual(err, infoschema.ErrDatabaseDropExists), "err %v", err) // Drop a database without a table. @@ -258,3 +258,31 @@ func testGetSchemaInfoWithError(d *ddl, schemaID int64) (*model.DBInfo, error) { } return dbInfo, nil } + +func doDDLJobErr(t *testing.T, schemaID, tableID int64, tp model.ActionType, args []interface{}, ctx sessionctx.Context, d *ddl) *model.Job { + job := &model.Job{ + SchemaID: schemaID, + TableID: tableID, + Type: tp, + Args: args, + BinlogInfo: &model.HistoryInfo{}, + } + // TODO: check error detail + require.Error(t, d.DoDDLJob(ctx, job)) + testCheckJobCancelled(t, d.store, job, nil) + + return job +} + +func testCheckJobCancelled(t *testing.T, store kv.Storage, job *model.Job, state *model.SchemaState) { + require.NoError(t, kv.RunInNewTxn(context.Background(), store, false, func(ctx context.Context, txn kv.Transaction) error { + m := meta.NewMeta(txn) + historyJob, err := m.GetHistoryDDLJob(job.ID) + require.NoError(t, err) + require.True(t, historyJob.IsCancelled() || historyJob.IsRollbackDone(), "history job %s", historyJob) + if state != nil { + require.Equal(t, historyJob.SchemaState, *state) + } + return nil + })) +} diff --git a/ddl/stat_test.go b/ddl/stat_test.go index f4b95e6b7ac86..d721cbfa7b39f 100644 --- a/ddl/stat_test.go +++ b/ddl/stat_test.go @@ -71,7 +71,7 @@ func TestDDLStatsInfo(t *testing.T) { done := make(chan error, 1) go func() { ctx.SetValue(sessionctx.QueryString, "skip") - done <- d.doDDLJob(ctx, job) + done <- 
d.DoDDLJob(ctx, job) }() exit := false diff --git a/ddl/table.go b/ddl/table.go index 67153844bcef6..6e843e22e455e 100644 --- a/ddl/table.go +++ b/ddl/table.go @@ -558,7 +558,8 @@ func getTable(store kv.Storage, schemaID int64, tblInfo *model.TableInfo) (table return tbl, errors.Trace(err) } -func getTableInfoAndCancelFaultJob(t *meta.Meta, job *model.Job, schemaID int64) (*model.TableInfo, error) { +// GetTableInfoAndCancelFaultJob is exported for test. +func GetTableInfoAndCancelFaultJob(t *meta.Meta, job *model.Job, schemaID int64) (*model.TableInfo, error) { tblInfo, err := checkTableExistAndCancelNonExistJob(t, job, schemaID) if err != nil { return nil, errors.Trace(err) @@ -617,7 +618,7 @@ func onTruncateTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ erro job.State = model.JobStateCancelled return ver, errors.Trace(err) } - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return ver, errors.Trace(err) } @@ -757,7 +758,7 @@ func onRebaseAutoID(store kv.Storage, t *meta.Meta, job *model.Job, tp autoid.Al job.State = model.JobStateCancelled return ver, errors.Trace(err) } - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) @@ -802,7 +803,7 @@ func onModifyTableAutoIDCache(t *meta.Meta, job *model.Job) (int64, error) { return 0, errors.Trace(err) } - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return 0, errors.Trace(err) } @@ -823,7 +824,7 @@ func (w *worker) onShardRowID(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int6 job.State = model.JobStateCancelled return ver, errors.Trace(err) } - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tblInfo, err := 
GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) @@ -934,7 +935,7 @@ func onRenameTables(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error } func checkAndRenameTables(t *meta.Meta, job *model.Job, oldSchemaID, newSchemaID int64, oldSchemaName, tableName *model.CIStr) (ver int64, tblInfo *model.TableInfo, _ error) { - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, oldSchemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, oldSchemaID) if err != nil { return ver, tblInfo, errors.Trace(err) } @@ -996,7 +997,7 @@ func onModifyTableComment(t *meta.Meta, job *model.Job) (ver int64, _ error) { return ver, errors.Trace(err) } - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return ver, errors.Trace(err) } @@ -1023,7 +1024,7 @@ func onModifyTableCharsetAndCollate(t *meta.Meta, job *model.Job) (ver int64, _ return ver, errors.Trace(err) } - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return ver, errors.Trace(err) } @@ -1066,7 +1067,7 @@ func (w *worker) onSetTableFlashReplica(t *meta.Meta, job *model.Job) (ver int64 return ver, errors.Trace(err) } - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return ver, errors.Trace(err) } @@ -1141,7 +1142,7 @@ func onUpdateFlashReplicaStatus(t *meta.Meta, job *model.Job) (ver int64, _ erro return ver, errors.Trace(err) } - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return ver, errors.Trace(err) } @@ -1301,7 +1302,7 @@ func onRepairTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) 
tblInfo.State = model.StateNone // Check the old DB and old table exist. - _, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + _, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { return ver, errors.Trace(err) } @@ -1339,7 +1340,7 @@ func onAlterTableAttributes(t *meta.Meta, job *model.Job) (ver int64, err error) return 0, errors.Trace(err) } - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return 0, err } @@ -1371,7 +1372,7 @@ func onAlterTablePartitionAttributes(t *meta.Meta, job *model.Job) (ver int64, e job.State = model.JobStateCancelled return 0, errors.Trace(err) } - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return 0, err } @@ -1409,7 +1410,7 @@ func onAlterTablePartitionPlacement(t *meta.Meta, job *model.Job) (ver int64, er job.State = model.JobStateCancelled return 0, errors.Trace(err) } - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return 0, err } @@ -1479,7 +1480,7 @@ func onAlterTablePlacement(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, return 0, errors.Trace(err) } - tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return 0, err } @@ -1567,7 +1568,7 @@ func updateLabelRules(job *model.Job, tblInfo *model.TableInfo, oldRules map[str } func onAlterCacheTable(t *meta.Meta, job *model.Job) (ver int64, err error) { - tbInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tbInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return 0, errors.Trace(err) } @@ -1610,7 +1611,7 @@ func onAlterCacheTable(t *meta.Meta, job *model.Job) (ver int64, err error) { 
} func onAlterNoCacheTable(t *meta.Meta, job *model.Job) (ver int64, err error) { - tbInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tbInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return 0, errors.Trace(err) } diff --git a/ddl/table_lock.go b/ddl/table_lock.go index 8671ddf4e846b..ec4d00b6dbb55 100644 --- a/ddl/table_lock.go +++ b/ddl/table_lock.go @@ -40,7 +40,7 @@ func onLockTables(t *meta.Meta, job *model.Job) (ver int64, err error) { for i, tl := range arg.LockTables { job.SchemaID = tl.SchemaID job.TableID = tl.TableID - tbInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tbInfo, err := GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return ver, err } @@ -60,7 +60,7 @@ func onLockTables(t *meta.Meta, job *model.Job) (ver int64, err error) { job.SchemaID = arg.LockTables[arg.IndexOfLock].SchemaID job.TableID = arg.LockTables[arg.IndexOfLock].TableID var tbInfo *model.TableInfo - tbInfo, err = getTableInfoAndCancelFaultJob(t, job, job.SchemaID) + tbInfo, err = GetTableInfoAndCancelFaultJob(t, job, job.SchemaID) if err != nil { return ver, err } diff --git a/ddl/table_test.go b/ddl/table_test.go index 92d4286432d7e..b9f3dc4dbe948 100644 --- a/ddl/table_test.go +++ b/ddl/table_test.go @@ -47,7 +47,7 @@ func testRenameTable( Args: []interface{}{oldSchemaID, tblInfo.Name, oldSchemaName}, } ctx.SetValue(sessionctx.QueryString, "skip") - require.NoError(t, d.doDDLJob(ctx, job)) + require.NoError(t, d.DoDDLJob(ctx, job)) v := getSchemaVer(t, ctx) tblInfo.State = model.StatePublic @@ -67,7 +67,7 @@ func testRenameTables( Args: []interface{}{oldSchemaIDs, newSchemaIDs, newTableNames, oldTableIDs, oldSchemaNames}, } ctx.SetValue(sessionctx.QueryString, "skip") - require.NoError(t, d.doDDLJob(ctx, job)) + require.NoError(t, d.DoDDLJob(ctx, job)) v := getSchemaVer(t, ctx) checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: nil}) @@ -90,7 +90,7 @@ func testLockTable(t 
*testing.T, ctx sessionctx.Context, d *ddl, newSchemaID int Args: []interface{}{arg}, } ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) require.NoError(t, err) v := getSchemaVer(t, ctx) @@ -129,7 +129,7 @@ func testTruncateTable(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *mod Args: []interface{}{newTableID}, } ctx.SetValue(sessionctx.QueryString, "skip") - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) require.NoError(t, err) v := getSchemaVer(t, ctx) @@ -274,7 +274,7 @@ func testAlterCacheTable(t *testing.T, ctx sessionctx.Context, d *ddl, newSchema Args: []interface{}{}, } ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) require.NoError(t, err) v := getSchemaVer(t, ctx) @@ -291,7 +291,7 @@ func testAlterNoCacheTable(t *testing.T, ctx sessionctx.Context, d *ddl, newSche Args: []interface{}{}, } ctx.SetValue(sessionctx.QueryString, "skip") - require.NoError(t, d.doDDLJob(ctx, job)) + require.NoError(t, d.DoDDLJob(ctx, job)) v := getSchemaVer(t, ctx) checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v}) @@ -398,7 +398,7 @@ func TestCreateTables(t *testing.T) { Args: []interface{}{infos}, } ctx.SetValue(sessionctx.QueryString, "skip") - err = d.doDDLJob(ctx, job) + err = d.DoDDLJob(ctx, job) require.NoError(t, err) testGetTable(t, d, dbInfo.ID, genIDs[0]) diff --git a/ddl/util_test.go b/ddl/util_test.go index a94b37a1215a1..37b0fa638c325 100644 --- a/ddl/util_test.go +++ b/ddl/util_test.go @@ -15,14 +15,12 @@ package ddl import ( - "bytes" "context" "fmt" "testing" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" - "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx" @@ -124,43 +122,6 @@ func testAddedNewTablePartitionInfo(t *testing.T, d *ddl, tblInfo *model.TableIn } } -// testViewInfo creates a test view with num int 
columns. -func testViewInfo(t *testing.T, d *ddl, name string, num int) *model.TableInfo { - tblInfo := &model.TableInfo{ - Name: model.NewCIStr(name), - } - genIDs, err := d.genGlobalIDs(1) - require.NoError(t, err) - tblInfo.ID = genIDs[0] - - cols := make([]*model.ColumnInfo, num) - viewCols := make([]model.CIStr, num) - - var stmtBuffer bytes.Buffer - stmtBuffer.WriteString("SELECT ") - for i := range cols { - col := &model.ColumnInfo{ - Name: model.NewCIStr(fmt.Sprintf("c%d", i+1)), - Offset: i, - State: model.StatePublic, - } - - col.ID = allocateColumnID(tblInfo) - cols[i] = col - viewCols[i] = col.Name - stmtBuffer.WriteString(cols[i].Name.L + ",") - } - stmtBuffer.WriteString("1 FROM t") - - view := model.ViewInfo{Cols: viewCols, Security: model.SecurityDefiner, Algorithm: model.AlgorithmMerge, - SelectStmt: stmtBuffer.String(), CheckOption: model.CheckOptionCascaded, Definer: &auth.UserIdentity{CurrentUser: true}} - - tblInfo.View = &view - tblInfo.Columns = cols - - return tblInfo -} - func testCreateTable(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo) *model.Job { job := &model.Job{ SchemaID: dbInfo.ID, @@ -170,7 +131,7 @@ func testCreateTable(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model Args: []interface{}{tblInfo}, } ctx.SetValue(sessionctx.QueryString, "skip") - err := d.doDDLJob(ctx, job) + err := d.DoDDLJob(ctx, job) require.NoError(t, err) v := getSchemaVer(t, ctx) @@ -180,26 +141,6 @@ func testCreateTable(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model return job } -func testCreateView(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo) *model.Job { - job := &model.Job{ - SchemaID: dbInfo.ID, - TableID: tblInfo.ID, - Type: model.ActionCreateView, - BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{tblInfo, false, 0}, - } - - require.True(t, tblInfo.IsView()) - ctx.SetValue(sessionctx.QueryString, "skip") - require.NoError(t, 
d.doDDLJob(ctx, job)) - - v := getSchemaVer(t, ctx) - tblInfo.State = model.StatePublic - checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo}) - tblInfo.State = model.StateNone - return job -} - func testDropTable(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo) *model.Job { job := &model.Job{ SchemaID: dbInfo.ID, @@ -208,7 +149,7 @@ func testDropTable(t *testing.T, ctx sessionctx.Context, d *ddl, dbInfo *model.D BinlogInfo: &model.HistoryInfo{}, } ctx.SetValue(sessionctx.QueryString, "skip") - require.NoError(t, d.doDDLJob(ctx, job)) + require.NoError(t, d.DoDDLJob(ctx, job)) v := getSchemaVer(t, ctx) checkHistoryJobArgs(t, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})