diff --git a/pkg/ddl/executor.go b/pkg/ddl/executor.go index 5169d5fcf49bc..3fa5c1c147590 100644 --- a/pkg/ddl/executor.go +++ b/pkg/ddl/executor.go @@ -2282,9 +2282,8 @@ func (e *executor) AddTablePartitions(ctx sessionctx.Context, ident ast.Ident, s } if pi.Type == pmodel.PartitionTypeList { // TODO: make sure that checks in ddl_api and ddl_worker is the same. - err = checkAddListPartitions(meta) - if err != nil { - return errors.Trace(err) + if meta.Partition.GetDefaultListPartition() != -1 { + return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("ADD List partition, already contains DEFAULT partition. Please use REORGANIZE PARTITION instead") } } diff --git a/pkg/ddl/partition.go b/pkg/ddl/partition.go index a7cc4cf569b75..edd1a2e7d5f6f 100644 --- a/pkg/ddl/partition.go +++ b/pkg/ddl/partition.go @@ -97,7 +97,7 @@ func checkAddPartition(t *meta.Mutator, job *model.Job) (*model.TableInfo, *mode func (w *worker) onAddTablePartition(jobCtx *jobContext, job *model.Job) (ver int64, _ error) { // Handle the rolling back job if job.IsRollingback() { - ver, err := w.onDropTablePartition(jobCtx, job) + ver, err := w.rollbackLikeDropPartition(jobCtx, job) if err != nil { return ver, errors.Trace(err) } @@ -344,20 +344,6 @@ func rollbackAddingPartitionInfo(tblInfo *model.TableInfo) ([]int64, []string, [ return physicalTableIDs, partNames, rollbackBundles } -// Check if current table already contains DEFAULT list partition -func checkAddListPartitions(tblInfo *model.TableInfo) error { - for i := range tblInfo.Partition.Definitions { - for j := range tblInfo.Partition.Definitions[i].InValues { - for _, val := range tblInfo.Partition.Definitions[i].InValues[j] { - if val == "DEFAULT" { // should already be normalized - return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("ADD List partition, already contains DEFAULT partition. Please use REORGANIZE PARTITION instead") - } - } - } - } - return nil -} - // checkAddPartitionValue check add Partition Values, // For Range: values less than value must be strictly increasing for each partition. // For List: if a Default partition exists, @@ -398,9 +384,8 @@ func checkAddPartitionValue(meta *model.TableInfo, part *model.PartitionInfo) er } } case pmodel.PartitionTypeList: - err := checkAddListPartitions(meta) - if err != nil { - return err + if meta.Partition.GetDefaultListPartition() != -1 { + return dbterror.ErrGeneralUnsupportedDDL.GenWithStackByArgs("ADD List partition, already contains DEFAULT partition. Please use REORGANIZE PARTITION instead") } } return nil @@ -2144,75 +2129,118 @@ func dropLabelRules(ctx context.Context, schemaName, tableName string, partNames return infosync.UpdateLabelRules(ctx, patch) } -// onDropTablePartition deletes old partition meta. -func (w *worker) onDropTablePartition(jobCtx *jobContext, job *model.Job) (ver int64, _ error) { +// rollbackLikeDropPartition does rollback for Reorganize partition and Add partition. +// It will drop newly created partitions that has not yet been used, including cleaning +// up label rules and bundles as well as changed indexes due to global flag. 
+func (w *worker) rollbackLikeDropPartition(jobCtx *jobContext, job *model.Job) (ver int64, _ error) { args, err := model.GetTablePartitionArgs(job) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } - partNames, partInfo := args.PartNames, args.PartInfo + partInfo := args.PartInfo metaMut := jobCtx.metaMut tblInfo, err := GetTableInfoAndCancelFaultJob(metaMut, job, job.SchemaID) if err != nil { return ver, errors.Trace(err) } - if job.Type != model.ActionDropTablePartition { - // If rollback from reorganize partition, remove DroppingDefinitions from tableInfo - tblInfo.Partition.DroppingDefinitions = nil - // If rollback from adding table partition, remove addingDefinitions from tableInfo. - physicalTableIDs, pNames, rollbackBundles := rollbackAddingPartitionInfo(tblInfo) - err = infosync.PutRuleBundlesWithDefaultRetry(context.TODO(), rollbackBundles) - if err != nil { - job.State = model.JobStateCancelled - return ver, errors.Wrapf(err, "failed to notify PD the placement rules") - } - // TODO: Will this drop LabelRules for existing partitions, if the new partitions have the same name? - err = dropLabelRules(w.ctx, job.SchemaName, tblInfo.Name.L, pNames) - if err != nil { - job.State = model.JobStateCancelled - return ver, errors.Wrapf(err, "failed to notify PD the label rules") - } + tblInfo.Partition.DroppingDefinitions = nil + physicalTableIDs, pNames, rollbackBundles := rollbackAddingPartitionInfo(tblInfo) + err = infosync.PutRuleBundlesWithDefaultRetry(context.TODO(), rollbackBundles) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Wrapf(err, "failed to notify PD the placement rules") + } + // TODO: Will this drop LabelRules for existing partitions, if the new partitions have the same name? + err = dropLabelRules(w.ctx, job.SchemaName, tblInfo.Name.L, pNames) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Wrapf(err, "failed to notify PD the label rules") + } - if _, err := alterTableLabelRule(job.SchemaName, tblInfo, getIDs([]*model.TableInfo{tblInfo})); err != nil { - job.State = model.JobStateCancelled - return ver, err - } - if job.Type == model.ActionAlterTablePartitioning { - // ALTER TABLE t PARTITION BY ... creates an additional - // Table ID - // Note, for REMOVE PARTITIONING, it is the same - // as for the single partition, to be changed to table. - physicalTableIDs = append(physicalTableIDs, partInfo.NewTableID) + if _, err := alterTableLabelRule(job.SchemaName, tblInfo, getIDs([]*model.TableInfo{tblInfo})); err != nil { + job.State = model.JobStateCancelled + return ver, err + } + if partInfo.Type != pmodel.PartitionTypeNone { + // ALTER TABLE ... 
PARTITION BY + // Also remove anything with the new table id + physicalTableIDs = append(physicalTableIDs, partInfo.NewTableID) + // Reset if it was normal table before + if tblInfo.Partition.Type == pmodel.PartitionTypeNone || + tblInfo.Partition.DDLType == pmodel.PartitionTypeNone { + tblInfo.Partition = nil } + } - var dropIndices []*model.IndexInfo - for _, indexInfo := range tblInfo.Indices { - if indexInfo.Unique && - indexInfo.State == model.StateDeleteReorganization && - tblInfo.Partition.DDLState == model.StateDeleteReorganization { - dropIndices = append(dropIndices, indexInfo) - } - } - for _, indexInfo := range dropIndices { - DropIndexColumnFlag(tblInfo, indexInfo) - RemoveDependentHiddenColumns(tblInfo, indexInfo) - removeIndexInfo(tblInfo, indexInfo) + var dropIndices []*model.IndexInfo + for _, indexInfo := range tblInfo.Indices { + if indexInfo.Unique && + indexInfo.State == model.StateDeleteReorganization && + tblInfo.Partition.DDLState == model.StateDeleteReorganization { + dropIndices = append(dropIndices, indexInfo) } + } + for _, indexInfo := range dropIndices { + DropIndexColumnFlag(tblInfo, indexInfo) + RemoveDependentHiddenColumns(tblInfo, indexInfo) + removeIndexInfo(tblInfo, indexInfo) + } + if tblInfo.Partition != nil { + tblInfo.Partition.ClearReorgIntermediateInfo() + } - if tblInfo.Partition.Type == pmodel.PartitionTypeNone { - tblInfo.Partition = nil - } else { - tblInfo.Partition.ClearReorgIntermediateInfo() - } - ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true) - if err != nil { - return ver, errors.Trace(err) - } - job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo) - args.OldPhysicalTblIDs = physicalTableIDs - job.FillFinishedArgs(args) - return ver, nil + ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true) + if err != nil { + return ver, errors.Trace(err) + } + job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo) + args.OldPhysicalTblIDs = physicalTableIDs + job.FillFinishedArgs(args) + return ver, nil +} + +// onDropTablePartition deletes old partition meta. +// States: +// StateNone +// +// Old partitions are queued to be deleted (delete_range), global index up-to-date +// +// StateDeleteReorganization +// +// Old partitions are not accessible/used by any sessions. +// Inserts/updates of global index which still have entries pointing to old partitions +// will overwrite those entries +// In the background we are reading all old partitions and deleting their entries from +// the global indexes. +// +// StateDeleteOnly +// +// old partitions are no longer visible, but if there is inserts/updates to the global indexes, +// duplicate key errors will be given, even if the entries are from dropped partitions +// Note that overlapping ranges (i.e. a dropped partitions with 'less than (N)' will now .. ?!? +// +// StateWriteOnly +// +// old partitions are blocked for read and write. But for read we are allowing +// "overlapping" partition to be read instead. Which means that write can only +// happen in the 'overlapping' partitions original range, not into the extended +// range open by the dropped partitions. 
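+// Example (illustrative): with p0 VALUES LESS THAN (100) and p1 VALUES LESS THAN (200),
+// dropping p0 means a row with a = 9 now belongs to p1, so reads of p0 are redirected
+// to the overlapping partition p1, while writes into that extended range are rejected.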
+// +// StatePublic +// +// Original state, unaware of DDL +func (w *worker) onDropTablePartition(jobCtx *jobContext, job *model.Job) (ver int64, _ error) { + args, err := model.GetTablePartitionArgs(job) + if err != nil { + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + partNames := args.PartNames + metaMut := jobCtx.metaMut + tblInfo, err := GetTableInfoAndCancelFaultJob(metaMut, job, job.SchemaID) + if err != nil { + return ver, errors.Trace(err) } var physicalTableIDs []int64 @@ -2221,15 +2249,30 @@ func (w *worker) onDropTablePartition(jobCtx *jobContext, job *model.Job) (ver i originalState := job.SchemaState switch job.SchemaState { case model.StatePublic: - // If an error occurs, it returns that it cannot delete all partitions or that the partition doesn't exist. + // Here we mark the partitions to be dropped, so they are not read or written err = CheckDropTablePartition(tblInfo, partNames) if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } + // Reason, see https://github.com/pingcap/tidb/issues/55888 + // Only mark the partitions as to be dropped, so they are not used, but not yet removed. + originalDefs := tblInfo.Partition.Definitions + physicalTableIDs = updateDroppingPartitionInfo(tblInfo, partNames) + tblInfo.Partition.Definitions = originalDefs + tblInfo.Partition.DDLState = model.StateWriteOnly + tblInfo.Partition.DDLAction = model.ActionDropTablePartition + + job.SchemaState = model.StateWriteOnly + ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, originalState != job.SchemaState) + case model.StateWriteOnly: + // Since the previous state do not use the dropping partitions, + // we can now actually remove them, allowing to write into the overlapping range + // of the higher range partition or LIST default partition. physicalTableIDs = updateDroppingPartitionInfo(tblInfo, partNames) err = dropLabelRules(w.ctx, job.SchemaName, tblInfo.Name.L, partNames) if err != nil { + // TODO: Add failpoint error/cancel injection and test failure/rollback and cancellation! job.State = model.JobStateCancelled return ver, errors.Wrapf(err, "failed to notify PD the label rules") } @@ -2265,12 +2308,14 @@ func (w *worker) onDropTablePartition(jobCtx *jobContext, job *model.Job) (ver i return ver, err } + tblInfo.Partition.DDLState = model.StateDeleteOnly job.SchemaState = model.StateDeleteOnly ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, originalState != job.SchemaState) case model.StateDeleteOnly: - // This state is not a real 'DeleteOnly' state, because tidb does not maintaining the state check in partitionDefinition. + // This state is not a real 'DeleteOnly' state, because tidb does not maintain the state check in partitionDefinition. // Insert this state to confirm all servers can not see the old partitions when reorg is running, // so that no new data will be inserted into old partitions when reorganizing. 
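+		// The dropped partitions' data itself is removed later via delete_range,
+		// once the job reaches StateNone and finishes.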
+ tblInfo.Partition.DDLState = model.StateDeleteReorganization job.SchemaState = model.StateDeleteReorganization ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, originalState != job.SchemaState) case model.StateDeleteReorganization: @@ -2330,6 +2375,8 @@ func (w *worker) onDropTablePartition(jobCtx *jobContext, job *model.Job) (ver i } droppedDefs := tblInfo.Partition.DroppingDefinitions tblInfo.Partition.DroppingDefinitions = nil + tblInfo.Partition.DDLState = model.StateNone + tblInfo.Partition.DDLAction = model.ActionNone // used by ApplyDiff in updateSchemaVersion job.CtxVars = []any{physicalTableIDs} // TODO remove it. ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true) @@ -2464,6 +2511,7 @@ func (w *worker) onTruncateTablePartition(jobCtx *jobContext, job *model.Job) (i pi.DroppingDefinitions = truncatingDefinitions pi.NewPartitionIDs = newIDs[:] + tblInfo.Partition.DDLAction = model.ActionTruncateTablePartition job.SchemaState = model.StateDeleteOnly ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true) case model.StateDeleteOnly: @@ -3072,7 +3120,7 @@ func getReorgPartitionInfo(t *meta.Mutator, job *model.Job) (*model.TableInfo, [ func (w *worker) onReorganizePartition(jobCtx *jobContext, job *model.Job) (ver int64, _ error) { // Handle the rolling back job if job.IsRollingback() { - ver, err := w.onDropTablePartition(jobCtx, job) + ver, err := w.rollbackLikeDropPartition(jobCtx, job) if err != nil { return ver, errors.Trace(err) } @@ -3277,6 +3325,7 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, job *model.Job) (ver metrics.GetBackfillProgressByLabel(metrics.LblReorgPartition, job.SchemaName, tblInfo.Name.String()).Set(0.1 / float64(math.MaxUint64)) job.SchemaState = model.StateDeleteOnly tblInfo.Partition.DDLState = model.StateDeleteOnly + tblInfo.Partition.DDLAction = job.Type ver, err = updateVersionAndTableInfoWithCheck(jobCtx, job, tblInfo, true) if err != nil { return ver, errors.Trace(err) diff --git a/pkg/ddl/rollingback.go b/pkg/ddl/rollingback.go index 5be3db4915380..b83b09abfd78a 100644 --- a/pkg/ddl/rollingback.go +++ b/pkg/ddl/rollingback.go @@ -360,17 +360,12 @@ func convertAddTablePartitionJob2RollbackJob(jobCtx *jobContext, job *model.Job, } args.PartNames = partNames model.FillRollbackArgsForAddPartition(job, args) - /* - _, err = job.Encode(true) - if err != nil { - return ver, errors.Trace(err) - } - - */ ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true) if err != nil { return ver, errors.Trace(err) } + tblInfo.Partition.DDLState = model.StateNone + tblInfo.Partition.DDLAction = model.ActionNone job.State = model.JobStateRollingback return ver, errors.Trace(otherwiseErr) } @@ -427,7 +422,7 @@ func convertReorgPartitionJob2RollbackJob(jobCtx *jobContext, job *model.Job, ot } // We cannot drop the index here, we need to wait until // the next schema version - // i.e. rollback in onDropTablePartition + // i.e. 
rollback in rollbackLikeDropPartition // New index that became public in this state, // mark it to be dropped in next schema version if indexInfo.Global { @@ -508,13 +503,6 @@ func convertReorgPartitionJob2RollbackJob(jobCtx *jobContext, job *model.Job, ot } args.PartNames = partNames job.FillArgs(args) - /* - _, err = job.Encode(true) - if err != nil { - return ver, errors.Trace(err) - } - - */ ver, err = updateVersionAndTableInfo(jobCtx, job, tblInfo, true) if err != nil { return ver, errors.Trace(err) diff --git a/pkg/ddl/tests/partition/BUILD.bazel b/pkg/ddl/tests/partition/BUILD.bazel index e4df09b2b22f0..6af9cc33914c7 100644 --- a/pkg/ddl/tests/partition/BUILD.bazel +++ b/pkg/ddl/tests/partition/BUILD.bazel @@ -6,6 +6,7 @@ go_test( srcs = [ "db_partition_test.go", "main_test.go", + "multi_domain_test.go", "placement_test.go", "reorg_partition_test.go", ], @@ -39,6 +40,7 @@ go_test( "//pkg/types", "//pkg/util/codec", "//pkg/util/dbterror", + "//pkg/util/logutil", "//pkg/util/mathutil", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", diff --git a/pkg/ddl/tests/partition/multi_domain_test.go b/pkg/ddl/tests/partition/multi_domain_test.go new file mode 100644 index 0000000000000..1c567c9a4afaf --- /dev/null +++ b/pkg/ddl/tests/partition/multi_domain_test.go @@ -0,0 +1,342 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package partition + +import ( + "testing" + "time" + + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/session" + "github.com/pingcap/tidb/pkg/testkit" + "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +func TestMultiSchemaDropRangePartition(t *testing.T) { + createSQL := `create table t (a int primary key, b varchar(255)) partition by range (a) (partition p0 values less than (100), partition p1 values less than (200))` + initFn := func(tkO *testkit.TestKit) { + tkO.MustExec(`insert into t values (1,1),(2,2),(101,101),(102,102)`) + } + alterSQL := `alter table t drop partition p0` + loopFn := func(tkO, tkNO *testkit.TestKit) { + res := tkO.MustQuery(`select schema_state from information_schema.DDL_JOBS where table_name = 't' order by job_id desc limit 1`) + schemaState := res.Rows()[0][0].(string) + // TODO: Test both static and dynamic partition pruning! 
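+		// tkO runs against the DDL owner's domain (already on the new schema version),
+		// while tkNO runs against the non-owner domain (still one schema version behind).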
+ switch schemaState { + case "write only": + // tkNO are unaware of the DDL + // tkO see non-readable/non-writable p0 partition, and should try to read from p1 + // in case there is something written to overlapping p1 + tkO.MustContainErrMsg(`insert into t values (1,1)`, "[table:1526]Table has no partition for value matching a partition being dropped, 'p0'") + tkNO.MustContainErrMsg(`insert into t values (1,1)`, "[kv:1062]Duplicate entry '1' for key 't.PRIMARY'") + tkO.MustContainErrMsg(`insert into t values (101,101)`, "[kv:1062]Duplicate entry '101' for key 't.PRIMARY'") + tkNO.MustContainErrMsg(`insert into t values (101,101)`, "[kv:1062]Duplicate entry '101' for key 't.PRIMARY'") + tkNO.MustQuery(`select * from t`).Sort().Check(testkit.Rows("1 1", "101 101", "102 102", "2 2")) + tkO.MustQuery(`select * from t`).Sort().Check(testkit.Rows("101 101", "102 102")) + case "delete only": + // tkNO see non-readable/non-writable p0 partition, and should try to read from p1 + // in case there is something written to overlapping p1 + // tkO is not aware of p0. + tkO.MustExec(`insert into t values (1,2)`) + tkNO.MustContainErrMsg(`insert into t values (1,2)`, "[table:1526]Table has no partition for value matching a partition being dropped, 'p0'") + tkO.MustContainErrMsg(`insert into t values (101,101)`, "[kv:1062]Duplicate entry '101' for key 't.PRIMARY'") + tkNO.MustContainErrMsg(`insert into t values (101,101)`, "[kv:1062]Duplicate entry '101' for key 't.PRIMARY'") + tkNO.MustQuery(`select * from t`).Sort().Check(testkit.Rows("1 2", "101 101", "102 102")) + // Original row should not be seen in StateWriteOnly + tkNO.MustQuery(`select * from t partition (p0)`).Sort().Check(testkit.Rows()) + tkNO.MustContainErrMsg(`select * from t partition (pNonExisting)`, "[table:1735]Unknown partition 'pnonexisting' in table 't'") + tkNO.MustQuery(`select * from t partition (p1)`).Sort().Check(testkit.Rows("1 2", "101 101", "102 102")) + tkNO.MustQuery(`select * from t where a < 1000`).Sort().Check(testkit.Rows("1 2", "101 101", "102 102")) + tkNO.MustQuery(`select * from t where a > 0`).Sort().Check(testkit.Rows("1 2", "101 101", "102 102")) + tkNO.MustQuery(`select * from t where a = 1`).Sort().Check(testkit.Rows("1 2")) + tkNO.MustQuery(`select * from t where a = 1 or a = 2 or a = 3`).Sort().Check(testkit.Rows("1 2")) + tkNO.MustQuery(`select * from t where a in (1,2,3)`).Sort().Check(testkit.Rows("1 2")) + tkNO.MustQuery(`select * from t where a < 100`).Sort().Check(testkit.Rows("1 2")) + + tkNO.MustQuery(`select * from t where b = 2`).Sort().Check(testkit.Rows("1 2")) + // TODO: Test update and delete! 
+ // TODO: test key, hash and list partition without default partition :) + tkNO.MustQuery(`show create table t`).Check(testkit.Rows("" + + "t CREATE TABLE `t` (\n" + + " `a` int(11) NOT NULL,\n" + + " `b` varchar(255) DEFAULT NULL,\n" + + " PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" + + "PARTITION BY RANGE (`a`)\n" + + "(PARTITION `p0` VALUES LESS THAN (100),\n" + + " PARTITION `p1` VALUES LESS THAN (200))")) + tkO.MustQuery(`show create table t`).Check(testkit.Rows("" + + "t CREATE TABLE `t` (\n" + + " `a` int(11) NOT NULL,\n" + + " `b` varchar(255) DEFAULT NULL,\n" + + " PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" + + "PARTITION BY RANGE (`a`)\n" + + "(PARTITION `p1` VALUES LESS THAN (200))")) + case "delete reorganization": + // just to not fail :) + case "none": + // just to not fail :) + default: + require.Failf(t, "unhandled schema state '%s'", schemaState) + } + } + runMultiSchemaTest(t, createSQL, alterSQL, initFn, func(kit *testkit.TestKit) {}, loopFn) +} + +func TestMultiSchemaDropListDefaultPartition(t *testing.T) { + createSQL := `create table t (a int primary key, b varchar(255)) partition by list (a) (partition p0 values in (1,2,3), partition p1 values in (100,101,102,DEFAULT))` + initFn := func(tkO *testkit.TestKit) { + tkO.MustExec(`insert into t values (1,1),(2,2),(101,101),(102,102)`) + } + alterSQL := `alter table t drop partition p0` + loopFn := func(tkO, tkNO *testkit.TestKit) { + res := tkO.MustQuery(`select schema_state from information_schema.DDL_JOBS where table_name = 't' order by job_id desc limit 1`) + schemaState := res.Rows()[0][0].(string) + // TODO: Test both static and dynamic partition pruning! + switch schemaState { + case "write only": + // tkNO are unaware of the DDL + // tkO see non-readable/non-writable p0 partition, and should try to read from p1 + // in case there is something written to overlapping p1 + tkO.MustContainErrMsg(`insert into t values (1,1)`, "[table:1526]Table has no partition for value matching a partition being dropped, 'p0'") + tkNO.MustContainErrMsg(`insert into t values (1,1)`, "[kv:1062]Duplicate entry '1' for key 't.PRIMARY'") + tkO.MustContainErrMsg(`insert into t values (101,101)`, "[kv:1062]Duplicate entry '101' for key 't.PRIMARY'") + tkNO.MustContainErrMsg(`insert into t values (101,101)`, "[kv:1062]Duplicate entry '101' for key 't.PRIMARY'") + tkNO.MustQuery(`select * from t`).Sort().Check(testkit.Rows("1 1", "101 101", "102 102", "2 2")) + tkO.MustQuery(`select * from t`).Sort().Check(testkit.Rows("101 101", "102 102")) + case "delete only": + // tkNO see non-readable/non-writable p0 partition, and should try to read from p1 + // in case there is something written to overlapping p1 + // tkO is not aware of p0. 
+ tkO.MustExec(`insert into t values (1,2)`) + tkNO.MustContainErrMsg(`insert into t values (1,2)`, "[table:1526]Table has no partition for value matching a partition being dropped, 'p0'") + tkO.MustContainErrMsg(`insert into t values (101,101)`, "[kv:1062]Duplicate entry '101' for key 't.PRIMARY'") + tkNO.MustContainErrMsg(`insert into t values (101,101)`, "[kv:1062]Duplicate entry '101' for key 't.PRIMARY'") + tkNO.MustQuery(`select * from t`).Sort().Check(testkit.Rows("1 2", "101 101", "102 102")) + // Original row should not be seen in StateWriteOnly + tkNO.MustQuery(`select * from t partition (p0)`).Sort().Check(testkit.Rows()) + tkNO.MustContainErrMsg(`select * from t partition (pNonExisting)`, "[table:1735]Unknown partition 'pnonexisting' in table 't'") + tkNO.MustQuery(`select * from t partition (p1)`).Sort().Check(testkit.Rows("1 2", "101 101", "102 102")) + tkNO.MustQuery(`select * from t where a < 1000`).Sort().Check(testkit.Rows("1 2", "101 101", "102 102")) + tkNO.MustQuery(`select * from t where a > 0`).Sort().Check(testkit.Rows("1 2", "101 101", "102 102")) + tkNO.MustQuery(`select * from t where a = 1`).Sort().Check(testkit.Rows("1 2")) + tkNO.MustQuery(`select * from t where a = 1 or a = 2 or a = 3`).Sort().Check(testkit.Rows("1 2")) + tkNO.MustQuery(`select * from t where a in (1,2,3)`).Sort().Check(testkit.Rows("1 2")) + tkNO.MustQuery(`select * from t where a < 100`).Sort().Check(testkit.Rows("1 2")) + + tkNO.MustQuery(`select * from t where b = 2`).Sort().Check(testkit.Rows("1 2")) + // TODO: Test update and delete! + // TODO: test key, hash and list partition without default partition :) + // Should we see the partition or not?!? + tkNO.MustQuery(`show create table t`).Check(testkit.Rows("" + + "t CREATE TABLE `t` (\n" + + " `a` int(11) NOT NULL,\n" + + " `b` varchar(255) DEFAULT NULL,\n" + + " PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" + + "PARTITION BY LIST (`a`)\n" + + "(PARTITION `p0` VALUES IN (1,2,3),\n" + + " PARTITION `p1` VALUES IN (100,101,102,DEFAULT))")) + tkO.MustQuery(`show create table t`).Check(testkit.Rows("" + + "t CREATE TABLE `t` (\n" + + " `a` int(11) NOT NULL,\n" + + " `b` varchar(255) DEFAULT NULL,\n" + + " PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" + + "PARTITION BY LIST (`a`)\n" + + "(PARTITION `p1` VALUES IN (100,101,102,DEFAULT))")) + case "delete reorganization": + // just to not fail :) + case "none": + // just to not fail :) + default: + require.Failf(t, "unhandled schema state '%s'", schemaState) + } + } + runMultiSchemaTest(t, createSQL, alterSQL, initFn, func(kit *testkit.TestKit) {}, loopFn) +} + +func TestMultiSchemaDropListColumnsDefaultPartition(t *testing.T) { + createSQL := `create table t (a int, b varchar(255), c varchar (255), primary key (a,b)) partition by list columns (a,b) (partition p0 values in ((1,"1"),(2,"2"),(3,"3")), partition p1 values in ((100,"100"),(101,"101"),(102,"102"),DEFAULT))` + initFn := func(tkO *testkit.TestKit) { + tkO.MustExec(`insert into t values (1,1,1),(2,2,2),(101,101,101),(102,102,102)`) + } + alterSQL := `alter table t drop partition p0` + loopFn := func(tkO, tkNO *testkit.TestKit) { + res := tkO.MustQuery(`select schema_state from information_schema.DDL_JOBS where table_name = 't' order by job_id desc limit 1`) + schemaState := res.Rows()[0][0].(string) + // TODO: Test both static and dynamic partition pruning! 
+ switch schemaState { + case "write only": + // tkNO are unaware of the DDL + // tkO see non-readable/non-writable p0 partition, and should try to read from p1 + // in case there is something written to overlapping p1 + tkO.MustContainErrMsg(`insert into t values (1,1,1)`, "[table:1526]Table has no partition for value matching a partition being dropped, 'p0'") + tkNO.MustContainErrMsg(`insert into t values (1,1,1)`, "[kv:1062]Duplicate entry '1-1' for key 't.PRIMARY'") + tkO.MustContainErrMsg(`insert into t values (101,101,101)`, "[kv:1062]Duplicate entry '101-101' for key 't.PRIMARY'") + tkNO.MustContainErrMsg(`insert into t values (101,101,101)`, "[kv:1062]Duplicate entry '101-101' for key 't.PRIMARY'") + tkNO.MustQuery(`select * from t`).Sort().Check(testkit.Rows("1 1 1", "101 101 101", "102 102 102", "2 2 2")) + tkO.MustQuery(`select * from t`).Sort().Check(testkit.Rows("101 101 101", "102 102 102")) + case "delete only": + // tkNO see non-readable/non-writable p0 partition, and should try to read from p1 + // in case there is something written to overlapping p1 + // tkO is not aware of p0. + tkO.MustExec(`insert into t values (1,1,2)`) + tkNO.MustContainErrMsg(`insert into t values (1,1,2)`, "[table:1526]Table has no partition for value matching a partition being dropped, 'p0'") + tkO.MustContainErrMsg(`insert into t values (101,101,101)`, "[kv:1062]Duplicate entry '101-101' for key 't.PRIMARY'") + tkNO.MustContainErrMsg(`insert into t values (101,101,101)`, "[kv:1062]Duplicate entry '101-101' for key 't.PRIMARY'") + tkNO.MustQuery(`select * from t`).Sort().Check(testkit.Rows("1 1 2", "101 101 101", "102 102 102")) + // Original row should not be seen in StateWriteOnly + tkNO.MustQuery(`select * from t partition (p0)`).Sort().Check(testkit.Rows()) + tkNO.MustContainErrMsg(`select * from t partition (pNonExisting)`, "[table:1735]Unknown partition 'pnonexisting' in table 't'") + tkNO.MustQuery(`select * from t partition (p1)`).Sort().Check(testkit.Rows("1 1 2", "101 101 101", "102 102 102")) + tkNO.MustQuery(`select * from t where a < 1000`).Sort().Check(testkit.Rows("1 1 2", "101 101 101", "102 102 102")) + tkNO.MustQuery(`select * from t where a > 0`).Sort().Check(testkit.Rows("1 1 2", "101 101 101", "102 102 102")) + tkNO.MustQuery(`select * from t where a = 1`).Sort().Check(testkit.Rows("1 1 2")) + tkNO.MustQuery(`select * from t where a = 1 or a = 2 or a = 3`).Sort().Check(testkit.Rows("1 1 2")) + tkNO.MustQuery(`select * from t where a in (1,2,3) or b in ("1","2")`).Sort().Check(testkit.Rows("1 1 2")) + tkNO.MustQuery(`select * from t where a in (1,2,3)`).Sort().Check(testkit.Rows("1 1 2")) + tkNO.MustQuery(`select * from t where a < 100`).Sort().Check(testkit.Rows("1 1 2")) + + tkNO.MustQuery(`select * from t where c = "2"`).Sort().Check(testkit.Rows("1 1 2")) + tkNO.MustQuery(`select * from t where b = "1"`).Sort().Check(testkit.Rows("1 1 2")) + // TODO: Test update and delete! + // TODO: test key, hash and list partition without default partition :) + // Should we see the partition or not?!? 
+ tkNO.MustQuery(`show create table t`).Check(testkit.Rows("" + + "t CREATE TABLE `t` (\n" + + " `a` int(11) NOT NULL,\n" + + " `b` varchar(255) NOT NULL,\n" + + " `c` varchar(255) DEFAULT NULL,\n" + + " PRIMARY KEY (`a`,`b`) /*T![clustered_index] CLUSTERED */\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" + + "PARTITION BY LIST COLUMNS(`a`,`b`)\n" + + "(PARTITION `p0` VALUES IN ((1,'1'),(2,'2'),(3,'3')),\n" + + " PARTITION `p1` VALUES IN ((100,'100'),(101,'101'),(102,'102'),DEFAULT))")) + tkO.MustQuery(`show create table t`).Check(testkit.Rows("" + + "t CREATE TABLE `t` (\n" + + " `a` int(11) NOT NULL,\n" + + " `b` varchar(255) NOT NULL,\n" + + " `c` varchar(255) DEFAULT NULL,\n" + + " PRIMARY KEY (`a`,`b`) /*T![clustered_index] CLUSTERED */\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" + + "PARTITION BY LIST COLUMNS(`a`,`b`)\n" + + "(PARTITION `p1` VALUES IN ((100,'100'),(101,'101'),(102,'102'),DEFAULT))")) + case "delete reorganization": + // just to not fail :) + case "none": + // just to not fail :) + default: + require.Failf(t, "unhandled schema state '%s'", schemaState) + } + } + runMultiSchemaTest(t, createSQL, alterSQL, initFn, func(kit *testkit.TestKit) {}, loopFn) +} + +func runMultiSchemaTest(t *testing.T, createSQL, alterSQL string, initFn, postFn func(*testkit.TestKit), loopFn func(tO, tNO *testkit.TestKit)) { + distCtx := testkit.NewDistExecutionContextWithLease(t, 2, 15*time.Second) + store := distCtx.Store + domOwner := distCtx.GetDomain(0) + domNonOwner := distCtx.GetDomain(1) + defer func() { + domOwner.Close() + domNonOwner.Close() + store.Close() + }() + + if !domOwner.DDL().OwnerManager().IsOwner() { + domOwner, domNonOwner = domNonOwner, domOwner + } + + seOwner, err := session.CreateSessionWithDomain(store, domOwner) + require.NoError(t, err) + seNonOwner, err := session.CreateSessionWithDomain(store, domNonOwner) + require.NoError(t, err) + + tkDDLOwner := testkit.NewTestKitWithSession(t, store, seOwner) + tkDDLOwner.MustExec(`use test`) + tkDDLOwner.MustExec(`set @@global.tidb_enable_global_index = 1`) + tkDDLOwner.MustExec(`set @@session.tidb_enable_global_index = 1`) + tkO := testkit.NewTestKitWithSession(t, store, seOwner) + tkO.MustExec(`use test`) + tkNO := testkit.NewTestKitWithSession(t, store, seNonOwner) + tkNO.MustExec(`use test`) + + tkDDLOwner.MustExec(createSQL) + domOwner.Reload() + domNonOwner.Reload() + initFn(tkO) + verStart := domNonOwner.InfoSchema().SchemaMetaVersion() + hookChan := make(chan struct{}) + hookFunc := func(job *model.Job) { + hookChan <- struct{}{} + logutil.BgLogger().Info("XXXXXXXXXXX Hook now waiting", zap.String("job.State", job.State.String()), zap.String("job.SchemaStage", job.SchemaState.String())) + <-hookChan + logutil.BgLogger().Info("XXXXXXXXXXX Hook released", zap.String("job.State", job.State.String()), zap.String("job.SchemaStage", job.SchemaState.String())) + } + failpoint.EnableCall("github.com/pingcap/tidb/pkg/ddl/onJobRunAfter", hookFunc) + defer failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/onJobRunAfter") + alterChan := make(chan struct{}) + go func() { + tkDDLOwner.MustExec(alterSQL) + logutil.BgLogger().Info("XXXXXXXXXXX drop partition done!") + alterChan <- struct{}{} + }() + // Skip the first state, since we want to compare before vs after in the loop + <-hookChan + hookChan <- struct{}{} + verCurr := verStart + 1 + i := 0 + for { + // Waiting for the next State change to be done (i.e. 
blocking the state after) + releaseHook := true + for { + select { + case <-hookChan: + case <-alterChan: + releaseHook = false + logutil.BgLogger().Info("XXXXXXXXXXX release hook") + break + } + domOwner.Reload() + if domNonOwner.InfoSchema().SchemaMetaVersion() == domOwner.InfoSchema().SchemaMetaVersion() { + // looping over reorganize data/indexes + hookChan <- struct{}{} + continue + } + break + } + logutil.BgLogger().Info("XXXXXXXXXXX states loop", zap.Int64("verCurr", verCurr), zap.Int64("NonOwner ver", domNonOwner.InfoSchema().SchemaMetaVersion()), zap.Int64("Owner ver", domOwner.InfoSchema().SchemaMetaVersion())) + domOwner.Reload() + require.Equal(t, verCurr-1, domNonOwner.InfoSchema().SchemaMetaVersion()) + require.Equal(t, verCurr, domOwner.InfoSchema().SchemaMetaVersion()) + loopFn(tkO, tkNO) + domNonOwner.Reload() + if !releaseHook { + // Alter done! + break + } + // Continue to next state + verCurr++ + i++ + hookChan <- struct{}{} + } + logutil.BgLogger().Info("XXXXXXXXXXX states loop done") + postFn(tkO) +} diff --git a/pkg/meta/model/table.go b/pkg/meta/model/table.go index 2aa22b94eb93d..a643e07cec9b7 100644 --- a/pkg/meta/model/table.go +++ b/pkg/meta/model/table.go @@ -736,6 +736,8 @@ type PartitionInfo struct { States []PartitionState `json:"states"` Num uint64 `json:"num"` + // Indicate which DDL Action is currently on going + DDLAction ActionType `json:"ddl_action,omitempty"` // Only used during ReorganizePartition so far DDLState SchemaState `json:"ddl_state"` // Set during ALTER TABLE ... if the table id needs to change @@ -848,6 +850,8 @@ func (pi *PartitionInfo) HasTruncatingPartitionID(pid int64) bool { // ClearReorgIntermediateInfo remove intermediate information used during reorganize partition. func (pi *PartitionInfo) ClearReorgIntermediateInfo() { + pi.DDLAction = ActionNone + pi.DDLState = StateNone pi.DDLType = model.PartitionTypeNone pi.DDLExpr = "" pi.DDLColumns = nil @@ -877,6 +881,122 @@ func (pi *PartitionInfo) GetPartitionIDByName(partitionDefinitionName string) in return -1 } +// GetDefaultListPartition return the index of Definitions +// that contains the LIST Default partition otherwise it returns -1 +func (pi *PartitionInfo) GetDefaultListPartition() int { + if pi.Type != model.PartitionTypeList { + return -1 + } + defs := pi.Definitions + for i := range defs { + if len(defs[i].InValues) == 0 { + return i + } + for _, vs := range defs[i].InValues { + if len(vs) == 1 && vs[0] == "DEFAULT" { + return i + } + } + } + + return -1 +} + +// CanHaveOverlappingDroppingPartition returns true if special handling +// is needed during DDL of partitioned tables, +// where range or list with default partition can have +// overlapping partitions. +// Example: +// ... PARTITION BY RANGE (a) +// (PARTITION p0 VALUES LESS THAN (10), +// PARTITION p1 VALUES LESS THAN (20)) +// ALTER TABLE t DROP PARTITION p0; +// When p0 is gone, then p1 can have values < 10, +// so if p0 is visible for one session, while another session +// have dropped p0, a value '9' will then be in p1, instead of p0, +// i.e. an "overlapping" partition, that needs special handling. +// Same can happen for LIST partitioning, if there is a DEFAULT partition. +func (pi *PartitionInfo) CanHaveOverlappingDroppingPartition() bool { + if pi.DDLAction == ActionDropTablePartition && + pi.DDLState == StateWriteOnly { + return true + } + return false +} + +// ReplaceWithOverlappingPartitionIdx returns the overlapping partition +// if there is one and a previous error. 
+// Functions based on locatePartitionCommon, like GetPartitionIdxByRow +// will return the found partition, with an error, +// since it is being dropped. +// This function will correct the partition index and error if it can. +// For example of Overlapping partition, +// see CanHaveOverlappingDroppingPartition +// This function should not be used for writing, since we should block +// writes to partitions that are being dropped. +// But for read, we should replace the dropping partitions with +// the overlapping partition if it exists, so we can read new data +// from sessions one step ahead in the DDL State. +func (pi *PartitionInfo) ReplaceWithOverlappingPartitionIdx(idx int, err error) (int, error) { + if err != nil && idx >= 0 { + idx = pi.GetOverlappingDroppingPartitionIdx(idx) + if idx >= 0 { + err = nil + } + } + return idx, err +} + +// GetOverlappingDroppingPartitionIdx takes the index of Definitions +// and returns possible overlapping partition to use instead. +// Only used during DROP PARTITION! +// For RANGE, DROP PARTITION must be a consecutive range of partitions. +// For LIST, it only takes effect if there is default partition. +// returns same idx if no overlapping partition +// return -1 if the partition is being dropped, with no overlapping partition, +// like for last range partition dropped or no default list partition. +// See CanHaveOverlappingDroppingPartition() for more info about +// Overlapping dropping partition. +func (pi *PartitionInfo) GetOverlappingDroppingPartitionIdx(idx int) int { + if idx < 0 || idx >= len(pi.Definitions) { + return -1 + } + if pi.CanHaveOverlappingDroppingPartition() { + switch pi.Type { + case model.PartitionTypeRange: + for i := idx; i < len(pi.Definitions); i++ { + if pi.IsDropping(i) { + continue + } + return i + } + // Last partition is also dropped! + return -1 + case model.PartitionTypeList: + if pi.IsDropping(idx) { + defaultIdx := pi.GetDefaultListPartition() + if defaultIdx == idx { + return -1 + } + return defaultIdx + } + return idx + } + } + return idx +} + +// IsDropping returns true if the partition +// is being dropped (i.e. in DroppingDefinitions) +func (pi *PartitionInfo) IsDropping(idx int) bool { + for _, def := range pi.DroppingDefinitions { + if def.ID == pi.Definitions[idx].ID { + return true + } + } + return false +} + // SetOriginalPartitionIDs sets the order of the original partition IDs // in case it needs to be rolled back. LIST Partitioning would not know otherwise. 
func (pi *PartitionInfo) SetOriginalPartitionIDs() { diff --git a/pkg/planner/core/casetest/partition/testdata/integration_partition_suite_out.json b/pkg/planner/core/casetest/partition/testdata/integration_partition_suite_out.json index e70e65ab26ccd..f4fba3dc27975 100644 --- a/pkg/planner/core/casetest/partition/testdata/integration_partition_suite_out.json +++ b/pkg/planner/core/casetest/partition/testdata/integration_partition_suite_out.json @@ -1121,7 +1121,7 @@ { "SQL": "select * from tlist1 where a = 1 and b in (1,2)", "DynamicPlan": [ - "IndexReader 2.00 root partition:p0,p1 index:IndexRangeScan", + "IndexReader 2.00 root partition:all index:IndexRangeScan", "└─IndexRangeScan 2.00 cop[tikv] table:tlist1, index:PRIMARY(a, b) range:[1 1,1 1], [1 2,1 2], keep order:false, stats:pseudo" ], "StaticPlan": [ @@ -1139,7 +1139,7 @@ { "SQL": "select * from tlist1 where a = 1 and b in (1,2) order by b", "DynamicPlan": [ - "IndexReader 2.00 root partition:p0,p1 index:IndexRangeScan", + "IndexReader 2.00 root partition:all index:IndexRangeScan", "└─IndexRangeScan 2.00 cop[tikv] table:tlist1, index:PRIMARY(a, b) range:[1 1,1 1], [1 2,1 2], keep order:true, stats:pseudo" ], "StaticPlan": [ @@ -1158,7 +1158,7 @@ { "SQL": "select * from tlist1 where a = 1 and b in (1,2) order by b desc", "DynamicPlan": [ - "IndexReader 2.00 root partition:p0,p1 index:IndexRangeScan", + "IndexReader 2.00 root partition:all index:IndexRangeScan", "└─IndexRangeScan 2.00 cop[tikv] table:tlist1, index:PRIMARY(a, b) range:[1 1,1 1], [1 2,1 2], keep order:true, desc, stats:pseudo" ], "StaticPlan": [ @@ -1422,7 +1422,7 @@ { "SQL": "select * from tlist2 where a = 1 and b in (1,2)", "DynamicPlan": [ - "TableReader 2.00 root partition:p0,p1 data:TableRangeScan", + "TableReader 2.00 root partition:all data:TableRangeScan", "└─TableRangeScan 2.00 cop[tikv] table:tlist2 range:[1 1,1 1], [1 2,1 2], keep order:false, stats:pseudo" ], "StaticPlan": [ @@ -1440,7 +1440,7 @@ { "SQL": "select * from tlist2 where a = 1 and b in (1,2) order by b", "DynamicPlan": [ - "TableReader 2.00 root partition:p0,p1 data:TableRangeScan", + "TableReader 2.00 root partition:all data:TableRangeScan", "└─TableRangeScan 2.00 cop[tikv] table:tlist2 range:[1 1,1 1], [1 2,1 2], keep order:true, stats:pseudo" ], "StaticPlan": [ @@ -1460,7 +1460,7 @@ "SQL": "select * from tlist2 where a = 1 and b in (1,2) order by b desc", "DynamicPlan": [ "Sort 0.02 root test.tlist2.b:desc", - "└─TableReader 2.00 root partition:p0,p1 data:TableRangeScan", + "└─TableReader 2.00 root partition:all data:TableRangeScan", " └─TableRangeScan 2.00 cop[tikv] table:tlist2 range:[1 1,1 1], [1 2,1 2], keep order:false, stats:pseudo" ], "StaticPlan": [ @@ -1701,7 +1701,7 @@ { "SQL": "select * from tlist3 where a in (1,3) and 1 = 1", "DynamicPlan": [ - "TableReader 2.00 root partition:p0,p1 data:TableRangeScan", + "TableReader 2.00 root partition:all data:TableRangeScan", "└─TableRangeScan 2.00 cop[tikv] table:tlist3 range:[1,1], [3,3], keep order:false, stats:pseudo" ], "StaticPlan": [ diff --git a/pkg/planner/core/casetest/partition/testdata/partition_pruner_in.json b/pkg/planner/core/casetest/partition/testdata/partition_pruner_in.json index 1ece78adf46cf..aa35b16978330 100644 --- a/pkg/planner/core/casetest/partition/testdata/partition_pruner_in.json +++ b/pkg/planner/core/casetest/partition/testdata/partition_pruner_in.json @@ -66,7 +66,7 @@ }, { "SQL": "select * from t1 where a in (1,2,3) or b in (4,5,6)", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 
where a in (1,2,3) and b in (4,5,6)", @@ -86,7 +86,7 @@ }, { "SQL": "select * from t1 where ( a=1 and b=1) or (a=6 and b=6)", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 where a = 100 and b = 100", @@ -130,7 +130,7 @@ }, { "SQL": "select * from t1 where a = 1 or (a = 10 and b is null)", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 where a = 8 or (a = 10 and b is null)", @@ -162,7 +162,7 @@ }, { "SQL": "select * from t1 where a = 100 or b in (1,6)", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 where a = 100 or b in (100,200)", @@ -170,15 +170,15 @@ }, { "SQL": "select * from t1 where a in (1,6) or b in (1,2) or (a=3 and b =3)", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 where a in (1,6)", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 where a in (1,6) or (a=3 and b =3)", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 where a in (1,6) and (a=3 and b =3)", @@ -218,11 +218,11 @@ }, { "SQL": "select * from t1 where t1.a in (select b from t2 where a in (1,2)) order by a", - "Pruner": "t1: p0,p1; t2: p0" + "Pruner": "t1: all; t2: p0" }, { "SQL": "select * from t1 where t1.a in (select b from t1 where a in (1,2)) order by a", - "Pruner": "t1: p0; t1: p0,p1" + "Pruner": "t1: all; t1: p0" }, { "SQL": "select * from t1 left join t2 on t1.id = t2.id where (t1.a=1 or t1.a = 3) and t2.a in (6,7,8)", @@ -238,15 +238,15 @@ }, { "SQL": "select count(*) from t1 join t2 on t1.b = t2.b where t1.a in (1,2) and t2.a in (1,6) and t1.b in (1,6)", - "Pruner": "t1: p0; t2: p0,p1" + "Pruner": "t1: p0; t2: all" }, { "SQL": "select /*+ INL_JOIN(t2,t1) */ count(*) from t2 join t1 on t2.b = t1.b where t2.a in (1,2) and t1.a in (1,6) and t1.b in (1,6)", - "Pruner": "t1: p0,p1; t2: p0" + "Pruner": "t1: all; t2: p0" }, { "SQL": "select /*+ INL_HASH_JOIN(t1,t2) */ count(*) from t2 join t1 on t2.b = t1.b where t2.a in (1,2) and t1.a in (1,6) and t1.b in (6,1)", - "Pruner": "t1: p0,p1; t2: p0" + "Pruner": "t1: all; t2: p0" }, { "SQL": "select /*+ INL_HASH_JOIN(t1,t2) */ count(*) from t2 join t1 on t2.b = t1.b where t2.a in (1,2) and t1.a in (1,6) and t1.b in (100,9,6)", @@ -254,7 +254,7 @@ }, { "SQL": "select /*+ INL_HASH_JOIN(t1,t2) */ count(*) from t2 join t1 on t2.b = t1.b where t2.a in (1,2) and t1.a in (1,6) and t1.b in (100,9,6,1)", - "Pruner": "t1: p0,p1; t2: p0" + "Pruner": "t1: all; t2: p0" }, { "SQL": "select * from t1 where a in (1,2,3) union select * from t1 where b in (6,7,8) order by a", @@ -275,7 +275,7 @@ }, { "SQL": "select * from t1 where a < 3 or b > 4", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 where a < 3 and b > 4", @@ -295,11 +295,11 @@ }, { "SQL": "select * from t1 where (a<=1 and b<=1) or (a >=6 and b>=6)", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 where a <= 100 and b <= 100", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 join t2 on t1.id = t2.id where (t1.a <= 3 and (t1.b >= 3 and t1.b <= 5)) and (t2.a >= 6 and t2.a <= 8) and t2.b>=7 and t2.id>=7", @@ -331,11 +331,11 @@ }, { "SQL": "select * from t1 where a <= 1 or (a <= 10 and b is null)", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 where a <= 8 or b <= 9", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 where a <= 3 and false", @@ -363,7 +363,7 @@ }, { "SQL": "select * from t1 where a = 100 or b >= 1 and b <= 
6", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 where a = 100 or (b >= 100 and b <= 200)", @@ -371,15 +371,15 @@ }, { "SQL": "select * from t1 where (a >= 1 and a <= 6) or (b >= 1 and b <= 2) or (a<=3 and b <=3)", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 where a >= 1 and a <= 6", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 where (a >= 1 and a <= 6) or (a>=3 and b >=3)", - "Pruner": "t1: p0,p1" + "Pruner": "t1: all" }, { "SQL": "select * from t1 where a in (1,6) and (a=3 and b =3)", @@ -411,19 +411,19 @@ }, { "SQL": "select * from t1 join t2 on t1.id = t2.id where (t1.a<=1 or t1.a <= 3) and (t2.a <= 6 and t2.b <= 6)", - "Pruner": "t1: p0; t2: p0,p1" + "Pruner": "t1: p0; t2: all" }, { "SQL": "select * from t1 join t1 as t2 on t1.id = t2.id where (t1.a<=1 or t1.a <= 3) and (t2.a <= 6 and t2.b <= 6)", - "Pruner": "t1: p0; t2: p0,p1" + "Pruner": "t1: p0; t2: all" }, { "SQL": "select * from t1 where t1.a in (select b from t2 where a BETWEEN 1 AND 2) order by a", - "Pruner": "t1: p0,p1; t2: p0" + "Pruner": "t1: all; t2: p0" }, { "SQL": "select * from t1 where t1.a in (select b from t1 where a BETWEEN 1 AND 2) order by a", - "Pruner": "t1: p0; t1: p0,p1" + "Pruner": "t1: all; t1: p0" }, { "SQL": "select * from t1 left join t2 on t1.id = t2.id where (t1.a<=1 or t1.a <= 3) and t2.a BETWEEN 6 AND 8", @@ -439,11 +439,11 @@ }, { "SQL": "select count(*) from t1 join t2 on t1.b = t2.b where t1.a BETWEEN 1 AND 2 and t2.a BETWEEN 1 AND 6 and t1.b BETWEEN 1 AND 6", - "Pruner": "t1: p0; t2: p0,p1" + "Pruner": "t1: p0; t2: all" }, { "SQL": "select /*+ INL_JOIN(t2,t1) */ count(*) from t2 join t1 on t2.b = t1.b where t2.a BETWEEN 1 AND 2 and t1.a BETWEEN 1 AND 6 and t1.b BETWEEN 1 AND 6", - "Pruner": "t1: p0,p1; t2: p0" + "Pruner": "t1: all; t2: p0" } ] } diff --git a/pkg/planner/core/casetest/partition/testdata/partition_pruner_out.json b/pkg/planner/core/casetest/partition/testdata/partition_pruner_out.json index 035f909ec3383..6e154007ec9d3 100644 --- a/pkg/planner/core/casetest/partition/testdata/partition_pruner_out.json +++ b/pkg/planner/core/casetest/partition/testdata/partition_pruner_out.json @@ -362,12 +362,12 @@ "6 6 6" ], "Plan": [ - "TableReader 59.91 root partition:p0,p1 data:Selection", + "TableReader 59.91 root partition:all data:Selection", "└─Selection 59.91 cop[tikv] or(in(test_partition.t1.a, 1, 2, 3), in(test_partition.t1.b, 4, 5, 6))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 59.91 root partition:p0,p1 index:Selection", + "IndexReader 59.91 root partition:all index:Selection", "└─Selection 59.91 cop[tikv] or(in(test_partition_1.t1.a, 1, 2, 3), in(test_partition_1.t1.b, 4, 5, 6))", " └─IndexFullScan 10000.00 cop[tikv] table:t1, index:a(a, b, id) keep order:false, stats:pseudo" ] @@ -442,12 +442,12 @@ "6 6 6" ], "Plan": [ - "TableReader 0.02 root partition:p0,p1 data:Selection", + "TableReader 0.02 root partition:all data:Selection", "└─Selection 0.02 cop[tikv] or(and(eq(test_partition.t1.a, 1), eq(test_partition.t1.b, 1)), and(eq(test_partition.t1.a, 6), eq(test_partition.t1.b, 6)))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 0.20 root partition:p0,p1 index:IndexRangeScan", + "IndexReader 0.20 root partition:all index:IndexRangeScan", "└─IndexRangeScan 0.20 cop[tikv] table:t1, index:a(a, b, id) range:[1 1,1 1], [6 6,6 6], keep order:false, stats:pseudo" ] 
}, @@ -631,12 +631,12 @@ " 10 " ], "Plan": [ - "TableReader 10.01 root partition:p0,p1 data:Selection", + "TableReader 10.01 root partition:all data:Selection", "└─Selection 10.01 cop[tikv] or(eq(test_partition.t1.a, 1), and(eq(test_partition.t1.a, 10), isnull(test_partition.t1.b)))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 10.10 root partition:p0,p1 index:IndexRangeScan", + "IndexReader 10.10 root partition:all index:IndexRangeScan", "└─IndexRangeScan 10.10 cop[tikv] table:t1, index:a(a, b, id) range:[1,1], [10 NULL,10 NULL], keep order:false, stats:pseudo" ] }, @@ -763,12 +763,12 @@ "6 6 6" ], "Plan": [ - "TableReader 29.98 root partition:p0,p1 data:Selection", + "TableReader 29.98 root partition:all data:Selection", "└─Selection 29.98 cop[tikv] or(eq(test_partition.t1.a, 100), in(test_partition.t1.b, 1, 6))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 29.98 root partition:p0,p1 index:Selection", + "IndexReader 29.98 root partition:all index:Selection", "└─Selection 29.98 cop[tikv] or(eq(test_partition_1.t1.a, 100), in(test_partition_1.t1.b, 1, 6))", " └─IndexFullScan 10000.00 cop[tikv] table:t1, index:a(a, b, id) keep order:false, stats:pseudo" ] @@ -796,12 +796,12 @@ "6 6 6" ], "Plan": [ - "TableReader 39.97 root partition:p0,p1 data:Selection", + "TableReader 39.97 root partition:all data:Selection", "└─Selection 39.97 cop[tikv] or(in(test_partition.t1.a, 1, 6), or(in(test_partition.t1.b, 1, 2), and(eq(test_partition.t1.a, 3), eq(test_partition.t1.b, 3))))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 40.06 root partition:p0,p1 index:Selection", + "IndexReader 40.06 root partition:all index:Selection", "└─Selection 40.06 cop[tikv] or(in(test_partition_1.t1.a, 1, 6), or(in(test_partition_1.t1.b, 1, 2), and(eq(test_partition_1.t1.a, 3), eq(test_partition_1.t1.b, 3))))", " └─IndexFullScan 10000.00 cop[tikv] table:t1, index:a(a, b, id) keep order:false, stats:pseudo" ] @@ -813,12 +813,12 @@ "6 6 6" ], "Plan": [ - "TableReader 20.00 root partition:p0,p1 data:Selection", + "TableReader 20.00 root partition:all data:Selection", "└─Selection 20.00 cop[tikv] in(test_partition.t1.a, 1, 6)", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 20.00 root partition:p0,p1 index:IndexRangeScan", + "IndexReader 20.00 root partition:all index:IndexRangeScan", "└─IndexRangeScan 20.00 cop[tikv] table:t1, index:a(a, b, id) range:[1,1], [6,6], keep order:false, stats:pseudo" ] }, @@ -830,12 +830,12 @@ "6 6 6" ], "Plan": [ - "TableReader 20.01 root partition:p0,p1 data:Selection", + "TableReader 20.01 root partition:all data:Selection", "└─Selection 20.01 cop[tikv] or(in(test_partition.t1.a, 1, 6), and(eq(test_partition.t1.a, 3), eq(test_partition.t1.b, 3)))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 20.10 root partition:p0,p1 index:IndexRangeScan", + "IndexReader 20.10 root partition:all index:IndexRangeScan", "└─IndexRangeScan 20.10 cop[tikv] table:t1, index:a(a, b, id) range:[1,1], [3 3,3 3], [6,6], keep order:false, stats:pseudo" ] }, @@ -998,7 +998,7 @@ " │ └─HashAgg 15.98 cop[tikv] group by:test_partition.t2.b, ", " │ └─Selection 19.98 cop[tikv] in(test_partition.t2.a, 1, 2), not(isnull(test_partition.t2.b))", " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep 
order:false, stats:pseudo", - " └─TableReader(Probe) 9990.00 root partition:p0,p1 data:Selection", + " └─TableReader(Probe) 9990.00 root partition:all data:Selection", " └─Selection 9990.00 cop[tikv] not(isnull(test_partition.t1.a))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], @@ -1009,7 +1009,7 @@ " │ └─IndexReader 159.84 root partition:p0 index:HashAgg", " │ └─HashAgg 159.84 cop[tikv] group by:test_partition_1.t2.b, ", " │ └─IndexRangeScan 199.80 cop[tikv] table:t2, index:a(a, b, id) range:[1 -inf,1 +inf], [2 -inf,2 +inf], keep order:false, stats:pseudo", - " └─IndexReader(Probe) 199.80 root partition:p0,p1 index:Selection", + " └─IndexReader(Probe) 199.80 root partition:all index:Selection", " └─Selection 199.80 cop[tikv] not(isnull(test_partition_1.t1.a))", " └─IndexRangeScan 200.00 cop[tikv] table:t1, index:a(a, b, id) range: decided by [eq(test_partition_1.t1.a, test_partition_1.t2.b)], keep order:false, stats:pseudo" ] @@ -1028,7 +1028,7 @@ " │ └─HashAgg 15.98 cop[tikv] group by:test_partition.t1.b, ", " │ └─Selection 19.98 cop[tikv] in(test_partition.t1.a, 1, 2), not(isnull(test_partition.t1.b))", " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", - " └─TableReader(Probe) 9990.00 root partition:p0,p1 data:Selection", + " └─TableReader(Probe) 9990.00 root partition:all data:Selection", " └─Selection 9990.00 cop[tikv] not(isnull(test_partition.t1.a))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], @@ -1039,7 +1039,7 @@ " │ └─IndexReader 159.84 root partition:p0 index:HashAgg", " │ └─HashAgg 159.84 cop[tikv] group by:test_partition_1.t1.b, ", " │ └─IndexRangeScan 199.80 cop[tikv] table:t1, index:a(a, b, id) range:[1 -inf,1 +inf], [2 -inf,2 +inf], keep order:false, stats:pseudo", - " └─IndexReader(Probe) 199.80 root partition:p0,p1 index:Selection", + " └─IndexReader(Probe) 199.80 root partition:all index:Selection", " └─Selection 199.80 cop[tikv] not(isnull(test_partition_1.t1.a))", " └─IndexRangeScan 200.00 cop[tikv] table:t1, index:a(a, b, id) range: decided by [eq(test_partition_1.t1.a, test_partition_1.t1.b)], keep order:false, stats:pseudo" ] @@ -1123,7 +1123,7 @@ "Plan": [ "StreamAgg 1.00 root funcs:count(1)->Column#9", "└─HashJoin 0.00 root inner join, equal:[eq(test_partition.t1.b, test_partition.t2.b)]", - " ├─TableReader(Build) 0.04 root partition:p0,p1 data:Selection", + " ├─TableReader(Build) 0.04 root partition:all data:Selection", " │ └─Selection 0.04 cop[tikv] in(test_partition.t2.a, 1, 6), in(test_partition.t2.b, 1, 6), not(isnull(test_partition.t2.b))", " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", " └─TableReader(Probe) 0.04 root partition:p0 data:Selection", @@ -1133,7 +1133,7 @@ "IndexPlan": [ "StreamAgg 1.00 root funcs:count(1)->Column#9", "└─HashJoin 0.10 root inner join, equal:[eq(test_partition_1.t1.b, test_partition_1.t2.b)]", - " ├─IndexReader(Build) 0.40 root partition:p0,p1 index:Selection", + " ├─IndexReader(Build) 0.40 root partition:all index:Selection", " │ └─Selection 0.40 cop[tikv] not(isnull(test_partition_1.t2.b))", " │ └─IndexRangeScan 0.40 cop[tikv] table:t2, index:a(a, b, id) range:[1 1,1 1], [1 6,1 6], [6 1,6 1], [6 6,6 6], keep order:false, stats:pseudo", " └─IndexReader(Probe) 0.40 root partition:p0 index:Selection", @@ -1149,7 +1149,7 @@ "Plan": [ "StreamAgg 1.00 root funcs:count(1)->Column#9", "└─HashJoin 0.00 root inner join, equal:[eq(test_partition.t2.b, test_partition.t1.b)]", - " ├─TableReader(Build) 
0.04 root partition:p0,p1 data:Selection", + " ├─TableReader(Build) 0.04 root partition:all data:Selection", " │ └─Selection 0.04 cop[tikv] in(test_partition.t1.a, 1, 6), in(test_partition.t1.b, 1, 6), not(isnull(test_partition.t1.b))", " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", " └─TableReader(Probe) 0.04 root partition:p0 data:Selection", @@ -1162,7 +1162,7 @@ " ├─IndexReader(Build) 0.40 root partition:p0 index:Selection", " │ └─Selection 0.40 cop[tikv] not(isnull(test_partition_1.t2.b))", " │ └─IndexRangeScan 0.40 cop[tikv] table:t2, index:a(a, b, id) range:[1 1,1 1], [1 6,1 6], [2 1,2 1], [2 6,2 6], keep order:false, stats:pseudo", - " └─IndexReader(Probe) 0.13 root partition:p0,p1 index:Selection", + " └─IndexReader(Probe) 0.13 root partition:all index:Selection", " └─Selection 0.13 cop[tikv] in(test_partition_1.t1.b, 1, 6), not(isnull(test_partition_1.t1.b))", " └─IndexRangeScan 63.94 cop[tikv] table:t1, index:a(a, b, id) range: decided by [eq(test_partition_1.t1.b, test_partition_1.t2.b) in(test_partition_1.t1.a, 1, 6)], keep order:false, stats:pseudo" ] @@ -1175,7 +1175,7 @@ "Plan": [ "StreamAgg 1.00 root funcs:count(1)->Column#9", "└─HashJoin 0.00 root inner join, equal:[eq(test_partition.t2.b, test_partition.t1.b)]", - " ├─TableReader(Build) 0.04 root partition:p0,p1 data:Selection", + " ├─TableReader(Build) 0.04 root partition:all data:Selection", " │ └─Selection 0.04 cop[tikv] in(test_partition.t1.a, 1, 6), in(test_partition.t1.b, 6, 1), not(isnull(test_partition.t1.b))", " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", " └─TableReader(Probe) 0.04 root partition:p0 data:Selection", @@ -1188,7 +1188,7 @@ " ├─IndexReader(Build) 0.40 root partition:p0 index:Selection", " │ └─Selection 0.40 cop[tikv] not(isnull(test_partition_1.t2.b))", " │ └─IndexRangeScan 0.40 cop[tikv] table:t2, index:a(a, b, id) range:[1 1,1 1], [1 6,1 6], [2 1,2 1], [2 6,2 6], keep order:false, stats:pseudo", - " └─IndexReader(Probe) 0.13 root partition:p0,p1 index:Selection", + " └─IndexReader(Probe) 0.13 root partition:all index:Selection", " └─Selection 0.13 cop[tikv] in(test_partition_1.t1.b, 6, 1), not(isnull(test_partition_1.t1.b))", " └─IndexRangeScan 63.94 cop[tikv] table:t1, index:a(a, b, id) range: decided by [eq(test_partition_1.t1.b, test_partition_1.t2.b) in(test_partition_1.t1.a, 1, 6)], keep order:false, stats:pseudo" ] @@ -1227,7 +1227,7 @@ "Plan": [ "StreamAgg 1.00 root funcs:count(1)->Column#9", "└─HashJoin 0.01 root inner join, equal:[eq(test_partition.t2.b, test_partition.t1.b)]", - " ├─TableReader(Build) 0.08 root partition:p0,p1 data:Selection", + " ├─TableReader(Build) 0.08 root partition:all data:Selection", " │ └─Selection 0.08 cop[tikv] in(test_partition.t1.a, 1, 6), in(test_partition.t1.b, 100, 9, 6, 1), not(isnull(test_partition.t1.b))", " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", " └─TableReader(Probe) 0.08 root partition:p0 data:Selection", @@ -1240,7 +1240,7 @@ " ├─IndexReader(Build) 0.80 root partition:p0 index:Selection", " │ └─Selection 0.80 cop[tikv] not(isnull(test_partition_1.t2.b))", " │ └─IndexRangeScan 0.80 cop[tikv] table:t2, index:a(a, b, id) range:[1 1,1 1], [1 6,1 6], [1 9,1 9], [1 100,1 100], [2 1,2 1], [2 6,2 6], [2 9,2 9], [2 100,2 100], keep order:false, stats:pseudo", - " └─IndexReader(Probe) 0.51 root partition:p0,p1 index:Selection", + " └─IndexReader(Probe) 0.51 root partition:all index:Selection", " └─Selection 0.51 cop[tikv] in(test_partition_1.t1.b, 
100, 9, 6, 1), not(isnull(test_partition_1.t1.b))", " └─IndexRangeScan 127.87 cop[tikv] table:t1, index:a(a, b, id) range: decided by [eq(test_partition_1.t1.b, test_partition_1.t2.b) in(test_partition_1.t1.a, 1, 6)], keep order:false, stats:pseudo" ] @@ -1340,12 +1340,12 @@ "9 9 9" ], "Plan": [ - "TableReader 5548.89 root partition:p0,p1 data:Selection", + "TableReader 5548.89 root partition:all data:Selection", "└─Selection 5548.89 cop[tikv] or(lt(test_partition.t1.a, 3), gt(test_partition.t1.b, 4))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 5548.89 root partition:p0,p1 index:Selection", + "IndexReader 5548.89 root partition:all index:Selection", "└─Selection 5548.89 cop[tikv] or(lt(test_partition_1.t1.a, 3), gt(test_partition_1.t1.b, 4))", " └─IndexFullScan 10000.00 cop[tikv] table:t1, index:a(a, b, id) keep order:false, stats:pseudo" ] @@ -1424,12 +1424,12 @@ "9 9 9" ], "Plan": [ - "TableReader 2092.85 root partition:p0,p1 data:Selection", + "TableReader 2092.85 root partition:all data:Selection", "└─Selection 2092.85 cop[tikv] or(and(le(test_partition.t1.a, 1), le(test_partition.t1.b, 1)), and(ge(test_partition.t1.a, 6), ge(test_partition.t1.b, 6)))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 5325.33 root partition:p0,p1 index:Selection", + "IndexReader 5325.33 root partition:all index:Selection", "└─Selection 5325.33 cop[tikv] or(and(le(test_partition_1.t1.a, 1), le(test_partition_1.t1.b, 1)), and(ge(test_partition_1.t1.a, 6), ge(test_partition_1.t1.b, 6)))", " └─IndexRangeScan 6656.67 cop[tikv] table:t1, index:a(a, b, id) range:[-inf,1], [6,+inf], keep order:false, stats:pseudo" ] @@ -1449,12 +1449,12 @@ "9 9 9" ], "Plan": [ - "TableReader 1104.45 root partition:p0,p1 data:Selection", + "TableReader 1104.45 root partition:all data:Selection", "└─Selection 1104.45 cop[tikv] le(test_partition.t1.a, 100), le(test_partition.t1.b, 100)", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 1104.45 root partition:p0,p1 index:Selection", + "IndexReader 1104.45 root partition:all index:Selection", "└─Selection 1104.45 cop[tikv] le(test_partition_1.t1.b, 100)", " └─IndexRangeScan 3323.33 cop[tikv] table:t1, index:a(a, b, id) range:[-inf,100], keep order:false, stats:pseudo" ] @@ -1603,12 +1603,12 @@ " 10 " ], "Plan": [ - "TableReader 3325.55 root partition:p0,p1 data:Selection", + "TableReader 3325.55 root partition:all data:Selection", "└─Selection 3325.55 cop[tikv] or(le(test_partition.t1.a, 1), and(le(test_partition.t1.a, 10), isnull(test_partition.t1.b)))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 2658.67 root partition:p0,p1 index:Selection", + "IndexReader 2658.67 root partition:all index:Selection", "└─Selection 2658.67 cop[tikv] or(le(test_partition_1.t1.a, 1), and(le(test_partition_1.t1.a, 10), isnull(test_partition_1.t1.b)))", " └─IndexRangeScan 3323.33 cop[tikv] table:t1, index:a(a, b, id) range:[-inf,10], keep order:false, stats:pseudo" ] @@ -1627,12 +1627,12 @@ "9 9 9" ], "Plan": [ - "TableReader 5542.21 root partition:p0,p1 data:Selection", + "TableReader 5542.21 root partition:all data:Selection", "└─Selection 5542.21 cop[tikv] or(le(test_partition.t1.a, 8), le(test_partition.t1.b, 9))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 5542.21 root 
partition:p0,p1 index:Selection", + "IndexReader 5542.21 root partition:all index:Selection", "└─Selection 5542.21 cop[tikv] or(le(test_partition_1.t1.a, 8), le(test_partition_1.t1.b, 9))", " └─IndexFullScan 10000.00 cop[tikv] table:t1, index:a(a, b, id) keep order:false, stats:pseudo" ] @@ -1752,12 +1752,12 @@ "6 6 6" ], "Plan": [ - "TableReader 259.75 root partition:p0,p1 data:Selection", + "TableReader 259.75 root partition:all data:Selection", "└─Selection 259.75 cop[tikv] or(eq(test_partition.t1.a, 100), and(ge(test_partition.t1.b, 1), le(test_partition.t1.b, 6)))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 259.75 root partition:p0,p1 index:Selection", + "IndexReader 259.75 root partition:all index:Selection", "└─Selection 259.75 cop[tikv] or(eq(test_partition_1.t1.a, 100), and(ge(test_partition_1.t1.b, 1), le(test_partition_1.t1.b, 6)))", " └─IndexFullScan 10000.00 cop[tikv] table:t1, index:a(a, b, id) keep order:false, stats:pseudo" ] @@ -1787,12 +1787,12 @@ "6 6 6" ], "Plan": [ - "TableReader 1543.67 root partition:p0,p1 data:Selection", + "TableReader 1543.67 root partition:all data:Selection", "└─Selection 1543.67 cop[tikv] or(and(ge(test_partition.t1.a, 1), le(test_partition.t1.a, 6)), or(and(ge(test_partition.t1.b, 1), le(test_partition.t1.b, 2)), and(le(test_partition.t1.a, 3), le(test_partition.t1.b, 3))))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 1543.67 root partition:p0,p1 index:Selection", + "IndexReader 1543.67 root partition:all index:Selection", "└─Selection 1543.67 cop[tikv] or(and(ge(test_partition_1.t1.a, 1), le(test_partition_1.t1.a, 6)), or(and(ge(test_partition_1.t1.b, 1), le(test_partition_1.t1.b, 2)), and(le(test_partition_1.t1.a, 3), le(test_partition_1.t1.b, 3))))", " └─IndexFullScan 10000.00 cop[tikv] table:t1, index:a(a, b, id) keep order:false, stats:pseudo" ] @@ -1808,12 +1808,12 @@ "6 6 6" ], "Plan": [ - "TableReader 250.00 root partition:p0,p1 data:Selection", + "TableReader 250.00 root partition:all data:Selection", "└─Selection 250.00 cop[tikv] ge(test_partition.t1.a, 1), le(test_partition.t1.a, 6)", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 250.00 root partition:p0,p1 index:IndexRangeScan", + "IndexReader 250.00 root partition:all index:IndexRangeScan", "└─IndexRangeScan 250.00 cop[tikv] table:t1, index:a(a, b, id) range:[1,6], keep order:false, stats:pseudo" ] }, @@ -1832,12 +1832,12 @@ "9 9 9" ], "Plan": [ - "TableReader 1333.33 root partition:p0,p1 data:Selection", + "TableReader 1333.33 root partition:all data:Selection", "└─Selection 1333.33 cop[tikv] or(and(ge(test_partition.t1.a, 1), le(test_partition.t1.a, 6)), and(ge(test_partition.t1.a, 3), ge(test_partition.t1.b, 3)))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], "IndexPlan": [ - "IndexReader 2666.67 root partition:p0,p1 index:Selection", + "IndexReader 2666.67 root partition:all index:Selection", "└─Selection 2666.67 cop[tikv] or(and(ge(test_partition_1.t1.a, 1), le(test_partition_1.t1.a, 6)), and(ge(test_partition_1.t1.a, 3), ge(test_partition_1.t1.b, 3)))", " └─IndexRangeScan 3333.33 cop[tikv] table:t1, index:a(a, b, id) range:[1,+inf], keep order:false, stats:pseudo" ] @@ -1956,7 +1956,7 @@ "Plan": [ "Projection 1379.19 root test_partition.t1.id, test_partition.t1.a, test_partition.t1.b, test_partition.t2.id, test_partition.t2.a, 
test_partition.t2.b", "└─HashJoin 1379.19 root inner join, equal:[eq(test_partition.t2.id, test_partition.t1.id)]", - " ├─TableReader(Build) 1103.35 root partition:p0,p1 data:Selection", + " ├─TableReader(Build) 1103.35 root partition:all data:Selection", " │ └─Selection 1103.35 cop[tikv] le(test_partition.t2.a, 6), le(test_partition.t2.b, 6), not(isnull(test_partition.t2.id))", " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", " └─TableReader(Probe) 3320.01 root partition:p0 data:Selection", @@ -1966,7 +1966,7 @@ "IndexPlan": [ "Projection 1379.19 root test_partition_1.t1.id, test_partition_1.t1.a, test_partition_1.t1.b, test_partition_1.t2.id, test_partition_1.t2.a, test_partition_1.t2.b", "└─HashJoin 1379.19 root inner join, equal:[eq(test_partition_1.t2.id, test_partition_1.t1.id)]", - " ├─IndexReader(Build) 1103.35 root partition:p0,p1 index:Selection", + " ├─IndexReader(Build) 1103.35 root partition:all index:Selection", " │ └─Selection 1103.35 cop[tikv] le(test_partition_1.t2.b, 6), not(isnull(test_partition_1.t2.id))", " │ └─IndexRangeScan 3323.33 cop[tikv] table:t2, index:a(a, b, id) range:[-inf,6], keep order:false, stats:pseudo", " └─IndexReader(Probe) 3320.01 root partition:p0 index:Selection", @@ -1984,7 +1984,7 @@ "Plan": [ "Projection 1379.19 root test_partition.t1.id, test_partition.t1.a, test_partition.t1.b, test_partition.t1.id, test_partition.t1.a, test_partition.t1.b", "└─HashJoin 1379.19 root inner join, equal:[eq(test_partition.t1.id, test_partition.t1.id)]", - " ├─TableReader(Build) 1103.35 root partition:p0,p1 data:Selection", + " ├─TableReader(Build) 1103.35 root partition:all data:Selection", " │ └─Selection 1103.35 cop[tikv] le(test_partition.t1.a, 6), le(test_partition.t1.b, 6), not(isnull(test_partition.t1.id))", " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", " └─TableReader(Probe) 3320.01 root partition:p0 data:Selection", @@ -1994,7 +1994,7 @@ "IndexPlan": [ "Projection 1379.19 root test_partition_1.t1.id, test_partition_1.t1.a, test_partition_1.t1.b, test_partition_1.t1.id, test_partition_1.t1.a, test_partition_1.t1.b", "└─HashJoin 1379.19 root inner join, equal:[eq(test_partition_1.t1.id, test_partition_1.t1.id)]", - " ├─IndexReader(Build) 1103.35 root partition:p0,p1 index:Selection", + " ├─IndexReader(Build) 1103.35 root partition:all index:Selection", " │ └─Selection 1103.35 cop[tikv] le(test_partition_1.t1.b, 6), not(isnull(test_partition_1.t1.id))", " │ └─IndexRangeScan 3323.33 cop[tikv] table:t2, index:a(a, b, id) range:[-inf,6], keep order:false, stats:pseudo", " └─IndexReader(Probe) 3320.01 root partition:p0 index:Selection", @@ -2016,7 +2016,7 @@ " │ └─HashAgg 199.80 cop[tikv] group by:test_partition.t2.b, ", " │ └─Selection 249.75 cop[tikv] ge(test_partition.t2.a, 1), le(test_partition.t2.a, 2), not(isnull(test_partition.t2.b))", " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", - " └─TableReader(Probe) 9990.00 root partition:p0,p1 data:Selection", + " └─TableReader(Probe) 9990.00 root partition:all data:Selection", " └─Selection 9990.00 cop[tikv] not(isnull(test_partition.t1.a))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], @@ -2028,7 +2028,7 @@ " │ └─HashAgg 199.80 cop[tikv] group by:test_partition_1.t2.b, ", " │ └─Selection 249.75 cop[tikv] not(isnull(test_partition_1.t2.b))", " │ └─IndexRangeScan 250.00 cop[tikv] table:t2, index:a(a, b, id) range:[1,2], keep order:false, stats:pseudo", - " └─IndexReader(Probe) 
249.75 root partition:p0,p1 index:Selection", + " └─IndexReader(Probe) 249.75 root partition:all index:Selection", " └─Selection 249.75 cop[tikv] not(isnull(test_partition_1.t1.a))", " └─IndexRangeScan 250.00 cop[tikv] table:t1, index:a(a, b, id) range: decided by [eq(test_partition_1.t1.a, test_partition_1.t2.b)], keep order:false, stats:pseudo" ] @@ -2047,7 +2047,7 @@ " │ └─HashAgg 199.80 cop[tikv] group by:test_partition.t1.b, ", " │ └─Selection 249.75 cop[tikv] ge(test_partition.t1.a, 1), le(test_partition.t1.a, 2), not(isnull(test_partition.t1.b))", " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", - " └─TableReader(Probe) 9990.00 root partition:p0,p1 data:Selection", + " └─TableReader(Probe) 9990.00 root partition:all data:Selection", " └─Selection 9990.00 cop[tikv] not(isnull(test_partition.t1.a))", " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" ], @@ -2059,7 +2059,7 @@ " │ └─HashAgg 199.80 cop[tikv] group by:test_partition_1.t1.b, ", " │ └─Selection 249.75 cop[tikv] not(isnull(test_partition_1.t1.b))", " │ └─IndexRangeScan 250.00 cop[tikv] table:t1, index:a(a, b, id) range:[1,2], keep order:false, stats:pseudo", - " └─IndexReader(Probe) 249.75 root partition:p0,p1 index:Selection", + " └─IndexReader(Probe) 249.75 root partition:all index:Selection", " └─Selection 249.75 cop[tikv] not(isnull(test_partition_1.t1.a))", " └─IndexRangeScan 250.00 cop[tikv] table:t1, index:a(a, b, id) range: decided by [eq(test_partition_1.t1.a, test_partition_1.t1.b)], keep order:false, stats:pseudo" ] @@ -2149,7 +2149,7 @@ "Plan": [ "StreamAgg 1.00 root funcs:count(1)->Column#9", "└─HashJoin 7.81 root inner join, equal:[eq(test_partition.t1.b, test_partition.t2.b)]", - " ├─TableReader(Build) 6.25 root partition:p0,p1 data:Selection", + " ├─TableReader(Build) 6.25 root partition:all data:Selection", " │ └─Selection 6.25 cop[tikv] ge(test_partition.t2.a, 1), ge(test_partition.t2.b, 1), le(test_partition.t2.a, 6), le(test_partition.t2.b, 6), not(isnull(test_partition.t2.b))", " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", " └─TableReader(Probe) 6.25 root partition:p0 data:Selection", @@ -2159,7 +2159,7 @@ "IndexPlan": [ "StreamAgg 1.00 root funcs:count(1)->Column#9", "└─HashJoin 7.81 root inner join, equal:[eq(test_partition_1.t1.b, test_partition_1.t2.b)]", - " ├─IndexReader(Build) 6.25 root partition:p0,p1 index:Selection", + " ├─IndexReader(Build) 6.25 root partition:all index:Selection", " │ └─Selection 6.25 cop[tikv] ge(test_partition_1.t2.b, 1), le(test_partition_1.t2.b, 6), not(isnull(test_partition_1.t2.b))", " │ └─IndexRangeScan 250.00 cop[tikv] table:t2, index:a(a, b, id) range:[1,6], keep order:false, stats:pseudo", " └─IndexReader(Probe) 6.25 root partition:p0 index:Selection", @@ -2175,7 +2175,7 @@ "Plan": [ "StreamAgg 1.00 root funcs:count(1)->Column#9", "└─HashJoin 7.81 root inner join, equal:[eq(test_partition.t2.b, test_partition.t1.b)]", - " ├─TableReader(Build) 6.25 root partition:p0,p1 data:Selection", + " ├─TableReader(Build) 6.25 root partition:all data:Selection", " │ └─Selection 6.25 cop[tikv] ge(test_partition.t1.a, 1), ge(test_partition.t1.b, 1), le(test_partition.t1.a, 6), le(test_partition.t1.b, 6), not(isnull(test_partition.t1.b))", " │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", " └─TableReader(Probe) 6.25 root partition:p0 data:Selection", @@ -2185,7 +2185,7 @@ "IndexPlan": [ "StreamAgg 1.00 root funcs:count(1)->Column#9", "└─HashJoin 7.81 root 
inner join, equal:[eq(test_partition_1.t2.b, test_partition_1.t1.b)]", - " ├─IndexReader(Build) 6.25 root partition:p0,p1 index:Selection", + " ├─IndexReader(Build) 6.25 root partition:all index:Selection", " │ └─Selection 6.25 cop[tikv] ge(test_partition_1.t1.b, 1), le(test_partition_1.t1.b, 6), not(isnull(test_partition_1.t1.b))", " │ └─IndexRangeScan 250.00 cop[tikv] table:t1, index:a(a, b, id) range:[1,6], keep order:false, stats:pseudo", " └─IndexReader(Probe) 6.25 root partition:p0 index:Selection", diff --git a/pkg/planner/core/partition_prune.go b/pkg/planner/core/partition_prune.go index d1194628ab806..194c1fb2b2d4b 100644 --- a/pkg/planner/core/partition_prune.go +++ b/pkg/planner/core/partition_prune.go @@ -16,6 +16,7 @@ package core import ( "github.com/pingcap/tidb/pkg/expression" + tmodel "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/planner/core/base" "github.com/pingcap/tidb/pkg/table" @@ -38,9 +39,57 @@ func PartitionPruning(ctx base.PlanContext, tbl table.PartitionedTable, conds [] return nil, err } ret := s.convertToIntSlice(rangeOr, pi, partitionNames) + ret = handleDroppingForRange(pi, partitionNames, ret) return ret, nil case model.PartitionTypeList: return s.pruneListPartition(ctx, tbl, partitionNames, conds, columns) } return []int{FullRange}, nil } + +func handleDroppingForRange(pi *tmodel.PartitionInfo, partitionNames []model.CIStr, usedPartitions []int) []int { + if pi.CanHaveOverlappingDroppingPartition() { + if len(usedPartitions) == 1 && usedPartitions[0] == FullRange { + usedPartitions = make([]int, 0, len(pi.Definitions)) + for i := range pi.Definitions { + usedPartitions = append(usedPartitions, i) + } + } + ret := make([]int, 0, len(usedPartitions)) + for i := range usedPartitions { + idx := pi.GetOverlappingDroppingPartitionIdx(usedPartitions[i]) + if idx == -1 { + // dropped without overlapping partition, skip it + continue + } + if idx == usedPartitions[i] { + // non-dropped partition + ret = append(ret, idx) + continue + } + // partition being dropped, remove the consecutive range of dropping partitions + // and add the overlapping partition. + end := i + 1 + for ; end < len(usedPartitions) && usedPartitions[end] < idx; end++ { + continue + } + // add the overlapping partition, if not already included + if end >= len(usedPartitions) || usedPartitions[end] != idx { + // It must also match partitionNames if explicitly given + s := PartitionProcessor{} + if len(partitionNames) == 0 || s.findByName(partitionNames, pi.Definitions[idx].Name.L) { + ret = append(ret, idx) + } + } + if end < len(usedPartitions) { + ret = append(ret, usedPartitions[end:]...) 
+ } + break + } + usedPartitions = ret + } + if len(usedPartitions) == len(pi.Definitions) { + return []int{FullRange} + } + return usedPartitions +} diff --git a/pkg/planner/core/point_get_plan.go b/pkg/planner/core/point_get_plan.go index 20b6f70422e55..a004c02265ef7 100644 --- a/pkg/planner/core/point_get_plan.go +++ b/pkg/planner/core/point_get_plan.go @@ -401,26 +401,12 @@ func (p *PointGetPlan) PrunePartitions(sctx sessionctx.Context) bool { dVal.Copy(&row[p.HandleColOffset]) } partIdx, err := pt.GetPartitionIdxByRow(sctx.GetExprCtx().GetEvalCtx(), row) - if err != nil { + partIdx, err = pt.Meta().Partition.ReplaceWithOverlappingPartitionIdx(partIdx, err) + if err != nil || !isInExplicitPartitions(pi, partIdx, p.PartitionNames) { partIdx = -1 p.PartitionIdx = &partIdx return true } - if len(p.PartitionNames) > 0 { - found := false - partName := pi.Definitions[partIdx].Name.L - for _, name := range p.PartitionNames { - if name.L == partName { - found = true - break - } - } - if !found { - partIdx = -1 - p.PartitionIdx = &partIdx - return true - } - } p.PartitionIdx = &partIdx return false } @@ -684,6 +670,7 @@ func (p *BatchPointGetPlan) getPartitionIdxs(sctx sessionctx.Context) []int { rows[i][j].Copy(&r[p.IndexInfo.Columns[j].Offset]) } pIdx, err := pTbl.GetPartitionIdxByRow(sctx.GetExprCtx().GetEvalCtx(), r) + pIdx, err = pTbl.Meta().Partition.ReplaceWithOverlappingPartitionIdx(pIdx, err) if err != nil { // Skip on any error, like: // No matching partition, overflow etc. @@ -782,6 +769,7 @@ func (p *BatchPointGetPlan) PrunePartitionsAndValues(sctx sessionctx.Context) ([ } d.Copy(&r[p.HandleColOffset]) pIdx, err := pTbl.GetPartitionIdxByRow(sctx.GetExprCtx().GetEvalCtx(), r) + pIdx, err = pi.ReplaceWithOverlappingPartitionIdx(pIdx, err) if err != nil || !isInExplicitPartitions(pi, pIdx, p.PartitionNames) || (p.SinglePartition && diff --git a/pkg/planner/core/rule_partition_processor.go b/pkg/planner/core/rule_partition_processor.go index be33e00080570..524110d683c86 100644 --- a/pkg/planner/core/rule_partition_processor.go +++ b/pkg/planner/core/rule_partition_processor.go @@ -707,14 +707,27 @@ func (l *listPartitionPruner) locateColumnPartitionsByCondition(cond expression. } } for _, location := range locations { - if len(l.partitionNames) > 0 { - for _, pg := range location { + for _, pg := range location { + idx := l.pi.GetOverlappingDroppingPartitionIdx(pg.PartIdx) + if idx == -1 { + // Skip dropping partitions + continue + } + if idx != pg.PartIdx { + pg = tables.ListPartitionGroup{ + PartIdx: idx, + // TODO: Test this!!! + // How does it work with intersection for example? + GroupIdxs: []int{-1}, // Special group! 
+ } + } + if len(l.partitionNames) > 0 { if l.findByName(l.partitionNames, l.pi.Definitions[pg.PartIdx].Name.L) { helper.UnionPartitionGroup(pg) } + } else { + helper.UnionPartitionGroup(pg) } - } else { - helper.Union(location) } } } @@ -779,6 +792,7 @@ func (l *listPartitionPruner) findUsedListPartitions(conds []expression.Expressi return nil, err } partitionIdx := l.listPrune.LocatePartition(value, isNull) + partitionIdx = l.pi.GetOverlappingDroppingPartitionIdx(partitionIdx) if partitionIdx == -1 { continue } @@ -807,8 +821,23 @@ func (s *PartitionProcessor) findUsedListPartitions(ctx base.PlanContext, tbl ta return nil, err } if _, ok := used[FullRange]; ok { - or := partitionRangeOR{partitionRange{0, len(pi.Definitions)}} - return s.convertToIntSlice(or, pi, partitionNames), nil + ret := make([]int, 0, len(pi.Definitions)) + for i := 0; i < len(pi.Definitions); i++ { + if len(partitionNames) > 0 && !listPruner.findByName(partitionNames, pi.Definitions[i].Name.L) { + continue + } + if i != pi.GetOverlappingDroppingPartitionIdx(i) { + continue + } + ret = append(ret, i) + } + if len(ret) == len(pi.Definitions) { + return []int{FullRange}, nil + } + return ret, nil + } + if len(used) == len(pi.Definitions) { + return []int{FullRange}, nil } ret := make([]int, 0, len(used)) for k := range used { @@ -1828,33 +1857,40 @@ func (s *PartitionProcessor) makeUnionAllChildren(ds *logicalop.DataSource, pi * usedDefinition := make(map[int64]model.PartitionDefinition) for _, r := range or { for i := r.start; i < r.end; i++ { + partIdx := pi.GetOverlappingDroppingPartitionIdx(i) + if partIdx < 0 { + continue + } + // This is for `table partition (p0,p1)` syntax, only union the specified partition if has specified partitions. if len(ds.PartitionNames) != 0 { - if !s.findByName(ds.PartitionNames, pi.Definitions[i].Name.L) { + if !s.findByName(ds.PartitionNames, pi.Definitions[partIdx].Name.L) { continue } } + if _, found := usedDefinition[pi.Definitions[partIdx].ID]; found { + continue + } // Not a deep copy. newDataSource := *ds newDataSource.BaseLogicalPlan = logicalop.NewBaseLogicalPlan(ds.SCtx(), plancodec.TypeTableScan, &newDataSource, ds.QueryBlockOffset()) newDataSource.SetSchema(ds.Schema().Clone()) newDataSource.Columns = make([]*model.ColumnInfo, len(ds.Columns)) copy(newDataSource.Columns, ds.Columns) - idx := i - newDataSource.PartitionDefIdx = &idx - newDataSource.PhysicalTableID = pi.Definitions[i].ID + newDataSource.PartitionDefIdx = &partIdx + newDataSource.PhysicalTableID = pi.Definitions[partIdx].ID // There are many expression nodes in the plan tree use the original datasource // id as FromID. So we set the id of the newDataSource with the original one to // avoid traversing the whole plan tree to update the references. 
 			newDataSource.SetID(ds.ID())
-			err := s.resolveOptimizeHint(&newDataSource, pi.Definitions[i].Name)
-			partitionNameSet.Insert(pi.Definitions[i].Name.L)
+			err := s.resolveOptimizeHint(&newDataSource, pi.Definitions[partIdx].Name)
+			partitionNameSet.Insert(pi.Definitions[partIdx].Name.L)
 			if err != nil {
 				return nil, err
 			}
 			children = append(children, &newDataSource)
-			usedDefinition[pi.Definitions[i].ID] = pi.Definitions[i]
+			usedDefinition[pi.Definitions[partIdx].ID] = pi.Definitions[partIdx]
 		}
 	}
 	s.checkHintsApplicable(ds, partitionNameSet)
diff --git a/pkg/table/tables/partition.go b/pkg/table/tables/partition.go
index e09dee8b3541d..43944c5528b5c 100644
--- a/pkg/table/tables/partition.go
+++ b/pkg/table/tables/partition.go
@@ -138,6 +138,11 @@ func newPartitionedTable(tbl *TableCommon, tblInfo *model.TableInfo) (table.Part
 		partitions[p.ID] = &t
 	}
 	ret.partitions = partitions
+	if pi.DDLAction != model.ActionReorganizePartition &&
+		pi.DDLAction != model.ActionRemovePartitioning &&
+		pi.DDLAction != model.ActionAlterTablePartitioning {
+		return ret, nil
+	}
 	// In StateWriteReorganization we are using the 'old' partition definitions
 	// and if any new change happens in DroppingDefinitions, it needs to be done
 	// also in AddingDefinitions (with new evaluation of the new expression)
@@ -1312,6 +1317,19 @@ func (t *partitionedTable) locatePartitionCommon(ctx expression.EvalContext, tp
 		} else {
 			idx, err = t.locateRangePartition(ctx, partitionExpr, r)
 		}
+		if err != nil {
+			return -1, err
+		}
+		pi := t.Meta().Partition
+		if pi.CanHaveOverlappingDroppingPartition() {
+			if pi.IsDropping(idx) {
+				// Return an error, since a partition that is being dropped must not be written to.
+				// Readers may instead check the overlapping partition and ignore this error:
+				// for RANGE, use the next non-dropping partition; for LIST with a
+				// DEFAULT partition, use the DEFAULT partition.
+				return idx, table.ErrNoPartitionForGivenValue.GenWithStackByArgs(fmt.Sprintf("matching a partition being dropped, '%s'", pi.Definitions[idx].Name.String()))
+			}
+		}
 	case pmodel.PartitionTypeHash:
 		// Note that only LIST and RANGE supports REORGANIZE PARTITION
 		idx, err = t.locateHashPartition(ctx, partitionExpr, num, r)
 	case pmodel.PartitionTypeKey:
 		idx, err = partitionExpr.LocateKeyPartition(num, r)
 	case pmodel.PartitionTypeList:
 		idx, err = partitionExpr.locateListPartition(ctx, r)
+		pi := t.Meta().Partition
+		if idx != pi.GetOverlappingDroppingPartitionIdx(idx) {
+			return idx, table.ErrNoPartitionForGivenValue.GenWithStackByArgs(fmt.Sprintf("matching a partition being dropped, '%s'", pi.Definitions[idx].Name.String()))
+		}
 	case pmodel.PartitionTypeNone:
 		idx = 0
 	}
 	if err != nil {
-		return 0, errors.Trace(err)
+		return -1, errors.Trace(err)
 	}
 	return idx, nil
 }
@@ -1331,11 +1353,7 @@ func (t *partitionedTable) locatePartitionIdx(ctx expression.EvalContext, r []types.Datum) (int, error) {
 	pi := t.Meta().GetPartitionInfo()
 	columnsSet := len(t.meta.Partition.Columns) > 0
-	idx, err := t.locatePartitionCommon(ctx, pi.Type, t.partitionExpr, pi.Num, columnsSet, r)
-	if err != nil {
-		return -1, errors.Trace(err)
-	}
-	return idx, nil
+	return t.locatePartitionCommon(ctx, pi.Type, t.partitionExpr, pi.Num, columnsSet, r)
 }
 
 func (t *partitionedTable) locatePartition(ctx expression.EvalContext, r []types.Datum) (int64, error) {
diff --git a/pkg/testkit/mockstore.go b/pkg/testkit/mockstore.go
index 3db132a8d161d..bb71c566d4dfc 100644
--- a/pkg/testkit/mockstore.go
+++ b/pkg/testkit/mockstore.go
@@ -195,6 +195,11 @@ func (d *DistExecutionContext) GetDomainCnt() int {
 
 // NewDistExecutionContext create DistExecutionContext for testing.
 func NewDistExecutionContext(t testing.TB, serverNum int) *DistExecutionContext {
+	return NewDistExecutionContextWithLease(t, serverNum, 500*time.Millisecond)
+}
+
+// NewDistExecutionContextWithLease creates a DistExecutionContext with a custom schema lease for testing.
+func NewDistExecutionContextWithLease(t testing.TB, serverNum int, lease time.Duration) *DistExecutionContext { store, err := mockstore.NewMockStore() require.NoError(t, err) gctuner.GlobalMemoryLimitTuner.Stop() @@ -203,7 +208,7 @@ func NewDistExecutionContext(t testing.TB, serverNum int) *DistExecutionContext var domInfo []string for i := 0; i < serverNum; i++ { - dom := bootstrap4DistExecution(t, store, 500*time.Millisecond) + dom := bootstrap4DistExecution(t, store, lease) if i != serverNum-1 { dom.SetOnClose(func() { /* don't delete the store in domain map */ }) } diff --git a/tests/integrationtest/r/ddl/db_partition.result b/tests/integrationtest/r/ddl/db_partition.result index d85be09d2b988..d873d4198e610 100644 --- a/tests/integrationtest/r/ddl/db_partition.result +++ b/tests/integrationtest/r/ddl/db_partition.result @@ -550,7 +550,7 @@ b a 4 4 explain format = 'brief' select * from t where a = 4; id estRows task access object operator info -TableReader 4.80 root partition:p0,p1,p2 data:Selection +TableReader 4.80 root partition:all data:Selection └─Selection 4.80 cop[tikv] eq(cast(ddl__db_partition.t.a, double BINARY), 4) └─TableFullScan 6.00 cop[tikv] table:t keep order:false select * from t where a = 3; @@ -558,7 +558,7 @@ b a 3 3 explain format = 'brief' select * from t where a = 3; id estRows task access object operator info -TableReader 4.80 root partition:p0,p1,p2 data:Selection +TableReader 4.80 root partition:all data:Selection └─Selection 4.80 cop[tikv] eq(cast(ddl__db_partition.t.a, double BINARY), 3) └─TableFullScan 6.00 cop[tikv] table:t keep order:false explain format = 'brief' select * from t where a = "3"; diff --git a/tests/integrationtest/r/planner/core/integration_partition.result b/tests/integrationtest/r/planner/core/integration_partition.result index b6027ce5c0b20..5860055f294a5 100644 --- a/tests/integrationtest/r/planner/core/integration_partition.result +++ b/tests/integrationtest/r/planner/core/integration_partition.result @@ -423,7 +423,7 @@ create table tcollist (a int, b int, key(a)) partition by list columns (a) (part alter table tcollist alter index a invisible; explain select a from tcollist where a>=0 and a<=5; id estRows task access object operator info -TableReader_7 250.00 root partition:p0,p1 data:Selection_6 +TableReader_7 250.00 root partition:all data:Selection_6 └─Selection_6 250.00 cop[tikv] ge(list_partition_invisible_idx.tcollist.a, 0), le(list_partition_invisible_idx.tcollist.a, 5) └─TableFullScan_5 10000.00 cop[tikv] table:tcollist keep order:false, stats:pseudo create database list_partition_cte; diff --git a/tests/integrationtest/r/planner/core/partition_pruner.result b/tests/integrationtest/r/planner/core/partition_pruner.result index a5fc628c639e4..57ce0f431d41a 100644 --- a/tests/integrationtest/r/planner/core/partition_pruner.result +++ b/tests/integrationtest/r/planner/core/partition_pruner.result @@ -3002,7 +3002,7 @@ a b 2 2 explain format='brief' select * from t where a in (1,2) and b in (1,2); id estRows task access object operator info -TableReader 3.43 root partition:p1,p2,pDef data:Selection +TableReader 3.43 root partition:all data:Selection └─Selection 3.43 cop[tikv] in(listdefaultprune.t.a, 1, 2), in(listdefaultprune.t.b, 1, 2) └─TableFullScan 7.00 cop[tikv] table:t keep order:false select * from t where a in (1) and b in (1); @@ -3033,7 +3033,7 @@ a b 2 2 explain format='brief' select * from t where a in (1,2) and b in (1,2); id estRows task access object operator info -TableReader 3.43 root partition:p1,p2,pDef 
data:Selection +TableReader 3.43 root partition:all data:Selection └─Selection 3.43 cop[tikv] in(listdefaultprune.t.a, 1, 2), in(listdefaultprune.t.b, 1, 2) └─TableFullScan 7.00 cop[tikv] table:t keep order:false drop table t;
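Reviewer note: the planner and executor changes above all revolve around mapping a partition that is being dropped to the partition that still covers its rows (GetOverlappingDroppingPartitionIdx / ReplaceWithOverlappingPartitionIdx in the diff). The standalone Go sketch below illustrates only that remapping idea and is not TiDB code; the partitionMeta type, its overlappingIdx method, and remapUsedPartitions are simplified stand-ins invented for illustration, assuming that a dropping partition maps either to the partition that still covers its rows (the next non-dropping RANGE partition, or the LIST DEFAULT partition) or to -1 once its rows are no longer readable.

package main

import "fmt"

// partitionMeta is a hypothetical, simplified stand-in for partition metadata;
// it is not TiDB's model.PartitionInfo.
type partitionMeta struct {
	dropping    map[int]bool // indexes of partitions currently being dropped
	overlapping map[int]int  // dropping index -> partition that still covers its rows
}

// overlappingIdx mirrors the idea of GetOverlappingDroppingPartitionIdx:
// a non-dropping partition maps to itself, a dropping one maps to the
// partition that still covers its rows, or -1 if none does.
func (p *partitionMeta) overlappingIdx(i int) int {
	if !p.dropping[i] {
		return i
	}
	if idx, ok := p.overlapping[i]; ok {
		return idx
	}
	return -1
}

// remapUsedPartitions rewrites a pruned set of partition indexes so that
// partitions being dropped are replaced by their overlapping partition and
// duplicates are removed, similar in spirit to handleDroppingForRange above.
func remapUsedPartitions(p *partitionMeta, used []int) []int {
	seen := make(map[int]bool, len(used))
	ret := make([]int, 0, len(used))
	for _, i := range used {
		idx := p.overlappingIdx(i)
		if idx == -1 || seen[idx] {
			continue // dropped with no readable replacement, or already added
		}
		seen[idx] = true
		ret = append(ret, idx)
	}
	return ret
}

func main() {
	// p1 is being dropped; its rows remain readable through p2.
	p := &partitionMeta{
		dropping:    map[int]bool{1: true},
		overlapping: map[int]int{1: 2},
	}
	fmt.Println(remapUsedPartitions(p, []int{0, 1, 2})) // [0 2]
}

The real code additionally honours an explicit PARTITION (p0, p1) clause and collapses the result back to FullRange when every definition ends up used, which is why the expected plans above change from listing individual partitions to partition:all.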