planner,executor: fix 'select ...(join on partition table) for update' panic #21148

Merged (20 commits) on Jun 16, 2021
Changes from 4 commits
93 changes: 60 additions & 33 deletions executor/builder.go
@@ -600,6 +600,16 @@ func (b *executorBuilder) buildSelectLock(v *plannercore.PhysicalLock) Executor
tblID2Handle: v.TblID2Handle,
partitionedTable: v.PartitionedTable,
}
if len(e.partitionedTable) > 0 {
schema := v.Schema()
e.tblID2PIDColumnIndex = make(map[int64]int)
for i := 0; i < len(v.ExtraPIDInfo.Columns); i++ {
col := v.ExtraPIDInfo.Columns[i]
tblID := v.ExtraPIDInfo.TblIDs[i]
offset := schema.ColumnIndex(col)
@XuHuaiyu (Contributor) commented on Nov 27, 2020:

Checking column.ID == model.ExtraPidColID is better.

The PR author (Contributor) replied:

Checking that the column is ExtraPidColID can get the column's index in the schema, but it can't build the mapping of table ID => partition-ID column index in the schema. Here we take each partition-ID column (v.ExtraPIDInfo.Columns[i]) and find its index in the schema (offset), to build the mapping tblID (v.ExtraPIDInfo.TblIDs[i]) => offset.

e.tblID2PIDColumnIndex[tblID] = offset
}
}
return e
}
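
For illustration, here is a self-contained sketch of the mapping that the loop above builds, using simplified stand-in types and hypothetical table IDs and offsets (none of these values come from the PR): one ExtraPidColID column is appended to the child schema per partitioned table, and tblID2PIDColumnIndex records where that column sits, keyed by the table's ID.

package main

import "fmt"

// Simplified stand-ins for expression.Column / expression.Schema, just enough
// to show how buildSelectLock derives tblID2PIDColumnIndex.
type column struct {
	id       int64 // a partition-ID column is marked by a special column ID
	uniqueID int64
}

type schema struct{ columns []*column }

// columnIndex plays the role of expression.Schema.ColumnIndex: the position
// of col within the schema, or -1 if it is absent.
func (s *schema) columnIndex(c *column) int {
	for i, col := range s.columns {
		if col.uniqueID == c.uniqueID {
			return i
		}
	}
	return -1
}

func main() {
	const extraPidColID = -3 // hypothetical stand-in for model.ExtraPidColID
	// A join of two partitioned tables: four user columns, plus one
	// partition-ID column per table appended at offsets 4 and 5.
	pidT1 := &column{id: extraPidColID, uniqueID: 10}
	pidT2 := &column{id: extraPidColID, uniqueID: 20}
	s := &schema{columns: []*column{
		{uniqueID: 1}, {uniqueID: 2}, {uniqueID: 3}, {uniqueID: 4}, pidT1, pidT2,
	}}

	// What v.ExtraPIDInfo would carry: parallel slices of columns and table IDs.
	extraPIDColumns := []*column{pidT1, pidT2}
	extraPIDTblIDs := []int64{101, 102} // hypothetical table IDs

	tblID2PIDColumnIndex := make(map[int64]int)
	for i := 0; i < len(extraPIDColumns); i++ {
		tblID2PIDColumnIndex[extraPIDTblIDs[i]] = s.columnIndex(extraPIDColumns[i])
	}
	fmt.Println(tblID2PIDColumnIndex) // map[101:4 102:5]
}

SelectLockExec later uses these offsets to read the physical partition ID directly from each chunk row (see the executor/executor.go changes below).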

@@ -2482,21 +2492,25 @@ func buildNoRangeTableReader(b *executorBuilder, v *plannercore.PhysicalTableRea
return nil, err
}
e := &TableReaderExecutor{
baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
dagPB: dagReq,
startTS: startTS,
table: tbl,
keepOrder: ts.KeepOrder,
desc: ts.Desc,
columns: ts.Columns,
streaming: streaming,
corColInFilter: b.corColInDistPlan(v.TablePlans),
corColInAccess: b.corColInAccess(v.TablePlans[0]),
plans: v.TablePlans,
tablePlan: v.GetTablePlan(),
storeType: v.StoreType,
baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
dagPB: dagReq,
startTS: startTS,
table: tbl,
keepOrder: ts.KeepOrder,
desc: ts.Desc,
columns: ts.Columns,
streaming: streaming,
corColInFilter: b.corColInDistPlan(v.TablePlans),
corColInAccess: b.corColInAccess(v.TablePlans[0]),
plans: v.TablePlans,
tablePlan: v.GetTablePlan(),
storeType: v.StoreType,
extraPIDColumnIndex: -1,
}
e.setBatchCop(v)
if isPartition {
e.extraPIDColumnIndex = extraPIDColumnIndex(v.Schema())
}
e.buildVirtualColumnInfo()
if containsLimit(dagReq.Executors) {
e.feedback = statistics.NewQueryFeedback(0, nil, 0, ts.Desc)
@@ -2523,6 +2537,15 @@ func buildNoRangeTableReader(b *executorBuilder, v *plannercore.PhysicalTableRea
return e, nil
}

func extraPIDColumnIndex(schema *expression.Schema) int {
for idx, col := range schema.Columns {
if col.ID == model.ExtraPidColID {
return idx
}
}
return -1
}

func (b *executorBuilder) buildMPPGather(v *plannercore.PhysicalTableReader) Executor {
startTs, err := b.getSnapshotTS()
if err != nil {
@@ -2883,26 +2906,30 @@ func buildNoRangeIndexLookUpReader(b *executorBuilder, v *plannercore.PhysicalIn
return nil, err
}
e := &IndexLookUpExecutor{
baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
dagPB: indexReq,
startTS: startTS,
table: tbl,
index: is.Index,
keepOrder: is.KeepOrder,
desc: is.Desc,
tableRequest: tableReq,
columns: ts.Columns,
indexStreaming: indexStreaming,
tableStreaming: tableStreaming,
dataReaderBuilder: &dataReaderBuilder{executorBuilder: b},
corColInIdxSide: b.corColInDistPlan(v.IndexPlans),
corColInTblSide: b.corColInDistPlan(v.TablePlans),
corColInAccess: b.corColInAccess(v.IndexPlans[0]),
idxCols: is.IdxCols,
colLens: is.IdxColLens,
idxPlans: v.IndexPlans,
tblPlans: v.TablePlans,
PushedLimit: v.PushedLimit,
baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
dagPB: indexReq,
startTS: startTS,
table: tbl,
index: is.Index,
keepOrder: is.KeepOrder,
desc: is.Desc,
tableRequest: tableReq,
columns: ts.Columns,
indexStreaming: indexStreaming,
tableStreaming: tableStreaming,
dataReaderBuilder: &dataReaderBuilder{executorBuilder: b},
corColInIdxSide: b.corColInDistPlan(v.IndexPlans),
corColInTblSide: b.corColInDistPlan(v.TablePlans),
corColInAccess: b.corColInAccess(v.IndexPlans[0]),
idxCols: is.IdxCols,
colLens: is.IdxColLens,
idxPlans: v.IndexPlans,
tblPlans: v.TablePlans,
PushedLimit: v.PushedLimit,
extraPIDColumnIndex: -1,
}
if ok, _ := ts.IsPartition(); ok {
e.extraPIDColumnIndex = extraPIDColumnIndex(v.Schema())
}

if containsLimit(indexReq.Executors) {
22 changes: 13 additions & 9 deletions executor/distsql.go
@@ -399,6 +399,9 @@ type IndexLookUpExecutor struct
PushedLimit *plannercore.PushedDownLimit

stats *IndexLookUpRunTimeStats

// extraPIDColumnIndex is used by the partition reader to add an extra partition ID column, default -1
extraPIDColumnIndex int
}

type getHandleType int8
@@ -600,15 +603,16 @@ func (e *IndexLookUpExecutor) startTableWorker(ctx context.Context, workCh <-cha

func (e *IndexLookUpExecutor) buildTableReader(ctx context.Context, handles []kv.Handle) (Executor, error) {
tableReaderExec := &TableReaderExecutor{
baseExecutor: newBaseExecutor(e.ctx, e.schema, e.getTableRootPlanID()),
table: e.table,
dagPB: e.tableRequest,
startTS: e.startTS,
columns: e.columns,
streaming: e.tableStreaming,
feedback: statistics.NewQueryFeedback(0, nil, 0, false),
corColInFilter: e.corColInTblSide,
plans: e.tblPlans,
baseExecutor: newBaseExecutor(e.ctx, e.schema, e.getTableRootPlanID()),
table: e.table,
dagPB: e.tableRequest,
startTS: e.startTS,
columns: e.columns,
streaming: e.tableStreaming,
feedback: statistics.NewQueryFeedback(0, nil, 0, false),
corColInFilter: e.corColInTblSide,
plans: e.tblPlans,
extraPIDColumnIndex: e.extraPIDColumnIndex,
}
tableReaderExec.buildVirtualColumnInfo()
tableReader, err := e.dataReaderBuilder.buildTableReaderFromHandles(ctx, tableReaderExec, handles, true)
42 changes: 16 additions & 26 deletions executor/executor.go
@@ -878,31 +878,22 @@ type SelectLockExec struct
Lock *ast.SelectLockInfo
keys []kv.Key

tblID2Handle map[int64][]plannercore.HandleCols
tblID2Handle map[int64][]plannercore.HandleCols

// All the partition tables in the children of this executor.
partitionedTable []table.PartitionedTable

// tblID2Table is cached to reduce cost.
tblID2Table map[int64]table.PartitionedTable
// When SelectLock works on a partition table, we need the partition ID
// instead of the table ID to calculate the lock KV. In that case, the partition ID is stored as an
// extra column in the chunk row.
// tblID2PIDColumnIndex stores the column index in the chunk row. The children may be a join
// of multiple tables, so a map is used.
tblID2PIDColumnIndex map[int64]int
}

// Open implements the Executor Open interface.
func (e *SelectLockExec) Open(ctx context.Context) error {
if err := e.baseExecutor.Open(ctx); err != nil {
return err
}

if len(e.tblID2Handle) > 0 && len(e.partitionedTable) > 0 {
e.tblID2Table = make(map[int64]table.PartitionedTable, len(e.partitionedTable))
for id := range e.tblID2Handle {
for _, p := range e.partitionedTable {
if id == p.Meta().ID {
e.tblID2Table[id] = p
}
}
}
}

return nil
return e.baseExecutor.Open(ctx)
}

// Next implements the Executor Next interface.
@@ -920,15 +911,14 @@ func (e *SelectLockExec) Next(ctx context.Context, req *chunk.Chunk) error {
if req.NumRows() > 0 {
iter := chunk.NewIterator4Chunk(req)
for row := iter.Begin(); row != iter.End(); row = iter.Next() {

for id, cols := range e.tblID2Handle {
physicalID := id
if pt, ok := e.tblID2Table[id]; ok {
// On a partitioned table, we have to use physical ID to encode the lock key!
p, err := pt.GetPartitionByRow(e.ctx, row.GetDatumRow(e.base().retFieldTypes))
if err != nil {
return err
}
physicalID = p.GetPhysicalID()
if len(e.partitionedTable) > 0 {
// Replace the table ID with partition ID.
// The partition ID is returned as an extra column from the table reader.
offset := e.tblID2PIDColumnIndex[id]
physicalID = row.GetInt64(offset)
}

for _, col := range cols {
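To make the new locking path concrete, here is a self-contained sketch (simplified row type, hypothetical table ID, offsets, and partition ID; none taken from the PR) of how SelectLockExec.Next now chooses the physical ID for the lock key: for a partitioned table it reads the partition ID from the extra column recorded in tblID2PIDColumnIndex, otherwise it keeps the logical table ID.

package main

import "fmt"

// Minimal stand-in for a chunk row: just enough to read an int64 column.
type row struct{ cols []int64 }

func (r row) GetInt64(i int) int64 { return r.cols[i] }

// physicalIDForLock mirrors the idea in SelectLockExec.Next above: when the
// row comes from a partitioned table, the lock key must be built from the
// partition's physical ID, which the table reader returns as an extra column.
func physicalIDForLock(tblID int64, partitioned bool, tblID2PIDColumnIndex map[int64]int, r row) int64 {
	physicalID := tblID
	if partitioned {
		offset := tblID2PIDColumnIndex[tblID]
		physicalID = r.GetInt64(offset)
	}
	return physicalID
}

func main() {
	// Hypothetical layout: user columns (id, k) plus the partition-ID column at offset 2.
	tblID2PIDColumnIndex := map[int64]int{101: 2}
	r := row{cols: []int64{5, 5, 10103}}
	fmt.Println(physicalIDForLock(101, true, tblID2PIDColumnIndex, r))  // 10103: lock key uses the partition ID
	fmt.Println(physicalIDForLock(101, false, tblID2PIDColumnIndex, r)) // 101: non-partitioned table keeps the table ID
}

Compared with the removed GetPartitionByRow path, this avoids re-deriving the partition from the row's datums, an approach that assumed the row layout of a single table and does not hold once the child of SelectLockExec is a join.
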
147 changes: 147 additions & 0 deletions executor/partition_table_test.go
@@ -14,6 +14,8 @@
package executor_test

import (
"time"

. "github.com/pingcap/check"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/util/testkit"
@@ -182,3 +184,148 @@ partition p2 values less than (10))`)
tk.MustExec("insert into p values (1,3), (3,4), (5,6), (7,9)")
tk.MustQuery("select * from p use index (idx)").Check(testkit.Rows("1 3", "3 4", "5 6", "7 9"))
}

func (s *partitionTableSuite) TestIssue20028(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1, t2")
tk.MustExec("set @@tidb_partition_prune_mode='static-only'")
tk.MustExec(`create table t1 (c_datetime datetime, primary key (c_datetime))
partition by range (to_days(c_datetime)) ( partition p0 values less than (to_days('2020-02-01')),
partition p1 values less than (to_days('2020-04-01')),
partition p2 values less than (to_days('2020-06-01')),
partition p3 values less than maxvalue)`)
tk.MustExec("create table t2 (c_datetime datetime, unique key(c_datetime))")
tk.MustExec("insert into t1 values ('2020-06-26 03:24:00'), ('2020-02-21 07:15:33'), ('2020-04-27 13:50:58')")
tk.MustExec("insert into t2 values ('2020-01-10 09:36:00'), ('2020-02-04 06:00:00'), ('2020-06-12 03:45:18')")
tk.MustExec("begin")
tk.MustQuery("select * from t1 join t2 on t1.c_datetime >= t2.c_datetime for update").
Sort().
Check(testkit.Rows(
"2020-02-21 07:15:33 2020-01-10 09:36:00",
"2020-02-21 07:15:33 2020-02-04 06:00:00",
"2020-04-27 13:50:58 2020-01-10 09:36:00",
"2020-04-27 13:50:58 2020-02-04 06:00:00",
"2020-06-26 03:24:00 2020-01-10 09:36:00",
"2020-06-26 03:24:00 2020-02-04 06:00:00",
"2020-06-26 03:24:00 2020-06-12 03:45:18"))
tk.MustExec("rollback")
}

func (s *partitionTableSuite) TestSelectLockOnPartitionTable(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec(`create table pt (id int primary key, k int, c int, index(k))
partition by range (id) (
partition p0 values less than (4),
partition p1 values less than (7),
partition p2 values less than (11))`)
tk.MustExec("insert into pt values (5, 5, 5)")
// TODO: Fix the bug when @@tidb_partition_prune_mode is 'dynamic-only'; pay special
// attention to index join, as it is supported in that mode.
tk.MustExec("set tidb_partition_prune_mode='static-only'")
tk2 := testkit.NewTestKit(c, s.store)
tk2.MustExec("use test")
tk2.MustExec("set tidb_partition_prune_mode='static-only'")

optimisticTableReader := func() {
tk.MustExec("set @@tidb_txn_mode = 'optimistic'")
tk2.MustExec("set @@tidb_txn_mode = 'optimistic'")
tk.MustExec("begin")
tk.MustQuery("select id, k from pt ignore index (k) where k = 5 for update").Check(testkit.Rows("5 5"))
tk2.MustExec("update pt set c = c + 1 where k = 5")
_, err := tk.Exec("commit")
c.Assert(err, NotNil) // Write conflict
}

optimisticIndexReader := func() {
tk.MustExec("set @@tidb_txn_mode = 'optimistic'")
tk2.MustExec("set @@tidb_txn_mode = 'optimistic'")
tk.MustExec("begin")
// This is not index reader actually.
tk.MustQuery("select k from pt where k = 5 for update").Check(testkit.Rows("5"))
tk2.MustExec("update pt set c = c + 1 where k = 5")
_, err := tk.Exec("commit")
c.Assert(err, NotNil)
}

optimisticIndexLookUp := func() {
tk.MustExec("set @@tidb_txn_mode = 'optimistic'")
tk2.MustExec("set @@tidb_txn_mode = 'optimistic'")
tk.MustExec("begin")
tk.MustQuery("select id, k from pt use index (k) where k = 5 for update").Check(testkit.Rows("5 5"))
tk2.MustExec("update pt set c = c + 1 where k = 5")
_, err := tk.Exec("commit")
c.Assert(err, NotNil)
}

pessimisticTableReader := func() {
tk.MustExec("set @@tidb_txn_mode = 'pessimistic'")
tk2.MustExec("set @@tidb_txn_mode = 'pessimistic'")
tk.MustExec("begin")
tk.MustQuery("select id, k from pt ignore index (k) where k = 5 for update").Check(testkit.Rows("5 5"))
ch := make(chan int, 2)
go func() {
tk2.MustExec("update pt set c = c + 1 where k = 5")
ch <- 1
}()
time.Sleep(100 * time.Millisecond)
ch <- 2

// Check the operation in the goroutine is blocked, if not the first result in
// the channel should be 1.
c.Assert(<-ch, Equals, 2)

tk.MustExec("commit")
tk.MustQuery("select c from pt where k = 5").Check(testkit.Rows("8"))
}

pessimisticIndexReader := func() {
tk.MustExec("set @@tidb_txn_mode = 'pessimistic'")
tk2.MustExec("set @@tidb_txn_mode = 'pessimistic'")
tk.MustExec("begin")
// This is not index reader actually.
tk.MustQuery("select k from pt where k = 5 for update").Check(testkit.Rows("5"))
ch := make(chan int, 2)
go func() {
tk2.MustExec("update pt set c = c + 1 where k = 5")
ch <- 1
}()
time.Sleep(100 * time.Millisecond)
ch <- 2

// Check the operation in the goroutine is blocked,
c.Assert(<-ch, Equals, 2)

tk.MustExec("commit")
tk.MustQuery("select c from pt where k = 5").Check(testkit.Rows("9"))
}

pessimisticIndexLookUp := func() {
tk.MustExec("set @@tidb_txn_mode = 'pessimistic'")
tk2.MustExec("set @@tidb_txn_mode = 'pessimistic'")
tk.MustExec("begin")
tk.MustQuery("select id, k from pt use index (k) where k = 5 for update").Check(testkit.Rows("5 5"))
ch := make(chan int, 2)
go func() {
tk2.MustExec("update pt set c = c + 1 where k = 5")
ch <- 1
}()
time.Sleep(100 * time.Millisecond)
ch <- 2

// Check the operation in the goroutine is blocked,
c.Assert(<-ch, Equals, 2)

tk.MustExec("commit")
tk.MustQuery("select c from pt where k = 5").Check(testkit.Rows("10"))
}

testCases := []func(){
optimisticTableReader, optimisticIndexLookUp, optimisticIndexReader,
pessimisticTableReader, pessimisticIndexReader, pessimisticIndexLookUp,
}
for _, c := range testCases {
c()
}
}