From a9d7577eccd310f148ae602b278af160b843b468 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E8=B6=85?=
Date: Tue, 4 Apr 2023 18:47:20 +0800
Subject: [PATCH 01/12] disttask: merge `GlobalTaskManager` and
 `SubTaskManager` (#42786)

---
 disttask/framework/dispatcher/dispatcher.go   |  36 +++---
 .../framework/dispatcher/dispatcher_test.go   |  37 +++----
 disttask/framework/framework_test.go          |  10 +-
 disttask/framework/scheduler/interface.go     |   9 +-
 .../framework/scheduler/interface_mock.go     |  25 ++---
 disttask/framework/scheduler/manager.go       |  28 +++--
 disttask/framework/scheduler/manager_test.go  |  60 +++++-----
 disttask/framework/scheduler/scheduler.go     |  34 +++---
 .../framework/scheduler/scheduler_test.go     |   6 +-
 disttask/framework/storage/table_test.go      |  42 +++----
 disttask/framework/storage/task_table.go      | 103 ++++++------------
 domain/domain.go                              |  31 ++----
 12 files changed, 180 insertions(+), 241 deletions(-)

diff --git a/disttask/framework/dispatcher/dispatcher.go b/disttask/framework/dispatcher/dispatcher.go
index 55c61881ee1e5..9e1d0453c4928 100644
--- a/disttask/framework/dispatcher/dispatcher.go
+++ b/disttask/framework/dispatcher/dispatcher.go
@@ -92,12 +92,11 @@ func (d *dispatcher) delRunningGTask(globalTaskID int64) {
 }
 
 type dispatcher struct {
-	ctx        context.Context
-	cancel     context.CancelFunc
-	gTaskMgr   *storage.GlobalTaskManager
-	subTaskMgr *storage.SubTaskManager
-	wg         tidbutil.WaitGroupWrapper
-	gPool      *spool.Pool
+	ctx     context.Context
+	cancel  context.CancelFunc
+	taskMgr *storage.TaskManager
+	wg      tidbutil.WaitGroupWrapper
+	gPool   *spool.Pool
 
 	runningGTasks struct {
 		syncutil.RWMutex
@@ -107,10 +106,9 @@
 }
 
 // NewDispatcher creates a dispatcher struct.
-func NewDispatcher(ctx context.Context, globalTaskTable *storage.GlobalTaskManager, subtaskTable *storage.SubTaskManager) (Dispatch, error) {
+func NewDispatcher(ctx context.Context, taskTable *storage.TaskManager) (Dispatch, error) {
 	dispatcher := &dispatcher{
-		gTaskMgr:             globalTaskTable,
-		subTaskMgr:           subtaskTable,
+		taskMgr:              taskTable,
 		detectPendingGTaskCh: make(chan *proto.Task, DefaultDispatchConcurrency),
 	}
 	pool, err := spool.NewPool("dispatch_pool", int32(DefaultDispatchConcurrency), util.DistTask, spool.WithBlocking(true))
@@ -156,7 +154,7 @@ func (d *dispatcher) DispatchTaskLoop() {
 			}
 			// TODO: Consider getting these tasks, in addition to the task being worked on..
-			gTasks, err := d.gTaskMgr.GetTasksInStates(proto.TaskStatePending, proto.TaskStateRunning, proto.TaskStateReverting)
+			gTasks, err := d.taskMgr.GetGlobalTasksInStates(proto.TaskStatePending, proto.TaskStateRunning, proto.TaskStateReverting)
 			if err != nil {
 				logutil.BgLogger().Warn("get unfinished(pending, running or reverting) tasks failed", zap.Error(err))
 				break
 			}
@@ -199,7 +197,7 @@ func (d *dispatcher) probeTask(gTask *proto.Task) (isFinished bool, subTaskErr s
 	// TODO: Consider putting the following operations into a transaction.
 	// TODO: Consider collect some information about the tasks.
 	if gTask.State != proto.TaskStateReverting {
-		cnt, err := d.subTaskMgr.GetSubtaskInStatesCnt(gTask.ID, proto.TaskStateFailed)
+		cnt, err := d.taskMgr.GetSubtaskInStatesCnt(gTask.ID, proto.TaskStateFailed)
 		if err != nil {
 			logutil.BgLogger().Warn("check task failed", zap.Int64("task ID", gTask.ID), zap.Error(err))
 			return false, ""
@@ -208,7 +206,7 @@ func (d *dispatcher) probeTask(gTask *proto.Task) (isFinished bool, subTaskErr s
 			return false, proto.TaskStateFailed
 		}
 
-		cnt, err = d.subTaskMgr.GetSubtaskInStatesCnt(gTask.ID, proto.TaskStatePending, proto.TaskStateRunning)
+		cnt, err = d.taskMgr.GetSubtaskInStatesCnt(gTask.ID, proto.TaskStatePending, proto.TaskStateRunning)
 		if err != nil {
 			logutil.BgLogger().Warn("check task failed", zap.Int64("task ID", gTask.ID), zap.Error(err))
 			return false, ""
@@ -220,7 +218,7 @@ func (d *dispatcher) probeTask(gTask *proto.Task) (isFinished bool, subTaskErr s
 		return true, ""
 	}
 
-	cnt, err := d.subTaskMgr.GetSubtaskInStatesCnt(gTask.ID, proto.TaskStateRevertPending, proto.TaskStateReverting)
+	cnt, err := d.taskMgr.GetSubtaskInStatesCnt(gTask.ID, proto.TaskStateRevertPending, proto.TaskStateReverting)
 	if err != nil {
 		logutil.BgLogger().Warn("check task failed", zap.Int64("task ID", gTask.ID), zap.Error(err))
 		return false, ""
@@ -310,7 +308,7 @@ func (d *dispatcher) updateTask(gTask *proto.Task, gTaskState string, retryTimes
 	for i := 0; i < retryTimes; i++ {
 		gTask.State = gTaskState
 		// Write the global task meta into the storage.
-		err = d.gTaskMgr.UpdateTask(gTask)
+		err = d.taskMgr.UpdateGlobalTask(gTask)
 		if err == nil {
 			break
 		}
@@ -347,7 +345,7 @@ func (d *dispatcher) processErrFlow(gTask *proto.Task, receiveErr string) error
 		return d.updateTask(gTask, proto.TaskStateReverted, retrySQLTimes)
 	}
 
-	// TODO: UpdateTask and AddNewTask in a txn.
+	// TODO: UpdateGlobalTask and AddNewSubTask in a txn.
 	// Write the global task meta into the storage.
 	err = d.updateTask(gTask, proto.TaskStateReverting, retrySQLTimes)
 	if err != nil {
@@ -357,7 +355,7 @@ func (d *dispatcher) processErrFlow(gTask *proto.Task, receiveErr string) error
 	// New rollback subtasks and write into the storage.
 	for _, id := range instanceIDs {
 		subtask := proto.NewSubtask(gTask.ID, gTask.Type, id, meta)
-		err = d.subTaskMgr.AddNewTask(gTask.ID, subtask.SchedulerID, subtask.Meta, subtask.Type, true)
+		err = d.taskMgr.AddNewSubTask(gTask.ID, subtask.SchedulerID, subtask.Meta, subtask.Type, true)
 		if err != nil {
 			logutil.BgLogger().Warn("add subtask failed", zap.Int64("gTask ID", gTask.ID), zap.Error(err))
 			return err
 		}
@@ -410,7 +408,7 @@ func (d *dispatcher) processNormalFlow(gTask *proto.Task) (err error) {
 		return nil
 	}
 
-	// TODO: UpdateTask and AddNewTask in a txn.
+	// TODO: UpdateGlobalTask and AddNewSubTask in a txn.
 	// Write the global task meta into the storage.
 	err = d.updateTask(gTask, gTask.State, retryTimes)
 	if err != nil {
@@ -428,7 +426,7 @@ func (d *dispatcher) processNormalFlow(gTask *proto.Task) (err error) {
 		// TODO: Consider batch insert.
 		// TODO: Synchronization interruption problem, e.g. AddNewTask failed.
- err = d.subTaskMgr.AddNewTask(gTask.ID, subtask.SchedulerID, subtask.Meta, subtask.Type, false) + err = d.taskMgr.AddNewSubTask(gTask.ID, subtask.SchedulerID, subtask.Meta, subtask.Type, false) if err != nil { logutil.BgLogger().Warn("add subtask failed", zap.Int64("gTask ID", gTask.ID), zap.Error(err)) return err @@ -468,7 +466,7 @@ func (d *dispatcher) GetAllSchedulerIDs(ctx context.Context, gTaskID int64) ([]s return nil, nil } - schedulerIDs, err := d.subTaskMgr.GetSchedulerIDs(gTaskID) + schedulerIDs, err := d.taskMgr.GetSchedulerIDsByTaskID(gTaskID) if err != nil { return nil, err } diff --git a/disttask/framework/dispatcher/dispatcher_test.go b/disttask/framework/dispatcher/dispatcher_test.go index db3c1abca155b..7c37b33a86ead 100644 --- a/disttask/framework/dispatcher/dispatcher_test.go +++ b/disttask/framework/dispatcher/dispatcher_test.go @@ -34,18 +34,15 @@ import ( "github.com/tikv/client-go/v2/util" ) -func MockDispatcher(t *testing.T) (dispatcher.Dispatch, *storage.GlobalTaskManager, *storage.SubTaskManager, kv.Storage) { +func MockDispatcher(t *testing.T) (dispatcher.Dispatch, *storage.TaskManager, kv.Storage) { store := testkit.CreateMockStore(t) gtk := testkit.NewTestKit(t, store) - stk := testkit.NewTestKit(t, store) ctx := context.Background() - gm := storage.NewGlobalTaskManager(util.WithInternalSourceType(ctx, "globalTaskManager"), gtk.Session()) - storage.SetGlobalTaskManager(gm) - sm := storage.NewSubTaskManager(util.WithInternalSourceType(ctx, "subTaskManager"), stk.Session()) - storage.SetSubTaskManager(sm) - dsp, err := dispatcher.NewDispatcher(util.WithInternalSourceType(ctx, "dispatcher"), gm, sm) + mgr := storage.NewTaskManager(util.WithInternalSourceType(ctx, "taskManager"), gtk.Session()) + storage.SetTaskManager(mgr) + dsp, err := dispatcher.NewDispatcher(util.WithInternalSourceType(ctx, "dispatcher"), mgr) require.NoError(t, err) - return dsp, gm, sm, store + return dsp, mgr, store } func deleteTasks(t *testing.T, store kv.Storage, taskID int64) { @@ -55,7 +52,7 @@ func deleteTasks(t *testing.T, store kv.Storage, taskID int64) { func TestGetInstance(t *testing.T) { ctx := context.Background() - dsp, _, subTaskMgr, _ := MockDispatcher(t) + dsp, mgr, _ := MockDispatcher(t) makeFailpointRes := func(v interface{}) string { bytes, err := json.Marshal(v) @@ -103,7 +100,7 @@ func TestGetInstance(t *testing.T) { TaskID: gTaskID, SchedulerID: uuids[1], } - err = subTaskMgr.AddNewTask(gTaskID, subtask.SchedulerID, nil, subtask.Type, true) + err = mgr.AddNewSubTask(gTaskID, subtask.SchedulerID, nil, subtask.Type, true) require.NoError(t, err) instanceIDs, err = dsp.GetAllSchedulerIDs(ctx, gTaskID) require.NoError(t, err) @@ -115,7 +112,7 @@ func TestGetInstance(t *testing.T) { TaskID: gTaskID, SchedulerID: uuids[0], } - err = subTaskMgr.AddNewTask(gTaskID, subtask.SchedulerID, nil, subtask.Type, true) + err = mgr.AddNewSubTask(gTaskID, subtask.SchedulerID, nil, subtask.Type, true) require.NoError(t, err) instanceIDs, err = dsp.GetAllSchedulerIDs(ctx, gTaskID) require.NoError(t, err) @@ -142,7 +139,7 @@ func checkDispatch(t *testing.T, taskCnt int, isSucc bool) { dispatcher.DefaultDispatchConcurrency = 1 } - dsp, gTaskMgr, subTaskMgr, store := MockDispatcher(t) + dsp, mgr, store := MockDispatcher(t) dsp.Start() defer func() { dsp.Stop() @@ -171,24 +168,24 @@ func checkDispatch(t *testing.T, taskCnt int, isSucc bool) { // Mock add tasks. 
taskIDs := make([]int64, 0, taskCnt) for i := 0; i < taskCnt; i++ { - taskID, err := gTaskMgr.AddNewTask(fmt.Sprintf("%d", i), taskTypeExample, 0, nil) + taskID, err := mgr.AddNewGlobalTask(fmt.Sprintf("%d", i), taskTypeExample, 0, nil) require.NoError(t, err) taskIDs = append(taskIDs, taskID) } // test normal flow checkGetRunningGTaskCnt() - tasks, err := gTaskMgr.GetTasksInStates(proto.TaskStateRunning) + tasks, err := mgr.GetGlobalTasksInStates(proto.TaskStateRunning) require.NoError(t, err) require.Len(t, tasks, taskCnt) for i, taskID := range taskIDs { require.Equal(t, int64(i+1), tasks[i].ID) - subtasks, err := subTaskMgr.GetSubtaskInStatesCnt(taskID, proto.TaskStatePending) + subtasks, err := mgr.GetSubtaskInStatesCnt(taskID, proto.TaskStatePending) require.NoError(t, err) require.Equal(t, int64(subtaskCnt), subtasks, fmt.Sprintf("num:%d", i)) } // test parallelism control if taskCnt == 1 { - taskID, err := gTaskMgr.AddNewTask(fmt.Sprintf("%d", taskCnt), taskTypeExample, 0, nil) + taskID, err := mgr.AddNewGlobalTask(fmt.Sprintf("%d", taskCnt), taskTypeExample, 0, nil) require.NoError(t, err) checkGetRunningGTaskCnt() // Clean the task. @@ -199,7 +196,7 @@ func checkDispatch(t *testing.T, taskCnt int, isSucc bool) { // test DetectTaskLoop checkGetGTaskState := func(expectedState string) { for i := 0; i < cnt; i++ { - tasks, err = gTaskMgr.GetTasksInStates(expectedState) + tasks, err = mgr.GetGlobalTasksInStates(expectedState) require.NoError(t, err) if len(tasks) == taskCnt { break @@ -211,7 +208,7 @@ func checkDispatch(t *testing.T, taskCnt int, isSucc bool) { if isSucc { // Mock subtasks succeed. for i := 1; i <= subtaskCnt*taskCnt; i++ { - err = subTaskMgr.UpdateSubtaskState(int64(i), proto.TaskStateSucceed) + err = mgr.UpdateSubtaskState(int64(i), proto.TaskStateSucceed) require.NoError(t, err) } checkGetGTaskState(proto.TaskStateSucceed) @@ -227,7 +224,7 @@ func checkDispatch(t *testing.T, taskCnt int, isSucc bool) { }() // Mock a subtask fails. for i := 1; i <= subtaskCnt*taskCnt; i += subtaskCnt { - err = subTaskMgr.UpdateSubtaskState(int64(i), proto.TaskStateFailed) + err = mgr.UpdateSubtaskState(int64(i), proto.TaskStateFailed) require.NoError(t, err) } checkGetGTaskState(proto.TaskStateReverting) @@ -235,7 +232,7 @@ func checkDispatch(t *testing.T, taskCnt int, isSucc bool) { // Mock all subtask reverted. 
start := subtaskCnt * taskCnt for i := start; i <= start+subtaskCnt*taskCnt; i++ { - err = subTaskMgr.UpdateSubtaskState(int64(i), proto.TaskStateReverted) + err = mgr.UpdateSubtaskState(int64(i), proto.TaskStateReverted) require.NoError(t, err) } checkGetGTaskState(proto.TaskStateReverted) diff --git a/disttask/framework/framework_test.go b/disttask/framework/framework_test.go index 96beda32d7464..ec51ed8cece3a 100644 --- a/disttask/framework/framework_test.go +++ b/disttask/framework/framework_test.go @@ -90,10 +90,10 @@ func TestFrameworkStartUp(t *testing.T) { return &testSubtaskExecutor{v: &v}, nil }) - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - gm := storage.NewGlobalTaskManager(context.TODO(), tk.Session()) - taskID, err := gm.AddNewTask("key1", "type1", 8, nil) + _ = testkit.CreateMockStore(t) + mgr, err := storage.GetTaskManager() + require.NoError(t, err) + taskID, err := mgr.AddNewGlobalTask("key1", "type1", 8, nil) require.NoError(t, err) start := time.Now() @@ -104,7 +104,7 @@ func TestFrameworkStartUp(t *testing.T) { } time.Sleep(time.Second) - task, err = gm.GetTaskByID(taskID) + task, err = mgr.GetGlobalTaskByID(taskID) require.NoError(t, err) require.NotNil(t, task) if task.State != proto.TaskStatePending && task.State != proto.TaskStateRunning { diff --git a/disttask/framework/scheduler/interface.go b/disttask/framework/scheduler/interface.go index 6d1bdcb1df093..81db9823b787e 100644 --- a/disttask/framework/scheduler/interface.go +++ b/disttask/framework/scheduler/interface.go @@ -22,16 +22,11 @@ import ( // TaskTable defines the interface to access task table. type TaskTable interface { - GetTasksInStates(states ...interface{}) (task []*proto.Task, err error) - GetTaskByID(taskID int64) (task *proto.Task, err error) -} - -// SubtaskTable defines the interface to access subtask table. -type SubtaskTable interface { + GetGlobalTasksInStates(states ...interface{}) (task []*proto.Task, err error) + GetGlobalTaskByID(taskID int64) (task *proto.Task, err error) GetSubtaskInStates(instanceID string, taskID int64, states ...interface{}) (*proto.Subtask, error) UpdateSubtaskState(id int64, state string) error HasSubtasksInStates(instanceID string, taskID int64, states ...interface{}) (bool, error) - // UpdateHeartbeat(TiDB string, taskID int64, heartbeat time.Time) error } // Pool defines the interface of a pool. diff --git a/disttask/framework/scheduler/interface_mock.go b/disttask/framework/scheduler/interface_mock.go index b618aebb72f69..07e54687c3ab7 100644 --- a/disttask/framework/scheduler/interface_mock.go +++ b/disttask/framework/scheduler/interface_mock.go @@ -27,8 +27,8 @@ type MockTaskTable struct { mock.Mock } -// GetTasksInStates implements TaskTable.GetTasksInStates. -func (t *MockTaskTable) GetTasksInStates(states ...interface{}) ([]*proto.Task, error) { +// GetGlobalTasksInStates implements TaskTable.GetTasksInStates. +func (t *MockTaskTable) GetGlobalTasksInStates(states ...interface{}) ([]*proto.Task, error) { args := t.Called(states...) if args.Error(1) != nil { return nil, args.Error(1) @@ -39,8 +39,8 @@ func (t *MockTaskTable) GetTasksInStates(states ...interface{}) ([]*proto.Task, } } -// GetTaskByID implements TaskTable.GetTaskByID. -func (t *MockTaskTable) GetTaskByID(id int64) (*proto.Task, error) { +// GetGlobalTaskByID implements TaskTable.GetTaskByID. 
+func (t *MockTaskTable) GetGlobalTaskByID(id int64) (*proto.Task, error) { args := t.Called(id) if args.Error(1) != nil { return nil, args.Error(1) @@ -51,14 +51,9 @@ func (t *MockTaskTable) GetTaskByID(id int64) (*proto.Task, error) { } } -// MockSubtaskTable is a mock of SubtaskTable. -type MockSubtaskTable struct { - mock.Mock -} - // GetSubtaskInStates implements SubtaskTable.GetSubtaskInStates. -func (m *MockSubtaskTable) GetSubtaskInStates(instanceID string, taskID int64, states ...interface{}) (*proto.Subtask, error) { - args := m.Called(instanceID, taskID, states) +func (t *MockTaskTable) GetSubtaskInStates(instanceID string, taskID int64, states ...interface{}) (*proto.Subtask, error) { + args := t.Called(instanceID, taskID, states) if args.Error(1) != nil { return nil, args.Error(1) } else if args.Get(0) == nil { @@ -69,14 +64,14 @@ func (m *MockSubtaskTable) GetSubtaskInStates(instanceID string, taskID int64, s } // UpdateSubtaskState implements SubtaskTable.UpdateSubtaskState. -func (m *MockSubtaskTable) UpdateSubtaskState(id int64, state string) error { - args := m.Called(id, state) +func (t *MockTaskTable) UpdateSubtaskState(id int64, state string) error { + args := t.Called(id, state) return args.Error(0) } // HasSubtasksInStates implements SubtaskTable.HasSubtasksInStates. -func (m *MockSubtaskTable) HasSubtasksInStates(instanceID string, taskID int64, states ...interface{}) (bool, error) { - args := m.Called(instanceID, taskID, states) +func (t *MockTaskTable) HasSubtasksInStates(instanceID string, taskID int64, states ...interface{}) (bool, error) { + args := t.Called(instanceID, taskID, states) return args.Bool(0), args.Error(1) } diff --git a/disttask/framework/scheduler/manager.go b/disttask/framework/scheduler/manager.go index 7cdb6414937b8..fd5723d2684d7 100644 --- a/disttask/framework/scheduler/manager.go +++ b/disttask/framework/scheduler/manager.go @@ -36,7 +36,7 @@ var ( // ManagerBuilder is used to build a Manager. type ManagerBuilder struct { newPool func(name string, size int32, component util.Component, options ...spool.Option) (Pool, error) - newScheduler func(ctx context.Context, id string, taskID int64, subtaskTable SubtaskTable, pool Pool) InternalScheduler + newScheduler func(ctx context.Context, id string, taskID int64, taskTable TaskTable, pool Pool) InternalScheduler } // NewManagerBuilder creates a new ManagerBuilder. @@ -55,15 +55,14 @@ func (b *ManagerBuilder) setPoolFactory(poolFactory func(name string, size int32 } // setSchedulerFactory sets the schedulerFactory to mock the InternalScheduler in unit test. -func (b *ManagerBuilder) setSchedulerFactory(schedulerFactory func(ctx context.Context, id string, taskID int64, subtaskTable SubtaskTable, pool Pool) InternalScheduler) { +func (b *ManagerBuilder) setSchedulerFactory(schedulerFactory func(ctx context.Context, id string, taskID int64, taskTable TaskTable, pool Pool) InternalScheduler) { b.newScheduler = schedulerFactory } // Manager monitors the global task table and manages the schedulers. 
type Manager struct { - globalTaskTable TaskTable - subtaskTable SubtaskTable - schedulerPool Pool + taskTable TaskTable + schedulerPool Pool // taskType -> subtaskExecutorPool subtaskExecutorPools map[string]Pool mu struct { @@ -78,15 +77,14 @@ type Manager struct { cancel context.CancelFunc logCtx context.Context newPool func(name string, size int32, component util.Component, options ...spool.Option) (Pool, error) - newScheduler func(ctx context.Context, id string, taskID int64, subtaskTable SubtaskTable, pool Pool) InternalScheduler + newScheduler func(ctx context.Context, id string, taskID int64, taskTable TaskTable, pool Pool) InternalScheduler } // BuildManager builds a Manager. -func (b *ManagerBuilder) BuildManager(ctx context.Context, id string, globalTaskTable TaskTable, subtaskTable SubtaskTable) (*Manager, error) { +func (b *ManagerBuilder) BuildManager(ctx context.Context, id string, taskTable TaskTable) (*Manager, error) { m := &Manager{ id: id, - globalTaskTable: globalTaskTable, - subtaskTable: subtaskTable, + taskTable: taskTable, subtaskExecutorPools: make(map[string]Pool), logCtx: logutil.WithKeyValue(context.Background(), "dist_task_manager", id), newPool: b.newPool, @@ -149,7 +147,7 @@ func (m *Manager) fetchAndHandleRunnableTasks(ctx context.Context) { logutil.Logger(m.logCtx).Info("fetchAndHandleRunnableTasks done") return case <-ticker.C: - tasks, err := m.globalTaskTable.GetTasksInStates(proto.TaskStateRunning, proto.TaskStateReverting) + tasks, err := m.taskTable.GetGlobalTasksInStates(proto.TaskStateRunning, proto.TaskStateReverting) if err != nil { m.onError(err) continue @@ -169,7 +167,7 @@ func (m *Manager) fetchAndFastCancelTasks(ctx context.Context) { logutil.Logger(m.logCtx).Info("fetchAndFastCancelTasks done") return case <-ticker.C: - tasks, err := m.globalTaskTable.GetTasksInStates(proto.TaskStateReverting) + tasks, err := m.taskTable.GetGlobalTasksInStates(proto.TaskStateReverting) if err != nil { m.onError(err) continue @@ -188,7 +186,7 @@ func (m *Manager) onRunnableTasks(ctx context.Context, tasks []*proto.Task) { logutil.Logger(m.logCtx).Error("unknown task type", zap.String("type", task.Type)) continue } - exist, err := m.subtaskTable.HasSubtasksInStates(m.id, task.ID, proto.TaskStatePending, proto.TaskStateRevertPending) + exist, err := m.taskTable.HasSubtasksInStates(m.id, task.ID, proto.TaskStatePending, proto.TaskStateRevertPending) if err != nil { m.onError(err) continue @@ -259,7 +257,7 @@ func (m *Manager) onRunnableTask(ctx context.Context, taskID int64, taskType str return } // runCtx only used in scheduler.Run, cancel in m.fetchAndFastCancelTasks - scheduler := m.newScheduler(ctx, m.id, taskID, m.subtaskTable, m.subtaskExecutorPools[taskType]) + scheduler := m.newScheduler(ctx, m.id, taskID, m.taskTable, m.subtaskExecutorPools[taskType]) scheduler.Start() defer scheduler.Stop() for { @@ -268,7 +266,7 @@ func (m *Manager) onRunnableTask(ctx context.Context, taskID int64, taskType str return case <-time.After(checkTime): } - task, err := m.globalTaskTable.GetTaskByID(taskID) + task, err := m.taskTable.GetGlobalTaskByID(taskID) if err != nil { m.onError(err) return @@ -278,7 +276,7 @@ func (m *Manager) onRunnableTask(ctx context.Context, taskID int64, taskType str return } // TODO: intergrate with heartbeat mechanism - if exist, err := m.subtaskTable.HasSubtasksInStates(m.id, task.ID, proto.TaskStatePending, proto.TaskStateRevertPending); err != nil { + if exist, err := m.taskTable.HasSubtasksInStates(m.id, task.ID, proto.TaskStatePending, 
proto.TaskStateRevertPending); err != nil { m.onError(err) return } else if !exist { diff --git a/disttask/framework/scheduler/manager_test.go b/disttask/framework/scheduler/manager_test.go index a6f1e1836807a..583b8ae2bbd44 100644 --- a/disttask/framework/scheduler/manager_test.go +++ b/disttask/framework/scheduler/manager_test.go @@ -29,7 +29,7 @@ import ( func TestManageTask(t *testing.T) { b := NewManagerBuilder() - m, err := b.BuildManager(context.Background(), "test", nil, nil) + m, err := b.BuildManager(context.Background(), "test", nil) require.NoError(t, err) tasks := []*proto.Task{{ID: 1}, {ID: 2}} newTasks := m.filterAlreadyHandlingTasks(tasks) @@ -67,12 +67,11 @@ func TestManageTask(t *testing.T) { func TestOnRunnableTasks(t *testing.T) { mockTaskTable := &MockTaskTable{} - mockSubtaskTable := &MockSubtaskTable{} mockInternalScheduler := &MockInternalScheduler{} mockPool := &MockPool{} b := NewManagerBuilder() - b.setSchedulerFactory(func(ctx context.Context, id string, taskID int64, subtaskTable SubtaskTable, pool Pool) InternalScheduler { + b.setSchedulerFactory(func(ctx context.Context, id string, taskID int64, taskTable TaskTable, pool Pool) InternalScheduler { return mockInternalScheduler }) b.setPoolFactory(func(name string, size int32, component util.Component, options ...spool.Option) (Pool, error) { @@ -82,7 +81,7 @@ func TestOnRunnableTasks(t *testing.T) { taskID := int64(1) task := &proto.Task{ID: taskID, State: proto.TaskStateRunning, Step: 0, Type: "type"} - m, err := b.BuildManager(context.Background(), id, mockTaskTable, mockSubtaskTable) + m, err := b.BuildManager(context.Background(), id, mockTaskTable) require.NoError(t, err) // no task @@ -94,57 +93,56 @@ func TestOnRunnableTasks(t *testing.T) { m.subtaskExecutorPools["type"] = mockPool // get subtask failed - mockSubtaskTable.On("HasSubtasksInStates", id, taskID, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(false, errors.New("get subtask failed")).Once() + mockTaskTable.On("HasSubtasksInStates", id, taskID, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(false, errors.New("get subtask failed")).Once() m.onRunnableTasks(context.Background(), []*proto.Task{task}) // no subtask - mockSubtaskTable.On("HasSubtasksInStates", id, taskID, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(false, nil).Once() + mockTaskTable.On("HasSubtasksInStates", id, taskID, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(false, nil).Once() m.onRunnableTasks(context.Background(), []*proto.Task{task}) // pool error - mockSubtaskTable.On("HasSubtasksInStates", id, taskID, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() + mockTaskTable.On("HasSubtasksInStates", id, taskID, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() mockPool.On("Run", mock.Anything).Return(errors.New("pool error")).Once() m.onRunnableTasks(context.Background(), []*proto.Task{task}) // step 0 succeed - mockSubtaskTable.On("HasSubtasksInStates", id, taskID, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() + mockTaskTable.On("HasSubtasksInStates", id, taskID, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() mockPool.On("Run", mock.Anything).Return(nil).Once() mockInternalScheduler.On("Start").Once() - mockTaskTable.On("GetTaskByID", taskID).Return(task, nil).Once() - 
mockSubtaskTable.On("HasSubtasksInStates", id, taskID, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() + mockTaskTable.On("GetGlobalTaskByID", taskID).Return(task, nil).Once() + mockTaskTable.On("HasSubtasksInStates", id, taskID, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() mockInternalScheduler.On("Run", mock.Anything, task).Return(nil).Once() m.onRunnableTasks(context.Background(), []*proto.Task{task}) // step 1 canceled task1 := &proto.Task{ID: taskID, State: proto.TaskStateRunning, Step: 1} - mockTaskTable.On("GetTaskByID", taskID).Return(task1, nil).Once() - mockSubtaskTable.On("HasSubtasksInStates", id, taskID, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() + mockTaskTable.On("GetGlobalTaskByID", taskID).Return(task1, nil).Once() + mockTaskTable.On("HasSubtasksInStates", id, taskID, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() mockInternalScheduler.On("Run", mock.Anything, task1).Return(errors.New("run errr")).Once() task2 := &proto.Task{ID: taskID, State: proto.TaskStateReverting, Step: 1} - mockTaskTable.On("GetTaskByID", taskID).Return(task2, nil).Once() - mockSubtaskTable.On("HasSubtasksInStates", id, taskID, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() + mockTaskTable.On("GetGlobalTaskByID", taskID).Return(task2, nil).Once() + mockTaskTable.On("HasSubtasksInStates", id, taskID, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() mockInternalScheduler.On("Rollback", mock.Anything, task2).Return(nil).Once() task3 := &proto.Task{ID: taskID, State: proto.TaskStateReverted, Step: 1} - mockTaskTable.On("GetTaskByID", taskID).Return(task3, nil).Once() + mockTaskTable.On("GetGlobalTaskByID", taskID).Return(task3, nil).Once() mockInternalScheduler.On("Stop").Return(nil).Once() time.Sleep(5 * time.Second) mockTaskTable.AssertExpectations(t) - mockSubtaskTable.AssertExpectations(t) + mockTaskTable.AssertExpectations(t) mockInternalScheduler.AssertExpectations(t) mockPool.AssertExpectations(t) } func TestManager(t *testing.T) { - mockTaskTable := &MockTaskTable{} // TODO(gmhdbjd): use real subtask table instead of mock - mockSubtaskTable := &MockSubtaskTable{} + mockTaskTable := &MockTaskTable{} mockInternalScheduler := &MockInternalScheduler{} mockPool := &MockPool{} b := NewManagerBuilder() - b.setSchedulerFactory(func(ctx context.Context, id string, taskID int64, subtaskTable SubtaskTable, pool Pool) InternalScheduler { + b.setSchedulerFactory(func(ctx context.Context, id string, taskID int64, taskTable TaskTable, pool Pool) InternalScheduler { return mockInternalScheduler }) b.setPoolFactory(func(name string, size int32, component util.Component, options ...spool.Option) (Pool, error) { @@ -161,25 +159,25 @@ func TestManager(t *testing.T) { task1 := &proto.Task{ID: taskID1, State: proto.TaskStateRunning, Step: 0, Type: "type"} task2 := &proto.Task{ID: taskID2, State: proto.TaskStateReverting, Step: 0, Type: "type"} - mockTaskTable.On("GetTasksInStates", proto.TaskStateRunning, proto.TaskStateReverting).Return([]*proto.Task{task1, task2}, nil) - mockTaskTable.On("GetTasksInStates", proto.TaskStateReverting).Return([]*proto.Task{task2}, nil) + mockTaskTable.On("GetGlobalTasksInStates", proto.TaskStateRunning, proto.TaskStateReverting).Return([]*proto.Task{task1, task2}, nil) + 
mockTaskTable.On("GetGlobalTasksInStates", proto.TaskStateReverting).Return([]*proto.Task{task2}, nil) // task1 - mockSubtaskTable.On("HasSubtasksInStates", id, taskID1, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() + mockTaskTable.On("HasSubtasksInStates", id, taskID1, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() mockPool.On("Run", mock.Anything).Return(nil).Once() mockInternalScheduler.On("Start").Once() - mockTaskTable.On("GetTaskByID", taskID1).Return(task1, nil) - mockSubtaskTable.On("HasSubtasksInStates", id, taskID1, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() + mockTaskTable.On("GetGlobalTaskByID", taskID1).Return(task1, nil) + mockTaskTable.On("HasSubtasksInStates", id, taskID1, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() mockInternalScheduler.On("Run", mock.Anything, task1).Return(nil).Once() - mockSubtaskTable.On("HasSubtasksInStates", id, taskID1, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(false, nil) + mockTaskTable.On("HasSubtasksInStates", id, taskID1, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(false, nil) mockInternalScheduler.On("Stop").Once() // task2 - mockSubtaskTable.On("HasSubtasksInStates", id, taskID2, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() + mockTaskTable.On("HasSubtasksInStates", id, taskID2, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() mockPool.On("Run", mock.Anything).Return(nil).Once() mockInternalScheduler.On("Start").Once() - mockTaskTable.On("GetTaskByID", taskID2).Return(task2, nil) - mockSubtaskTable.On("HasSubtasksInStates", id, taskID2, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() + mockTaskTable.On("GetGlobalTaskByID", taskID2).Return(task2, nil) + mockTaskTable.On("HasSubtasksInStates", id, taskID2, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(true, nil).Once() mockInternalScheduler.On("Rollback", mock.Anything, task2).Return(nil).Once() - mockSubtaskTable.On("HasSubtasksInStates", id, taskID2, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(false, nil) + mockTaskTable.On("HasSubtasksInStates", id, taskID2, []interface{}{proto.TaskStatePending, proto.TaskStateRevertPending}).Return(false, nil) mockInternalScheduler.On("Stop").Once() mockPool.On("ReleaseAndWait").Twice() RegisterSubtaskExectorConstructor("type", func(minimalTask proto.MinimalTask, step int64) (SubtaskExecutor, error) { @@ -187,14 +185,14 @@ func TestManager(t *testing.T) { }, func(opts *subtaskExecutorRegisterOptions) { opts.PoolSize = 1 }) - m, err := b.BuildManager(context.Background(), id, mockTaskTable, mockSubtaskTable) + m, err := b.BuildManager(context.Background(), id, mockTaskTable) require.NoError(t, err) m.Start() time.Sleep(5 * time.Second) m.Stop() time.Sleep(5 * time.Second) mockTaskTable.AssertExpectations(t) - mockSubtaskTable.AssertExpectations(t) + mockTaskTable.AssertExpectations(t) mockInternalScheduler.AssertExpectations(t) mockPool.AssertExpectations(t) } diff --git a/disttask/framework/scheduler/scheduler.go b/disttask/framework/scheduler/scheduler.go index 230a9e2bbd370..b479beaed42e1 100644 --- a/disttask/framework/scheduler/scheduler.go +++ b/disttask/framework/scheduler/scheduler.go @@ -27,14 
+27,14 @@ import ( // InternalSchedulerImpl is the implementation of InternalScheduler. type InternalSchedulerImpl struct { - ctx context.Context - cancel context.CancelFunc - id string - taskID int64 - subtaskTable SubtaskTable - pool Pool - wg sync.WaitGroup - logCtx context.Context + ctx context.Context + cancel context.CancelFunc + id string + taskID int64 + taskTable TaskTable + pool Pool + wg sync.WaitGroup + logCtx context.Context mu struct { sync.RWMutex @@ -45,14 +45,14 @@ type InternalSchedulerImpl struct { } // NewInternalScheduler creates a new InternalScheduler. -func NewInternalScheduler(ctx context.Context, id string, taskID int64, subtaskTable SubtaskTable, pool Pool) InternalScheduler { +func NewInternalScheduler(ctx context.Context, id string, taskID int64, taskTable TaskTable, pool Pool) InternalScheduler { logPrefix := fmt.Sprintf("id: %s, task_id: %d", id, taskID) schedulerImpl := &InternalSchedulerImpl{ - id: id, - taskID: taskID, - subtaskTable: subtaskTable, - pool: pool, - logCtx: logutil.WithKeyValue(context.Background(), "scheduler", logPrefix), + id: id, + taskID: taskID, + taskTable: taskTable, + pool: pool, + logCtx: logutil.WithKeyValue(context.Background(), "scheduler", logPrefix), } schedulerImpl.ctx, schedulerImpl.cancel = context.WithCancel(ctx) @@ -125,7 +125,7 @@ func (s *InternalSchedulerImpl) Run(ctx context.Context, task *proto.Task) error } for { - subtask, err := s.subtaskTable.GetSubtaskInStates(s.id, task.ID, proto.TaskStatePending) + subtask, err := s.taskTable.GetSubtaskInStates(s.id, task.ID, proto.TaskStatePending) if err != nil { s.onError(err) break @@ -200,7 +200,7 @@ func (s *InternalSchedulerImpl) Rollback(ctx context.Context, task *proto.Task) s.onError(err) return s.getError() } - subtask, err := s.subtaskTable.GetSubtaskInStates(s.id, task.ID, proto.TaskStateRevertPending) + subtask, err := s.taskTable.GetSubtaskInStates(s.id, task.ID, proto.TaskStateRevertPending) if err != nil { s.onError(err) return s.getError() @@ -277,7 +277,7 @@ func (s *InternalSchedulerImpl) resetError() { } func (s *InternalSchedulerImpl) updateSubtaskState(id int64, state string) { - err := s.subtaskTable.UpdateSubtaskState(id, state) + err := s.taskTable.UpdateSubtaskState(id, state) if err != nil { s.onError(err) } diff --git a/disttask/framework/scheduler/scheduler_test.go b/disttask/framework/scheduler/scheduler_test.go index 311b78de09b4f..7ed4c1a44a315 100644 --- a/disttask/framework/scheduler/scheduler_test.go +++ b/disttask/framework/scheduler/scheduler_test.go @@ -32,7 +32,7 @@ func TestSchedulerRun(t *testing.T) { defer cancel() runCtx, runCancel := context.WithCancel(ctx) defer runCancel() - mockSubtaskTable := &MockSubtaskTable{} + mockSubtaskTable := &MockTaskTable{} mockPool := &MockPool{} mockScheduler := &MockScheduler{} mockSubtaskExecutor := &MockSubtaskExecutor{} @@ -159,7 +159,7 @@ func TestSchedulerRollback(t *testing.T) { defer cancel() runCtx, runCancel := context.WithCancel(ctx) defer runCancel() - mockSubtaskTable := &MockSubtaskTable{} + mockSubtaskTable := &MockTaskTable{} mockPool := &MockPool{} mockScheduler := &MockScheduler{} @@ -222,7 +222,7 @@ func TestScheduler(t *testing.T) { defer cancel() runCtx, runCancel := context.WithCancel(ctx) defer runCancel() - mockSubtaskTable := &MockSubtaskTable{} + mockSubtaskTable := &MockTaskTable{} mockPool := &MockPool{} mockScheduler := &MockScheduler{} mockSubtaskExecutor := &MockSubtaskExecutor{} diff --git a/disttask/framework/storage/table_test.go 
b/disttask/framework/storage/table_test.go index 91d1bd03ca442..ca3058fb0ba89 100644 --- a/disttask/framework/storage/table_test.go +++ b/disttask/framework/storage/table_test.go @@ -42,17 +42,17 @@ func TestGlobalTaskTable(t *testing.T) { tk := testkit.NewTestKit(t, store) - gm := storage.NewGlobalTaskManager(context.Background(), tk.Session()) + gm := storage.NewTaskManager(context.Background(), tk.Session()) - storage.SetGlobalTaskManager(gm) - gm, err := storage.GetGlobalTaskManager() + storage.SetTaskManager(gm) + gm, err := storage.GetTaskManager() require.NoError(t, err) - id, err := gm.AddNewTask("key1", "test", 4, []byte("test")) + id, err := gm.AddNewGlobalTask("key1", "test", 4, []byte("test")) require.NoError(t, err) require.Equal(t, int64(1), id) - task, err := gm.GetNewTask() + task, err := gm.GetNewGlobalTask() require.NoError(t, err) require.Equal(t, int64(1), task.ID) require.Equal(t, "key1", task.Key) @@ -61,35 +61,35 @@ func TestGlobalTaskTable(t *testing.T) { require.Equal(t, uint64(4), task.Concurrency) require.Equal(t, []byte("test"), task.Meta) - task2, err := gm.GetTaskByID(1) + task2, err := gm.GetGlobalTaskByID(1) require.NoError(t, err) require.Equal(t, task, task2) - task3, err := gm.GetTasksInStates(proto.TaskStatePending) + task3, err := gm.GetGlobalTasksInStates(proto.TaskStatePending) require.NoError(t, err) require.Len(t, task3, 1) require.Equal(t, task, task3[0]) - task4, err := gm.GetTasksInStates(proto.TaskStatePending, proto.TaskStateRunning) + task4, err := gm.GetGlobalTasksInStates(proto.TaskStatePending, proto.TaskStateRunning) require.NoError(t, err) require.Len(t, task4, 1) require.Equal(t, task, task4[0]) task.State = proto.TaskStateRunning - err = gm.UpdateTask(task) + err = gm.UpdateGlobalTask(task) require.NoError(t, err) - task5, err := gm.GetTasksInStates(proto.TaskStateRunning) + task5, err := gm.GetGlobalTasksInStates(proto.TaskStateRunning) require.NoError(t, err) require.Len(t, task5, 1) require.Equal(t, task, task5[0]) - task6, err := gm.GetTaskByKey("key1") + task6, err := gm.GetGlobalTaskByKey("key1") require.NoError(t, err) require.Equal(t, task, task6) // test cannot insert task with dup key - _, err = gm.AddNewTask("key1", "test2", 4, []byte("test2")) + _, err = gm.AddNewGlobalTask("key1", "test2", 4, []byte("test2")) require.EqualError(t, err, "[kv:1062]Duplicate entry 'key1' for key 'tidb_global_task.task_key'") } @@ -98,13 +98,13 @@ func TestSubTaskTable(t *testing.T) { tk := testkit.NewTestKit(t, store) - sm := storage.NewSubTaskManager(context.Background(), tk.Session()) + sm := storage.NewTaskManager(context.Background(), tk.Session()) - storage.SetSubTaskManager(sm) - sm, err := storage.GetSubTaskManager() + storage.SetTaskManager(sm) + sm, err := storage.GetTaskManager() require.NoError(t, err) - err = sm.AddNewTask(1, "tidb1", []byte("test"), proto.TaskTypeExample, false) + err = sm.AddNewSubTask(1, "tidb1", []byte("test"), proto.TaskTypeExample, false) require.NoError(t, err) nilTask, err := sm.GetSubtaskInStates("tidb2", 1, proto.TaskStatePending) @@ -123,12 +123,12 @@ func TestSubTaskTable(t *testing.T) { require.NoError(t, err) require.Equal(t, task, task2) - ids, err := sm.GetSchedulerIDs(1) + ids, err := sm.GetSchedulerIDsByTaskID(1) require.NoError(t, err) require.Len(t, ids, 1) require.Equal(t, "tidb1", ids[0]) - ids, err = sm.GetSchedulerIDs(3) + ids, err = sm.GetSchedulerIDsByTaskID(3) require.NoError(t, err) require.Len(t, ids, 0) @@ -144,7 +144,7 @@ func TestSubTaskTable(t *testing.T) { require.NoError(t, err) 
require.True(t, ok) - err = sm.UpdateHeartbeat("tidb1", 1, time.Now()) + err = sm.UpdateSubtaskHeartbeat("tidb1", 1, time.Now()) require.NoError(t, err) err = sm.UpdateSubtaskState(1, proto.TaskStateRunning) @@ -170,14 +170,14 @@ func TestSubTaskTable(t *testing.T) { require.NoError(t, err) require.False(t, ok) - err = sm.DeleteTasks(1) + err = sm.DeleteSubtasksByTaskID(1) require.NoError(t, err) ok, err = sm.HasSubtasksInStates("tidb1", 1, proto.TaskStatePending, proto.TaskStateRunning) require.NoError(t, err) require.False(t, ok) - err = sm.AddNewTask(2, "tidb1", []byte("test"), proto.TaskTypeExample, true) + err = sm.AddNewSubTask(2, "tidb1", []byte("test"), proto.TaskTypeExample, true) require.NoError(t, err) cnt, err = sm.GetSubtaskInStatesCnt(2, proto.TaskStateRevertPending) diff --git a/disttask/framework/storage/task_table.go b/disttask/framework/storage/task_table.go index 1e2329f87a034..42208a9972b48 100644 --- a/disttask/framework/storage/task_table.go +++ b/disttask/framework/storage/task_table.go @@ -34,60 +34,36 @@ import ( "go.uber.org/zap" ) -// GlobalTaskManager is the manager of global task. -type GlobalTaskManager struct { +// TaskManager is the manager of global/sub task. +type TaskManager struct { ctx context.Context se sessionctx.Context mu sync.Mutex } -var globalTaskManagerInstance atomic.Pointer[GlobalTaskManager] -var subTaskManagerInstance atomic.Pointer[SubTaskManager] +var taskManagerInstance atomic.Pointer[TaskManager] -// NewGlobalTaskManager creates a new global task manager. -func NewGlobalTaskManager(ctx context.Context, se sessionctx.Context) *GlobalTaskManager { +// NewTaskManager creates a new task manager. +func NewTaskManager(ctx context.Context, se sessionctx.Context) *TaskManager { ctx = util.WithInternalSourceType(ctx, kv.InternalDistTask) - return &GlobalTaskManager{ + return &TaskManager{ ctx: ctx, se: se, } } -// NewSubTaskManager creates a new sub task manager. -func NewSubTaskManager(ctx context.Context, se sessionctx.Context) *SubTaskManager { - ctx = util.WithInternalSourceType(ctx, kv.InternalDistTask) - return &SubTaskManager{ - ctx: ctx, - se: se, - } -} - -// GetGlobalTaskManager gets the global task manager. -func GetGlobalTaskManager() (*GlobalTaskManager, error) { - v := globalTaskManagerInstance.Load() +// GetTaskManager gets the task manager. +func GetTaskManager() (*TaskManager, error) { + v := taskManagerInstance.Load() if v == nil { return nil, errors.New("global task manager is not initialized") } return v, nil } -// SetGlobalTaskManager sets the global task manager. -func SetGlobalTaskManager(is *GlobalTaskManager) { - globalTaskManagerInstance.Store(is) -} - -// GetSubTaskManager gets the sub task manager. -func GetSubTaskManager() (*SubTaskManager, error) { - v := subTaskManagerInstance.Load() - if v == nil { - return nil, errors.New("subTask manager is not initialized") - } - return v, nil -} - -// SetSubTaskManager sets the sub task manager. -func SetSubTaskManager(is *SubTaskManager) { - subTaskManagerInstance.Store(is) +// SetTaskManager sets the task manager. +func SetTaskManager(is *TaskManager) { + taskManagerInstance.Store(is) } // execSQL executes the sql and returns the result. @@ -129,8 +105,8 @@ func row2GlobeTask(r chunk.Row) *proto.Task { return task } -// AddNewTask adds a new task to global task table. -func (stm *GlobalTaskManager) AddNewTask(key, tp string, concurrency int, meta []byte) (int64, error) { +// AddNewGlobalTask adds a new task to global task table. 
+func (stm *TaskManager) AddNewGlobalTask(key, tp string, concurrency int, meta []byte) (int64, error) { stm.mu.Lock() defer stm.mu.Unlock() @@ -147,8 +123,8 @@ func (stm *GlobalTaskManager) AddNewTask(key, tp string, concurrency int, meta [ return strconv.ParseInt(rs[0].GetString(0), 10, 64) } -// GetNewTask get a new task from global task table, it's used by dispatcher only. -func (stm *GlobalTaskManager) GetNewTask() (task *proto.Task, err error) { +// GetNewGlobalTask get a new task from global task table, it's used by dispatcher only. +func (stm *TaskManager) GetNewGlobalTask() (task *proto.Task, err error) { stm.mu.Lock() defer stm.mu.Unlock() @@ -164,8 +140,8 @@ func (stm *GlobalTaskManager) GetNewTask() (task *proto.Task, err error) { return row2GlobeTask(rs[0]), nil } -// UpdateTask updates the global task. -func (stm *GlobalTaskManager) UpdateTask(task *proto.Task) error { +// UpdateGlobalTask updates the global task. +func (stm *TaskManager) UpdateGlobalTask(task *proto.Task) error { failpoint.Inject("MockUpdateTaskErr", func(val failpoint.Value) { if val.(bool) { failpoint.Return(errors.New("updateTaskErr")) @@ -183,8 +159,8 @@ func (stm *GlobalTaskManager) UpdateTask(task *proto.Task) error { return nil } -// GetTasksInStates gets the tasks in the states. -func (stm *GlobalTaskManager) GetTasksInStates(states ...interface{}) (task []*proto.Task, err error) { +// GetGlobalTasksInStates gets the tasks in the states. +func (stm *TaskManager) GetGlobalTasksInStates(states ...interface{}) (task []*proto.Task, err error) { stm.mu.Lock() defer stm.mu.Unlock() @@ -203,8 +179,8 @@ func (stm *GlobalTaskManager) GetTasksInStates(states ...interface{}) (task []*p return task, nil } -// GetTaskByID gets the task by the global task ID. -func (stm *GlobalTaskManager) GetTaskByID(taskID int64) (task *proto.Task, err error) { +// GetGlobalTaskByID gets the task by the global task ID. +func (stm *TaskManager) GetGlobalTaskByID(taskID int64) (task *proto.Task, err error) { stm.mu.Lock() defer stm.mu.Unlock() @@ -219,8 +195,8 @@ func (stm *GlobalTaskManager) GetTaskByID(taskID int64) (task *proto.Task, err e return row2GlobeTask(rs[0]), nil } -// GetTaskByKey gets the task by the task key -func (stm *GlobalTaskManager) GetTaskByKey(key string) (task *proto.Task, err error) { +// GetGlobalTaskByKey gets the task by the task key +func (stm *TaskManager) GetGlobalTaskByKey(key string) (task *proto.Task, err error) { stm.mu.Lock() defer stm.mu.Unlock() @@ -235,13 +211,6 @@ func (stm *GlobalTaskManager) GetTaskByKey(key string) (task *proto.Task, err er return row2GlobeTask(rs[0]), nil } -// SubTaskManager is the manager of subtask. -type SubTaskManager struct { - ctx context.Context - se sessionctx.Context - mu sync.Mutex -} - // row2SubTask converts a row to a subtask. func row2SubTask(r chunk.Row) *proto.Subtask { task := &proto.Subtask{ @@ -260,8 +229,8 @@ func row2SubTask(r chunk.Row) *proto.Subtask { return task } -// AddNewTask adds a new task to subtask table. -func (stm *SubTaskManager) AddNewTask(globalTaskID int64, designatedTiDBID string, meta []byte, tp string, isRevert bool) error { +// AddNewSubTask adds a new task to subtask table. +func (stm *TaskManager) AddNewSubTask(globalTaskID int64, designatedTiDBID string, meta []byte, tp string, isRevert bool) error { stm.mu.Lock() defer stm.mu.Unlock() @@ -279,7 +248,7 @@ func (stm *SubTaskManager) AddNewTask(globalTaskID int64, designatedTiDBID strin } // GetSubtaskInStates gets the subtask in the states. 
-func (stm *SubTaskManager) GetSubtaskInStates(tidbID string, taskID int64, states ...interface{}) (*proto.Subtask, error) { +func (stm *TaskManager) GetSubtaskInStates(tidbID string, taskID int64, states ...interface{}) (*proto.Subtask, error) { stm.mu.Lock() defer stm.mu.Unlock() @@ -297,7 +266,7 @@ func (stm *SubTaskManager) GetSubtaskInStates(tidbID string, taskID int64, state } // GetSubtaskInStatesCnt gets the subtask count in the states. -func (stm *SubTaskManager) GetSubtaskInStatesCnt(taskID int64, states ...interface{}) (int64, error) { +func (stm *TaskManager) GetSubtaskInStatesCnt(taskID int64, states ...interface{}) (int64, error) { stm.mu.Lock() defer stm.mu.Unlock() @@ -312,7 +281,7 @@ func (stm *SubTaskManager) GetSubtaskInStatesCnt(taskID int64, states ...interfa } // HasSubtasksInStates checks if there are subtasks in the states. -func (stm *SubTaskManager) HasSubtasksInStates(tidbID string, taskID int64, states ...interface{}) (bool, error) { +func (stm *TaskManager) HasSubtasksInStates(tidbID string, taskID int64, states ...interface{}) (bool, error) { stm.mu.Lock() defer stm.mu.Unlock() @@ -327,7 +296,7 @@ func (stm *SubTaskManager) HasSubtasksInStates(tidbID string, taskID int64, stat } // UpdateSubtaskState updates the subtask state. -func (stm *SubTaskManager) UpdateSubtaskState(id int64, state string) error { +func (stm *TaskManager) UpdateSubtaskState(id int64, state string) error { stm.mu.Lock() defer stm.mu.Unlock() @@ -335,8 +304,8 @@ func (stm *SubTaskManager) UpdateSubtaskState(id int64, state string) error { return err } -// UpdateHeartbeat updates the heartbeat of the subtask. -func (stm *SubTaskManager) UpdateHeartbeat(instanceID string, taskID int64, heartbeat time.Time) error { +// UpdateSubtaskHeartbeat updates the heartbeat of the subtask. +func (stm *TaskManager) UpdateSubtaskHeartbeat(instanceID string, taskID int64, heartbeat time.Time) error { stm.mu.Lock() defer stm.mu.Unlock() @@ -344,8 +313,8 @@ func (stm *SubTaskManager) UpdateHeartbeat(instanceID string, taskID int64, hear return err } -// DeleteTasks deletes the subtask of the given global task ID. -func (stm *SubTaskManager) DeleteTasks(taskID int64) error { +// DeleteSubtasksByTaskID deletes the subtask of the given global task ID. +func (stm *TaskManager) DeleteSubtasksByTaskID(taskID int64) error { stm.mu.Lock() defer stm.mu.Unlock() @@ -357,8 +326,8 @@ func (stm *SubTaskManager) DeleteTasks(taskID int64) error { return nil } -// GetSchedulerIDs gets the scheduler IDs of the given global task ID. -func (stm *SubTaskManager) GetSchedulerIDs(taskID int64) ([]string, error) { +// GetSchedulerIDsByTaskID gets the scheduler IDs of the given global task ID. 
+func (stm *TaskManager) GetSchedulerIDsByTaskID(taskID int64) ([]string, error) { stm.mu.Lock() defer stm.mu.Unlock() diff --git a/domain/domain.go b/domain/domain.go index 37a1b5ca951f8..360f55d9763ec 100644 --- a/domain/domain.go +++ b/domain/domain.go @@ -1351,41 +1351,30 @@ func (do *Domain) initDistTaskLoop(ctx context.Context) error { failpoint.Return(nil) } }) - se1, err := do.sysExecutorFactory(do) + se, err := do.sysExecutorFactory(do) if err != nil { return err } - se2, err := do.sysExecutorFactory(do) + taskManager := storage.NewTaskManager(kv.WithInternalSourceType(ctx, kv.InternalDistTask), se.(sessionctx.Context)) + schedulerManager, err := scheduler.NewManagerBuilder().BuildManager(ctx, do.ddl.GetID(), taskManager) if err != nil { - se1.Close() + se.Close() return err } - gm := storage.NewGlobalTaskManager(kv.WithInternalSourceType(ctx, kv.InternalDistTask), se1.(sessionctx.Context)) - sm := storage.NewSubTaskManager(kv.WithInternalSourceType(ctx, kv.InternalDistTask), se2.(sessionctx.Context)) - schedulerManager, err := scheduler.NewManagerBuilder().BuildManager(ctx, do.ddl.GetID(), gm, sm) - if err != nil { - se1.Close() - se2.Close() - return err - } - - storage.SetGlobalTaskManager(gm) - storage.SetSubTaskManager(sm) + storage.SetTaskManager(taskManager) do.wg.Run(func() { defer func() { - storage.SetGlobalTaskManager(nil) - storage.SetSubTaskManager(nil) - se1.Close() - se2.Close() + storage.SetTaskManager(nil) + se.Close() }() - do.distTaskFrameworkLoop(ctx, gm, sm, schedulerManager) + do.distTaskFrameworkLoop(ctx, taskManager, schedulerManager) }, "distTaskFrameworkLoop") return nil } -func (do *Domain) distTaskFrameworkLoop(ctx context.Context, globalTaskManager *storage.GlobalTaskManager, subtaskManager *storage.SubTaskManager, schedulerManager *scheduler.Manager) { +func (do *Domain) distTaskFrameworkLoop(ctx context.Context, taskManager *storage.TaskManager, schedulerManager *scheduler.Manager) { schedulerManager.Start() logutil.BgLogger().Info("dist task scheduler started") defer func() { @@ -1400,7 +1389,7 @@ func (do *Domain) distTaskFrameworkLoop(ctx context.Context, globalTaskManager * return } - newDispatch, err := dispatcher.NewDispatcher(ctx, globalTaskManager, subtaskManager) + newDispatch, err := dispatcher.NewDispatcher(ctx, taskManager) if err != nil { logutil.BgLogger().Error("failed to create a disttask dispatcher", zap.Error(err)) return From 9f326531d79a719055a70638e88a9e6cc190bcd5 Mon Sep 17 00:00:00 2001 From: tangenta Date: Tue, 4 Apr 2023 19:06:58 +0800 Subject: [PATCH 02/12] ddl: extract sessionPool to a separate package (#42808) --- ddl/BUILD.bazel | 2 +- ddl/backfilling.go | 15 ++--- ddl/backfilling_scheduler.go | 7 ++- ddl/cluster.go | 8 +-- ddl/column.go | 12 ++-- ddl/ddl.go | 52 ++++++++-------- ddl/ddl_api.go | 4 +- ddl/ddl_tiflash_api.go | 6 +- ddl/ddl_worker.go | 72 +++++++++------------- ddl/delete_range.go | 17 ++--- ddl/dist_backfilling.go | 4 +- ddl/dist_owner.go | 24 ++++---- ddl/foreign_key.go | 4 +- ddl/index.go | 40 ++---------- ddl/internal/session/BUILD.bazel | 17 +++++ ddl/{ => internal/session}/session_pool.go | 35 ++++++----- ddl/job_table.go | 28 ++++----- ddl/partition.go | 16 ++--- ddl/reorg.go | 25 +++----- ddl/sanity_check.go | 7 ++- ddl/stat.go | 4 +- ddl/table.go | 35 ++++++----- 22 files changed, 202 insertions(+), 232 deletions(-) create mode 100644 ddl/internal/session/BUILD.bazel rename ddl/{ => internal/session}/session_pool.go (68%) diff --git a/ddl/BUILD.bazel b/ddl/BUILD.bazel index 
41e881af195f6..be970766d10d7 100644 --- a/ddl/BUILD.bazel +++ b/ddl/BUILD.bazel @@ -45,7 +45,6 @@ go_library( "sanity_check.go", "schema.go", "sequence.go", - "session_pool.go", "split_region.go", "stat.go", "table.go", @@ -60,6 +59,7 @@ go_library( "//br/pkg/lightning/common", "//config", "//ddl/ingest", + "//ddl/internal/session", "//ddl/label", "//ddl/placement", "//ddl/resourcegroup", diff --git a/ddl/backfilling.go b/ddl/backfilling.go index 251333a39e794..56de409d4c969 100644 --- a/ddl/backfilling.go +++ b/ddl/backfilling.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + sess "github.com/pingcap/tidb/ddl/internal/session" ddlutil "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" @@ -522,11 +523,11 @@ type resultConsumer struct { wg *sync.WaitGroup err error hasError *atomic.Bool - reorgInfo *reorgInfo // reorgInfo is used to update the reorg handle. - sessPool *sessionPool // sessPool is used to get the session to update the reorg handle. + reorgInfo *reorgInfo // reorgInfo is used to update the reorg handle. + sessPool *sess.Pool // sessPool is used to get the session to update the reorg handle. } -func newResultConsumer(dc *ddlCtx, reorgInfo *reorgInfo, sessPool *sessionPool) *resultConsumer { +func newResultConsumer(dc *ddlCtx, reorgInfo *reorgInfo, sessPool *sess.Pool) *resultConsumer { return &resultConsumer{ dc: dc, wg: &sync.WaitGroup{}, @@ -693,13 +694,13 @@ var ( TestCheckReorgTimeout = int32(0) ) -func loadDDLReorgVars(ctx context.Context, sessPool *sessionPool) error { +func loadDDLReorgVars(ctx context.Context, sessPool *sess.Pool) error { // Get sessionctx from context resource pool. - sCtx, err := sessPool.get() + sCtx, err := sessPool.Get() if err != nil { return errors.Trace(err) } - defer sessPool.put(sCtx) + defer sessPool.Put(sCtx) return ddlutil.LoadDDLReorgVars(ctx, sCtx) } @@ -759,7 +760,7 @@ func SetBackfillTaskChanSizeForTest(n int) { // // The above operations are completed in a transaction. // Finally, update the concurrent processing of the total number of rows, and store the completed handle value. 
-func (dc *ddlCtx) writePhysicalTableRecord(sessPool *sessionPool, t table.PhysicalTable, bfWorkerType backfillerType, reorgInfo *reorgInfo) error { +func (dc *ddlCtx) writePhysicalTableRecord(sessPool *sess.Pool, t table.PhysicalTable, bfWorkerType backfillerType, reorgInfo *reorgInfo) error { job := reorgInfo.Job totalAddedCount := job.GetRowCount() diff --git a/ddl/backfilling_scheduler.go b/ddl/backfilling_scheduler.go index 65f77898596f9..a4aa6e54fd62b 100644 --- a/ddl/backfilling_scheduler.go +++ b/ddl/backfilling_scheduler.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/ddl/ingest" + sess "github.com/pingcap/tidb/ddl/internal/session" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -60,7 +61,7 @@ const maxBackfillWorkerSize = 16 type txnBackfillScheduler struct { ctx context.Context reorgInfo *reorgInfo - sessPool *sessionPool + sessPool *sess.Pool tp backfillerType tbl table.PhysicalTable decodeColMap map[int64]decoder.Column @@ -74,7 +75,7 @@ type txnBackfillScheduler struct { closed bool } -func newBackfillScheduler(ctx context.Context, info *reorgInfo, sessPool *sessionPool, +func newBackfillScheduler(ctx context.Context, info *reorgInfo, sessPool *sess.Pool, tp backfillerType, tbl table.PhysicalTable, sessCtx sessionctx.Context, jobCtx *JobContext) (backfillScheduler, error) { if tp == typeAddIndexWorker && info.ReorgMeta.ReorgTp == model.ReorgTypeLitMerge { @@ -83,7 +84,7 @@ func newBackfillScheduler(ctx context.Context, info *reorgInfo, sessPool *sessio return newTxnBackfillScheduler(ctx, info, sessPool, tp, tbl, sessCtx, jobCtx) } -func newTxnBackfillScheduler(ctx context.Context, info *reorgInfo, sessPool *sessionPool, +func newTxnBackfillScheduler(ctx context.Context, info *reorgInfo, sessPool *sess.Pool, tp backfillerType, tbl table.PhysicalTable, sessCtx sessionctx.Context, jobCtx *JobContext) (backfillScheduler, error) { decColMap, err := makeupDecodeColMap(sessCtx, info.dbInfo.Name, tbl) diff --git a/ddl/cluster.go b/ddl/cluster.go index ea0567dfb9c73..b4331040e2e54 100644 --- a/ddl/cluster.go +++ b/ddl/cluster.go @@ -652,12 +652,12 @@ func (w *worker) onFlashbackCluster(d *ddlCtx, t *meta.Meta, job *model.Job) (ve var totalRegions, completedRegions atomic.Uint64 totalRegions.Store(lockedRegions) - sess, err := w.sessPool.get() + sess, err := w.sessPool.Get() if err != nil { job.State = model.JobStateCancelled return ver, errors.Trace(err) } - defer w.sessPool.put(sess) + defer w.sessPool.Put(sess) switch job.SchemaState { // Stage 1, check and set FlashbackClusterJobID, and update job args. 
@@ -792,11 +792,11 @@ func finishFlashbackCluster(w *worker, job *model.Job) error { if err := job.DecodeArgs(&flashbackTS, &pdScheduleValue, &gcEnabled, &autoAnalyzeValue, &readOnlyValue, &lockedRegions, &startTS, &commitTS, &ttlJobEnableValue); err != nil { return errors.Trace(err) } - sess, err := w.sessPool.get() + sess, err := w.sessPool.Get() if err != nil { return errors.Trace(err) } - defer w.sessPool.put(sess) + defer w.sessPool.Put(sess) err = kv.RunInNewTxn(w.ctx, w.store, true, func(ctx context.Context, txn kv.Transaction) error { if err = recoverPDSchedule(pdScheduleValue); err != nil { diff --git a/ddl/column.go b/ddl/column.go index 09585212881b5..ec8f3570043ac 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -697,11 +697,11 @@ func (w *worker) doModifyColumnTypeWithData( failpoint.Inject("mockInsertValueAfterCheckNull", func(val failpoint.Value) { if valStr, ok := val.(string); ok { var sctx sessionctx.Context - sctx, err := w.sessPool.get() + sctx, err := w.sessPool.Get() if err != nil { failpoint.Return(ver, err) } - defer w.sessPool.put(sctx) + defer w.sessPool.Put(sctx) ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) //nolint:forcetypeassert @@ -810,12 +810,12 @@ func doReorgWorkForModifyColumnMultiSchema(w *worker, d *ddlCtx, t *meta.Meta, j func doReorgWorkForModifyColumn(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job, tbl table.Table, oldCol, changingCol *model.ColumnInfo, changingIdxs []*model.IndexInfo) (done bool, ver int64, err error) { job.ReorgMeta.ReorgTp = model.ReorgTypeTxn - sctx, err1 := w.sessPool.get() + sctx, err1 := w.sessPool.Get() if err1 != nil { err = errors.Trace(err1) return } - defer w.sessPool.put(sctx) + defer w.sessPool.Put(sctx) rh := newReorgHandler(newSession(sctx)) dbInfo, err := t.GetDatabase(job.SchemaID) if err != nil { @@ -1826,11 +1826,11 @@ func rollbackModifyColumnJob(d *ddlCtx, t *meta.Meta, tblInfo *model.TableInfo, func modifyColsFromNull2NotNull(w *worker, dbInfo *model.DBInfo, tblInfo *model.TableInfo, cols []*model.ColumnInfo, newCol *model.ColumnInfo, isDataTruncated bool) error { // Get sessionctx from context resource pool. var sctx sessionctx.Context - sctx, err := w.sessPool.get() + sctx, err := w.sessPool.Get() if err != nil { return errors.Trace(err) } - defer w.sessPool.put(sctx) + defer w.sessPool.Put(sctx) skipCheck := false failpoint.Inject("skipMockContextDoExec", func(val failpoint.Value) { diff --git a/ddl/ddl.go b/ddl/ddl.go index f67272de3994c..3d0e24ba30b70 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -36,6 +36,7 @@ import ( "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl/ingest" + sess "github.com/pingcap/tidb/ddl/internal/session" "github.com/pingcap/tidb/ddl/syncer" "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/domain/infosync" @@ -272,7 +273,7 @@ type ddl struct { limitJobCh chan *limitJobTask *ddlCtx - sessPool *sessionPool + sessPool *sess.Pool delRangeMgr delRangeManager enableTiFlashPoll *atomicutil.Bool // used in the concurrency ddl. 
@@ -718,7 +719,7 @@ func (d *ddl) prepareWorkers4ConcurrencyDDL() { workerFactory := func(tp workerType) func() (pools.Resource, error) { return func() (pools.Resource, error) { wk := newWorker(d.ctx, tp, d.sessPool, d.delRangeMgr, d.ddlCtx) - sessForJob, err := d.sessPool.get() + sessForJob, err := d.sessPool.Get() if err != nil { return nil, err } @@ -763,7 +764,7 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error { logutil.BgLogger().Info("[ddl] start DDL", zap.String("ID", d.uuid), zap.Bool("runWorker", config.GetGlobalConfig().Instance.TiDBEnableDDL.Load())) d.wg.Run(d.limitDDLJobs) - d.sessPool = newSessionPool(ctxPool, d.store) + d.sessPool = sess.NewSessionPool(ctxPool, d.store) d.ownerManager.SetBeOwnerHook(func() { var err error d.ddlSeqNumMu.seqNum, err = d.GetNextDDLSeqNum() @@ -877,7 +878,7 @@ func (d *ddl) close() { d.delRangeMgr.clear() } if d.sessPool != nil { - d.sessPool.close() + d.sessPool.Close() } variable.UnregisterStatistics(d) @@ -1106,14 +1107,14 @@ func (d *ddl) DoDDLJob(ctx sessionctx.Context, job *model.Job) error { // If the connection being killed, we need to CANCEL the DDL job. if atomic.LoadUint32(&sessVars.Killed) == 1 { if sessVars.StmtCtx.DDLJobID != 0 { - se, err := d.sessPool.get() + se, err := d.sessPool.Get() if err != nil { logutil.BgLogger().Error("[ddl] get session failed, check again", zap.Error(err)) continue } sessVars.StmtCtx.DDLJobID = 0 // Avoid repeat. errs, err := CancelJobs(se, []int64{jobID}) - d.sessPool.put(se) + d.sessPool.Put(se) if len(errs) > 0 { logutil.BgLogger().Warn("error canceling DDL job", zap.Error(errs[0])) } @@ -1124,13 +1125,13 @@ func (d *ddl) DoDDLJob(ctx sessionctx.Context, job *model.Job) error { } } - se, err := d.sessPool.get() + se, err := d.sessPool.Get() if err != nil { logutil.BgLogger().Error("[ddl] get session failed, check again", zap.Error(err)) continue } historyJob, err = GetHistoryJobByID(se, jobID) - d.sessPool.put(se) + d.sessPool.Put(se) if err != nil { logutil.BgLogger().Error("[ddl] get history DDL job failed, check again", zap.Error(err)) continue @@ -1251,12 +1252,12 @@ func (d *ddl) SwitchMDL(enable bool) error { // Check if there is any DDL running. // This check can not cover every corner cases, so users need to guarantee that there is no DDL running by themselves. - sess, err := d.sessPool.get() + sessCtx, err := d.sessPool.Get() if err != nil { return err } - defer d.sessPool.put(sess) - se := newSession(sess) + defer d.sessPool.Put(sessCtx) + se := newSession(sessCtx) rows, err := se.execute(ctx, "select 1 from mysql.tidb_ddl_job", "check job") if err != nil { return err @@ -1372,13 +1373,13 @@ type Info struct { // GetDDLInfoWithNewTxn returns DDL information using a new txn. 
func GetDDLInfoWithNewTxn(s sessionctx.Context) (*Info, error) { - sess := newSession(s) - err := sess.begin() + se := newSession(s) + err := se.begin() if err != nil { return nil, err } info, err := GetDDLInfo(s) - sess.rollback() + se.rollback() return info, err } @@ -1386,15 +1387,15 @@ func GetDDLInfoWithNewTxn(s sessionctx.Context) (*Info, error) { func GetDDLInfo(s sessionctx.Context) (*Info, error) { var err error info := &Info{} - sess := newSession(s) - txn, err := sess.txn() + se := newSession(s) + txn, err := se.txn() if err != nil { return nil, errors.Trace(err) } t := meta.NewMeta(txn) info.Jobs = make([]*model.Job, 0, 2) var generalJob, reorgJob *model.Job - generalJob, reorgJob, err = get2JobsFromTable(sess) + generalJob, reorgJob, err = get2JobsFromTable(se) if err != nil { return nil, errors.Trace(err) } @@ -1415,7 +1416,7 @@ func GetDDLInfo(s sessionctx.Context) (*Info, error) { return info, nil } - _, info.ReorgHandle, _, _, err = newReorgHandler(sess).GetDDLReorgHandle(reorgJob) + _, info.ReorgHandle, _, _, err = newReorgHandler(se).GetDDLReorgHandle(reorgJob) if err != nil { if meta.ErrDDLReorgElementNotExist.Equal(err) { return info, nil @@ -1463,8 +1464,8 @@ func cancelConcurrencyJobs(se sessionctx.Context, ids []int64) ([]error, error) } var jobMap = make(map[int64]int) // jobID -> error index - sess := newSession(se) - err := sess.begin() + sessCtx := newSession(se) + err := sessCtx.begin() if err != nil { return nil, err } @@ -1475,9 +1476,9 @@ func cancelConcurrencyJobs(se sessionctx.Context, ids []int64) ([]error, error) idsStr = append(idsStr, strconv.FormatInt(id, 10)) } - jobs, err := getJobsBySQL(sess, JobTable, fmt.Sprintf("job_id in (%s) order by job_id", strings.Join(idsStr, ", "))) + jobs, err := getJobsBySQL(sessCtx, JobTable, fmt.Sprintf("job_id in (%s) order by job_id", strings.Join(idsStr, ", "))) if err != nil { - sess.rollback() + sessCtx.rollback() return nil, err } @@ -1512,12 +1513,12 @@ func cancelConcurrencyJobs(se sessionctx.Context, ids []int64) ([]error, error) errs[i] = errors.Trace(err) continue } - err = updateDDLJob2Table(sess, job, true) + err = updateDDLJob2Table(sessCtx, job, true) if err != nil { errs[i] = errors.Trace(err) } } - err = sess.commit() + err = sessCtx.commit() if err != nil { return nil, err } @@ -1532,9 +1533,6 @@ func GetAllDDLJobs(sess sessionctx.Context, t *meta.Meta) ([]*model.Job, error) return getJobsBySQL(newSession(sess), JobTable, "1 order by job_id") } -// MaxHistoryJobs is exported for testing. -const MaxHistoryJobs = 10 - // DefNumHistoryJobs is default value of the default number of history job const DefNumHistoryJobs = 10 diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index ff3c47b40c622..37cf2f1e44879 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -7276,11 +7276,11 @@ func (d *ddl) CleanDeadTableLock(unlockTables []model.TableLockTpInfo, se model. 
Args: []interface{}{arg}, } - ctx, err := d.sessPool.get() + ctx, err := d.sessPool.Get() if err != nil { return err } - defer d.sessPool.put(ctx) + defer d.sessPool.Put(ctx) err = d.DoDDLJob(ctx, job) err = d.callHookOnChanged(job, err) return errors.Trace(err) diff --git a/ddl/ddl_tiflash_api.go b/ddl/ddl_tiflash_api.go index c601093ca5963..7de8f308de14b 100644 --- a/ddl/ddl_tiflash_api.go +++ b/ddl/ddl_tiflash_api.go @@ -584,7 +584,7 @@ func (d *ddl) PollTiFlashRoutine() { } } - sctx, err := d.sessPool.get() + sctx, err := d.sessPool.Get() if err == nil { if d.ownerManager.IsOwner() { err := d.refreshTiFlashTicker(sctx, pollTiflashContext) @@ -599,10 +599,10 @@ func (d *ddl) PollTiFlashRoutine() { } else { infosync.CleanTiFlashProgressCache() } - d.sessPool.put(sctx) + d.sessPool.Put(sctx) } else { if sctx != nil { - d.sessPool.put(sctx) + d.sessPool.Put(sctx) } logutil.BgLogger().Error("failed to get session for pollTiFlashReplicaStatus", zap.Error(err)) } diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index 07deb569f0127..0711287eea055 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/kvrpcpb" + sess "github.com/pingcap/tidb/ddl/internal/session" "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" @@ -97,8 +98,8 @@ type worker struct { ctx context.Context wg sync.WaitGroup - sessPool *sessionPool // sessPool is used to new sessions to execute SQL in ddl package. - sess *session // sess is used and only used in running DDL job. + sessPool *sess.Pool // sessPool is used to new sessions to execute SQL in ddl package. + sess *session // sess is used and only used in running DDL job. 
delRangeManager delRangeManager logCtx context.Context lockSeqNum bool @@ -127,7 +128,7 @@ func NewJobContext() *JobContext { } } -func newWorker(ctx context.Context, tp workerType, sessPool *sessionPool, delRangeMgr delRangeManager, dCtx *ddlCtx) *worker { +func newWorker(ctx context.Context, tp workerType, sessPool *sess.Pool, delRangeMgr delRangeManager, dCtx *ddlCtx) *worker { worker := &worker{ id: ddlWorkerID.Add(1), tp: tp, @@ -162,20 +163,20 @@ func (w *worker) String() string { func (w *worker) Close() { startTime := time.Now() if w.sess != nil { - w.sessPool.put(w.sess.session()) + w.sessPool.Put(w.sess.session()) } w.wg.Wait() logutil.Logger(w.logCtx).Info("[ddl] DDL worker closed", zap.Duration("take time", time.Since(startTime))) } -func (d *ddlCtx) asyncNotifyByEtcd(etcdPath string, jobID int64, jobType string) { - if d.etcdCli == nil { +func (dc *ddlCtx) asyncNotifyByEtcd(etcdPath string, jobID int64, jobType string) { + if dc.etcdCli == nil { return } jobIDStr := strconv.FormatInt(jobID, 10) timeStart := time.Now() - err := util.PutKVToEtcd(d.ctx, d.etcdCli, 1, etcdPath, jobIDStr) + err := util.PutKVToEtcd(dc.ctx, dc.etcdCli, 1, etcdPath, jobIDStr) if err != nil { logutil.BgLogger().Info("[ddl] notify handling DDL job failed", zap.String("etcdPath", etcdPath), zap.Int64("jobID", jobID), zap.String("type", jobType), zap.Error(err)) @@ -341,12 +342,12 @@ func (d *ddl) addBatchDDLJobs2Table(tasks []*limitJobTask) error { var ids []int64 var err error - sess, err := d.sessPool.get() + se, err := d.sessPool.Get() if err != nil { return errors.Trace(err) } - defer d.sessPool.put(sess) - job, err := getJobsBySQL(newSession(sess), JobTable, fmt.Sprintf("type = %d", model.ActionFlashbackCluster)) + defer d.sessPool.Put(se) + job, err := getJobsBySQL(newSession(se), JobTable, fmt.Sprintf("type = %d", model.ActionFlashbackCluster)) if err != nil { return errors.Trace(err) } @@ -377,8 +378,8 @@ func (d *ddl) addBatchDDLJobs2Table(tasks []*limitJobTask) error { injectModifyJobArgFailPoint(job) } - sess.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) - err = insertDDLJobs2Table(newSession(sess), true, jobTasks...) + se.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) + err = insertDDLJobs2Table(newSession(se), true, jobTasks...) } return errors.Trace(err) } @@ -451,16 +452,16 @@ func (w *worker) registerMDLInfo(job *model.Job, ver int64) error { } // cleanMDLInfo cleans metadata lock info. -func cleanMDLInfo(pool *sessionPool, jobID int64, ec *clientv3.Client) { +func cleanMDLInfo(pool *sess.Pool, jobID int64, ec *clientv3.Client) { if !variable.EnableMDL.Load() { return } sql := fmt.Sprintf("delete from mysql.tidb_mdl_info where job_id = %d", jobID) - sctx, _ := pool.get() - defer pool.put(sctx) - sess := newSession(sctx) - sess.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) - _, err := sess.execute(context.Background(), sql, "delete-mdl-info") + sctx, _ := pool.Get() + defer pool.Put(sctx) + se := newSession(sctx) + se.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) + _, err := se.execute(context.Background(), sql, "delete-mdl-info") if err != nil { logutil.BgLogger().Warn("unexpected error when clean mdl info", zap.Error(err)) } @@ -474,12 +475,12 @@ func cleanMDLInfo(pool *sessionPool, jobID int64, ec *clientv3.Client) { } // checkMDLInfo checks if metadata lock info exists. It means the schema is locked by some TiDBs if exists. 
-func checkMDLInfo(jobID int64, pool *sessionPool) (bool, int64, error) { +func checkMDLInfo(jobID int64, pool *sess.Pool) (bool, int64, error) { sql := fmt.Sprintf("select version from mysql.tidb_mdl_info where job_id = %d", jobID) - sctx, _ := pool.get() - defer pool.put(sctx) - sess := newSession(sctx) - rows, err := sess.execute(context.Background(), sql, "check-mdl-info") + sctx, _ := pool.Get() + defer pool.Put(sctx) + se := newSession(sctx) + rows, err := se.execute(context.Background(), sql, "check-mdl-info") if err != nil { return false, 0, err } @@ -632,23 +633,6 @@ func finishRecoverSchema(w *worker, job *model.Job) error { return nil } -func isDependencyJobDone(t *meta.Meta, job *model.Job) (bool, error) { - if job.DependencyID == noneDependencyJob { - return true, nil - } - - historyJob, err := t.GetHistoryDDLJob(job.DependencyID) - if err != nil { - return false, errors.Trace(err) - } - if historyJob == nil { - return false, nil - } - logutil.BgLogger().Info("[ddl] current DDL job dependent job is finished", zap.String("currentJob", job.String()), zap.Int64("dependentJobID", job.DependencyID)) - job.DependencyID = noneDependencyJob - return true, nil -} - func (w *JobContext) setDDLLabelForTopSQL(jobQuery string) { if !topsqlstate.TopSQLEnabled() || jobQuery == "" { return @@ -982,7 +966,7 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, // For every type, `schema/table` modification and `job` modification are conducted // in the one kv transaction. The `schema/table` modification can be always discarded - // by kv reset when meets a unhandled error, but the `job` modification can't. + // by kv reset when meets an unhandled error, but the `job` modification can't. // So make sure job state and args change is after all other checks or make sure these // change has no effect when retrying it. switch job.Type { @@ -1118,11 +1102,11 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, func loadDDLVars(w *worker) error { // Get sessionctx from context resource pool. var ctx sessionctx.Context - ctx, err := w.sessPool.get() + ctx, err := w.sessPool.Get() if err != nil { return errors.Trace(err) } - defer w.sessPool.put(ctx) + defer w.sessPool.Put(ctx) return util.LoadDDLVars(ctx) } @@ -1391,7 +1375,7 @@ func updateSchemaVersion(d *ddlCtx, t *meta.Meta, job *model.Job, multiInfos ... diff.TableID = job.TableID if len(job.Args) > 0 { tbInfo, _ := job.Args[0].(*model.TableInfo) - // When create table with foreign key, we actually has two schema status change: + // When create table with foreign key, there are two schema status change: // 1. none -> write-only // 2. write-only -> public // In the second status change write-only -> public, infoschema loader should apply drop old table first, then diff --git a/ddl/delete_range.go b/ddl/delete_range.go index 0d957b1eb53d8..5bd7fec83d4c9 100644 --- a/ddl/delete_range.go +++ b/ddl/delete_range.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/kvrpcpb" + sess "github.com/pingcap/tidb/ddl/internal/session" "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" @@ -63,7 +64,7 @@ type delRangeManager interface { type delRange struct { store kv.Storage - sessPool *sessionPool + sessPool *sess.Pool emulatorCh chan struct{} keys []kv.Key quitCh chan struct{} @@ -73,7 +74,7 @@ type delRange struct { } // newDelRangeManager returns a delRangeManager. 
-func newDelRangeManager(store kv.Storage, sessPool *sessionPool) delRangeManager { +func newDelRangeManager(store kv.Storage, sessPool *sess.Pool) delRangeManager { dr := &delRange{ store: store, sessPool: sessPool, @@ -89,11 +90,11 @@ func newDelRangeManager(store kv.Storage, sessPool *sessionPool) delRangeManager // addDelRangeJob implements delRangeManager interface. func (dr *delRange) addDelRangeJob(ctx context.Context, job *model.Job) error { - sctx, err := dr.sessPool.get() + sctx, err := dr.sessPool.Get() if err != nil { return errors.Trace(err) } - defer dr.sessPool.put(sctx) + defer dr.sessPool.Put(sctx) if job.MultiSchemaInfo != nil { err = insertJobIntoDeleteRangeTableMultiSchema(ctx, sctx, job) @@ -127,11 +128,11 @@ func insertJobIntoDeleteRangeTableMultiSchema(ctx context.Context, sctx sessionc // removeFromGCDeleteRange implements delRangeManager interface. func (dr *delRange) removeFromGCDeleteRange(ctx context.Context, jobID int64) error { - sctx, err := dr.sessPool.get() + sctx, err := dr.sessPool.Get() if err != nil { return errors.Trace(err) } - defer dr.sessPool.put(sctx) + defer dr.sessPool.Put(sctx) err = util.RemoveMultiFromGCDeleteRange(ctx, sctx, jobID) return errors.Trace(err) } @@ -171,12 +172,12 @@ func (dr *delRange) startEmulator() { } func (dr *delRange) doDelRangeWork() error { - sctx, err := dr.sessPool.get() + sctx, err := dr.sessPool.Get() if err != nil { logutil.BgLogger().Error("[ddl] delRange emulator get session failed", zap.Error(err)) return errors.Trace(err) } - defer dr.sessPool.put(sctx) + defer dr.sessPool.Put(sctx) ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) ranges, err := util.LoadDeleteRanges(ctx, sctx, math.MaxInt64) diff --git a/ddl/dist_backfilling.go b/ddl/dist_backfilling.go index 46a7cc5e1fe2a..f4993ef63c80c 100644 --- a/ddl/dist_backfilling.go +++ b/ddl/dist_backfilling.go @@ -142,7 +142,7 @@ func newBackfillWorkerContext(d *ddl, schemaName string, tbl table.Table, worker for i := 0; i < workerCnt; i++ { var se sessionctx.Context - se, err = d.sessPool.get() + se, err = d.sessPool.Get() if err != nil { logutil.BgLogger().Error("[ddl] new backfill worker context, get a session failed", zap.Int64("jobID", jobID), zap.Error(err)) return nil, errors.Trace(err) @@ -228,7 +228,7 @@ func runBackfillJobs(d *ddl, sess *session, ingestBackendCtx *ingest.BackendCont func (bwCtx *backfillWorkerContext) close(d *ddl) { for _, s := range bwCtx.sessCtxs { - d.sessPool.put(s) + d.sessPool.Put(s) } for _, w := range bwCtx.backfillWorkers { d.backfillCtxPool.put(w) diff --git a/ddl/dist_owner.go b/ddl/dist_owner.go index 0a7dfe211c765..549c43cd9dc69 100644 --- a/ddl/dist_owner.go +++ b/ddl/dist_owner.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + sess "github.com/pingcap/tidb/ddl/internal/session" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/metrics" @@ -40,10 +41,7 @@ import ( ) // CheckBackfillJobFinishInterval is export for test. 
-var ( - CheckBackfillJobFinishInterval = 300 * time.Millisecond - telemetryDistReorgUsage = metrics.TelemetryDistReorgCnt -) +var CheckBackfillJobFinishInterval = 300 * time.Millisecond const ( distPhysicalTableConcurrency = 16 @@ -186,7 +184,7 @@ func (dc *ddlCtx) sendPhysicalTableMetas(reorgInfo *reorgInfo, t table.Table, sJ } } -func (dc *ddlCtx) controlWriteTableRecord(sessPool *sessionPool, t table.Table, bfWorkerType backfillerType, reorgInfo *reorgInfo) error { +func (dc *ddlCtx) controlWriteTableRecord(sessPool *sess.Pool, t table.Table, bfWorkerType backfillerType, reorgInfo *reorgInfo) error { startKey, endKey := reorgInfo.StartKey, reorgInfo.EndKey if startKey == nil && endKey == nil { return nil @@ -197,12 +195,12 @@ func (dc *ddlCtx) controlWriteTableRecord(sessPool *sessionPool, t table.Table, logutil.BgLogger().Info("[ddl] control write table record start", zap.Int64("jobID", ddlJobID), zap.Stringer("ele", currEle), zap.Int64("tblID", t.Meta().ID), zap.Int64("currPID", reorgInfo.PhysicalTableID)) - sCtx, err := sessPool.get() + sCtx, err := sessPool.Get() if err != nil { return errors.Trace(err) } - defer sessPool.put(sCtx) - sess := newSession(sCtx) + defer sessPool.Put(sCtx) + se := newSession(sCtx) if err := dc.isReorgRunnable(ddlJobID, true); err != nil { return errors.Trace(err) @@ -238,18 +236,18 @@ func (dc *ddlCtx) controlWriteTableRecord(sessPool *sessionPool, t table.Table, sJobCtx.minBatchSize = minGenPhysicalTableTaskBatch } - err = checkAndHandleInterruptedBackfillJobs(sess, ddlJobID, currEle.ID, currEle.TypeKey) + err = checkAndHandleInterruptedBackfillJobs(se, ddlJobID, currEle.ID, currEle.TypeKey) if err != nil { return errors.Trace(err) } - phyTblMetas, err := getRunningPhysicalTableMetas(sess, sJobCtx, reorgInfo) + phyTblMetas, err := getRunningPhysicalTableMetas(se, sJobCtx, reorgInfo) if err != nil { return err } sCtxs := make([]sessionctx.Context, 0, concurrency) for i := 0; i < concurrency; i++ { - sCtx, err := sessPool.get() + sCtx, err := sessPool.Get() if err != nil { return err } @@ -273,9 +271,9 @@ func (dc *ddlCtx) controlWriteTableRecord(sessPool *sessionPool, t table.Table, } wg.Wait() for _, sCtx := range sCtxs { - sessPool.put(sCtx) + sessPool.Put(sCtx) } - return checkReorgJobFinished(dc.ctx, sess, &dc.reorgCtx, ddlJobID, currEle) + return checkReorgJobFinished(dc.ctx, se, &dc.reorgCtx, ddlJobID, currEle) } func addBatchBackfillJobs(sess *session, reorgInfo *reorgInfo, sJobCtx *splitJobContext, phyTblID int64, notDistTask bool, diff --git a/ddl/foreign_key.go b/ddl/foreign_key.go index 1a06719cb404b..9d9688b95afc0 100644 --- a/ddl/foreign_key.go +++ b/ddl/foreign_key.go @@ -673,7 +673,7 @@ func checkForeignKeyConstrain(w *worker, schema, table string, fkInfo *model.FKI if !fkCheck { return nil } - sctx, err := w.sessPool.get() + sctx, err := w.sessPool.Get() if err != nil { return errors.Trace(err) } @@ -681,7 +681,7 @@ func checkForeignKeyConstrain(w *worker, schema, table string, fkInfo *model.FKI sctx.GetSessionVars().OptimizerEnableNAAJ = true defer func() { sctx.GetSessionVars().OptimizerEnableNAAJ = originValue - w.sessPool.put(sctx) + w.sessPool.Put(sctx) }() var buf strings.Builder diff --git a/ddl/index.go b/ddl/index.go index a3ea894cd6a19..21c5b430f03bf 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -29,10 +29,10 @@ import ( "github.com/pingcap/tidb/br/pkg/lightning/common" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl/ingest" + sess "github.com/pingcap/tidb/ddl/internal/session" 
"github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" - "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/charset" @@ -748,10 +748,10 @@ func canUseIngest() bool { // IngestJobsNotExisted checks the ddl about `add index` with ingest method not existed. func IngestJobsNotExisted(ctx sessionctx.Context) bool { - sess := session{ctx} + se := session{ctx} template := "select job_meta from mysql.tidb_ddl_job where reorg and (type = %d or type = %d) and processing;" sql := fmt.Sprintf(template, model.ActionAddIndex, model.ActionAddPrimaryKey) - rows, err := sess.execute(context.Background(), sql, "check-pitr") + rows, err := se.execute(context.Background(), sql, "check-pitr") if err != nil { logutil.BgLogger().Warn("cannot check ingest job", zap.Error(err)) return false @@ -969,12 +969,12 @@ func runReorgJobAndHandleErr(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job, } }) - sctx, err1 := w.sessPool.get() + sctx, err1 := w.sessPool.Get() if err1 != nil { err = err1 return } - defer w.sessPool.put(sctx) + defer w.sessPool.Put(sctx) rh := newReorgHandler(newSession(sctx)) dbInfo, err := t.GetDatabase(job.SchemaID) if err != nil { @@ -1197,34 +1197,6 @@ func checkInvisibleIndexesOnPK(tblInfo *model.TableInfo, indexInfos []*model.Ind return nil } -// CheckDropIndexOnAutoIncrementColumn checks if the index to drop is on auto_increment column. -func CheckDropIndexOnAutoIncrementColumn(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) error { - cols := tblInfo.Columns - for _, idxCol := range indexInfo.Columns { - flag := cols[idxCol.Offset].GetFlag() - if !mysql.HasAutoIncrementFlag(flag) { - continue - } - // check the count of index on auto_increment column. - count := 0 - for _, idx := range tblInfo.Indices { - for _, c := range idx.Columns { - if c.Name.L == idxCol.Name.L { - count++ - break - } - } - } - if tblInfo.PKIsHandle && mysql.HasPriKeyFlag(flag) { - count++ - } - if count < 2 { - return autoid.ErrWrongAutoKey - } - } - return nil -} - func checkRenameIndex(t *meta.Meta, job *model.Job) (*model.TableInfo, model.CIStr, model.CIStr, error) { var from, to model.CIStr schemaID := job.SchemaID @@ -1946,7 +1918,7 @@ func getNextPartitionInfo(reorg *reorgInfo, t table.PartitionedTable, currPhysic // updateReorgInfo will find the next partition according to current reorgInfo. // If no more partitions, or table t is not a partitioned table, returns true to // indicate that the reorganize work is finished. 
-func updateReorgInfo(sessPool *sessionPool, t table.PartitionedTable, reorg *reorgInfo) (bool, error) { +func updateReorgInfo(sessPool *sess.Pool, t table.PartitionedTable, reorg *reorgInfo) (bool, error) { pid, startKey, endKey, err := getNextPartitionInfo(reorg, t, reorg.PhysicalTableID) if err != nil { return false, errors.Trace(err) diff --git a/ddl/internal/session/BUILD.bazel b/ddl/internal/session/BUILD.bazel new file mode 100644 index 0000000000000..27739611c4ef5 --- /dev/null +++ b/ddl/internal/session/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "session", + srcs = ["session_pool.go"], + importpath = "github.com/pingcap/tidb/ddl/internal/session", + visibility = ["//ddl:__subpackages__"], + deps = [ + "//kv", + "//parser/mysql", + "//sessionctx", + "//util/logutil", + "//util/mock", + "@com_github_ngaut_pools//:pools", + "@com_github_pingcap_errors//:errors", + ], +) diff --git a/ddl/session_pool.go b/ddl/internal/session/session_pool.go similarity index 68% rename from ddl/session_pool.go rename to ddl/internal/session/session_pool.go index 2e372dcef9f26..9602d0acaeb60 100644 --- a/ddl/session_pool.go +++ b/ddl/internal/session/session_pool.go @@ -1,4 +1,4 @@ -// Copyright 2018 PingCAP, Inc. +// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package ddl +package session import ( "fmt" @@ -27,8 +27,8 @@ import ( "github.com/pingcap/tidb/util/mock" ) -// sessionPool is used to new session. -type sessionPool struct { +// Pool is used to new session. +type Pool struct { mu struct { sync.Mutex closed bool @@ -37,13 +37,14 @@ type sessionPool struct { store kv.Storage } -func newSessionPool(resPool *pools.ResourcePool, store kv.Storage) *sessionPool { - return &sessionPool{resPool: resPool, store: store} +// NewSessionPool creates a new session pool. +func NewSessionPool(resPool *pools.ResourcePool, store kv.Storage) *Pool { + return &Pool{resPool: resPool, store: store} } -// get gets sessionctx from context resource pool. -// Please remember to call put after you finished using sessionctx. -func (sg *sessionPool) get() (sessionctx.Context, error) { +// Get gets sessionCtx from context resource pool. +// Please remember to call Put after you finished using sessionCtx. +func (sg *Pool) Get() (sessionctx.Context, error) { if sg.resPool == nil { ctx := mock.NewContext() ctx.Store = sg.store @@ -53,7 +54,7 @@ func (sg *sessionPool) get() (sessionctx.Context, error) { sg.mu.Lock() if sg.mu.closed { sg.mu.Unlock() - return nil, errors.Errorf("sessionPool is closed") + return nil, errors.Errorf("session pool is closed") } sg.mu.Unlock() @@ -65,33 +66,33 @@ func (sg *sessionPool) get() (sessionctx.Context, error) { ctx, ok := resource.(sessionctx.Context) if !ok { - return nil, fmt.Errorf("sessionPool resource get %v", ctx) + return nil, fmt.Errorf("session pool resource get %v", ctx) } ctx.GetSessionVars().SetStatusFlag(mysql.ServerStatusAutocommit, true) ctx.GetSessionVars().InRestrictedSQL = true return ctx, nil } -// put returns sessionctx to context resource pool. -func (sg *sessionPool) put(ctx sessionctx.Context) { +// Put returns sessionCtx to context resource pool. 
+func (sg *Pool) Put(ctx sessionctx.Context) { if sg.resPool == nil { return } // no need to protect sg.resPool, even the sg.resPool is closed, the ctx still need to - // put into resPool, because when resPool is closing, it will wait all the ctx returns, then resPool finish closing. + // Put into resPool, because when resPool is closing, it will wait all the ctx returns, then resPool finish closing. sg.resPool.Put(ctx.(pools.Resource)) } -// close clean up the sessionPool. -func (sg *sessionPool) close() { +// Close clean up the Pool. +func (sg *Pool) Close() { sg.mu.Lock() defer sg.mu.Unlock() // prevent closing resPool twice. if sg.mu.closed || sg.resPool == nil { return } - logutil.BgLogger().Info("[ddl] closing sessionPool") + logutil.BgLogger().Info("[ddl] closing session pool") sg.resPool.Close() sg.mu.closed = true } diff --git a/ddl/job_table.go b/ddl/job_table.go index 7d66d441077f5..3c8abefef731e 100644 --- a/ddl/job_table.go +++ b/ddl/job_table.go @@ -172,11 +172,11 @@ func (d *ddl) getReorgJob(sess *session) (*model.Job, error) { } func (d *ddl) startDispatchLoop() { - se, err := d.sessPool.get() + se, err := d.sessPool.Get() if err != nil { logutil.BgLogger().Fatal("dispatch loop get session failed, it should not happen, please try restart TiDB", zap.Error(err)) } - defer d.sessPool.put(se) + defer d.sessPool.Put(se) sess := newSession(se) var notifyDDLJobByEtcdCh clientv3.WatchChan if d.etcdCli != nil { @@ -378,7 +378,7 @@ func (d *ddl) loadBackfillJobAndRun() { if !isDistReorg { return } - se, err := d.sessPool.get() + se, err := d.sessPool.Get() if err != nil { logutil.BgLogger().Fatal("dispatch backfill jobs loop get session failed, it should not happen, please try restart TiDB", zap.Error(err)) } @@ -386,7 +386,7 @@ func (d *ddl) loadBackfillJobAndRun() { runningJobIDs := d.backfillCtxJobIDs() if len(runningJobIDs) >= reorgWorkerCnt { - d.sessPool.put(se) + d.sessPool.Put(se) return } @@ -399,14 +399,14 @@ func (d *ddl) loadBackfillJobAndRun() { } else { logutil.BgLogger().Debug("[ddl] get no backfill job in this instance") } - d.sessPool.put(se) + d.sessPool.Put(se) return } jobCtx, existent := d.setBackfillCtxJobContext(bfJob.JobID, bfJob.Meta.Query, bfJob.Meta.Type) if existent { logutil.BgLogger().Warn("[ddl] get the type of backfill job is running in this instance", zap.String("backfill job", bfJob.AbbrStr())) - d.sessPool.put(se) + d.sessPool.Put(se) return } // TODO: Adjust how the non-owner uses ReorgCtx. 
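The moved session_pool.go keeps the pool's lifecycle intact; only the constructor and methods gain exported names. A rough sketch of how an owning component wires it up, following the ddl.Start/close pattern changed earlier in this patch (the poolOwner struct is illustrative; when the resource pool is nil, Get falls back to handing out mock contexts, as the Get body above shows):

package example

import (
	"github.com/ngaut/pools"
	sess "github.com/pingcap/tidb/ddl/internal/session"
	"github.com/pingcap/tidb/kv"
)

// poolOwner is an illustrative holder, mirroring how the ddl struct keeps a *sess.Pool.
type poolOwner struct {
	sessPool *sess.Pool
}

// start builds the pool from the shared resource pool and store, as ddl.Start does.
func (o *poolOwner) start(ctxPool *pools.ResourcePool, store kv.Storage) {
	o.sessPool = sess.NewSessionPool(ctxPool, store)
}

// close releases the pool; Close guards against double-closing via its closed flag.
func (o *poolOwner) close() {
	if o.sessPool != nil {
		o.sessPool.Close()
	}
}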
@@ -416,7 +416,7 @@ func (d *ddl) loadBackfillJobAndRun() { tidbutil.Recover(metrics.LabelDistReorg, "runBackfillJobs", nil, false) d.removeBackfillCtxJobCtx(bfJob.JobID) d.removeReorgCtx(genBackfillJobReorgCtxID(bfJob.JobID)) - d.sessPool.put(se) + d.sessPool.Put(se) }() if bfJob.Meta.ReorgTp == model.ReorgTypeLitMerge { @@ -816,12 +816,12 @@ func GetBackfillMetas(sess *session, tblName, condition string, label string) ([ metas := make([]*model.BackfillMeta, 0, len(rows)) for _, r := range rows { - meta := &model.BackfillMeta{} - err = meta.Decode(r.GetBytes(0)) + m := &model.BackfillMeta{} + err = m.Decode(r.GetBytes(0)) if err != nil { return nil, errors.Trace(err) } - metas = append(metas, meta) + metas = append(metas, m) } return metas, nil @@ -848,15 +848,15 @@ func GetBackfillIDAndMetas(sess *session, tblName, condition string, label strin if err != nil { return nil, err } - meta := &model.BackfillMeta{} - err = meta.Decode(r.GetBytes(1)) + m := &model.BackfillMeta{} + err = m.Decode(r.GetBytes(1)) if err != nil { return nil, err } pTblMeta := BackfillJobRangeMeta{ ID: id, - StartKey: meta.StartKey, - EndKey: meta.EndKey, + StartKey: m.StartKey, + EndKey: m.EndKey, PhyTblID: r.GetInt64(2), } pTblMetas = append(pTblMetas, &pTblMeta) diff --git a/ddl/partition.go b/ddl/partition.go index 3644f9b5997aa..aab5095dfcb64 100644 --- a/ddl/partition.go +++ b/ddl/partition.go @@ -1822,11 +1822,11 @@ func (w *worker) onDropTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) ( elements = append(elements, &meta.Element{ID: idxInfo.ID, TypeKey: meta.IndexElementKey}) } } - sctx, err1 := w.sessPool.get() + sctx, err1 := w.sessPool.Get() if err1 != nil { return ver, err1 } - defer w.sessPool.put(sctx) + defer w.sessPool.Put(sctx) rh := newReorgHandler(newSession(sctx)) reorgInfo, err := getReorgInfoFromPartitions(d.jobContext(job.ID), d, rh, job, dbInfo, pt, physicalTableIDs, elements) @@ -2146,8 +2146,8 @@ func (w *worker) onExchangeTablePartition(d *ddlCtx, t *meta.Meta, job *model.Jo failpoint.Inject("exchangePartitionAutoID", func(val failpoint.Value) { if val.(bool) { - se, err := w.sessPool.get() - defer w.sessPool.put(se) + se, err := w.sessPool.Get() + defer w.sessPool.Put(se) if err != nil { failpoint.Return(ver, err) } @@ -2529,11 +2529,11 @@ func (w *worker) onReorganizePartition(d *ddlCtx, t *meta.Meta, job *model.Job) func doPartitionReorgWork(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job, tbl table.Table, physTblIDs []int64) (done bool, ver int64, err error) { job.ReorgMeta.ReorgTp = model.ReorgTypeTxn - sctx, err1 := w.sessPool.get() + sctx, err1 := w.sessPool.Get() if err1 != nil { return done, ver, err1 } - defer w.sessPool.put(sctx) + defer w.sessPool.Put(sctx) rh := newReorgHandler(newSession(sctx)) elements := BuildElements(tbl.Meta().Columns[0], tbl.Meta().Indices) partTbl, ok := tbl.(table.PartitionedTable) @@ -2959,11 +2959,11 @@ func checkExchangePartitionRecordValidation(w *worker, pt *model.TableInfo, inde } var ctx sessionctx.Context - ctx, err := w.sessPool.get() + ctx, err := w.sessPool.Get() if err != nil { return errors.Trace(err) } - defer w.sessPool.put(ctx) + defer w.sessPool.Put(ctx) rows, _, err := ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(w.ctx, nil, sql, paramList...) 
if err != nil { diff --git a/ddl/reorg.go b/ddl/reorg.go index 17ee2889aae3b..13f874acb3deb 100644 --- a/ddl/reorg.go +++ b/ddl/reorg.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl/ingest" + sess "github.com/pingcap/tidb/ddl/internal/session" "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" @@ -80,12 +81,6 @@ type reorgCtx struct { references atomicutil.Int32 } -// nullableKey can store kv.Key. -// Storing a nil object to atomic.Value can lead to panic. This is a workaround. -type nullableKey struct { - key kv.Key -} - // newContext gets a context. It is only used for adding column in reorganization state. func newContext(store kv.Storage) sessionctx.Context { c := mock.NewContext() @@ -323,11 +318,11 @@ func updateBackfillProgress(w *worker, reorgInfo *reorgInfo, tblInfo *model.Tabl func getTableTotalCount(w *worker, tblInfo *model.TableInfo) int64 { var ctx sessionctx.Context - ctx, err := w.sessPool.get() + ctx, err := w.sessPool.Get() if err != nil { return statistics.PseudoRowCount } - defer w.sessPool.put(ctx) + defer w.sessPool.Put(ctx) executor, ok := ctx.(sqlexec.RestrictedSQLExecutor) // `mock.Context` is used in tests, which doesn't implement RestrictedSQLExecutor @@ -761,24 +756,24 @@ func getReorgInfoFromPartitions(ctx *JobContext, d *ddlCtx, rh *reorgHandler, jo // UpdateReorgMeta creates a new transaction and updates tidb_ddl_reorg table, // so the reorg can restart in case of issues. -func (r *reorgInfo) UpdateReorgMeta(startKey kv.Key, pool *sessionPool) (err error) { +func (r *reorgInfo) UpdateReorgMeta(startKey kv.Key, pool *sess.Pool) (err error) { if startKey == nil && r.EndKey == nil { return nil } - sctx, err := pool.get() + sctx, err := pool.Get() if err != nil { return } - defer pool.put(sctx) + defer pool.Put(sctx) - sess := newSession(sctx) - err = sess.begin() + se := newSession(sctx) + err = se.begin() if err != nil { return } - rh := newReorgHandler(sess) + rh := newReorgHandler(se) err = updateDDLReorgHandle(rh.s, r.Job.ID, startKey, r.EndKey, r.PhysicalTableID, r.currElement) - err1 := sess.commit() + err1 := se.commit() if err == nil { err = err1 } diff --git a/ddl/sanity_check.go b/ddl/sanity_check.go index 151eb8c44b469..8dbdb640a2b09 100644 --- a/ddl/sanity_check.go +++ b/ddl/sanity_check.go @@ -20,6 +20,7 @@ import ( "strings" "github.com/pingcap/errors" + sess "github.com/pingcap/tidb/ddl/internal/session" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" @@ -51,11 +52,11 @@ func (d *ddl) checkDeleteRangeCnt(job *model.Job) { } } -func queryDeleteRangeCnt(sessPool *sessionPool, jobID int64) (int, error) { - sctx, _ := sessPool.get() +func queryDeleteRangeCnt(sessPool *sess.Pool, jobID int64) (int, error) { + sctx, _ := sessPool.Get() s, _ := sctx.(sqlexec.SQLExecutor) defer func() { - sessPool.put(sctx) + sessPool.Put(sctx) }() ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) diff --git a/ddl/stat.go b/ddl/stat.go index 15be82d6719ae..e6551842aeea2 100644 --- a/ddl/stat.go +++ b/ddl/stat.go @@ -50,11 +50,11 @@ func (d *ddl) Stats(vars *variable.SessionVars) (map[string]interface{}, error) m[serverID] = d.uuid var ddlInfo *Info - s, err := d.sessPool.get() + s, err := d.sessPool.Get() if err != nil { return nil, errors.Trace(err) } - defer d.sessPool.put(s) + defer d.sessPool.Put(s) ddlInfo, err = GetDDLInfoWithNewTxn(s) if err != nil { return nil, errors.Trace(err) 
diff --git a/ddl/table.go b/ddl/table.go index 74cd0d332f4e1..906fc12932442 100644 --- a/ddl/table.go +++ b/ddl/table.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + sess "github.com/pingcap/tidb/ddl/internal/session" "github.com/pingcap/tidb/ddl/label" "github.com/pingcap/tidb/ddl/placement" "github.com/pingcap/tidb/ddl/util" @@ -204,7 +205,7 @@ func createTableWithForeignKeys(d *ddlCtx, t *meta.Meta, job *model.Job, tbInfo func onCreateTables(d *ddlCtx, t *meta.Meta, job *model.Job) (int64, error) { var ver int64 - args := []*model.TableInfo{} + var args []*model.TableInfo fkCheck := false err := job.DecodeArgs(&args, &fkCheck) if err != nil { @@ -580,41 +581,41 @@ func clearTablePlacementAndBundles(tblInfo *model.TableInfo) error { var mockRecoverTableCommitErrOnce uint32 func enableGC(w *worker) error { - ctx, err := w.sessPool.get() + ctx, err := w.sessPool.Get() if err != nil { return errors.Trace(err) } - defer w.sessPool.put(ctx) + defer w.sessPool.Put(ctx) return gcutil.EnableGC(ctx) } func disableGC(w *worker) error { - ctx, err := w.sessPool.get() + ctx, err := w.sessPool.Get() if err != nil { return errors.Trace(err) } - defer w.sessPool.put(ctx) + defer w.sessPool.Put(ctx) return gcutil.DisableGC(ctx) } func checkGCEnable(w *worker) (enable bool, err error) { - ctx, err := w.sessPool.get() + ctx, err := w.sessPool.Get() if err != nil { return false, errors.Trace(err) } - defer w.sessPool.put(ctx) + defer w.sessPool.Put(ctx) return gcutil.CheckGCEnable(ctx) } func checkSafePoint(w *worker, snapshotTS uint64) error { - ctx, err := w.sessPool.get() + ctx, err := w.sessPool.Get() if err != nil { return errors.Trace(err) } - defer w.sessPool.put(ctx) + defer w.sessPool.Put(ctx) return gcutil.ValidateSnapshot(ctx, snapshotTS) } @@ -679,7 +680,7 @@ func getTableInfo(t *meta.Meta, tableID, schemaID int64) (*model.TableInfo, erro } // onTruncateTable delete old table meta, and creates a new table identical to old table except for table ID. -// As all the old data is encoded with old table ID, it can not be accessed any more. +// As all the old data is encoded with old table ID, it can not be accessed anymore. // A background job will be created to delete old data. func onTruncateTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { schemaID := job.SchemaID @@ -958,15 +959,15 @@ func (w *worker) onShardRowID(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int6 return ver, nil } -func verifyNoOverflowShardBits(s *sessionPool, tbl table.Table, shardRowIDBits uint64) error { +func verifyNoOverflowShardBits(s *sess.Pool, tbl table.Table, shardRowIDBits uint64) error { if shardRowIDBits == 0 { return nil } - ctx, err := s.get() + ctx, err := s.Get() if err != nil { return errors.Trace(err) } - defer s.put(ctx) + defer s.Put(ctx) // Check next global max auto ID first. 
autoIncID, err := tbl.Allocators(ctx).Get(autoid.RowIDAllocType).NextGlobalAutoID() if err != nil { @@ -1304,11 +1305,11 @@ func (w *worker) onSetTableFlashReplica(d *ddlCtx, t *meta.Meta, job *model.Job) } func (w *worker) checkTiFlashReplicaCount(replicaCount uint64) error { - ctx, err := w.sessPool.get() + ctx, err := w.sessPool.Get() if err != nil { return errors.Trace(err) } - defer w.sessPool.put(ctx) + defer w.sessPool.Put(ctx) return checkTiFlashReplicaCount(ctx, replicaCount) } @@ -1415,7 +1416,7 @@ func checkTableNotExistsFromInfoSchema(is infoschema.InfoSchema, schemaID int64, func checkTableNotExistsFromStore(t *meta.Meta, schemaID int64, tableName string) error { // Check this table's database. - tables, err := t.ListTables(schemaID) + tbls, err := t.ListTables(schemaID) if err != nil { if meta.ErrDBNotExists.Equal(err) { return infoschema.ErrDatabaseNotExists.GenWithStackByArgs("") @@ -1424,7 +1425,7 @@ func checkTableNotExistsFromStore(t *meta.Meta, schemaID int64, tableName string } // Check the table. - for _, tbl := range tables { + for _, tbl := range tbls { if tbl.Name.L == tableName { return infoschema.ErrTableExists.GenWithStackByArgs(tbl.Name) } From 0c3a9526177faaa4cbced10c4cde096f73cef153 Mon Sep 17 00:00:00 2001 From: Lynn Date: Tue, 4 Apr 2023 19:07:09 +0800 Subject: [PATCH 03/12] disttask: make TestParallelErrFlow more stable (#42809) close pingcap/tidb#42791 --- disttask/framework/dispatcher/dispatcher.go | 5 +++-- disttask/framework/dispatcher/dispatcher_test.go | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/disttask/framework/dispatcher/dispatcher.go b/disttask/framework/dispatcher/dispatcher.go index 9e1d0453c4928..7495c27e937d7 100644 --- a/disttask/framework/dispatcher/dispatcher.go +++ b/disttask/framework/dispatcher/dispatcher.go @@ -391,9 +391,10 @@ func (d *dispatcher) processNormalFlow(gTask *proto.Task) (err error) { // Special handling for the new tasks. if gTask.State == proto.TaskStatePending { // TODO: Consider using TS. 
- gTask.StartTime = time.Now().UTC() + nowTime := time.Now().UTC() + gTask.StartTime = nowTime gTask.State = proto.TaskStateRunning - gTask.StateUpdateTime = time.Now().UTC() + gTask.StateUpdateTime = nowTime retryTimes = nonRetrySQLTime } diff --git a/disttask/framework/dispatcher/dispatcher_test.go b/disttask/framework/dispatcher/dispatcher_test.go index 7c37b33a86ead..f915cad317cba 100644 --- a/disttask/framework/dispatcher/dispatcher_test.go +++ b/disttask/framework/dispatcher/dispatcher_test.go @@ -123,7 +123,7 @@ func TestGetInstance(t *testing.T) { } const ( - subtaskCnt = 10 + subtaskCnt = 3 ) func checkDispatch(t *testing.T, taskCnt int, isSucc bool) { @@ -151,8 +151,8 @@ func checkDispatch(t *testing.T, taskCnt int, isSucc bool) { dispatcher.RegisterTaskFlowHandle(taskTypeExample, NumberExampleHandle{}) - // 2s - cnt := 40 + // 3s + cnt := 60 checkGetRunningGTaskCnt := func() { var retCnt int for i := 0; i < cnt; i++ { From 462441e9c1d3d5a3a360b1f52420f1ae7015dc43 Mon Sep 17 00:00:00 2001 From: Weizhen Wang Date: Tue, 4 Apr 2023 19:07:16 +0800 Subject: [PATCH 04/12] *: enable revive for ddl/internal (#42810) --- build/nogo_config.json | 1 + 1 file changed, 1 insertion(+) diff --git a/build/nogo_config.json b/build/nogo_config.json index ab88720632912..13a7712f14234 100644 --- a/build/nogo_config.json +++ b/build/nogo_config.json @@ -369,6 +369,7 @@ "ddl/ttl.go": "ddl/ttl.go", "ddl/ttl_test.go": "ddl/ttl_test.go", "ddl/ingest/": "ddl/ingest/", + "ddl/internal/": "ddl/internal/", "expression/builtin_cast.go": "expression/builtin_cast code", "server/conn.go": "server/conn.go", "server/conn_stmt.go": "server/conn_stmt.go", From 4675e4e1699f37f224525d7d36446d211f86b3f4 Mon Sep 17 00:00:00 2001 From: Yuanjia Zhang Date: Tue, 4 Apr 2023 19:28:57 +0800 Subject: [PATCH 05/12] planner: add more test cases for non-prep plan cache (#42801) ref pingcap/tidb#36598 --- planner/core/plan_cache_test.go | 20 ++++++++++++++++++++ planner/optimize.go | 1 + server/conn.go | 2 ++ session/session.go | 1 + sessionctx/variable/session.go | 3 +++ 5 files changed, 27 insertions(+) diff --git a/planner/core/plan_cache_test.go b/planner/core/plan_cache_test.go index 544a2fbe52050..ab1f9ee7e741b 100644 --- a/planner/core/plan_cache_test.go +++ b/planner/core/plan_cache_test.go @@ -1785,6 +1785,26 @@ func TestNonPreparedPlanCachePanic(t *testing.T) { } } +func TestNonPreparedPlanCacheMultiStmt(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(`set tidb_enable_non_prepared_plan_cache=1`) + tk.MustExec("create table t (a int)") + + tk.MustExec("update t set a=1 where a<10") + tk.MustExec("update t set a=2 where a<12") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + // multi-stmt SQL cannot hit the cache + tk.MustExec("update t set a=1 where a<10; update t set a=2 where a<12") + tk.MustExec("update t set a=1 where a<10; update t set a=2 where a<12") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("update t set a=2 where a<12") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) +} + func TestNonPreparedPlanCacheJoin(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) diff --git a/planner/optimize.go b/planner/optimize.go index 83679091e6e58..7c0cf08774421 100644 --- a/planner/optimize.go +++ b/planner/optimize.go @@ -83,6 +83,7 @@ func getPlanFromNonPreparedPlanCache(ctx context.Context, sctx sessionctx.Contex 
stmtCtx.InRestrictedSQL || // is internal SQL isExplain || // explain external !sctx.GetSessionVars().DisableTxnAutoRetry || // txn-auto-retry + sctx.GetSessionVars().InMultiStmts || // in multi-stmt (stmtCtx.InExplainStmt && stmtCtx.ExplainFormat != types.ExplainFormatPlanCache) { // in explain internal return nil, nil, false, nil } diff --git a/server/conn.go b/server/conn.go index 59348900af4ea..6eacc3cfaf186 100644 --- a/server/conn.go +++ b/server/conn.go @@ -1798,6 +1798,7 @@ func (cc *clientConn) handleQuery(ctx context.Context, sql string) (err error) { parserWarns := warns[len(prevWarns):] var pointPlans []plannercore.Plan + cc.ctx.GetSessionVars().InMultiStmts = false if len(stmts) > 1 { // The client gets to choose if it allows multi-statements, and // probably defaults OFF. This helps prevent against SQL injection attacks @@ -1819,6 +1820,7 @@ func (cc *clientConn) handleQuery(ctx context.Context, sql string) (err error) { parserWarns = append(parserWarns, warn) } } + cc.ctx.GetSessionVars().InMultiStmts = true // Only pre-build point plans for multi-statement query pointPlans, err = cc.prefetchPointPlanKeys(ctx, stmts) diff --git a/session/session.go b/session/session.go index 6c7164d7782f8..08a7533805f92 100644 --- a/session/session.go +++ b/session/session.go @@ -2123,6 +2123,7 @@ func (s *session) ExecuteStmt(ctx context.Context, stmtNode ast.StmtNode) (sqlex // Uncorrelated subqueries will execute once when building plan, so we reset process info before building plan. cmd32 := atomic.LoadUint32(&s.GetSessionVars().CommandValue) + s.currentPlan = nil // reset current plan s.SetProcessInfo(stmtNode.Text(), time.Now(), byte(cmd32), 0) s.txn.onStmtStart(digest.String()) defer sessiontxn.GetTxnManager(s).OnStmtEnd() diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index f097cb82b02d4..d2a10a37726f0 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -785,6 +785,9 @@ type SessionVars struct { // MultiStatementMode permits incorrect client library usage. Not recommended to be turned on. MultiStatementMode int + // InMultiStmts indicates whether the statement is a multi-statement like `update t set a=1; update t set b=2;`. + InMultiStmts bool + // AllowWriteRowID variable is currently not recommended to be turned on. 
AllowWriteRowID bool From e18d7fcab37217485b1d80f670db7567077b4060 Mon Sep 17 00:00:00 2001 From: tangenta Date: Tue, 4 Apr 2023 22:48:57 +0800 Subject: [PATCH 06/12] ddl: extract session to a separate internal package (#42819) --- ddl/BUILD.bazel | 1 + ddl/cluster.go | 21 ++-- ddl/column.go | 3 +- ddl/ddl.go | 129 +++------------------ ddl/ddl_test.go | 3 - ddl/ddl_worker.go | 42 +++---- ddl/dist_backfilling.go | 13 ++- ddl/dist_owner.go | 87 +++++++-------- ddl/index.go | 18 +-- ddl/internal/session/BUILD.bazel | 11 +- ddl/internal/session/session.go | 137 +++++++++++++++++++++++ ddl/internal/session/session_pool.go | 10 +- ddl/job_table.go | 161 ++++++++++++++------------- ddl/job_table_test.go | 29 ++--- ddl/partition.go | 13 ++- ddl/reorg.go | 16 +-- 16 files changed, 374 insertions(+), 320 deletions(-) create mode 100644 ddl/internal/session/session.go diff --git a/ddl/BUILD.bazel b/ddl/BUILD.bazel index be970766d10d7..e463908d31266 100644 --- a/ddl/BUILD.bazel +++ b/ddl/BUILD.bazel @@ -221,6 +221,7 @@ go_test( "//autoid_service", "//config", "//ddl/internal/callback", + "//ddl/internal/session", "//ddl/placement", "//ddl/schematracker", "//ddl/testutil", diff --git a/ddl/cluster.go b/ddl/cluster.go index b4331040e2e54..a70b144708d3d 100644 --- a/ddl/cluster.go +++ b/ddl/cluster.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/errorpb" "github.com/pingcap/kvproto/pkg/kvrpcpb" + sess "github.com/pingcap/tidb/ddl/internal/session" "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/infoschema" @@ -218,24 +219,24 @@ func checkSystemSchemaID(t *meta.Meta, schemaID int64, flashbackTSString string) return nil } -func checkAndSetFlashbackClusterInfo(sess sessionctx.Context, d *ddlCtx, t *meta.Meta, job *model.Job, flashbackTS uint64) (err error) { - if err = ValidateFlashbackTS(d.ctx, sess, flashbackTS); err != nil { +func checkAndSetFlashbackClusterInfo(se sessionctx.Context, d *ddlCtx, t *meta.Meta, job *model.Job, flashbackTS uint64) (err error) { + if err = ValidateFlashbackTS(d.ctx, se, flashbackTS); err != nil { return err } - if err = gcutil.DisableGC(sess); err != nil { + if err = gcutil.DisableGC(se); err != nil { return err } if err = closePDSchedule(); err != nil { return err } - if err = setTiDBEnableAutoAnalyze(d.ctx, sess, variable.Off); err != nil { + if err = setTiDBEnableAutoAnalyze(d.ctx, se, variable.Off); err != nil { return err } - if err = setTiDBSuperReadOnly(d.ctx, sess, variable.On); err != nil { + if err = setTiDBSuperReadOnly(d.ctx, se, variable.On); err != nil { return err } - if err = setTiDBTTLJobEnable(d.ctx, sess, variable.Off); err != nil { + if err = setTiDBTTLJobEnable(d.ctx, se, variable.Off); err != nil { return err } @@ -254,12 +255,12 @@ func checkAndSetFlashbackClusterInfo(sess sessionctx.Context, d *ddlCtx, t *meta // Check if there is an upgrade during [flashbackTS, now) sql := fmt.Sprintf("select VARIABLE_VALUE from mysql.tidb as of timestamp '%s' where VARIABLE_NAME='tidb_server_version'", flashbackTSString) - rows, err := newSession(sess).execute(d.ctx, sql, "check_tidb_server_version") + rows, err := sess.NewSession(se).Execute(d.ctx, sql, "check_tidb_server_version") if err != nil || len(rows) == 0 { return errors.Errorf("Get history `tidb_server_version` failed, can't do flashback") } sql = fmt.Sprintf("select 1 from mysql.tidb where VARIABLE_NAME='tidb_server_version' and VARIABLE_VALUE=%s", rows[0].GetString(0)) - rows, err = 
newSession(sess).execute(d.ctx, sql, "check_tidb_server_version") + rows, err = sess.NewSession(se).Execute(d.ctx, sql, "check_tidb_server_version") if err != nil { return errors.Trace(err) } @@ -269,7 +270,7 @@ func checkAndSetFlashbackClusterInfo(sess sessionctx.Context, d *ddlCtx, t *meta // Check is there a DDL task at flashbackTS. sql = fmt.Sprintf("select count(*) from mysql.%s as of timestamp '%s'", JobTable, flashbackTSString) - rows, err = newSession(sess).execute(d.ctx, sql, "check_history_job") + rows, err = sess.NewSession(se).Execute(d.ctx, sql, "check_history_job") if err != nil || len(rows) == 0 { return errors.Errorf("Get history ddl jobs failed, can't do flashback") } @@ -295,7 +296,7 @@ func checkAndSetFlashbackClusterInfo(sess sessionctx.Context, d *ddlCtx, t *meta } } - jobs, err := GetAllDDLJobs(sess, t) + jobs, err := GetAllDDLJobs(se, t) if err != nil { return errors.Trace(err) } diff --git a/ddl/column.go b/ddl/column.go index ec8f3570043ac..45862a56d6355 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/config" + sess "github.com/pingcap/tidb/ddl/internal/session" ddlutil "github.com/pingcap/tidb/ddl/util" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" @@ -816,7 +817,7 @@ func doReorgWorkForModifyColumn(w *worker, d *ddlCtx, t *meta.Meta, job *model.J return } defer w.sessPool.Put(sctx) - rh := newReorgHandler(newSession(sctx)) + rh := newReorgHandler(sess.NewSession(sctx)) dbInfo, err := t.GetDatabase(job.SchemaID) if err != nil { return false, ver, errors.Trace(err) diff --git a/ddl/ddl.go b/ddl/ddl.go index 3d0e24ba30b70..9c429551284b9 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -58,13 +58,11 @@ import ( "github.com/pingcap/tidb/table" pumpcli "github.com/pingcap/tidb/tidb-binlog/pump_client" tidbutil "github.com/pingcap/tidb/util" - "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/dbterror" "github.com/pingcap/tidb/util/gcutil" "github.com/pingcap/tidb/util/gpool/spmc" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/mathutil" - "github.com/pingcap/tidb/util/sqlexec" "github.com/pingcap/tidb/util/syncutil" "github.com/tikv/client-go/v2/tikvrpc" clientv3 "go.etcd.io/etcd/client/v3" @@ -724,7 +722,7 @@ func (d *ddl) prepareWorkers4ConcurrencyDDL() { return nil, err } sessForJob.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) - wk.sess = newSession(sessForJob) + wk.sess = sess.NewSession(sessForJob) metrics.DDLCounter.WithLabelValues(fmt.Sprintf("%s_%s", metrics.CreateDDL, wk.String())).Inc() return wk, nil } @@ -1257,8 +1255,8 @@ func (d *ddl) SwitchMDL(enable bool) error { return err } defer d.sessPool.Put(sessCtx) - se := newSession(sessCtx) - rows, err := se.execute(ctx, "select 1 from mysql.tidb_ddl_job", "check job") + se := sess.NewSession(sessCtx) + rows, err := se.Execute(ctx, "select 1 from mysql.tidb_ddl_job", "check job") if err != nil { return err } @@ -1373,13 +1371,13 @@ type Info struct { // GetDDLInfoWithNewTxn returns DDL information using a new txn. 
func GetDDLInfoWithNewTxn(s sessionctx.Context) (*Info, error) { - se := newSession(s) - err := se.begin() + se := sess.NewSession(s) + err := se.Begin() if err != nil { return nil, err } info, err := GetDDLInfo(s) - se.rollback() + se.Rollback() return info, err } @@ -1387,8 +1385,8 @@ func GetDDLInfoWithNewTxn(s sessionctx.Context) (*Info, error) { func GetDDLInfo(s sessionctx.Context) (*Info, error) { var err error info := &Info{} - se := newSession(s) - txn, err := se.txn() + se := sess.NewSession(s) + txn, err := se.Txn() if err != nil { return nil, errors.Trace(err) } @@ -1427,7 +1425,7 @@ func GetDDLInfo(s sessionctx.Context) (*Info, error) { return info, nil } -func get2JobsFromTable(sess *session) (*model.Job, *model.Job, error) { +func get2JobsFromTable(sess *sess.Session) (*model.Job, *model.Job, error) { var generalJob, reorgJob *model.Job jobs, err := getJobsBySQL(sess, JobTable, "not reorg order by job_id limit 1") if err != nil { @@ -1464,8 +1462,8 @@ func cancelConcurrencyJobs(se sessionctx.Context, ids []int64) ([]error, error) } var jobMap = make(map[int64]int) // jobID -> error index - sessCtx := newSession(se) - err := sessCtx.begin() + sessCtx := sess.NewSession(se) + err := sessCtx.Begin() if err != nil { return nil, err } @@ -1478,7 +1476,7 @@ func cancelConcurrencyJobs(se sessionctx.Context, ids []int64) ([]error, error) jobs, err := getJobsBySQL(sessCtx, JobTable, fmt.Sprintf("job_id in (%s) order by job_id", strings.Join(idsStr, ", "))) if err != nil { - sessCtx.rollback() + sessCtx.Rollback() return nil, err } @@ -1518,7 +1516,7 @@ func cancelConcurrencyJobs(se sessionctx.Context, ids []int64) ([]error, error) errs[i] = errors.Trace(err) } } - err = sessCtx.commit() + err = sessCtx.Commit() if err != nil { return nil, err } @@ -1529,8 +1527,8 @@ func cancelConcurrencyJobs(se sessionctx.Context, ids []int64) ([]error, error) } // GetAllDDLJobs get all DDL jobs and sorts jobs by job.ID. -func GetAllDDLJobs(sess sessionctx.Context, t *meta.Meta) ([]*model.Job, error) { - return getJobsBySQL(newSession(sess), JobTable, "1 order by job_id") +func GetAllDDLJobs(se sessionctx.Context, t *meta.Meta) ([]*model.Job, error) { + return getJobsBySQL(sess.NewSession(se), JobTable, "1 order by job_id") } // DefNumHistoryJobs is default value of the default number of history job @@ -1588,97 +1586,6 @@ func GetLastHistoryDDLJobsIterator(m *meta.Meta) (meta.LastJobIterator, error) { return m.GetLastHistoryDDLJobsIterator() } -// session wraps sessionctx.Context for transaction usage. 
-type session struct { - sessionctx.Context -} - -func newSession(s sessionctx.Context) *session { - return &session{s} -} - -func (s *session) begin() error { - err := sessiontxn.NewTxn(context.Background(), s) - if err != nil { - return err - } - s.GetSessionVars().SetInTxn(true) - return nil -} - -func (s *session) commit() error { - s.StmtCommit(context.Background()) - return s.CommitTxn(context.Background()) -} - -func (s *session) txn() (kv.Transaction, error) { - return s.Txn(true) -} - -func (s *session) rollback() { - s.StmtRollback(context.Background(), false) - s.RollbackTxn(context.Background()) -} - -func (s *session) reset() { - s.StmtRollback(context.Background(), false) -} - -func (s *session) execute(ctx context.Context, query string, label string) ([]chunk.Row, error) { - startTime := time.Now() - var err error - defer func() { - metrics.DDLJobTableDuration.WithLabelValues(label + "-" + metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds()) - }() - - if ctx.Value(kv.RequestSourceKey) == nil { - ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnDDL) - } - rs, err := s.Context.(sqlexec.SQLExecutor).ExecuteInternal(ctx, query) - if err != nil { - return nil, errors.Trace(err) - } - - if rs == nil { - return nil, nil - } - var rows []chunk.Row - defer terror.Call(rs.Close) - if rows, err = sqlexec.DrainRecordSet(ctx, rs, 8); err != nil { - return nil, errors.Trace(err) - } - return rows, nil -} - -func (s *session) session() sessionctx.Context { - return s.Context -} - -func (s *session) runInTxn(f func(*session) error) (err error) { - err = s.begin() - if err != nil { - return err - } - failpoint.Inject("NotifyBeginTxnCh", func(val failpoint.Value) { - //nolint:forcetypeassert - v := val.(int) - if v == 1 { - mockDDLErrOnce = 1 - TestNotifyBeginTxnCh <- struct{}{} - } else if v == 2 && mockDDLErrOnce == 1 { - <-TestNotifyBeginTxnCh - mockDDLErrOnce = 0 - } - }) - - err = f(s) - if err != nil { - s.rollback() - return - } - return errors.Trace(s.commit()) -} - // GetAllHistoryDDLJobs get all the done DDL jobs. func GetAllHistoryDDLJobs(m *meta.Meta) ([]*model.Job, error) { iterator, err := GetLastHistoryDDLJobsIterator(m) @@ -1743,7 +1650,7 @@ func GetHistoryJobByID(sess sessionctx.Context, id int64) (*model.Job, error) { } // AddHistoryDDLJob record the history job. -func AddHistoryDDLJob(sess *session, t *meta.Meta, job *model.Job, updateRawArgs bool) error { +func AddHistoryDDLJob(sess *sess.Session, t *meta.Meta, job *model.Job, updateRawArgs bool) error { err := addHistoryDDLJob2Table(sess, job, updateRawArgs) if err != nil { logutil.BgLogger().Info("[ddl] failed to add DDL job to history table", zap.Error(err)) @@ -1753,12 +1660,12 @@ func AddHistoryDDLJob(sess *session, t *meta.Meta, job *model.Job, updateRawArgs } // addHistoryDDLJob2Table adds DDL job to history table. 
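The lowercase helpers deleted above (begin/commit/rollback/execute/runInTxn) reappear later in this patch as exported methods on ddl/internal/session.Session. A minimal sketch of the transactional calling pattern, assuming placement in the ddl package; the helper name and SQL are illustrative, not part of the patch:

// Illustrative sketch, not part of the patch.
package ddl

import (
	"context"

	sess "github.com/pingcap/tidb/ddl/internal/session"
	"github.com/pingcap/tidb/sessionctx"
)

// countRecentHistoryJobs shows RunInTxn plus Execute on the exported wrapper.
func countRecentHistoryJobs(sctx sessionctx.Context) (int, error) {
	se := sess.NewSession(sctx)
	var cnt int
	err := se.RunInTxn(func(se *sess.Session) error {
		rows, err := se.Execute(context.Background(),
			"select job_id from mysql.tidb_ddl_history order by job_id desc limit 10", "history_jobs_sketch")
		if err != nil {
			return err
		}
		cnt = len(rows)
		return nil
	})
	return cnt, err
}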
-func addHistoryDDLJob2Table(sess *session, job *model.Job, updateRawArgs bool) error { +func addHistoryDDLJob2Table(sess *sess.Session, job *model.Job, updateRawArgs bool) error { b, err := job.Encode(updateRawArgs) if err != nil { return err } - _, err = sess.execute(context.Background(), + _, err = sess.Execute(context.Background(), fmt.Sprintf("insert ignore into mysql.tidb_ddl_history(job_id, job_meta, db_name, table_name, schema_ids, table_ids, create_time) values (%d, %s, %s, %s, %s, %s, %v)", job.ID, wrapKey2String(b), strconv.Quote(job.SchemaName), strconv.Quote(job.TableName), strconv.Quote(strconv.FormatInt(job.SchemaID, 10)), diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go index 4fff76d15c737..41ca99f6c8e57 100644 --- a/ddl/ddl_test.go +++ b/ddl/ddl_test.go @@ -79,9 +79,6 @@ func (d *ddl) RemoveReorgCtx(id int64) { // JobNeedGCForTest is only used for test. var JobNeedGCForTest = jobNeedGC -// NewSession is only used for test. -var NewSession = newSession - // GetJobWithoutPartition is only used for test. const GetJobWithoutPartition = getJobWithoutPartition diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index 0711287eea055..5536582a8576f 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -98,8 +98,8 @@ type worker struct { ctx context.Context wg sync.WaitGroup - sessPool *sess.Pool // sessPool is used to new sessions to execute SQL in ddl package. - sess *session // sess is used and only used in running DDL job. + sessPool *sess.Pool // sessPool is used to new sessions to execute SQL in ddl package. + sess *sess.Session // sess is used and only used in running DDL job. delRangeManager delRangeManager logCtx context.Context lockSeqNum bool @@ -163,7 +163,7 @@ func (w *worker) String() string { func (w *worker) Close() { startTime := time.Now() if w.sess != nil { - w.sessPool.Put(w.sess.session()) + w.sessPool.Put(w.sess.Session()) } w.wg.Wait() logutil.Logger(w.logCtx).Info("[ddl] DDL worker closed", zap.Duration("take time", time.Since(startTime))) @@ -347,7 +347,7 @@ func (d *ddl) addBatchDDLJobs2Table(tasks []*limitJobTask) error { return errors.Trace(err) } defer d.sessPool.Put(se) - job, err := getJobsBySQL(newSession(se), JobTable, fmt.Sprintf("type = %d", model.ActionFlashbackCluster)) + job, err := getJobsBySQL(sess.NewSession(se), JobTable, fmt.Sprintf("type = %d", model.ActionFlashbackCluster)) if err != nil { return errors.Trace(err) } @@ -379,7 +379,7 @@ func (d *ddl) addBatchDDLJobs2Table(tasks []*limitJobTask) error { } se.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) - err = insertDDLJobs2Table(newSession(se), true, jobTasks...) + err = insertDDLJobs2Table(sess.NewSession(se), true, jobTasks...) 
} return errors.Trace(err) } @@ -438,7 +438,7 @@ func (w *worker) registerMDLInfo(job *model.Job, ver int64) error { if ver == 0 { return nil } - rows, err := w.sess.execute(context.Background(), fmt.Sprintf("select table_ids from mysql.tidb_ddl_job where job_id = %d", job.ID), "register-mdl-info") + rows, err := w.sess.Execute(context.Background(), fmt.Sprintf("select table_ids from mysql.tidb_ddl_job where job_id = %d", job.ID), "register-mdl-info") if err != nil { return err } @@ -447,7 +447,7 @@ func (w *worker) registerMDLInfo(job *model.Job, ver int64) error { } ids := rows[0].GetString(0) sql := fmt.Sprintf("replace into mysql.tidb_mdl_info (job_id, version, table_ids) values (%d, %d, '%s')", job.ID, ver, ids) - _, err = w.sess.execute(context.Background(), sql, "register-mdl-info") + _, err = w.sess.Execute(context.Background(), sql, "register-mdl-info") return err } @@ -459,9 +459,9 @@ func cleanMDLInfo(pool *sess.Pool, jobID int64, ec *clientv3.Client) { sql := fmt.Sprintf("delete from mysql.tidb_mdl_info where job_id = %d", jobID) sctx, _ := pool.Get() defer pool.Put(sctx) - se := newSession(sctx) + se := sess.NewSession(sctx) se.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) - _, err := se.execute(context.Background(), sql, "delete-mdl-info") + _, err := se.Execute(context.Background(), sql, "delete-mdl-info") if err != nil { logutil.BgLogger().Warn("unexpected error when clean mdl info", zap.Error(err)) } @@ -479,8 +479,8 @@ func checkMDLInfo(jobID int64, pool *sess.Pool) (bool, int64, error) { sql := fmt.Sprintf("select version from mysql.tidb_mdl_info where job_id = %d", jobID) sctx, _ := pool.Get() defer pool.Put(sctx) - se := newSession(sctx) - rows, err := se.execute(context.Background(), sql, "check-mdl-info") + se := sess.NewSession(sctx) + rows, err := se.Execute(context.Background(), sql, "check-mdl-info") if err != nil { return false, 0, err } @@ -684,11 +684,11 @@ func (w *JobContext) setDDLLabelForDiagnosis(jobType model.ActionType) { func (w *worker) HandleJobDone(d *ddlCtx, job *model.Job, t *meta.Meta) error { err := w.finishDDLJob(t, job) if err != nil { - w.sess.rollback() + w.sess.Rollback() return err } - err = w.sess.commit() + err = w.sess.Commit() if err != nil { return err } @@ -707,7 +707,7 @@ func (w *worker) HandleDDLJobTable(d *ddlCtx, job *model.Job) (int64, error) { w.unlockSeqNum(err) }() - err = w.sess.begin() + err = w.sess.Begin() if err != nil { return 0, err } @@ -716,9 +716,9 @@ func (w *worker) HandleDDLJobTable(d *ddlCtx, job *model.Job) (int64, error) { time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond) // #nosec G404 } }) - txn, err := w.sess.txn() + txn, err := w.sess.Txn() if err != nil { - w.sess.rollback() + w.sess.Rollback() return 0, err } // Only general DDLs are allowed to be executed when TiKV is disk full. @@ -757,7 +757,7 @@ func (w *worker) HandleDDLJobTable(d *ddlCtx, job *model.Job) (int64, error) { if job.IsCancelled() { defer d.unlockSchemaVersion(job.ID) - w.sess.reset() + w.sess.Reset() err = w.HandleJobDone(d, job, t) return 0, err } @@ -769,7 +769,7 @@ func (w *worker) HandleDDLJobTable(d *ddlCtx, job *model.Job) (int64, error) { // then shouldn't discard the KV modification. // And the job state is rollback done, it means the job was already finished, also shouldn't discard too. // Otherwise, we should discard the KV modification when running job. - w.sess.reset() + w.sess.Reset() // If error happens after updateSchemaVersion(), then the schemaVer is updated. 
// Result in the retry duration is up to 2 * lease. schemaVer = 0 @@ -777,20 +777,20 @@ func (w *worker) HandleDDLJobTable(d *ddlCtx, job *model.Job) (int64, error) { err = w.registerMDLInfo(job, schemaVer) if err != nil { - w.sess.rollback() + w.sess.Rollback() d.unlockSchemaVersion(job.ID) return 0, err } err = w.updateDDLJob(job, runJobErr != nil) if err = w.handleUpdateJobError(t, job, err); err != nil { - w.sess.rollback() + w.sess.Rollback() d.unlockSchemaVersion(job.ID) return 0, err } writeBinlog(d.binlogCli, txn, job) // reset the SQL digest to make topsql work right. w.sess.GetSessionVars().StmtCtx.ResetSQLDigest(job.Query) - err = w.sess.commit() + err = w.sess.Commit() d.unlockSchemaVersion(job.ID) if err != nil { return 0, err diff --git a/ddl/dist_backfilling.go b/ddl/dist_backfilling.go index f4993ef63c80c..4340b84768d46 100644 --- a/ddl/dist_backfilling.go +++ b/ddl/dist_backfilling.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/ddl/ingest" + sess "github.com/pingcap/tidb/ddl/internal/session" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -94,8 +95,8 @@ func (bj *BackfillJob) AbbrStr() string { } // GetOracleTimeWithStartTS returns the current time with txn's startTS. -func GetOracleTimeWithStartTS(se *session) (time.Time, error) { - txn, err := se.Txn(true) +func GetOracleTimeWithStartTS(se *sess.Session) (time.Time, error) { + txn, err := se.Txn() if err != nil { return time.Time{}, err } @@ -186,7 +187,7 @@ func (bwCtx *backfillWorkerContext) GetContext() *backfillWorker { return bw } -func runBackfillJobs(d *ddl, sess *session, ingestBackendCtx *ingest.BackendContext, bJob *BackfillJob, jobCtx *JobContext) (table.Table, error) { +func runBackfillJobs(d *ddl, se *sess.Session, ingestBackendCtx *ingest.BackendContext, bJob *BackfillJob, jobCtx *JobContext) (table.Table, error) { dbInfo, tbl, err := d.getTableByTxn(d.store, bJob.Meta.SchemaID, bJob.Meta.TableID) if err != nil { logutil.BgLogger().Warn("[ddl] runBackfillJobs gets table failed", zap.String("bfJob", bJob.AbbrStr()), zap.Error(err)) @@ -213,7 +214,7 @@ func runBackfillJobs(d *ddl, sess *session, ingestBackendCtx *ingest.BackendCont } proFunc := func() ([]*reorgBackfillTask, error) { // TODO: After BackfillJob replaces reorgBackfillTask, use backfiller's GetTasks instead of it. - return GetTasks(d.ddlCtx, sess, tbl, bJob.JobID, &runningPID, workerCnt+5) + return GetTasks(d.ddlCtx, se, tbl, bJob.JobID, &runningPID, workerCnt+5) } // add new task resultCh, control := d.backfillWorkerPool.AddProduceBySlice(proFunc, 0, workerCtx, spmc.WithConcurrency(workerCnt)) @@ -314,11 +315,11 @@ func (dc *ddlCtx) backfillJob2Task(t table.Table, bfJob *BackfillJob) (*reorgBac } // GetTasks gets the backfill tasks associated with the non-runningJobID. -func GetTasks(d *ddlCtx, sess *session, tbl table.Table, runningJobID int64, runningPID *int64, concurrency int) ([]*reorgBackfillTask, error) { +func GetTasks(d *ddlCtx, se *sess.Session, tbl table.Table, runningJobID int64, runningPID *int64, concurrency int) ([]*reorgBackfillTask, error) { // TODO: At present, only add index is processed. In the future, different elements need to be distinguished. 
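A compressed sketch of the Begin/Txn/Rollback/Commit sequence that HandleDDLJobTable drives through the renamed methods above; processJob stands in for the real job handling and the function is illustrative, not part of the patch:

// Illustrative sketch, not part of the patch.
package ddl

import (
	sess "github.com/pingcap/tidb/ddl/internal/session"
)

func handleOneJobSketch(se *sess.Session, processJob func(*sess.Session) error) error {
	if err := se.Begin(); err != nil {
		return err
	}
	// Txn activates and returns the kv.Transaction that the real worker uses
	// for binlog and disk-full options.
	txn, err := se.Txn()
	if err != nil {
		se.Rollback()
		return err
	}
	_ = txn.StartTS()
	if err := processJob(se); err != nil {
		se.Rollback()
		return err
	}
	return se.Commit()
}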
var err error for i := 0; i < retrySQLTimes; i++ { - bJobs, err := GetAndMarkBackfillJobsForOneEle(sess, concurrency, runningJobID, d.uuid, *runningPID, InstanceLease) + bJobs, err := GetAndMarkBackfillJobsForOneEle(se, concurrency, runningJobID, d.uuid, *runningPID, InstanceLease) if err != nil { // TODO: add test: if all tidbs can't get the unmark backfill job(a tidb mark a backfill job, other tidbs returned, then the tidb can't handle this job.) if dbterror.ErrDDLJobNotFound.Equal(err) { diff --git a/ddl/dist_owner.go b/ddl/dist_owner.go index 549c43cd9dc69..ba6ec15fbb827 100644 --- a/ddl/dist_owner.go +++ b/ddl/dist_owner.go @@ -86,9 +86,9 @@ type splitJobContext struct { resultCh chan error } -func getRunningPhysicalTableMetas(sess *session, sJobCtx *splitJobContext, reorgInfo *reorgInfo) ([]*BackfillJobRangeMeta, error) { +func getRunningPhysicalTableMetas(se *sess.Session, sJobCtx *splitJobContext, reorgInfo *reorgInfo) ([]*BackfillJobRangeMeta, error) { ddlJobID, eleID, eleKey, currPID := reorgInfo.Job.ID, reorgInfo.currElement.ID, reorgInfo.currElement.TypeKey, reorgInfo.PhysicalTableID - pTblMetas, err := GetPhysicalTableMetas(sess, ddlJobID, eleID, eleKey) + pTblMetas, err := GetPhysicalTableMetas(se, ddlJobID, eleID, eleKey) if err != nil { return nil, errors.Trace(err) } @@ -200,7 +200,7 @@ func (dc *ddlCtx) controlWriteTableRecord(sessPool *sess.Pool, t table.Table, bf return errors.Trace(err) } defer sessPool.Put(sCtx) - se := newSession(sCtx) + se := sess.NewSession(sCtx) if err := dc.isReorgRunnable(ddlJobID, true); err != nil { return errors.Trace(err) @@ -264,7 +264,7 @@ func (dc *ddlCtx) controlWriteTableRecord(sessPool *sess.Pool, t table.Table, bf defer func() { tidbutil.Recover(metrics.LabelDistReorg, "splitTableToBackfillJobs", nil, false) }() - se := newSession(ctx) + se := sess.NewSession(ctx) dc.splitPhysicalTableToBackfillJobs(se, reorgInfo, sJobCtx) }) }(sCtx) @@ -276,7 +276,7 @@ func (dc *ddlCtx) controlWriteTableRecord(sessPool *sess.Pool, t table.Table, bf return checkReorgJobFinished(dc.ctx, se, &dc.reorgCtx, ddlJobID, currEle) } -func addBatchBackfillJobs(sess *session, reorgInfo *reorgInfo, sJobCtx *splitJobContext, phyTblID int64, notDistTask bool, +func addBatchBackfillJobs(se *sess.Session, reorgInfo *reorgInfo, sJobCtx *splitJobContext, phyTblID int64, notDistTask bool, batchTasks []*reorgBackfillTask, bJobs []*BackfillJob) error { bJobs = bJobs[:0] instanceID := "" @@ -315,13 +315,13 @@ func addBatchBackfillJobs(sess *session, reorgInfo *reorgInfo, sJobCtx *splitJob bj.Meta.CurrKey = task.startKey bJobs = append(bJobs, bj) } - if err := AddBackfillJobs(sess, bJobs); err != nil { + if err := AddBackfillJobs(se, bJobs); err != nil { return errors.Trace(err) } return nil } -func (dc *ddlCtx) splitTableToBackfillJobs(sess *session, reorgInfo *reorgInfo, sJobCtx *splitJobContext, pTblMeta *BackfillJobRangeMeta) error { +func (dc *ddlCtx) splitTableToBackfillJobs(se *sess.Session, reorgInfo *reorgInfo, sJobCtx *splitJobContext, pTblMeta *BackfillJobRangeMeta) error { isFirstOps := !sJobCtx.isMultiPhyTbl batchSize := sJobCtx.batchSize startKey, endKey := kv.Key(pTblMeta.StartKey), kv.Key(pTblMeta.EndKey) @@ -336,7 +336,7 @@ func (dc *ddlCtx) splitTableToBackfillJobs(sess *session, reorgInfo *reorgInfo, break } notNeedDistProcess := isFirstOps && (len(kvRanges) < minDistTaskCnt) - if err = addBatchBackfillJobs(sess, reorgInfo, sJobCtx, pTblMeta.PhyTblID, notNeedDistProcess, batchTasks, bJobs); err != nil { + if err = addBatchBackfillJobs(se, reorgInfo, 
sJobCtx, pTblMeta.PhyTblID, notNeedDistProcess, batchTasks, bJobs); err != nil { return errors.Trace(err) } isFirstOps = false @@ -364,7 +364,7 @@ func (dc *ddlCtx) splitTableToBackfillJobs(sess *session, reorgInfo *reorgInfo, } for { - bJobCnt, err := CheckBackfillJobCountWithPhyID(sess, reorgInfo.Job.ID, reorgInfo.currElement.ID, reorgInfo.currElement.TypeKey, pTblMeta.PhyTblID) + bJobCnt, err := CheckBackfillJobCountWithPhyID(se, reorgInfo.Job.ID, reorgInfo.currElement.ID, reorgInfo.currElement.TypeKey, pTblMeta.PhyTblID) if err != nil { return errors.Trace(err) } @@ -377,11 +377,11 @@ func (dc *ddlCtx) splitTableToBackfillJobs(sess *session, reorgInfo *reorgInfo, return nil } -func (dc *ddlCtx) splitPhysicalTableToBackfillJobs(sess *session, reorgInfo *reorgInfo, sJobCtx *splitJobContext) { - defaultSQLMode := sess.GetSessionVars().SQLMode - defer func() { sess.GetSessionVars().SQLMode = defaultSQLMode }() +func (dc *ddlCtx) splitPhysicalTableToBackfillJobs(se *sess.Session, reorgInfo *reorgInfo, sJobCtx *splitJobContext) { + defaultSQLMode := se.GetSessionVars().SQLMode + defer func() { se.GetSessionVars().SQLMode = defaultSQLMode }() // Make timestamp type can be inserted ZeroTimestamp. - sess.GetSessionVars().SQLMode = mysql.ModeNone + se.GetSessionVars().SQLMode = mysql.ModeNone var err error var pTblMetaCnt int @@ -407,7 +407,7 @@ func (dc *ddlCtx) splitPhysicalTableToBackfillJobs(sess *session, reorgInfo *reo return } - err = dc.splitTableToBackfillJobs(sess, reorgInfo, sJobCtx, pTblMeta) + err = dc.splitTableToBackfillJobs(se, reorgInfo, sJobCtx, pTblMeta) if err != nil { return } @@ -416,7 +416,7 @@ func (dc *ddlCtx) splitPhysicalTableToBackfillJobs(sess *session, reorgInfo *reo } } -func checkReorgJobFinished(ctx context.Context, sess *session, reorgCtxs *reorgContexts, ddlJobID int64, currEle *meta.Element) error { +func checkReorgJobFinished(ctx context.Context, se *sess.Session, reorgCtxs *reorgContexts, ddlJobID int64, currEle *meta.Element) error { var times int64 var bfJob *BackfillJob var backfillJobFinished bool @@ -428,7 +428,7 @@ func checkReorgJobFinished(ctx context.Context, sess *session, reorgCtxs *reorgC getReorgCtx(reorgCtxs, ddlJobID).notifyReorgCancel() }) if getReorgCtx(reorgCtxs, ddlJobID).isReorgCanceled() { - err := cleanupBackfillJobs(sess, bjPrefixKey) + err := cleanupBackfillJobs(se, bjPrefixKey) if err != nil { return err } @@ -445,13 +445,13 @@ func checkReorgJobFinished(ctx context.Context, sess *session, reorgCtxs *reorgC zap.Int64("job ID", ddlJobID), zap.Bool("isFinished", backfillJobFinished), zap.Reflect("bfJob", bfJob)) } if !backfillJobFinished { - err := checkAndHandleInterruptedBackfillJobs(sess, ddlJobID, currEle.ID, currEle.TypeKey) + err := checkAndHandleInterruptedBackfillJobs(se, ddlJobID, currEle.ID, currEle.TypeKey) if err != nil { logutil.BgLogger().Warn("[ddl] finish interrupted backfill jobs", zap.Int64("job ID", ddlJobID), zap.Stringer("ele", currEle), zap.Error(err)) return errors.Trace(err) } - bfJobs, err := getBackfillJobWithRetry(sess, BackgroundSubtaskTable, bjPrefixKey) + bfJobs, err := getBackfillJobWithRetry(se, BackgroundSubtaskTable, bjPrefixKey) if err != nil { logutil.BgLogger().Info("[ddl] getBackfillJobWithRetry failed", zap.Int64("job ID", ddlJobID), zap.Stringer("ele", currEle), zap.Error(err)) return errors.Trace(err) @@ -463,18 +463,18 @@ func checkReorgJobFinished(ctx context.Context, sess *session, reorgCtxs *reorgC } if backfillJobFinished { // TODO: Consider whether these backfill jobs are always out of 
sync. - isSynced, err := checkJobIsFinished(sess, ddlJobID) + isSynced, err := checkJobIsFinished(se, ddlJobID) if err != nil { logutil.BgLogger().Warn("[ddl] checkJobIsFinished failed", zap.Int64("job ID", ddlJobID), zap.Stringer("ele", currEle), zap.Error(err)) return errors.Trace(err) } if isSynced { logutil.BgLogger().Info("[ddl] finish all backfill jobs and put them to history", zap.Int64("job ID", ddlJobID), zap.Stringer("ele", currEle)) - return GetBackfillErr(sess, bjPrefixKey) + return GetBackfillErr(se, bjPrefixKey) } } case <-ctx.Done(): - err := cleanupBackfillJobs(sess, bjPrefixKey) + err := cleanupBackfillJobs(se, bjPrefixKey) if err != nil { return err } @@ -483,11 +483,11 @@ func checkReorgJobFinished(ctx context.Context, sess *session, reorgCtxs *reorgC } } -func checkJobIsFinished(sess *session, ddlJobID int64) (bool, error) { +func checkJobIsFinished(se *sess.Session, ddlJobID int64) (bool, error) { var err error var unsyncedInstanceIDs []string for i := 0; i < retrySQLTimes; i++ { - unsyncedInstanceIDs, err = getUnsyncedInstanceIDs(sess, ddlJobID, "check_backfill_history_job_sync") + unsyncedInstanceIDs, err = getUnsyncedInstanceIDs(se, ddlJobID, "check_backfill_history_job_sync") if err == nil && len(unsyncedInstanceIDs) == 0 { return true, nil } @@ -501,11 +501,11 @@ func checkJobIsFinished(sess *session, ddlJobID int64) (bool, error) { } // GetBackfillErr gets the error in backfill job. -func GetBackfillErr(sess *session, bjPrefixKey string) error { +func GetBackfillErr(se *sess.Session, bjPrefixKey string) error { var err error var metas []*model.BackfillMeta for i := 0; i < retrySQLTimes; i++ { - metas, err = GetBackfillMetas(sess, BackgroundSubtaskHistoryTable, fmt.Sprintf("task_key like '%s'", bjPrefixKey), "get_backfill_job_metas") + metas, err = GetBackfillMetas(se, BackgroundSubtaskHistoryTable, fmt.Sprintf("task_key like '%s'", bjPrefixKey), "get_backfill_job_metas") if err == nil { for _, m := range metas { if m.Error != nil { @@ -522,10 +522,10 @@ func GetBackfillErr(sess *session, bjPrefixKey string) error { return errors.Trace(err) } -func checkAndHandleInterruptedBackfillJobs(sess *session, ddlJobID, currEleID int64, currEleKey []byte) (err error) { +func checkAndHandleInterruptedBackfillJobs(se *sess.Session, ddlJobID, currEleID int64, currEleKey []byte) (err error) { var bJobs []*BackfillJob for i := 0; i < retrySQLTimes; i++ { - bJobs, err = GetInterruptedBackfillJobForOneEle(sess, ddlJobID, currEleID, currEleKey) + bJobs, err = GetInterruptedBackfillJobForOneEle(se, ddlJobID, currEleID, currEleKey) if err == nil { break } @@ -539,13 +539,13 @@ func checkAndHandleInterruptedBackfillJobs(sess *session, ddlJobID, currEleID in return nil } - return cleanupBackfillJobs(sess, bJobs[0].PrefixKeyString()) + return cleanupBackfillJobs(se, bJobs[0].PrefixKeyString()) } -func cleanupBackfillJobs(sess *session, prefixKey string) error { +func cleanupBackfillJobs(se *sess.Session, prefixKey string) error { var err error for i := 0; i < retrySQLTimes; i++ { - err = MoveBackfillJobsToHistoryTable(sess, prefixKey) + err = MoveBackfillJobsToHistoryTable(se, prefixKey) if err == nil { return nil } @@ -556,13 +556,13 @@ func cleanupBackfillJobs(sess *session, prefixKey string) error { } // CheckBackfillJobCountWithPhyID checks if the backfill job is interrupted, if not gets the backfill job count. 
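The helpers above (GetBackfillErr, checkAndHandleInterruptedBackfillJobs, cleanupBackfillJobs) share the same retry loop around a single SQL attempt. A generic sketch of that shape, assuming it sits in package ddl where retrySQLTimes and RetrySQLInterval are defined; fetchOnce is a hypothetical stand-in and the sketch is not part of the patch:

// Illustrative sketch, not part of the patch.
package ddl

import (
	"time"

	"github.com/pingcap/errors"
	"github.com/pingcap/tidb/util/logutil"
	"go.uber.org/zap"
)

func withSQLRetry(label string, fetchOnce func() error) (err error) {
	for i := 0; i < retrySQLTimes; i++ {
		if err = fetchOnce(); err == nil {
			return nil
		}
		logutil.BgLogger().Warn("[ddl] retryable call failed", zap.String("label", label), zap.Error(err))
		time.Sleep(RetrySQLInterval)
	}
	return errors.Trace(err)
}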
-func CheckBackfillJobCountWithPhyID(sess *session, ddlJobID, currEleID int64, currEleKey []byte, pTblID int64) (backfillJobCnt int, err error) { - err = checkAndHandleInterruptedBackfillJobs(sess, ddlJobID, currEleID, currEleKey) +func CheckBackfillJobCountWithPhyID(se *sess.Session, ddlJobID, currEleID int64, currEleKey []byte, pTblID int64) (backfillJobCnt int, err error) { + err = checkAndHandleInterruptedBackfillJobs(se, ddlJobID, currEleID, currEleKey) if err != nil { return 0, errors.Trace(err) } - backfillJobCnt, err = GetBackfillJobCount(sess, BackgroundSubtaskTable, + backfillJobCnt, err = GetBackfillJobCount(se, BackgroundSubtaskTable, fmt.Sprintf("task_key like '%s' and ddl_physical_tid = %d", backfillJobPrefixKeyString(ddlJobID, currEleKey, currEleID), pTblID), "check_backfill_job_count") if err != nil { return 0, errors.Trace(err) @@ -571,11 +571,11 @@ func CheckBackfillJobCountWithPhyID(sess *session, ddlJobID, currEleID int64, cu return backfillJobCnt, nil } -func getBackfillJobWithRetry(sess *session, tableName, bjPrefixKey string) ([]*BackfillJob, error) { +func getBackfillJobWithRetry(se *sess.Session, tableName, bjPrefixKey string) ([]*BackfillJob, error) { var err error var bJobs []*BackfillJob for i := 0; i < retrySQLTimes; i++ { - bJobs, err = GetBackfillJobs(sess, tableName, fmt.Sprintf("task_key like '%s' limit 1", bjPrefixKey), "check_backfill_job_state") + bJobs, err = GetBackfillJobs(se, tableName, fmt.Sprintf("task_key like '%s' limit 1", bjPrefixKey), "check_backfill_job_state") if err != nil { logutil.BgLogger().Warn("[ddl] GetBackfillJobs failed", zap.Error(err)) time.Sleep(RetrySQLInterval) @@ -587,13 +587,13 @@ func getBackfillJobWithRetry(sess *session, tableName, bjPrefixKey string) ([]*B } // GetPhysicalTableMetas gets the max backfill metas per physical table in BackgroundSubtaskTable and BackgroundSubtaskHistoryTable. -func GetPhysicalTableMetas(sess *session, ddlJobID, currEleID int64, currEleKey []byte) (map[int64]*BackfillJobRangeMeta, error) { +func GetPhysicalTableMetas(se *sess.Session, ddlJobID, currEleID int64, currEleKey []byte) (map[int64]*BackfillJobRangeMeta, error) { condition := fmt.Sprintf("task_key like '%s'", backfillJobPrefixKeyString(ddlJobID, currEleKey, currEleID)) - pTblMs, err := GetBackfillIDAndMetas(sess, BackgroundSubtaskTable, condition, "get_ptbl_metas") + pTblMs, err := GetBackfillIDAndMetas(se, BackgroundSubtaskTable, condition, "get_ptbl_metas") if err != nil { return nil, errors.Trace(err) } - hPTblMs, err := GetBackfillIDAndMetas(sess, BackgroundSubtaskHistoryTable, condition, "get_ptbl_metas") + hPTblMs, err := GetBackfillIDAndMetas(se, BackgroundSubtaskHistoryTable, condition, "get_ptbl_metas") if err != nil { return nil, errors.Trace(err) } @@ -611,13 +611,8 @@ func GetPhysicalTableMetas(sess *session, ddlJobID, currEleID int64, currEleKey } // MoveBackfillJobsToHistoryTable moves backfill table jobs to the backfill history table. -func MoveBackfillJobsToHistoryTable(sctx sessionctx.Context, prefixKey string) error { - s, ok := sctx.(*session) - if !ok { - return errors.Errorf("sess ctx:%#v convert session failed", sctx) - } - - return s.runInTxn(func(se *session) error { +func MoveBackfillJobsToHistoryTable(se *sess.Session, prefixKey string) error { + return se.RunInTxn(func(se *sess.Session) error { // TODO: Consider batch by batch update backfill jobs and insert backfill history jobs. 
bJobs, err := GetBackfillJobs(se, BackgroundSubtaskTable, fmt.Sprintf("task_key like '%s'", prefixKey), "update_backfill_job") if err != nil { @@ -627,7 +622,7 @@ func MoveBackfillJobsToHistoryTable(sctx sessionctx.Context, prefixKey string) e return nil } - txn, err := se.txn() + txn, err := se.Txn() if err != nil { return errors.Trace(err) } diff --git a/ddl/index.go b/ddl/index.go index 21c5b430f03bf..b095bc7eed634 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -748,10 +748,10 @@ func canUseIngest() bool { // IngestJobsNotExisted checks the ddl about `add index` with ingest method not existed. func IngestJobsNotExisted(ctx sessionctx.Context) bool { - se := session{ctx} + se := sess.NewSession(ctx) template := "select job_meta from mysql.tidb_ddl_job where reorg and (type = %d or type = %d) and processing;" sql := fmt.Sprintf(template, model.ActionAddIndex, model.ActionAddPrimaryKey) - rows, err := se.execute(context.Background(), sql, "check-pitr") + rows, err := se.Execute(context.Background(), sql, "check-pitr") if err != nil { logutil.BgLogger().Warn("cannot check ingest job", zap.Error(err)) return false @@ -975,7 +975,7 @@ func runReorgJobAndHandleErr(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job, return } defer w.sessPool.Put(sctx) - rh := newReorgHandler(newSession(sctx)) + rh := newReorgHandler(sess.NewSession(sctx)) dbInfo, err := t.GetDatabase(job.SchemaID) if err != nil { return false, ver, errors.Trace(err) @@ -1319,9 +1319,9 @@ func (w *baseIndexWorker) String() string { } func (w *baseIndexWorker) UpdateTask(bfJob *BackfillJob) error { - s := newSession(w.backfillCtx.sessCtx) + s := sess.NewSession(w.backfillCtx.sessCtx) - return s.runInTxn(func(se *session) error { + return s.RunInTxn(func(se *sess.Session) error { jobs, err := GetBackfillJobs(se, BackgroundSubtaskTable, fmt.Sprintf("task_key = '%s'", bfJob.keyString()), "update_backfill_task") if err != nil { return err @@ -1343,9 +1343,9 @@ func (w *baseIndexWorker) UpdateTask(bfJob *BackfillJob) error { } func (w *baseIndexWorker) FinishTask(bfJob *BackfillJob) error { - s := newSession(w.backfillCtx.sessCtx) - return s.runInTxn(func(se *session) error { - txn, err := se.txn() + s := sess.NewSession(w.backfillCtx.sessCtx) + return s.RunInTxn(func(se *sess.Session) error { + txn, err := se.Txn() if err != nil { return errors.Trace(err) } @@ -2122,7 +2122,7 @@ func (w *worker) updateReorgInfoForPartitions(t table.PartitionedTable, reorg *r return false, errors.Trace(err) } -func runBackfillJobsWithLightning(d *ddl, sess *session, bfJob *BackfillJob, jobCtx *JobContext) error { +func runBackfillJobsWithLightning(d *ddl, sess *sess.Session, bfJob *BackfillJob, jobCtx *JobContext) error { bc, err := ingest.LitBackCtxMgr.Register(d.ctx, bfJob.Meta.IsUnique, bfJob.JobID, bfJob.Meta.SQLMode) if err != nil { logutil.BgLogger().Warn("[ddl] lightning register error", zap.Error(err)) diff --git a/ddl/internal/session/BUILD.bazel b/ddl/internal/session/BUILD.bazel index 27739611c4ef5..e5f238f703c19 100644 --- a/ddl/internal/session/BUILD.bazel +++ b/ddl/internal/session/BUILD.bazel @@ -2,16 +2,25 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "session", - srcs = ["session_pool.go"], + srcs = [ + "session.go", + "session_pool.go", + ], importpath = "github.com/pingcap/tidb/ddl/internal/session", visibility = ["//ddl:__subpackages__"], deps = [ "//kv", + "//metrics", "//parser/mysql", + "//parser/terror", "//sessionctx", + "//sessiontxn", + "//util/chunk", "//util/logutil", "//util/mock", + 
"//util/sqlexec", "@com_github_ngaut_pools//:pools", "@com_github_pingcap_errors//:errors", + "@com_github_pingcap_failpoint//:failpoint", ], ) diff --git a/ddl/internal/session/session.go b/ddl/internal/session/session.go new file mode 100644 index 0000000000000..224639e236021 --- /dev/null +++ b/ddl/internal/session/session.go @@ -0,0 +1,137 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package session + +import ( + "context" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/metrics" + "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessiontxn" + "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/sqlexec" +) + +// Session wraps sessionctx.Context for transaction usage. +type Session struct { + sessionctx.Context +} + +// NewSession creates a new Session. +func NewSession(s sessionctx.Context) *Session { + return &Session{s} +} + +// Begin starts a transaction. +func (s *Session) Begin() error { + err := sessiontxn.NewTxn(context.Background(), s.Context) + if err != nil { + return err + } + s.GetSessionVars().SetInTxn(true) + return nil +} + +// Commit commits the transaction. +func (s *Session) Commit() error { + s.StmtCommit(context.Background()) + return s.CommitTxn(context.Background()) +} + +// Txn activate and returns the current transaction. +func (s *Session) Txn() (kv.Transaction, error) { + return s.Context.Txn(true) +} + +// Rollback aborts the transaction. +func (s *Session) Rollback() { + s.StmtRollback(context.Background(), false) + s.RollbackTxn(context.Background()) +} + +// Reset resets the session. +func (s *Session) Reset() { + s.StmtRollback(context.Background(), false) +} + +// Execute executes a query. +func (s *Session) Execute(ctx context.Context, query string, label string) ([]chunk.Row, error) { + startTime := time.Now() + var err error + defer func() { + metrics.DDLJobTableDuration.WithLabelValues(label + "-" + metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds()) + }() + + if ctx.Value(kv.RequestSourceKey) == nil { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnDDL) + } + rs, err := s.Context.(sqlexec.SQLExecutor).ExecuteInternal(ctx, query) + if err != nil { + return nil, errors.Trace(err) + } + + if rs == nil { + return nil, nil + } + var rows []chunk.Row + defer terror.Call(rs.Close) + if rows, err = sqlexec.DrainRecordSet(ctx, rs, 8); err != nil { + return nil, errors.Trace(err) + } + return rows, nil +} + +// Session returns the sessionctx.Context. +func (s *Session) Session() sessionctx.Context { + return s.Context +} + +// RunInTxn runs a function in a transaction. 
+func (s *Session) RunInTxn(f func(*Session) error) (err error) {
+	err = s.Begin()
+	if err != nil {
+		return err
+	}
+	failpoint.Inject("NotifyBeginTxnCh", func(val failpoint.Value) {
+		//nolint:forcetypeassert
+		v := val.(int)
+		if v == 1 {
+			MockDDLOnce = 1
+			TestNotifyBeginTxnCh <- struct{}{}
+		} else if v == 2 && MockDDLOnce == 1 {
+			<-TestNotifyBeginTxnCh
+			MockDDLOnce = 0
+		}
+	})
+
+	err = f(s)
+	if err != nil {
+		s.Rollback()
+		return
+	}
+	return errors.Trace(s.Commit())
+}
+
+var (
+	// MockDDLOnce is only used for test.
+	MockDDLOnce = int64(0)
+	// TestNotifyBeginTxnCh is used to notify that a txn has begun in RunInTxn.
+	TestNotifyBeginTxnCh = make(chan struct{})
+)
diff --git a/ddl/internal/session/session_pool.go b/ddl/internal/session/session_pool.go
index 9602d0acaeb60..2e97867034b5d 100644
--- a/ddl/internal/session/session_pool.go
+++ b/ddl/internal/session/session_pool.go
@@ -27,7 +27,7 @@ import (
 	"github.com/pingcap/tidb/util/mock"
 )
 
-// Pool is used to new session.
+// Pool is used to new Session.
 type Pool struct {
 	mu struct {
 		sync.Mutex
@@ -37,7 +37,7 @@ type Pool struct {
 	store kv.Storage
 }
 
-// NewSessionPool creates a new session pool.
+// NewSessionPool creates a new Session pool.
 func NewSessionPool(resPool *pools.ResourcePool, store kv.Storage) *Pool {
 	return &Pool{resPool: resPool, store: store}
 }
@@ -54,7 +54,7 @@ func (sg *Pool) Get() (sessionctx.Context, error) {
 	sg.mu.Lock()
 	if sg.mu.closed {
 		sg.mu.Unlock()
-		return nil, errors.Errorf("session pool is closed")
+		return nil, errors.Errorf("Session pool is closed")
 	}
 	sg.mu.Unlock()
 
@@ -66,7 +66,7 @@ func (sg *Pool) Get() (sessionctx.Context, error) {
 
 	ctx, ok := resource.(sessionctx.Context)
 	if !ok {
-		return nil, fmt.Errorf("session pool resource get %v", ctx)
+		return nil, fmt.Errorf("Session pool resource get %v", ctx)
 	}
 	ctx.GetSessionVars().SetStatusFlag(mysql.ServerStatusAutocommit, true)
 	ctx.GetSessionVars().InRestrictedSQL = true
@@ -92,7 +92,7 @@ func (sg *Pool) Close() {
 	if sg.mu.closed || sg.resPool == nil {
 		return
 	}
-	logutil.BgLogger().Info("[ddl] closing session pool")
+	logutil.BgLogger().Info("[ddl] closing Session pool")
 	sg.resPool.Close()
 	sg.mu.closed = true
 }
diff --git a/ddl/job_table.go b/ddl/job_table.go
index 3c8abefef731e..15d716200b44e 100644
--- a/ddl/job_table.go
+++ b/ddl/job_table.go
@@ -27,6 +27,7 @@ import (
 	"github.com/pingcap/failpoint"
 	"github.com/pingcap/kvproto/pkg/kvrpcpb"
 	"github.com/pingcap/tidb/ddl/ingest"
+	sess "github.com/pingcap/tidb/ddl/internal/session"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/meta"
 	"github.com/pingcap/tidb/metrics"
@@ -102,7 +103,7 @@ const (
 	reorg
 )
 
-func (d *ddl) getJob(sess *session, tp jobType, filter func(*model.Job) (bool, error)) (*model.Job, error) {
+func (d *ddl) getJob(se *sess.Session, tp jobType, filter func(*model.Job) (bool, error)) (*model.Job, error) {
 	not := "not"
 	label := "get_job_general"
 	if tp == reorg {
@@ -110,7 +111,7 @@ func (d *ddl) getJob(sess *session, tp jobType, filter func(*model.Job) (bool, e
 		label = "get_job_reorg"
 	}
 	sql := fmt.Sprintf(getJobSQL, not, d.excludeJobIDs())
-	rows, err := sess.execute(context.Background(), sql, label)
+	rows, err := se.Execute(context.Background(), sql, label)
 	if err != nil {
 		return nil, errors.Trace(err)
 	}
@@ -129,7 +130,7 @@ func (d *ddl) getJob(sess *session, tp jobType, filter func(*model.Job) (bool, e
 		return nil, errors.Trace(err)
 	}
 	if b {
-		if err := d.markJobProcessing(sess, &runJob); err != nil {
+		if err := d.markJobProcessing(se, &runJob); err != nil {
logutil.BgLogger().Warn("[ddl] handle ddl job failed: mark job is processing meet error", zap.Error(err), zap.String("job", runJob.String())) return nil, errors.Trace(err) } @@ -139,7 +140,7 @@ func (d *ddl) getJob(sess *session, tp jobType, filter func(*model.Job) (bool, e return nil, nil } -func (d *ddl) getGeneralJob(sess *session) (*model.Job, error) { +func (d *ddl) getGeneralJob(sess *sess.Session) (*model.Job, error) { return d.getJob(sess, general, func(job *model.Job) (bool, error) { if job.Type == model.ActionDropSchema { // Check if there is any reorg job on this schema. @@ -154,12 +155,12 @@ func (d *ddl) getGeneralJob(sess *session) (*model.Job, error) { }) } -func (d *ddl) checkJobIsRunnable(sess *session, sql string) (bool, error) { - rows, err := sess.execute(context.Background(), sql, "check_runnable") +func (d *ddl) checkJobIsRunnable(se *sess.Session, sql string) (bool, error) { + rows, err := se.Execute(context.Background(), sql, "check_runnable") return len(rows) == 0, err } -func (d *ddl) getReorgJob(sess *session) (*model.Job, error) { +func (d *ddl) getReorgJob(sess *sess.Session) (*model.Job, error) { return d.getJob(sess, reorg, func(job *model.Job) (bool, error) { // Check if there is any block ddl running, like drop schema and flashback cluster. sql := fmt.Sprintf("select job_id from mysql.tidb_ddl_job where "+ @@ -172,12 +173,12 @@ func (d *ddl) getReorgJob(sess *session) (*model.Job, error) { } func (d *ddl) startDispatchLoop() { - se, err := d.sessPool.Get() + sessCtx, err := d.sessPool.Get() if err != nil { logutil.BgLogger().Fatal("dispatch loop get session failed, it should not happen, please try restart TiDB", zap.Error(err)) } - defer d.sessPool.Put(se) - sess := newSession(se) + defer d.sessPool.Put(sessCtx) + se := sess.NewSession(sessCtx) var notifyDDLJobByEtcdCh clientv3.WatchChan if d.etcdCli != nil { notifyDDLJobByEtcdCh = d.etcdCli.Watch(d.ctx, addingDDLJobConcurrent) @@ -206,12 +207,12 @@ func (d *ddl) startDispatchLoop() { case <-d.ctx.Done(): return } - d.loadDDLJobAndRun(sess, d.generalDDLWorkerPool, d.getGeneralJob) - d.loadDDLJobAndRun(sess, d.reorgWorkerPool, d.getReorgJob) + d.loadDDLJobAndRun(se, d.generalDDLWorkerPool, d.getGeneralJob) + d.loadDDLJobAndRun(se, d.reorgWorkerPool, d.getReorgJob) } } -func (d *ddl) loadDDLJobAndRun(sess *session, pool *workerPool, getJob func(*session) (*model.Job, error)) { +func (d *ddl) loadDDLJobAndRun(se *sess.Session, pool *workerPool, getJob func(*sess.Session) (*model.Job, error)) { wk, err := pool.get() if err != nil || wk == nil { logutil.BgLogger().Debug(fmt.Sprintf("[ddl] no %v worker available now", pool.tp()), zap.Error(err)) @@ -222,7 +223,7 @@ func (d *ddl) loadDDLJobAndRun(sess *session, pool *workerPool, getJob func(*ses d.mu.hook.OnGetJobBefore(pool.tp().String()) d.mu.RUnlock() - job, err := getJob(sess) + job, err := getJob(se) if job == nil || err != nil { if err != nil { logutil.BgLogger().Warn("[ddl] get job met error", zap.Error(err)) @@ -317,9 +318,9 @@ func (d *ddl) delivery2worker(wk *worker, pool *workerPool, job *model.Job) { }) } -func (d *ddl) markJobProcessing(sess *session, job *model.Job) error { - sess.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) - _, err := sess.execute(context.Background(), fmt.Sprintf("update mysql.tidb_ddl_job set processing = 1 where job_id = %d", job.ID), "mark_job_processing") +func (d *ddl) markJobProcessing(se *sess.Session, job *model.Job) error { + se.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) + _, err := 
se.Execute(context.Background(), fmt.Sprintf("update mysql.tidb_ddl_job set processing = 1 where job_id = %d", job.ID), "mark_job_processing") return errors.Trace(err) } @@ -378,35 +379,35 @@ func (d *ddl) loadBackfillJobAndRun() { if !isDistReorg { return } - se, err := d.sessPool.Get() + sessCtx, err := d.sessPool.Get() if err != nil { logutil.BgLogger().Fatal("dispatch backfill jobs loop get session failed, it should not happen, please try restart TiDB", zap.Error(err)) } - sess := newSession(se) + se := sess.NewSession(sessCtx) runningJobIDs := d.backfillCtxJobIDs() if len(runningJobIDs) >= reorgWorkerCnt { - d.sessPool.Put(se) + d.sessPool.Put(sessCtx) return } // TODO: Add ele info to distinguish backfill jobs. // Get a Backfill job to get the reorg info like element info, schema ID and so on. - bfJob, err := GetBackfillJobForOneEle(sess, runningJobIDs, InstanceLease) + bfJob, err := GetBackfillJobForOneEle(se, runningJobIDs, InstanceLease) if err != nil || bfJob == nil { if err != nil { logutil.BgLogger().Warn("[ddl] get backfill jobs failed in this instance", zap.Error(err)) } else { logutil.BgLogger().Debug("[ddl] get no backfill job in this instance") } - d.sessPool.Put(se) + d.sessPool.Put(sessCtx) return } jobCtx, existent := d.setBackfillCtxJobContext(bfJob.JobID, bfJob.Meta.Query, bfJob.Meta.Type) if existent { logutil.BgLogger().Warn("[ddl] get the type of backfill job is running in this instance", zap.String("backfill job", bfJob.AbbrStr())) - d.sessPool.Put(se) + d.sessPool.Put(sessCtx) return } // TODO: Adjust how the non-owner uses ReorgCtx. @@ -416,7 +417,7 @@ func (d *ddl) loadBackfillJobAndRun() { tidbutil.Recover(metrics.LabelDistReorg, "runBackfillJobs", nil, false) d.removeBackfillCtxJobCtx(bfJob.JobID) d.removeReorgCtx(genBackfillJobReorgCtxID(bfJob.JobID)) - d.sessPool.Put(se) + d.sessPool.Put(sessCtx) }() if bfJob.Meta.ReorgTp == model.ReorgTypeLitMerge { @@ -426,14 +427,14 @@ func (d *ddl) loadBackfillJobAndRun() { return } logutil.BgLogger().Info("[ddl] run backfill jobs with ingest in this instance", zap.String("bfJob", bfJob.AbbrStr())) - err = runBackfillJobsWithLightning(d, sess, bfJob, jobCtx) + err = runBackfillJobsWithLightning(d, se, bfJob, jobCtx) } else { logutil.BgLogger().Info("[ddl] run backfill jobs with txn-merge in this instance", zap.String("bfJob", bfJob.AbbrStr())) - _, err = runBackfillJobs(d, sess, nil, bfJob, jobCtx) + _, err = runBackfillJobs(d, se, nil, bfJob, jobCtx) } if err == nil { - err = syncBackfillHistoryJobs(sess, d.uuid, bfJob) + err = syncBackfillHistoryJobs(se, d.uuid, bfJob) } logutil.BgLogger().Info("[ddl] run backfill jobs finished in this instance", zap.Stringer("reorg type", bfJob.Meta.ReorgTp), zap.Error(err)) }) @@ -444,7 +445,7 @@ const ( updateDDLJobSQL = "update mysql.tidb_ddl_job set job_meta = %s where job_id = %d" ) -func insertDDLJobs2Table(sess *session, updateRawArgs bool, jobs ...*model.Job) error { +func insertDDLJobs2Table(se *sess.Session, updateRawArgs bool, jobs ...*model.Job) error { failpoint.Inject("mockAddBatchDDLJobsErr", func(val failpoint.Value) { if val.(bool) { failpoint.Return(errors.Errorf("mockAddBatchDDLJobsErr")) @@ -465,9 +466,9 @@ func insertDDLJobs2Table(sess *session, updateRawArgs bool, jobs ...*model.Job) } sql.WriteString(fmt.Sprintf("(%d, %t, %s, %s, %s, %d, %t)", job.ID, job.MayNeedReorg(), strconv.Quote(job2SchemaIDs(job)), strconv.Quote(job2TableIDs(job)), wrapKey2String(b), job.Type, !job.NotStarted())) } - sess.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) + 
se.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) - _, err := sess.execute(ctx, sql.String(), "insert_job") + _, err := se.Execute(ctx, sql.String(), "insert_job") logutil.BgLogger().Debug("[ddl] add job to mysql.tidb_ddl_job table", zap.String("sql", sql.String())) return errors.Trace(err) } @@ -511,25 +512,25 @@ func job2UniqueIDs(job *model.Job, schema bool) string { func (w *worker) deleteDDLJob(job *model.Job) error { sql := fmt.Sprintf("delete from mysql.tidb_ddl_job where job_id = %d", job.ID) - _, err := w.sess.execute(context.Background(), sql, "delete_job") + _, err := w.sess.Execute(context.Background(), sql, "delete_job") return errors.Trace(err) } -func updateDDLJob2Table(sctx *session, job *model.Job, updateRawArgs bool) error { +func updateDDLJob2Table(se *sess.Session, job *model.Job, updateRawArgs bool) error { b, err := job.Encode(updateRawArgs) if err != nil { return err } sql := fmt.Sprintf(updateDDLJobSQL, wrapKey2String(b), job.ID) - _, err = sctx.execute(context.Background(), sql, "update_job") + _, err = se.Execute(context.Background(), sql, "update_job") return errors.Trace(err) } // getDDLReorgHandle gets DDL reorg handle. -func getDDLReorgHandle(sess *session, job *model.Job) (element *meta.Element, startKey, endKey kv.Key, physicalTableID int64, err error) { +func getDDLReorgHandle(se *sess.Session, job *model.Job) (element *meta.Element, startKey, endKey kv.Key, physicalTableID int64, err error) { sql := fmt.Sprintf("select ele_id, ele_type, start_key, end_key, physical_id from mysql.tidb_ddl_reorg where job_id = %d", job.ID) ctx := kv.WithInternalSourceType(context.Background(), getDDLRequestSource(job.Type)) - rows, err := sess.execute(ctx, sql, "get_handle") + rows, err := se.Execute(ctx, sql, "get_handle") if err != nil { return nil, nil, nil, 0, err } @@ -550,54 +551,54 @@ func getDDLReorgHandle(sess *session, job *model.Job) (element *meta.Element, st // updateDDLReorgHandle update startKey, endKey physicalTableID and element of the handle. // Caller should wrap this in a separate transaction, to avoid conflicts. -func updateDDLReorgHandle(sess *session, jobID int64, startKey kv.Key, endKey kv.Key, physicalTableID int64, element *meta.Element) error { +func updateDDLReorgHandle(se *sess.Session, jobID int64, startKey kv.Key, endKey kv.Key, physicalTableID int64, element *meta.Element) error { sql := fmt.Sprintf("update mysql.tidb_ddl_reorg set ele_id = %d, ele_type = %s, start_key = %s, end_key = %s, physical_id = %d where job_id = %d", element.ID, wrapKey2String(element.TypeKey), wrapKey2String(startKey), wrapKey2String(endKey), physicalTableID, jobID) - _, err := sess.execute(context.Background(), sql, "update_handle") + _, err := se.Execute(context.Background(), sql, "update_handle") return err } // initDDLReorgHandle initializes the handle for ddl reorg. 
-func initDDLReorgHandle(s *session, jobID int64, startKey kv.Key, endKey kv.Key, physicalTableID int64, element *meta.Element) error { +func initDDLReorgHandle(s *sess.Session, jobID int64, startKey kv.Key, endKey kv.Key, physicalTableID int64, element *meta.Element) error { del := fmt.Sprintf("delete from mysql.tidb_ddl_reorg where job_id = %d", jobID) ins := fmt.Sprintf("insert into mysql.tidb_ddl_reorg(job_id, ele_id, ele_type, start_key, end_key, physical_id) values (%d, %d, %s, %s, %s, %d)", jobID, element.ID, wrapKey2String(element.TypeKey), wrapKey2String(startKey), wrapKey2String(endKey), physicalTableID) - return s.runInTxn(func(se *session) error { - _, err := se.execute(context.Background(), del, "init_handle") + return s.RunInTxn(func(se *sess.Session) error { + _, err := se.Execute(context.Background(), del, "init_handle") if err != nil { logutil.BgLogger().Info("initDDLReorgHandle failed to delete", zap.Int64("jobID", jobID), zap.Error(err)) } - _, err = se.execute(context.Background(), ins, "init_handle") + _, err = se.Execute(context.Background(), ins, "init_handle") return err }) } // deleteDDLReorgHandle deletes the handle for ddl reorg. -func removeDDLReorgHandle(s *session, job *model.Job, elements []*meta.Element) error { +func removeDDLReorgHandle(se *sess.Session, job *model.Job, elements []*meta.Element) error { if len(elements) == 0 { return nil } sql := fmt.Sprintf("delete from mysql.tidb_ddl_reorg where job_id = %d", job.ID) - return s.runInTxn(func(se *session) error { - _, err := se.execute(context.Background(), sql, "remove_handle") + return se.RunInTxn(func(se *sess.Session) error { + _, err := se.Execute(context.Background(), sql, "remove_handle") return err }) } // removeReorgElement removes the element from ddl reorg, it is the same with removeDDLReorgHandle, only used in failpoint -func removeReorgElement(s *session, job *model.Job) error { +func removeReorgElement(se *sess.Session, job *model.Job) error { sql := fmt.Sprintf("delete from mysql.tidb_ddl_reorg where job_id = %d", job.ID) - return s.runInTxn(func(se *session) error { - _, err := se.execute(context.Background(), sql, "remove_handle") + return se.RunInTxn(func(se *sess.Session) error { + _, err := se.Execute(context.Background(), sql, "remove_handle") return err }) } // cleanDDLReorgHandles removes handles that are no longer needed. 
-func cleanDDLReorgHandles(s *session, job *model.Job) error { +func cleanDDLReorgHandles(se *sess.Session, job *model.Job) error { sql := "delete from mysql.tidb_ddl_reorg where job_id = " + strconv.FormatInt(job.ID, 10) - return s.runInTxn(func(se *session) error { - _, err := se.execute(context.Background(), sql, "clean_handle") + return se.RunInTxn(func(se *sess.Session) error { + _, err := se.Execute(context.Background(), sql, "clean_handle") return err }) } @@ -609,8 +610,8 @@ func wrapKey2String(key []byte) string { return fmt.Sprintf("0x%x", key) } -func getJobsBySQL(sess *session, tbl, condition string) ([]*model.Job, error) { - rows, err := sess.execute(context.Background(), fmt.Sprintf("select job_meta from mysql.%s where %s", tbl, condition), "get_job") +func getJobsBySQL(se *sess.Session, tbl, condition string) ([]*model.Job, error) { + rows, err := se.Execute(context.Background(), fmt.Sprintf("select job_meta from mysql.%s where %s", tbl, condition), "get_job") if err != nil { return nil, errors.Trace(err) } @@ -627,10 +628,10 @@ func getJobsBySQL(sess *session, tbl, condition string) ([]*model.Job, error) { return jobs, nil } -func syncBackfillHistoryJobs(sess *session, uuid string, backfillJob *BackfillJob) error { +func syncBackfillHistoryJobs(se *sess.Session, uuid string, backfillJob *BackfillJob) error { sql := fmt.Sprintf("update mysql.%s set state = '%s' where task_key like '%s' and exec_id = '%s' limit 1;", BackgroundSubtaskHistoryTable, model.JobStateSynced.String(), backfillJob.PrefixKeyString(), uuid) - _, err := sess.execute(context.Background(), sql, "sync_backfill_history_job") + _, err := se.Execute(context.Background(), sql, "sync_backfill_history_job") return err } @@ -656,22 +657,22 @@ func generateInsertBackfillJobSQL(tableName string, backfillJobs []*BackfillJob) } // AddBackfillHistoryJob adds the backfill jobs to the tidb_background_subtask_history table. -func AddBackfillHistoryJob(sess *session, backfillJobs []*BackfillJob) error { +func AddBackfillHistoryJob(se *sess.Session, backfillJobs []*BackfillJob) error { label := fmt.Sprintf("add_%s_job", BackgroundSubtaskHistoryTable) sql, err := generateInsertBackfillJobSQL(BackgroundSubtaskHistoryTable, backfillJobs) if err != nil { return err } - _, err = sess.execute(context.Background(), sql, label) + _, err = se.Execute(context.Background(), sql, label) return errors.Trace(err) } // AddBackfillJobs adds the backfill jobs to the tidb_background_subtask table. -func AddBackfillJobs(s *session, backfillJobs []*BackfillJob) error { +func AddBackfillJobs(s *sess.Session, backfillJobs []*BackfillJob) error { label := fmt.Sprintf("add_%s_job", BackgroundSubtaskTable) // Do runInTxn to get StartTS. - return s.runInTxn(func(se *session) error { - txn, err := se.txn() + return s.RunInTxn(func(se *sess.Session) error { + txn, err := se.Txn() if err != nil { return errors.Trace(err) } @@ -684,13 +685,13 @@ func AddBackfillJobs(s *session, backfillJobs []*BackfillJob) error { if err != nil { return err } - _, err = se.execute(context.Background(), sql, label) + _, err = se.Execute(context.Background(), sql, label) return errors.Trace(err) }) } // GetBackfillJobForOneEle gets the backfill jobs in the tblName table that contains only one element. 
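A sketch of the read-and-decode pattern behind getJobsBySQL above: internal SQL through Execute, then model.Job.Decode on the job_meta column. The helper name is illustrative and the sketch is not part of the patch:

// Illustrative sketch, not part of the patch.
package ddl

import (
	"context"
	"fmt"

	"github.com/pingcap/errors"
	sess "github.com/pingcap/tidb/ddl/internal/session"
	"github.com/pingcap/tidb/parser/model"
)

func getOneJobSketch(se *sess.Session, jobID int64) (*model.Job, error) {
	sql := fmt.Sprintf("select job_meta from mysql.tidb_ddl_job where job_id = %d", jobID)
	rows, err := se.Execute(context.Background(), sql, "get_job")
	if err != nil {
		return nil, errors.Trace(err)
	}
	if len(rows) == 0 {
		return nil, nil
	}
	job := &model.Job{}
	if err := job.Decode(rows[0].GetBytes(0)); err != nil {
		return nil, errors.Trace(err)
	}
	return job, nil
}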
-func GetBackfillJobForOneEle(s *session, excludedJobIDs []int64, lease time.Duration) (*BackfillJob, error) { +func GetBackfillJobForOneEle(s *sess.Session, excludedJobIDs []int64, lease time.Duration) (*BackfillJob, error) { eJobIDsBuilder := strings.Builder{} for _, id := range excludedJobIDs { eJobIDsBuilder.WriteString(fmt.Sprintf(" and task_key not like \"%d_%%\"", id)) @@ -698,7 +699,7 @@ func GetBackfillJobForOneEle(s *session, excludedJobIDs []int64, lease time.Dura var err error var bJobs []*BackfillJob - err = s.runInTxn(func(se *session) error { + err = s.RunInTxn(func(se *sess.Session) error { currTime, err := GetOracleTimeWithStartTS(se) if err != nil { return err @@ -718,10 +719,10 @@ func GetBackfillJobForOneEle(s *session, excludedJobIDs []int64, lease time.Dura // GetAndMarkBackfillJobsForOneEle batch gets the backfill jobs in the tblName table that contains only one element, // and update these jobs with instance ID and lease. -func GetAndMarkBackfillJobsForOneEle(s *session, batch int, jobID int64, uuid string, pTblID int64, lease time.Duration) ([]*BackfillJob, error) { +func GetAndMarkBackfillJobsForOneEle(s *sess.Session, batch int, jobID int64, uuid string, pTblID int64, lease time.Duration) ([]*BackfillJob, error) { var validLen int var bJobs []*BackfillJob - err := s.runInTxn(func(se *session) error { + err := s.RunInTxn(func(se *sess.Session) error { currTime, err := GetOracleTimeWithStartTS(se) if err != nil { return err @@ -732,7 +733,7 @@ func GetAndMarkBackfillJobsForOneEle(s *session, batch int, jobID int64, uuid st leaseStr, jobID, batch) if pTblID != getJobWithoutPartition { if pTblID == 0 { - rows, err := s.execute(context.Background(), + rows, err := s.Execute(context.Background(), fmt.Sprintf("select ddl_physical_tid from mysql.%s group by substring_index(task_key,\"_\",3), ddl_physical_tid having max(length(exec_id)) = 0 or max(exec_expired) < '%s' order by substring_index(task_key,\"_\",3), ddl_physical_tid limit 1", BackgroundSubtaskTable, leaseStr), "get_mark_backfill_job") if err != nil { @@ -782,7 +783,7 @@ func GetAndMarkBackfillJobsForOneEle(s *session, batch int, jobID int64, uuid st } // GetInterruptedBackfillJobForOneEle gets an interrupted backfill job that contains only one element. -func GetInterruptedBackfillJobForOneEle(sess *session, jobID, eleID int64, eleKey []byte) ([]*BackfillJob, error) { +func GetInterruptedBackfillJobForOneEle(sess *sess.Session, jobID, eleID int64, eleKey []byte) ([]*BackfillJob, error) { bJobs, err := GetBackfillJobs(sess, BackgroundSubtaskHistoryTable, fmt.Sprintf("task_key like '%s' and state = \"%s\" limit 1", backfillJobPrefixKeyString(jobID, eleKey, eleID), model.JobStateCancelled.String()), "get_interrupt_backfill_job") if err != nil || len(bJobs) == 0 { @@ -792,8 +793,8 @@ func GetInterruptedBackfillJobForOneEle(sess *session, jobID, eleID int64, eleKe } // GetBackfillJobCount gets the number of rows in the tblName table according to condition. 
-func GetBackfillJobCount(sess *session, tblName, condition string, label string) (int, error) { - rows, err := sess.execute(context.Background(), fmt.Sprintf("select count(1) from mysql.%s where %s", tblName, condition), label) +func GetBackfillJobCount(se *sess.Session, tblName, condition string, label string) (int, error) { + rows, err := se.Execute(context.Background(), fmt.Sprintf("select count(1) from mysql.%s where %s", tblName, condition), label) if err != nil { return 0, errors.Trace(err) } @@ -805,8 +806,8 @@ func GetBackfillJobCount(sess *session, tblName, condition string, label string) } // GetBackfillMetas gets the backfill metas in the tblName table according to condition. -func GetBackfillMetas(sess *session, tblName, condition string, label string) ([]*model.BackfillMeta, error) { - rows, err := sess.execute(context.Background(), fmt.Sprintf("select meta from mysql.%s where %s", tblName, condition), label) +func GetBackfillMetas(se *sess.Session, tblName, condition string, label string) ([]*model.BackfillMeta, error) { + rows, err := se.Execute(context.Background(), fmt.Sprintf("select meta from mysql.%s where %s", tblName, condition), label) if err != nil { return nil, errors.Trace(err) } @@ -828,11 +829,11 @@ func GetBackfillMetas(sess *session, tblName, condition string, label string) ([ } // GetBackfillIDAndMetas gets the backfill IDs and metas in the tblName table according to condition. -func GetBackfillIDAndMetas(sess *session, tblName, condition string, label string) ([]*BackfillJobRangeMeta, error) { +func GetBackfillIDAndMetas(se *sess.Session, tblName, condition string, label string) ([]*BackfillJobRangeMeta, error) { sql := "select tbl.task_key, tbl.meta, tbl.ddl_physical_tid from (select max(task_key) max_id, ddl_physical_tid " + fmt.Sprintf(" from mysql.%s tbl where %s group by ddl_physical_tid) tmp join mysql.%s tbl", tblName, condition, tblName) + " on tbl.task_key=tmp.max_id and tbl.ddl_physical_tid=tmp.ddl_physical_tid;" - rows, err := sess.execute(context.Background(), sql, label) + rows, err := se.Execute(context.Background(), sql, label) if err != nil { return nil, errors.Trace(err) } @@ -864,10 +865,10 @@ func GetBackfillIDAndMetas(sess *session, tblName, condition string, label strin return pTblMetas, nil } -func getUnsyncedInstanceIDs(sess *session, jobID int64, label string) ([]string, error) { +func getUnsyncedInstanceIDs(se *sess.Session, jobID int64, label string) ([]string, error) { sql := fmt.Sprintf("select sum((state='%s') + (state='%s')) as tmp, exec_id from mysql.tidb_background_subtask_history where task_key like \"%d_%%\" group by exec_id having tmp = 0;", model.JobStateSynced.String(), model.JobStateCancelled.String(), jobID) - rows, err := sess.execute(context.Background(), sql, label) + rows, err := se.Execute(context.Background(), sql, label) if err != nil { return nil, errors.Trace(err) } @@ -880,8 +881,8 @@ func getUnsyncedInstanceIDs(sess *session, jobID int64, label string) ([]string, } // GetBackfillJobs gets the backfill jobs in the tblName table according to condition. 
-func GetBackfillJobs(sess *session, tblName, condition string, label string) ([]*BackfillJob, error) { - rows, err := sess.execute(context.Background(), fmt.Sprintf("select * from mysql.%s where %s", tblName, condition), label) +func GetBackfillJobs(se *sess.Session, tblName, condition string, label string) ([]*BackfillJob, error) { + rows, err := se.Execute(context.Background(), fmt.Sprintf("select * from mysql.%s where %s", tblName, condition), label) if err != nil { return nil, errors.Trace(err) } @@ -931,24 +932,24 @@ func GetBackfillJobs(sess *session, tblName, condition string, label string) ([] // RemoveBackfillJob removes the backfill jobs from the tidb_background_subtask table. // If isOneEle is true, removes all jobs with backfillJob's ddl_job_id, ele_id and ele_key. Otherwise, removes the backfillJob. -func RemoveBackfillJob(sess *session, isOneEle bool, backfillJob *BackfillJob) error { +func RemoveBackfillJob(se *sess.Session, isOneEle bool, backfillJob *BackfillJob) error { sql := "delete from mysql.tidb_background_subtask" if !isOneEle { sql += fmt.Sprintf(" where task_key = '%s'", backfillJob.keyString()) } else { sql += fmt.Sprintf(" where task_key like '%s'", backfillJob.PrefixKeyString()) } - _, err := sess.execute(context.Background(), sql, "remove_backfill_job") + _, err := se.Execute(context.Background(), sql, "remove_backfill_job") return err } -func updateBackfillJob(sess *session, tableName string, backfillJob *BackfillJob, label string) error { +func updateBackfillJob(se *sess.Session, tableName string, backfillJob *BackfillJob, label string) error { mate, err := backfillJob.Meta.Encode() if err != nil { return err } sql := fmt.Sprintf("update mysql.%s set exec_id = '%s', exec_expired = '%s', state = '%s', checkpoint = %s, meta = %s where task_key = '%s'", tableName, backfillJob.InstanceID, backfillJob.InstanceLease, backfillJob.State.String(), wrapKey2String(backfillJob.Meta.CurrKey), wrapKey2String(mate), backfillJob.keyString()) - _, err = sess.execute(context.Background(), sql, label) + _, err = se.Execute(context.Background(), sql, label) return err } diff --git a/ddl/job_table_test.go b/ddl/job_table_test.go index 8ea9f27ae88f7..923433f46471f 100644 --- a/ddl/job_table_test.go +++ b/ddl/job_table_test.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/ddl/internal/callback" + sess "github.com/pingcap/tidb/ddl/internal/session" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/model" @@ -261,7 +262,7 @@ func TestSimpleExecBackfillJobs(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") d := dom.DDL() - se := ddl.NewSession(tk.Session()) + se := sess.NewSession(tk.Session()) jobID1 := int64(1) jobID2 := int64(2) @@ -315,7 +316,7 @@ func TestSimpleExecBackfillJobs(t *testing.T) { require.Equal(t, expectJob, bJob) previousTime, err := ddl.GetOracleTimeWithStartTS(se) require.EqualError(t, err, "[kv:8024]invalid transaction") - readInTxn(se, func(sessionctx.Context) { + readInTxn(se.Session(), func(sessionctx.Context) { previousTime, err = ddl.GetOracleTimeWithStartTS(se) require.NoError(t, err) }) @@ -330,7 +331,7 @@ func TestSimpleExecBackfillJobs(t *testing.T) { expectJob.InstanceID = uuid equalBackfillJob(t, expectJob, bJobs[0], ddl.GetLeaseGoTime(previousTime, instanceLease)) var currTime time.Time - readInTxn(se, func(sessionctx.Context) { + readInTxn(se.Session(), func(sessionctx.Context) { currTime, err = 
ddl.GetOracleTimeWithStartTS(se) require.NoError(t, err) }) @@ -543,7 +544,7 @@ func TestSimpleExecBackfillJobs(t *testing.T) { // ID jobID eleID // ------------------------ // 0 jobID2 eleID2 - readInTxn(se, func(sessionctx.Context) { + readInTxn(se.Session(), func(sessionctx.Context) { currTime, err = ddl.GetOracleTimeWithStartTS(se) require.NoError(t, err) }) @@ -741,7 +742,7 @@ func TestGetTasks(t *testing.T) { store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") - se := ddl.NewSession(tk.Session()) + se := sess.NewSession(tk.Session()) se.GetSessionVars().SQLMode = mysql.ModeNone d := dom.DDL() @@ -762,19 +763,21 @@ func TestGetTasks(t *testing.T) { var err1 error ch := make(chan struct{}, 1) wg.Run(func() { - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/NotifyBeginTxnCh", `return(1)`)) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/internal/session/NotifyBeginTxnCh", `return(1)`)) ch <- struct{}{} var bJobs []*ddl.BackfillJob bJobs, err = ddl.GetAndMarkBackfillJobsForOneEle(se, 1, jobID1, uuid, 1, instanceLease) require.Len(t, bJobs, 1) }) <-ch - defer func() { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/NotifyBeginTxnCh")) }() + defer func() { + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/ddl/internal/session/NotifyBeginTxnCh")) + }() wg.Run(func() { tk1 := testkit.NewTestKit(t, store) tk1.MustExec("use test") - se1 := ddl.NewSession(tk1.Session()) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/NotifyBeginTxnCh", `return(2)`)) + se1 := sess.NewSession(tk1.Session()) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/internal/session/NotifyBeginTxnCh", `return(2)`)) var bJobs1 []*ddl.BackfillJob bJobs1, err1 = ddl.GetAndMarkBackfillJobsForOneEle(se1, 1, jobID1, uuid, 1, instanceLease) require.Len(t, bJobs1, 1) @@ -801,7 +804,7 @@ func TestGetTasks(t *testing.T) { tableID = int64(tableIDi) tbl := testGetTable(t, dom, tableID) pID := int64(0) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/NotifyBeginTxnCh", `return(0)`)) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/internal/session/NotifyBeginTxnCh", `return(0)`)) // Mock GetAndMarkBackfillJobsForOneEle gets a writing conflict error, but getTasks is successful. // Step 1: se1 begins txn1. // Step 2: se2 begins txn2. @@ -809,7 +812,7 @@ func TestGetTasks(t *testing.T) { // Step 4: se2 begin txn3. // Step 5: getTasks(txn3) executes successfully. 
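// The interleaving described in the steps above is driven by the pingcap/failpoint
// package: the test enables the failpoint with `return(1)` / `return(2)` and the code
// under test injects on the same name and reacts to the injected value. The snippet
// below is only a minimal sketch of that general pattern, not the actual
// NotifyBeginTxnCh implementation; the channel and function names are assumed for
// illustration.

package demo

import "github.com/pingcap/failpoint"

var beginTxnOrder = make(chan struct{}, 1)

func notifyBeginTxn() {
	failpoint.Inject("NotifyBeginTxnCh", func(val failpoint.Value) {
		if v, ok := val.(int); ok {
			switch v {
			case 1:
				beginTxnOrder <- struct{}{} // first session: its txn has begun, unblock the other
			case 2:
				<-beginTxnOrder // second session: wait until the first txn has begun
			}
		}
	})
}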
wg.Run(func() { - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/NotifyBeginTxnCh", `return(1)`)) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/internal/session/NotifyBeginTxnCh", `return(1)`)) ch <- struct{}{} bJobs, err := ddl.GetTasks(ddl.GetDDLCtx(d), se, tbl, jobID1, &pID, 1) require.Nil(t, err) @@ -819,8 +822,8 @@ func TestGetTasks(t *testing.T) { wg.Run(func() { tk1 := testkit.NewTestKit(t, store) tk1.MustExec("use test") - se1 := ddl.NewSession(tk1.Session()) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/NotifyBeginTxnCh", `return(2)`)) + se1 := sess.NewSession(tk1.Session()) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/ddl/internal/session/NotifyBeginTxnCh", `return(2)`)) bJobs1, err1 := ddl.GetTasks(ddl.GetDDLCtx(d), se1, tbl, jobID1, &pID, 1) require.Nil(t, err1) require.Len(t, bJobs1, 1) diff --git a/ddl/partition.go b/ddl/partition.go index aab5095dfcb64..7df6a7262dae6 100644 --- a/ddl/partition.go +++ b/ddl/partition.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/config" + sess "github.com/pingcap/tidb/ddl/internal/session" "github.com/pingcap/tidb/ddl/label" "github.com/pingcap/tidb/ddl/placement" "github.com/pingcap/tidb/ddl/util" @@ -1827,7 +1828,7 @@ func (w *worker) onDropTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) ( return ver, err1 } defer w.sessPool.Put(sctx) - rh := newReorgHandler(newSession(sctx)) + rh := newReorgHandler(sess.NewSession(sctx)) reorgInfo, err := getReorgInfoFromPartitions(d.jobContext(job.ID), d, rh, job, dbInfo, pt, physicalTableIDs, elements) if err != nil || reorgInfo.first { @@ -2146,13 +2147,13 @@ func (w *worker) onExchangeTablePartition(d *ddlCtx, t *meta.Meta, job *model.Jo failpoint.Inject("exchangePartitionAutoID", func(val failpoint.Value) { if val.(bool) { - se, err := w.sessPool.Get() - defer w.sessPool.Put(se) + seCtx, err := w.sessPool.Get() + defer w.sessPool.Put(seCtx) if err != nil { failpoint.Return(ver, err) } - sess := newSession(se) - _, err = sess.execute(context.Background(), "insert ignore into test.pt values (40000000)", "exchange_partition_test") + se := sess.NewSession(seCtx) + _, err = se.Execute(context.Background(), "insert ignore into test.pt values (40000000)", "exchange_partition_test") if err != nil { failpoint.Return(ver, err) } @@ -2534,7 +2535,7 @@ func doPartitionReorgWork(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job, tb return done, ver, err1 } defer w.sessPool.Put(sctx) - rh := newReorgHandler(newSession(sctx)) + rh := newReorgHandler(sess.NewSession(sctx)) elements := BuildElements(tbl.Meta().Columns[0], tbl.Meta().Indices) partTbl, ok := tbl.(table.PartitionedTable) if !ok { diff --git a/ddl/reorg.go b/ddl/reorg.go index 13f874acb3deb..95677daec2857 100644 --- a/ddl/reorg.go +++ b/ddl/reorg.go @@ -766,14 +766,14 @@ func (r *reorgInfo) UpdateReorgMeta(startKey kv.Key, pool *sess.Pool) (err error } defer pool.Put(sctx) - se := newSession(sctx) - err = se.begin() + se := sess.NewSession(sctx) + err = se.Begin() if err != nil { return } rh := newReorgHandler(se) err = updateDDLReorgHandle(rh.s, r.Job.ID, startKey, r.EndKey, r.PhysicalTableID, r.currElement) - err1 := se.commit() + err1 := se.Commit() if err == nil { err = err1 } @@ -782,15 +782,15 @@ func (r *reorgInfo) UpdateReorgMeta(startKey kv.Key, pool *sess.Pool) (err error // reorgHandler is used to handle the reorg information duration reorganization DDL job. 
type reorgHandler struct { - s *session + s *sess.Session } // NewReorgHandlerForTest creates a new reorgHandler, only used in test. -func NewReorgHandlerForTest(sess sessionctx.Context) *reorgHandler { - return newReorgHandler(newSession(sess)) +func NewReorgHandlerForTest(se sessionctx.Context) *reorgHandler { + return newReorgHandler(sess.NewSession(se)) } -func newReorgHandler(sess *session) *reorgHandler { +func newReorgHandler(sess *sess.Session) *reorgHandler { return &reorgHandler{s: sess} } @@ -810,7 +810,7 @@ func (r *reorgHandler) RemoveDDLReorgHandle(job *model.Job, elements []*meta.Ele } // CleanupDDLReorgHandles removes the job reorganization related handles. -func CleanupDDLReorgHandles(job *model.Job, s *session) { +func CleanupDDLReorgHandles(job *model.Job, s *sess.Session) { if job != nil && !job.IsFinished() && !job.IsSynced() { // Job is given, but it is neither finished nor synced; do nothing return From a0f2405981960ec705bf44f12b96ca8aec1506c4 Mon Sep 17 00:00:00 2001 From: tiancaiamao Date: Tue, 4 Apr 2023 23:58:57 +0800 Subject: [PATCH 07/12] expression: make expression TestGetLock test more stable (#42815) close pingcap/tidb#35155 --- expression/builtin_miscellaneous.go | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/expression/builtin_miscellaneous.go b/expression/builtin_miscellaneous.go index c55d7e571dd12..93aa05b1bad06 100644 --- a/expression/builtin_miscellaneous.go +++ b/expression/builtin_miscellaneous.go @@ -229,17 +229,18 @@ func (b *builtinLockSig) evalInt(row chunk.Row) (int64, bool, error) { } err = b.ctx.GetAdvisoryLock(lockName, timeout) if err != nil { - switch errors.Cause(err).(*terror.Error).Code() { - case mysql.ErrLockWaitTimeout: - return 0, false, nil // Another user has the lock - case mysql.ErrLockDeadlock: - // Currently this code is not reachable because each Advisory Lock - // Uses a separate session. Deadlock detection does not work across - // independent sessions. - return 0, false, errUserLockDeadlock - default: - return 0, false, err + if terr, ok := errors.Cause(err).(*terror.Error); ok { + switch terr.Code() { + case mysql.ErrLockWaitTimeout: + return 0, false, nil // Another user has the lock + case mysql.ErrLockDeadlock: + // Currently this code is not reachable because each Advisory Lock + // Uses a separate session. Deadlock detection does not work across + // independent sessions. 
+ return 0, false, errUserLockDeadlock + } } + return 0, false, err } return 1, false, nil } From b384b73f2137b24a67c8871245e67963b6731139 Mon Sep 17 00:00:00 2001 From: Weizhen Wang Date: Wed, 5 Apr 2023 21:40:58 +0800 Subject: [PATCH 08/12] nogo: enable revive for br/pkg/lightning (#42752) --- br/pkg/lightning/backend/kv/allocator.go | 2 +- br/pkg/lightning/backend/kv/kv2sql.go | 2 +- br/pkg/lightning/backend/kv/session.go | 44 ++++++------- br/pkg/lightning/backend/kv/sql2kv.go | 4 +- br/pkg/lightning/backend/local/compress.go | 8 +-- br/pkg/lightning/backend/local/engine.go | 6 +- br/pkg/lightning/backend/local/iterator.go | 6 +- br/pkg/lightning/backend/local/local.go | 22 ++++--- br/pkg/lightning/backend/local/localhelper.go | 2 +- br/pkg/lightning/backend/local/region_job.go | 2 +- br/pkg/lightning/backend/noop/noop.go | 50 +++++++------- br/pkg/lightning/backend/tidb/tidb.go | 44 ++++++------- br/pkg/lightning/checkpoints/checkpoints.go | 9 ++- .../lightning/checkpoints/glue_checkpoint.go | 8 +-- br/pkg/lightning/common/retry.go | 3 +- br/pkg/lightning/common/util.go | 2 +- br/pkg/lightning/glue/glue.go | 18 ++--- br/pkg/lightning/importer/chunk_process.go | 2 +- br/pkg/lightning/importer/get_pre_info.go | 6 +- br/pkg/lightning/importer/meta_manager.go | 66 ++++++++++--------- br/pkg/lightning/importer/mock/mock.go | 16 ++--- br/pkg/lightning/importer/precheck_impl.go | 49 +++++--------- br/pkg/lightning/importer/table_import.go | 4 +- br/pkg/lightning/importer/tidb.go | 2 +- build/nogo_config.json | 3 +- 25 files changed, 181 insertions(+), 199 deletions(-) diff --git a/br/pkg/lightning/backend/kv/allocator.go b/br/pkg/lightning/backend/kv/allocator.go index 14703e1143a45..6792851ef9b06 100644 --- a/br/pkg/lightning/backend/kv/allocator.go +++ b/br/pkg/lightning/backend/kv/allocator.go @@ -42,7 +42,7 @@ func NewPanickingAllocators(base int64) autoid.Allocators { } // Rebase implements the autoid.Allocator interface -func (alloc *panickingAllocator) Rebase(ctx context.Context, newBase int64, allocIDs bool) error { +func (alloc *panickingAllocator) Rebase(_ context.Context, newBase int64, _ bool) error { // CAS for { oldBase := atomic.LoadInt64(alloc.base) diff --git a/br/pkg/lightning/backend/kv/kv2sql.go b/br/pkg/lightning/backend/kv/kv2sql.go index 7304ba055dce3..de4377abe62b3 100644 --- a/br/pkg/lightning/backend/kv/kv2sql.go +++ b/br/pkg/lightning/backend/kv/kv2sql.go @@ -42,7 +42,7 @@ func (t *TableKVDecoder) Name() string { } // DecodeHandleFromRowKey implements KVDecoder.DecodeHandleFromRowKey. -func (t *TableKVDecoder) DecodeHandleFromRowKey(key []byte) (kv.Handle, error) { +func (*TableKVDecoder) DecodeHandleFromRowKey(key []byte) (kv.Handle, error) { return tablecodec.DecodeRowKey(key) } diff --git a/br/pkg/lightning/backend/kv/session.go b/br/pkg/lightning/backend/kv/session.go index d7fac93ebb700..d6d551251b7cd 100644 --- a/br/pkg/lightning/backend/kv/session.go +++ b/br/pkg/lightning/backend/kv/session.go @@ -151,27 +151,27 @@ func (mb *MemBuf) Set(k kv.Key, v []byte) error { } // SetWithFlags implements the kv.MemBuffer interface. -func (mb *MemBuf) SetWithFlags(k kv.Key, v []byte, ops ...kv.FlagsOp) error { +func (mb *MemBuf) SetWithFlags(k kv.Key, v []byte, _ ...kv.FlagsOp) error { return mb.Set(k, v) } // Delete implements the kv.MemBuffer interface. -func (mb *MemBuf) Delete(k kv.Key) error { +func (*MemBuf) Delete(_ kv.Key) error { return errors.New("unsupported operation") } // Release publish all modifications in the latest staging buffer to upper level. 
-func (mb *MemBuf) Release(h kv.StagingHandle) { +func (*MemBuf) Release(_ kv.StagingHandle) { } // Staging creates a new staging buffer. -func (mb *MemBuf) Staging() kv.StagingHandle { +func (*MemBuf) Staging() kv.StagingHandle { return 0 } // Cleanup the resources referenced by the StagingHandle. // If the changes are not published by `Release`, they will be discarded. -func (mb *MemBuf) Cleanup(h kv.StagingHandle) {} +func (*MemBuf) Cleanup(_ kv.StagingHandle) {} // Size returns sum of keys and values length. func (mb *MemBuf) Size() int { @@ -193,16 +193,16 @@ func (s *kvUnionStore) GetMemBuffer() kv.MemBuffer { } // GetIndexName implements the kv.UnionStore interface. -func (s *kvUnionStore) GetIndexName(tableID, indexID int64) string { +func (*kvUnionStore) GetIndexName(_, _ int64) string { panic("Unsupported Operation") } // CacheIndexName implements the kv.UnionStore interface. -func (s *kvUnionStore) CacheIndexName(tableID, indexID int64, name string) { +func (*kvUnionStore) CacheIndexName(_, _ int64, _ string) { } // CacheTableInfo implements the kv.UnionStore interface. -func (s *kvUnionStore) CacheTableInfo(id int64, info *model.TableInfo) { +func (*kvUnionStore) CacheTableInfo(_ int64, _ *model.TableInfo) { } // transaction is a trimmed down Transaction type which only supports adding a @@ -218,26 +218,26 @@ func (t *transaction) GetMemBuffer() kv.MemBuffer { } // Discard implements the kv.Transaction interface. -func (t *transaction) Discard() { +func (*transaction) Discard() { // do nothing } // Flush implements the kv.Transaction interface. -func (t *transaction) Flush() (int, error) { +func (*transaction) Flush() (int, error) { // do nothing return 0, nil } // Reset implements the kv.MemBuffer interface -func (t *transaction) Reset() {} +func (*transaction) Reset() {} // Get implements the kv.Retriever interface -func (t *transaction) Get(ctx context.Context, key kv.Key) ([]byte, error) { +func (*transaction) Get(_ context.Context, _ kv.Key) ([]byte, error) { return nil, kv.ErrNotExist } // Iter implements the kv.Retriever interface -func (t *transaction) Iter(k kv.Key, upperBound kv.Key) (kv.Iterator, error) { +func (*transaction) Iter(_ kv.Key, _ kv.Key) (kv.Iterator, error) { return &invalidIterator{}, nil } @@ -247,16 +247,16 @@ func (t *transaction) Set(k kv.Key, v []byte) error { } // GetTableInfo implements the kv.Transaction interface. -func (t *transaction) GetTableInfo(id int64) *model.TableInfo { +func (*transaction) GetTableInfo(_ int64) *model.TableInfo { return nil } // CacheTableInfo implements the kv.Transaction interface. -func (t *transaction) CacheTableInfo(id int64, info *model.TableInfo) { +func (*transaction) CacheTableInfo(_ int64, _ *model.TableInfo) { } // SetAssertion implements the kv.Transaction interface. 
-func (t *transaction) SetAssertion(key []byte, assertion ...kv.FlagsOp) error { +func (*transaction) SetAssertion(_ []byte, _ ...kv.FlagsOp) error { return nil } @@ -336,7 +336,7 @@ func (se *Session) TakeKvPairs() *Pairs { } // Txn implements the sessionctx.Context interface -func (se *Session) Txn(active bool) (kv.Transaction, error) { +func (se *Session) Txn(_ bool) (kv.Transaction, error) { return &se.txn, nil } @@ -356,25 +356,25 @@ func (se *Session) Value(key fmt.Stringer) interface{} { } // StmtAddDirtyTableOP implements the sessionctx.Context interface -func (se *Session) StmtAddDirtyTableOP(op int, physicalID int64, handle kv.Handle) {} +func (*Session) StmtAddDirtyTableOP(_ int, _ int64, _ kv.Handle) {} // GetInfoSchema implements the sessionctx.Context interface. -func (se *Session) GetInfoSchema() sessionctx.InfoschemaMetaVersion { +func (*Session) GetInfoSchema() sessionctx.InfoschemaMetaVersion { return nil } // GetBuiltinFunctionUsage returns the BuiltinFunctionUsage of current Context, which is not thread safe. // Use primitive map type to prevent circular import. Should convert it to telemetry.BuiltinFunctionUsage before using. -func (se *Session) GetBuiltinFunctionUsage() map[string]uint32 { +func (*Session) GetBuiltinFunctionUsage() map[string]uint32 { return make(map[string]uint32) } // BuiltinFunctionUsageInc implements the sessionctx.Context interface. -func (se *Session) BuiltinFunctionUsageInc(scalarFuncSigName string) { +func (*Session) BuiltinFunctionUsageInc(_ string) { } // GetStmtStats implements the sessionctx.Context interface. -func (se *Session) GetStmtStats() *stmtstats.StatementStats { +func (*Session) GetStmtStats() *stmtstats.StatementStats { return nil } diff --git a/br/pkg/lightning/backend/kv/sql2kv.go b/br/pkg/lightning/backend/kv/sql2kv.go index 438cd6d8d0982..c54a9e498e4d1 100644 --- a/br/pkg/lightning/backend/kv/sql2kv.go +++ b/br/pkg/lightning/backend/kv/sql2kv.go @@ -179,13 +179,13 @@ func Row2KvPairs(row encode.Row) []common.KvPair { // // See comments in `(*TableRestore).initializeColumns` for the meaning of the // `columnPermutation` parameter. 
-func (kvcodec *tableKVEncoder) Encode(row []types.Datum, rowID int64, columnPermutation []int, offset int64) (encode.Row, error) { +func (kvcodec *tableKVEncoder) Encode(row []types.Datum, rowID int64, columnPermutation []int, _ int64) (encode.Row, error) { var value types.Datum var err error record := kvcodec.GetOrCreateRecord() for i, col := range kvcodec.Columns { - var theDatum *types.Datum = nil + var theDatum *types.Datum j := columnPermutation[i] if j >= 0 && j < len(row) { theDatum = &row[j] diff --git a/br/pkg/lightning/backend/local/compress.go b/br/pkg/lightning/backend/local/compress.go index bf5aea0924150..c9315d33b3225 100644 --- a/br/pkg/lightning/backend/local/compress.go +++ b/br/pkg/lightning/backend/local/compress.go @@ -35,7 +35,7 @@ var gzipWriterPool = sync.Pool{ }, } -func (c *gzipCompressor) Do(w io.Writer, p []byte) error { +func (*gzipCompressor) Do(w io.Writer, p []byte) error { z := gzipWriterPool.Get().(*gzip.Writer) defer gzipWriterPool.Put(z) z.Reset(w) @@ -45,7 +45,7 @@ func (c *gzipCompressor) Do(w io.Writer, p []byte) error { return z.Close() } -func (c *gzipCompressor) Type() string { +func (*gzipCompressor) Type() string { return "gzip" } @@ -57,7 +57,7 @@ var gzipReaderPool = sync.Pool{ }, } -func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { +func (*gzipDecompressor) Do(r io.Reader) ([]byte, error) { z := gzipReaderPool.Get().(*gzip.Reader) if err := z.Reset(r); err != nil { gzipReaderPool.Put(z) @@ -71,6 +71,6 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { return io.ReadAll(z) } -func (d *gzipDecompressor) Type() string { +func (*gzipDecompressor) Type() string { return "gzip" } diff --git a/br/pkg/lightning/backend/local/engine.go b/br/pkg/lightning/backend/local/engine.go index d555ee8ecd715..beaa45b866d95 100644 --- a/br/pkg/lightning/backend/local/engine.go +++ b/br/pkg/lightning/backend/local/engine.go @@ -242,7 +242,7 @@ func (e *Engine) unlock() { // TotalMemorySize returns the total memory size of the engine. func (e *Engine) TotalMemorySize() int64 { - var memSize int64 = 0 + var memSize int64 e.localWriters.Range(func(k, v interface{}) bool { w := k.(*Writer) if w.kvBuffer != nil { @@ -355,7 +355,7 @@ func (c *RangePropertiesCollector) Finish(userProps map[string]string) error { } // Name implements `pebble.TablePropertyCollector`. -func (c *RangePropertiesCollector) Name() string { +func (*RangePropertiesCollector) Name() string { return propRangeIndex } @@ -1146,7 +1146,7 @@ func (w *Writer) appendRowsUnsorted(ctx context.Context, kvs []common.KvPair) er } // AppendRows appends rows to the SST file. 
-func (w *Writer) AppendRows(ctx context.Context, tableName string, columnNames []string, rows encode.Rows) error { +func (w *Writer) AppendRows(ctx context.Context, _ string, columnNames []string, rows encode.Rows) error { kvs := kv.Rows2KvPairs(rows) if len(kvs) == 0 { return nil diff --git a/br/pkg/lightning/backend/local/iterator.go b/br/pkg/lightning/backend/local/iterator.go index feb3bbc8a7d94..9595f8f68ea51 100644 --- a/br/pkg/lightning/backend/local/iterator.go +++ b/br/pkg/lightning/backend/local/iterator.go @@ -60,7 +60,7 @@ func (p pebbleIter) Seek(key []byte) bool { return p.SeekGE(key) } -func (p pebbleIter) OpType() sst.Pair_OP { +func (pebbleIter) OpType() sst.Pair_OP { return sst.Pair_Put } @@ -194,7 +194,7 @@ func (d *dupDetectIter) Close() error { return d.iter.Close() } -func (d *dupDetectIter) OpType() sst.Pair_OP { +func (*dupDetectIter) OpType() sst.Pair_OP { return sst.Pair_Put } @@ -281,7 +281,7 @@ func (d *dupDBIter) Close() error { return d.iter.Close() } -func (d *dupDBIter) OpType() sst.Pair_OP { +func (*dupDBIter) OpType() sst.Pair_OP { return sst.Pair_Put } diff --git a/br/pkg/lightning/backend/local/local.go b/br/pkg/lightning/backend/local/local.go index da870e7d4495b..60c1ddf1365e5 100644 --- a/br/pkg/lightning/backend/local/local.go +++ b/br/pkg/lightning/backend/local/local.go @@ -254,13 +254,13 @@ func NewEncodingBuilder(ctx context.Context) encode.EncodingBuilder { // NewEncoder creates a KV encoder. // It implements the `backend.EncodingBuilder` interface. -func (b *encodingBuilder) NewEncoder(ctx context.Context, config *encode.EncodingConfig) (encode.Encoder, error) { +func (b *encodingBuilder) NewEncoder(_ context.Context, config *encode.EncodingConfig) (encode.Encoder, error) { return kv.NewTableKVEncoder(config, b.metrics) } // MakeEmptyRows creates an empty KV rows. // It implements the `backend.EncodingBuilder` interface. -func (b *encodingBuilder) MakeEmptyRows() encode.Rows { +func (*encodingBuilder) MakeEmptyRows() encode.Rows { return kv.MakeRowsFromKvPairs(nil) } @@ -590,7 +590,7 @@ func NewLocalBackend( // TotalMemoryConsume returns the total memory usage of the local backend. func (local *Local) TotalMemoryConsume() int64 { - var memConsume int64 = 0 + var memConsume int64 local.engines.Range(func(k, v interface{}) bool { e := v.(*Engine) if e != nil { @@ -805,12 +805,12 @@ func (local *Local) FlushAllEngines(parentCtx context.Context) (err error) { } // RetryImportDelay returns the delay time before retrying to import a file. -func (local *Local) RetryImportDelay() time.Duration { +func (*Local) RetryImportDelay() time.Duration { return defaultRetryBackoffTime } // ShouldPostProcess returns true if the backend should post process the data. -func (local *Local) ShouldPostProcess() bool { +func (*Local) ShouldPostProcess() bool { return true } @@ -1202,7 +1202,7 @@ func (local *Local) startWorker( } } -func (local *Local) isRetryableImportTiKVError(err error) bool { +func (*Local) isRetryableImportTiKVError(err error) bool { err = errors.Cause(err) // io.EOF is not retryable in normal case // but on TiKV restart, if we're writing to TiKV(through GRPC) @@ -1552,7 +1552,7 @@ func engineSSTDir(storeDir string, engineUUID uuid.UUID) string { } // LocalWriter returns a new local writer. 
-func (local *Local) LocalWriter(ctx context.Context, cfg *backend.LocalWriterConfig, engineUUID uuid.UUID) (backend.EngineWriter, error) { +func (local *Local) LocalWriter(_ context.Context, cfg *backend.LocalWriterConfig, engineUUID uuid.UUID) (backend.EngineWriter, error) { e, ok := local.engines.Load(engineUUID) if !ok { return nil, errors.Errorf("could not find engine for %s", engineUUID.String()) @@ -1619,7 +1619,8 @@ func (local *Local) EngineFileSizes() (res []backend.EngineFileSize) { var getSplitConfFromStoreFunc = getSplitConfFromStore // return region split size, region split keys, error -func getSplitConfFromStore(ctx context.Context, host string, tls *common.TLS) (int64, int64, error) { +func getSplitConfFromStore(ctx context.Context, host string, tls *common.TLS) ( + splitSize int64, regionSplitKeys int64, err error) { var ( nested struct { Coprocessor struct { @@ -1631,7 +1632,7 @@ func getSplitConfFromStore(ctx context.Context, host string, tls *common.TLS) (i if err := tls.WithHost(host).GetJSON(ctx, "/config", &nested); err != nil { return 0, 0, errors.Trace(err) } - splitSize, err := units.FromHumanSize(nested.Coprocessor.RegionSplitSize) + splitSize, err = units.FromHumanSize(nested.Coprocessor.RegionSplitSize) if err != nil { return 0, 0, errors.Trace(err) } @@ -1640,7 +1641,8 @@ func getSplitConfFromStore(ctx context.Context, host string, tls *common.TLS) (i } // return region split size, region split keys, error -func getRegionSplitSizeKeys(ctx context.Context, cli pd.Client, tls *common.TLS) (int64, int64, error) { +func getRegionSplitSizeKeys(ctx context.Context, cli pd.Client, tls *common.TLS) ( + regionSplitSize int64, regionSplitKeys int64, err error) { stores, err := cli.GetAllStores(ctx, pd.WithExcludeTombstone()) if err != nil { return 0, 0, err diff --git a/br/pkg/lightning/backend/local/localhelper.go b/br/pkg/lightning/backend/local/localhelper.go index 3f8e66be06eeb..187cb64fdd0a2 100644 --- a/br/pkg/lightning/backend/local/localhelper.go +++ b/br/pkg/lightning/backend/local/localhelper.go @@ -683,7 +683,7 @@ func (s *storeWriteLimiter) getLimiter(storeID uint64) *rate.Limiter { type noopStoreWriteLimiter struct{} -func (noopStoreWriteLimiter) WaitN(ctx context.Context, storeID uint64, n int) error { +func (noopStoreWriteLimiter) WaitN(_ context.Context, _ uint64, _ int) error { return nil } diff --git a/br/pkg/lightning/backend/local/region_job.go b/br/pkg/lightning/backend/local/region_job.go index f12766c5af2c2..35d234f755776 100644 --- a/br/pkg/lightning/backend/local/region_job.go +++ b/br/pkg/lightning/backend/local/region_job.go @@ -391,7 +391,7 @@ func (j *regionJob) ingest( return nil } -func (j *regionJob) checkWriteStall( +func (*regionJob) checkWriteStall( ctx context.Context, region *split.RegionInfo, clientFactory ImportClientFactory, diff --git a/br/pkg/lightning/backend/noop/noop.go b/br/pkg/lightning/backend/noop/noop.go index 08f05759adfc8..a639d111b47eb 100644 --- a/br/pkg/lightning/backend/noop/noop.go +++ b/br/pkg/lightning/backend/noop/noop.go @@ -47,52 +47,52 @@ func (r noopRows) Clear() encode.Rows { } // Close the connection to the backend. -func (b noopBackend) Close() {} +func (noopBackend) Close() {} // MakeEmptyRows creates an empty collection of encoded rows. 
-func (b noopBackend) MakeEmptyRows() encode.Rows { +func (noopBackend) MakeEmptyRows() encode.Rows { return noopRows{} } // RetryImportDelay returns the duration to sleep when retrying an import -func (b noopBackend) RetryImportDelay() time.Duration { +func (noopBackend) RetryImportDelay() time.Duration { return 0 } // ShouldPostProcess returns whether KV-specific post-processing should be // performed for this backend. Post-processing includes checksum and analyze. -func (b noopBackend) ShouldPostProcess() bool { +func (noopBackend) ShouldPostProcess() bool { return false } // NewEncoder creates an encoder of a TiDB table. -func (b noopBackend) NewEncoder(ctx context.Context, config *encode.EncodingConfig) (encode.Encoder, error) { +func (noopBackend) NewEncoder(_ context.Context, _ *encode.EncodingConfig) (encode.Encoder, error) { return noopEncoder{}, nil } // OpenEngine creates a new engine file for the given table. -func (b noopBackend) OpenEngine(context.Context, *backend.EngineConfig, uuid.UUID) error { +func (noopBackend) OpenEngine(context.Context, *backend.EngineConfig, uuid.UUID) error { return nil } // CloseEngine closes the engine file, flushing any remaining data. -func (b noopBackend) CloseEngine(ctx context.Context, cfg *backend.EngineConfig, engineUUID uuid.UUID) error { +func (noopBackend) CloseEngine(_ context.Context, _ *backend.EngineConfig, _ uuid.UUID) error { return nil } // ImportEngine imports a closed engine file. -func (b noopBackend) ImportEngine(ctx context.Context, engineUUID uuid.UUID, regionSplitSize, regionSplitKeys int64) error { +func (noopBackend) ImportEngine(_ context.Context, _ uuid.UUID, _, _ int64) error { return nil } // CleanupEngine removes all data related to the engine. -func (b noopBackend) CleanupEngine(ctx context.Context, engineUUID uuid.UUID) error { +func (noopBackend) CleanupEngine(_ context.Context, _ uuid.UUID) error { return nil } // CheckRequirements performs the check whether the backend satisfies the // version requirements -func (b noopBackend) CheckRequirements(context.Context, *backend.CheckCtx) error { +func (noopBackend) CheckRequirements(context.Context, *backend.CheckCtx) error { return nil } @@ -108,7 +108,7 @@ func (b noopBackend) CheckRequirements(context.Context, *backend.CheckCtx) error // - State (must be model.StatePublic) // - Offset (must be 0, 1, 2, ...) // - PKIsHandle (true = do not generate _tidb_rowid) -func (b noopBackend) FetchRemoteTableModels(ctx context.Context, schemaName string) ([]*model.TableInfo, error) { +func (noopBackend) FetchRemoteTableModels(_ context.Context, _ string) ([]*model.TableInfo, error) { return nil, nil } @@ -118,81 +118,81 @@ func (b noopBackend) FetchRemoteTableModels(ctx context.Context, schemaName stri // // This method is only relevant for local backend, and is no-op for all // other backends. -func (b noopBackend) FlushEngine(ctx context.Context, engineUUID uuid.UUID) error { +func (noopBackend) FlushEngine(_ context.Context, _ uuid.UUID) error { return nil } // FlushAllEngines performs FlushEngine on all opened engines. This is a // very expensive operation and should only be used in some rare situation // (e.g. preparing to resolve a disk quota violation). -func (b noopBackend) FlushAllEngines(ctx context.Context) error { +func (noopBackend) FlushAllEngines(_ context.Context) error { return nil } // EngineFileSizes obtains the size occupied locally of all engines managed // by this backend. This method is used to compute disk quota. 
// It can return nil if the content are all stored remotely. -func (b noopBackend) EngineFileSizes() []backend.EngineFileSize { +func (noopBackend) EngineFileSizes() []backend.EngineFileSize { return nil } // ResetEngine clears all written KV pairs in this opened engine. -func (b noopBackend) ResetEngine(ctx context.Context, engineUUID uuid.UUID) error { +func (noopBackend) ResetEngine(_ context.Context, _ uuid.UUID) error { return nil } // LocalWriter obtains a thread-local EngineWriter for writing rows into the given engine. -func (b noopBackend) LocalWriter(context.Context, *backend.LocalWriterConfig, uuid.UUID) (backend.EngineWriter, error) { +func (noopBackend) LocalWriter(context.Context, *backend.LocalWriterConfig, uuid.UUID) (backend.EngineWriter, error) { return Writer{}, nil } // TotalMemoryConsume returns the total memory usage of the backend. -func (b noopBackend) TotalMemoryConsume() int64 { +func (noopBackend) TotalMemoryConsume() int64 { return 0 } type noopEncoder struct{} // Close the encoder. -func (e noopEncoder) Close() {} +func (noopEncoder) Close() {} // Encode encodes a row of SQL values into a backend-friendly format. -func (e noopEncoder) Encode([]types.Datum, int64, []int, int64) (encode.Row, error) { +func (noopEncoder) Encode([]types.Datum, int64, []int, int64) (encode.Row, error) { return noopRow{}, nil } type noopRow struct{} // Size returns the size of the encoded row. -func (r noopRow) Size() uint64 { +func (noopRow) Size() uint64 { return 0 } // ClassifyAndAppend classifies the row into the corresponding collection. -func (r noopRow) ClassifyAndAppend(*encode.Rows, *verification.KVChecksum, *encode.Rows, *verification.KVChecksum) { +func (noopRow) ClassifyAndAppend(*encode.Rows, *verification.KVChecksum, *encode.Rows, *verification.KVChecksum) { } // Writer define a local writer that do nothing. type Writer struct{} // AppendRows implements the EngineWriter interface. -func (w Writer) AppendRows(context.Context, string, []string, encode.Rows) error { +func (Writer) AppendRows(context.Context, string, []string, encode.Rows) error { return nil } // IsSynced implements the EngineWriter interface. -func (w Writer) IsSynced() bool { +func (Writer) IsSynced() bool { return true } // Close implements the EngineWriter interface. -func (w Writer) Close(context.Context) (backend.ChunkFlushStatus, error) { +func (Writer) Close(context.Context) (backend.ChunkFlushStatus, error) { return trueStatus{}, nil } type trueStatus struct{} // Flushed implements the ChunkFlushStatus interface. -func (s trueStatus) Flushed() bool { +func (trueStatus) Flushed() bool { return true } diff --git a/br/pkg/lightning/backend/tidb/tidb.go b/br/pkg/lightning/backend/tidb/tidb.go index ed70135461e9b..65fb37d1e7bd6 100644 --- a/br/pkg/lightning/backend/tidb/tidb.go +++ b/br/pkg/lightning/backend/tidb/tidb.go @@ -103,7 +103,7 @@ func NewEncodingBuilder() encode.EncodingBuilder { // NewEncoder creates a KV encoder. // It implements the `backend.EncodingBuilder` interface. -func (b *encodingBuilder) NewEncoder(ctx context.Context, config *encode.EncodingConfig) (encode.Encoder, error) { +func (*encodingBuilder) NewEncoder(ctx context.Context, config *encode.EncodingConfig) (encode.Encoder, error) { se := kv.NewSessionCtx(&config.SessionOptions, log.FromContext(ctx)) if config.SQLMode.HasStrictMode() { se.GetSessionVars().SkipUTF8Check = false @@ -121,7 +121,7 @@ func (b *encodingBuilder) NewEncoder(ctx context.Context, config *encode.Encodin // MakeEmptyRows creates an empty KV rows. 
// It implements the `backend.EncodingBuilder` interface. -func (b *encodingBuilder) MakeEmptyRows() encode.Rows { +func (*encodingBuilder) MakeEmptyRows() encode.Rows { return tidbRows(nil) } @@ -245,7 +245,7 @@ func (b *targetInfoGetter) FetchRemoteTableModels(ctx context.Context, schemaNam // CheckRequirements performs the check whether the backend satisfies the version requirements. // It implements the `backend.TargetInfoGetter` interface. -func (b *targetInfoGetter) CheckRequirements(ctx context.Context, _ *backend.CheckCtx) error { +func (*targetInfoGetter) CheckRequirements(ctx context.Context, _ *backend.CheckCtx) error { log.FromContext(ctx).Info("skipping check requirements for tidb backend") return nil } @@ -393,7 +393,7 @@ func (enc *tidbEncoder) appendSQL(sb *strings.Builder, datum *types.Datum, _ *ta // return errors.Trace(err) // } // datum = &d - // } + // } enc.appendSQLBytes(sb, datum.GetBytes()) case types.KindBytes: @@ -447,7 +447,7 @@ func getColumnByIndex(cols []*table.Column, index int) *table.Column { return cols[index] } -func (enc *tidbEncoder) Encode(row []types.Datum, rowID int64, columnPermutation []int, offset int64) (encode.Row, error) { +func (enc *tidbEncoder) Encode(row []types.Datum, _ int64, columnPermutation []int, offset int64) (encode.Row, error) { cols := enc.tbl.Cols() if len(enc.columnIdx) == 0 { @@ -534,39 +534,39 @@ func EncodeRowForRecord(ctx context.Context, encTable table.Table, sqlMode mysql return resRow.(tidbRow).insertStmt } -func (be *tidbBackend) Close() { +func (*tidbBackend) Close() { // *Not* going to close `be.db`. The db object is normally borrowed from a // TidbManager, so we let the manager to close it. } -func (be *tidbBackend) RetryImportDelay() time.Duration { +func (*tidbBackend) RetryImportDelay() time.Duration { return 0 } -func (be *tidbBackend) MaxChunkSize() int { +func (*tidbBackend) MaxChunkSize() int { failpoint.Inject("FailIfImportedSomeRows", func() { failpoint.Return(1) }) return 1048576 } -func (be *tidbBackend) ShouldPostProcess() bool { +func (*tidbBackend) ShouldPostProcess() bool { return true } -func (be *tidbBackend) OpenEngine(context.Context, *backend.EngineConfig, uuid.UUID) error { +func (*tidbBackend) OpenEngine(context.Context, *backend.EngineConfig, uuid.UUID) error { return nil } -func (be *tidbBackend) CloseEngine(context.Context, *backend.EngineConfig, uuid.UUID) error { +func (*tidbBackend) CloseEngine(context.Context, *backend.EngineConfig, uuid.UUID) error { return nil } -func (be *tidbBackend) CleanupEngine(context.Context, uuid.UUID) error { +func (*tidbBackend) CleanupEngine(context.Context, uuid.UUID) error { return nil } -func (be *tidbBackend) ImportEngine(context.Context, uuid.UUID, int64, int64) error { +func (*tidbBackend) ImportEngine(context.Context, uuid.UUID, int64, int64) error { return nil } @@ -600,7 +600,7 @@ rowLoop: return nil } -func (be *tidbBackend) TotalMemoryConsume() int64 { +func (*tidbBackend) TotalMemoryConsume() int64 { return 0 } @@ -725,29 +725,29 @@ func (be *tidbBackend) execStmts(ctx context.Context, stmtTasks []stmtTask, tabl } // EngineFileSizes returns the size of each engine file. -func (be *tidbBackend) EngineFileSizes() []backend.EngineFileSize { +func (*tidbBackend) EngineFileSizes() []backend.EngineFileSize { return nil } // FlushEngine flushes the data in the engine to the underlying storage. 
-func (be *tidbBackend) FlushEngine(context.Context, uuid.UUID) error { +func (*tidbBackend) FlushEngine(context.Context, uuid.UUID) error { return nil } // FlushAllEngines flushes all the data in the engines to the underlying storage. -func (be *tidbBackend) FlushAllEngines(context.Context) error { +func (*tidbBackend) FlushAllEngines(context.Context) error { return nil } // ResetEngine resets the engine. -func (be *tidbBackend) ResetEngine(context.Context, uuid.UUID) error { +func (*tidbBackend) ResetEngine(context.Context, uuid.UUID) error { return errors.New("cannot reset an engine in TiDB backend") } // LocalWriter returns a writer that writes data to local storage. func (be *tidbBackend) LocalWriter( - ctx context.Context, - cfg *backend.LocalWriterConfig, + _ context.Context, + _ *backend.LocalWriterConfig, _ uuid.UUID, ) (backend.EngineWriter, error) { return &Writer{be: be}, nil @@ -759,7 +759,7 @@ type Writer struct { } // Close implements the EngineWriter interface. -func (w *Writer) Close(ctx context.Context) (backend.ChunkFlushStatus, error) { +func (*Writer) Close(_ context.Context) (backend.ChunkFlushStatus, error) { return nil, nil } @@ -769,7 +769,7 @@ func (w *Writer) AppendRows(ctx context.Context, tableName string, columnNames [ } // IsSynced implements the EngineWriter interface. -func (w *Writer) IsSynced() bool { +func (*Writer) IsSynced() bool { return true } diff --git a/br/pkg/lightning/checkpoints/checkpoints.go b/br/pkg/lightning/checkpoints/checkpoints.go index 0f380bc46aeea..07e1c319c6813 100644 --- a/br/pkg/lightning/checkpoints/checkpoints.go +++ b/br/pkg/lightning/checkpoints/checkpoints.go @@ -686,7 +686,7 @@ func (*NullCheckpointsDB) Initialize(context.Context, *config.Config, map[string } // TaskCheckpoint implements the DB interface. -func (*NullCheckpointsDB) TaskCheckpoint(ctx context.Context) (*TaskCheckpoint, error) { +func (*NullCheckpointsDB) TaskCheckpoint(context.Context) (*TaskCheckpoint, error) { return nil, nil } @@ -1183,11 +1183,10 @@ func createExstorageByCompletePath(ctx context.Context, completePath string) (st } // separateCompletePath separates fileName from completePath, returns fileName and newPath. -func separateCompletePath(completePath string) (string, string, error) { +func separateCompletePath(completePath string) (fileName string, newPath string, err error) { if completePath == "" { return "", "", nil } - var fileName, newPath string purl, err := storage.ParseRawURL(completePath) if err != nil { return "", "", errors.Trace(err) @@ -1220,7 +1219,7 @@ func (cpdb *FileCheckpointsDB) save() error { } // Initialize implements CheckpointsDB.Initialize. -func (cpdb *FileCheckpointsDB) Initialize(ctx context.Context, cfg *config.Config, dbInfo map[string]*TidbDBInfo) error { +func (cpdb *FileCheckpointsDB) Initialize(_ context.Context, cfg *config.Config, dbInfo map[string]*TidbDBInfo) error { cpdb.lock.Lock() defer cpdb.lock.Unlock() @@ -1811,7 +1810,7 @@ func (cpdb *FileCheckpointsDB) RemoveCheckpoint(_ context.Context, tableName str } // MoveCheckpoints implements CheckpointsDB.MoveCheckpoints. 
-func (cpdb *FileCheckpointsDB) MoveCheckpoints(ctx context.Context, taskID int64) error { +func (cpdb *FileCheckpointsDB) MoveCheckpoints(_ context.Context, taskID int64) error { cpdb.lock.Lock() defer cpdb.lock.Unlock() diff --git a/br/pkg/lightning/checkpoints/glue_checkpoint.go b/br/pkg/lightning/checkpoints/glue_checkpoint.go index 6d3dda2381ee5..fa256ab34ab27 100644 --- a/br/pkg/lightning/checkpoints/glue_checkpoint.go +++ b/br/pkg/lightning/checkpoints/glue_checkpoint.go @@ -358,7 +358,7 @@ func (g GlueCheckpointsDB) Get(ctx context.Context, tableName string) (*TableChe } // Close implements CheckpointsDB.Close. -func (g GlueCheckpointsDB) Close() error { +func (GlueCheckpointsDB) Close() error { return nil } @@ -769,17 +769,17 @@ func (g GlueCheckpointsDB) DestroyErrorCheckpoint(ctx context.Context, tableName } // DumpTables implements CheckpointsDB.DumpTables. -func (g GlueCheckpointsDB) DumpTables(ctx context.Context, csv io.Writer) error { +func (GlueCheckpointsDB) DumpTables(_ context.Context, _ io.Writer) error { return errors.Errorf("dumping glue checkpoint into CSV not unsupported") } // DumpEngines implements CheckpointsDB.DumpEngines. -func (g GlueCheckpointsDB) DumpEngines(ctx context.Context, csv io.Writer) error { +func (GlueCheckpointsDB) DumpEngines(_ context.Context, _ io.Writer) error { return errors.Errorf("dumping glue checkpoint into CSV not unsupported") } // DumpChunks implements CheckpointsDB.DumpChunks. -func (g GlueCheckpointsDB) DumpChunks(ctx context.Context, csv io.Writer) error { +func (GlueCheckpointsDB) DumpChunks(_ context.Context, _ io.Writer) error { return errors.Errorf("dumping glue checkpoint into CSV not unsupported") } diff --git a/br/pkg/lightning/common/retry.go b/br/pkg/lightning/common/retry.go index 074edaebf9ce3..c9962e1b34f7c 100644 --- a/br/pkg/lightning/common/retry.go +++ b/br/pkg/lightning/common/retry.go @@ -102,8 +102,7 @@ func isSingleRetryableError(err error) bool { if nerr.Timeout() { return true } - switch cause := nerr.(type) { - case *net.OpError: + if cause, ok := nerr.(*net.OpError); ok { syscallErr, ok := cause.Unwrap().(*os.SyscallError) if ok { return syscallErr.Err == syscall.ECONNREFUSED || syscallErr.Err == syscall.ECONNRESET diff --git a/br/pkg/lightning/common/util.go b/br/pkg/lightning/common/util.go index 8d8200fc962da..cb833e103af68 100644 --- a/br/pkg/lightning/common/util.go +++ b/br/pkg/lightning/common/util.go @@ -165,7 +165,7 @@ type SQLWithRetry struct { HideQueryLog bool } -func (t SQLWithRetry) perform(_ context.Context, parentLogger log.Logger, purpose string, action func() error) error { +func (SQLWithRetry) perform(_ context.Context, parentLogger log.Logger, purpose string, action func() error) error { return Retry(purpose, parentLogger, action) } diff --git a/br/pkg/lightning/glue/glue.go b/br/pkg/lightning/glue/glue.go index 1212e4092420e..fa29f38e5cc23 100644 --- a/br/pkg/lightning/glue/glue.go +++ b/br/pkg/lightning/glue/glue.go @@ -72,25 +72,25 @@ func (session *sqlConnSession) Execute(ctx context.Context, sql string) ([]sqlex } // CommitTxn implements checkpoints.Session.CommitTxn -func (session *sqlConnSession) CommitTxn(context.Context) error { +func (*sqlConnSession) CommitTxn(context.Context) error { return errors.New("sqlConnSession doesn't have a valid CommitTxn implementation") } // RollbackTxn implements checkpoints.Session.RollbackTxn -func (session *sqlConnSession) RollbackTxn(context.Context) {} +func (*sqlConnSession) RollbackTxn(context.Context) {} // PrepareStmt implements 
checkpoints.Session.PrepareStmt -func (session *sqlConnSession) PrepareStmt(sql string) (stmtID uint32, paramCount int, fields []*ast.ResultField, err error) { +func (*sqlConnSession) PrepareStmt(_ string) (stmtID uint32, paramCount int, fields []*ast.ResultField, err error) { return 0, 0, nil, errors.New("sqlConnSession doesn't have a valid PrepareStmt implementation") } // ExecutePreparedStmt implements checkpoints.Session.ExecutePreparedStmt -func (session *sqlConnSession) ExecutePreparedStmt(ctx context.Context, stmtID uint32, param []types.Datum) (sqlexec.RecordSet, error) { +func (*sqlConnSession) ExecutePreparedStmt(_ context.Context, _ uint32, _ []types.Datum) (sqlexec.RecordSet, error) { return nil, errors.New("sqlConnSession doesn't have a valid ExecutePreparedStmt implementation") } // DropPreparedStmt implements checkpoints.Session.DropPreparedStmt -func (session *sqlConnSession) DropPreparedStmt(stmtID uint32) error { +func (*sqlConnSession) DropPreparedStmt(_ uint32) error { return errors.New("sqlConnSession doesn't have a valid DropPreparedStmt implementation") } @@ -176,7 +176,7 @@ func (e *ExternalTiDBGlue) GetParser() *parser.Parser { } // GetTables implements Glue.GetTables. -func (e ExternalTiDBGlue) GetTables(context.Context, string) ([]*model.TableInfo, error) { +func (ExternalTiDBGlue) GetTables(context.Context, string) ([]*model.TableInfo, error) { return nil, errors.New("ExternalTiDBGlue doesn't have a valid GetTables function") } @@ -190,12 +190,12 @@ func (e *ExternalTiDBGlue) GetSession(ctx context.Context) (checkpoints.Session, } // OpenCheckpointsDB implements Glue.OpenCheckpointsDB. -func (e *ExternalTiDBGlue) OpenCheckpointsDB(ctx context.Context, cfg *config.Config) (checkpoints.DB, error) { +func (*ExternalTiDBGlue) OpenCheckpointsDB(ctx context.Context, cfg *config.Config) (checkpoints.DB, error) { return checkpoints.OpenCheckpointsDB(ctx, cfg) } // OwnsSQLExecutor implements Glue.OwnsSQLExecutor. -func (e *ExternalTiDBGlue) OwnsSQLExecutor() bool { +func (*ExternalTiDBGlue) OwnsSQLExecutor() bool { return true } @@ -205,7 +205,7 @@ func (e *ExternalTiDBGlue) Close() { } // Record implements Glue.Record. 
-func (e *ExternalTiDBGlue) Record(string, uint64) { +func (*ExternalTiDBGlue) Record(string, uint64) { } // record key names diff --git a/br/pkg/lightning/importer/chunk_process.go b/br/pkg/lightning/importer/chunk_process.go index 63c7a81d5ac10..9967f8b628ef1 100644 --- a/br/pkg/lightning/importer/chunk_process.go +++ b/br/pkg/lightning/importer/chunk_process.go @@ -574,7 +574,7 @@ func (cr *chunkProcessor) deliverLoop( return } -func (cr *chunkProcessor) maybeSaveCheckpoint( +func (*chunkProcessor) maybeSaveCheckpoint( rc *Controller, t *TableImporter, engineID int32, diff --git a/br/pkg/lightning/importer/get_pre_info.go b/br/pkg/lightning/importer/get_pre_info.go index 80cb6f58c3058..d8d1a9f743b8a 100644 --- a/br/pkg/lightning/importer/get_pre_info.go +++ b/br/pkg/lightning/importer/get_pre_info.go @@ -568,7 +568,7 @@ func (p *PreImportInfoGetterImpl) EstimateSourceDataSize(ctx context.Context, op if tableInfo.Core.TiFlashReplica != nil && tableInfo.Core.TiFlashReplica.Available { tiflashSize += tableSize * int64(tableInfo.Core.TiFlashReplica.Count) } - tableCount += 1 + tableCount++ } } } @@ -665,8 +665,8 @@ func (p *PreImportInfoGetterImpl) sampleDataFromTable( initializedColumns := false var ( columnPermutation []int - kvSize uint64 = 0 - rowSize uint64 = 0 + kvSize uint64 + rowSize uint64 extendVals []types.Datum ) rowCount := 0 diff --git a/br/pkg/lightning/importer/meta_manager.go b/br/pkg/lightning/importer/meta_manager.go index cc575c1654a70..fe128638a1318 100644 --- a/br/pkg/lightning/importer/meta_manager.go +++ b/br/pkg/lightning/importer/meta_manager.go @@ -835,7 +835,7 @@ func (m *dbTaskMetaMgr) CanPauseSchedulerByKeyRange() bool { // CheckAndFinishRestore check task meta and return whether to switch cluster to normal state and clean up the metadata // Return values: first boolean indicates whether switch back tidb cluster to normal state (restore schedulers, switch tikv to normal) // the second boolean indicates whether to clean up the metadata in tidb -func (m *dbTaskMetaMgr) CheckAndFinishRestore(ctx context.Context, finished bool) (bool, bool, error) { +func (m *dbTaskMetaMgr) CheckAndFinishRestore(ctx context.Context, finished bool) (switchBack bool, allFinished bool, err error) { conn, err := m.session.Conn(ctx) if err != nil { return false, false, errors.Trace(err) @@ -851,8 +851,8 @@ func (m *dbTaskMetaMgr) CheckAndFinishRestore(ctx context.Context, finished bool return false, false, errors.Annotate(err, "enable pessimistic transaction failed") } - switchBack := true - allFinished := finished + switchBack = true + allFinished = finished err = exec.Transact(ctx, "check and finish schedulers", func(ctx context.Context, tx *sql.Tx) error { rows, err := tx.QueryContext(ctx, fmt.Sprintf("SELECT task_id, status, state from %s FOR UPDATE", m.tableName)) if err != nil { @@ -996,84 +996,86 @@ func MaybeCleanupAllMetas( type noopMetaMgrBuilder struct{} -func (b noopMetaMgrBuilder) Init(ctx context.Context) error { +func (noopMetaMgrBuilder) Init(_ context.Context) error { return nil } -func (b noopMetaMgrBuilder) TaskMetaMgr(pd *pdutil.PdController) taskMetaMgr { +func (noopMetaMgrBuilder) TaskMetaMgr(_ *pdutil.PdController) taskMetaMgr { return noopTaskMetaMgr{} } -func (b noopMetaMgrBuilder) TableMetaMgr(tr *TableImporter) tableMetaMgr { +func (noopMetaMgrBuilder) TableMetaMgr(_ *TableImporter) tableMetaMgr { return noopTableMetaMgr{} } type noopTaskMetaMgr struct{} -func (m noopTaskMetaMgr) InitTask(ctx context.Context, tikvSourceSize, tiflashSourceSize int64) error { 
+func (noopTaskMetaMgr) InitTask(_ context.Context, _, _ int64) error { return nil } -func (m noopTaskMetaMgr) CheckTasksExclusively(ctx context.Context, action func(tasks []taskMeta) ([]taskMeta, error)) error { +func (noopTaskMetaMgr) CheckTasksExclusively(_ context.Context, _ func(tasks []taskMeta) ([]taskMeta, error)) error { return nil } -func (m noopTaskMetaMgr) CheckAndPausePdSchedulers(ctx context.Context) (pdutil.UndoFunc, error) { +func (noopTaskMetaMgr) CheckAndPausePdSchedulers(_ context.Context) (pdutil.UndoFunc, error) { return func(ctx context.Context) error { return nil }, nil } -func (m noopTaskMetaMgr) CanPauseSchedulerByKeyRange() bool { +func (noopTaskMetaMgr) CanPauseSchedulerByKeyRange() bool { return false } -func (m noopTaskMetaMgr) CheckTaskExist(ctx context.Context) (bool, error) { +func (noopTaskMetaMgr) CheckTaskExist(_ context.Context) (bool, error) { return true, nil } -func (m noopTaskMetaMgr) CheckAndFinishRestore(context.Context, bool) (bool, bool, error) { +func (noopTaskMetaMgr) CheckAndFinishRestore(context.Context, bool) ( + needSwitchBack bool, needCleanup bool, err error) { return false, true, nil } -func (m noopTaskMetaMgr) Cleanup(ctx context.Context) error { +func (noopTaskMetaMgr) Cleanup(_ context.Context) error { return nil } -func (m noopTaskMetaMgr) CleanupTask(ctx context.Context) error { +func (noopTaskMetaMgr) CleanupTask(_ context.Context) error { return nil } -func (m noopTaskMetaMgr) CleanupAllMetas(ctx context.Context) error { +func (noopTaskMetaMgr) CleanupAllMetas(_ context.Context) error { return nil } -func (m noopTaskMetaMgr) Close() { +func (noopTaskMetaMgr) Close() { } type noopTableMetaMgr struct{} -func (m noopTableMetaMgr) InitTableMeta(ctx context.Context) error { +func (noopTableMetaMgr) InitTableMeta(_ context.Context) error { return nil } -func (m noopTableMetaMgr) AllocTableRowIDs(ctx context.Context, rawRowIDMax int64) (*verify.KVChecksum, int64, error) { +func (noopTableMetaMgr) AllocTableRowIDs(_ context.Context, _ int64) (*verify.KVChecksum, int64, error) { return nil, 0, nil } -func (m noopTableMetaMgr) UpdateTableStatus(ctx context.Context, status metaStatus) error { +func (noopTableMetaMgr) UpdateTableStatus(_ context.Context, _ metaStatus) error { return nil } -func (m noopTableMetaMgr) UpdateTableBaseChecksum(ctx context.Context, checksum *verify.KVChecksum) error { +func (noopTableMetaMgr) UpdateTableBaseChecksum(_ context.Context, _ *verify.KVChecksum) error { return nil } -func (m noopTableMetaMgr) CheckAndUpdateLocalChecksum(ctx context.Context, checksum *verify.KVChecksum, hasLocalDupes bool) (bool, bool, *verify.KVChecksum, error) { +func (noopTableMetaMgr) CheckAndUpdateLocalChecksum(_ context.Context, _ *verify.KVChecksum, _ bool) ( + otherHasDupe bool, needRemoteDupe bool, baseTotalChecksum *verify.KVChecksum, err error) { return false, true, &verify.KVChecksum{}, nil } -func (m noopTableMetaMgr) FinishTable(ctx context.Context) error { +func (noopTableMetaMgr) FinishTable(_ context.Context) error { return nil } @@ -1081,7 +1083,7 @@ type singleMgrBuilder struct { taskID int64 } -func (b singleMgrBuilder) Init(context.Context) error { +func (singleMgrBuilder) Init(context.Context) error { return nil } @@ -1092,7 +1094,7 @@ func (b singleMgrBuilder) TaskMetaMgr(pd *pdutil.PdController) taskMetaMgr { } } -func (b singleMgrBuilder) TableMetaMgr(tr *TableImporter) tableMetaMgr { +func (singleMgrBuilder) TableMetaMgr(_ *TableImporter) tableMetaMgr { return noopTableMetaMgr{} } @@ -1106,14 +1108,14 @@ type 
singleTaskMetaMgr struct { tiflashAvail uint64 } -func (m *singleTaskMetaMgr) InitTask(ctx context.Context, tikvSourceSize, tiflashSourceSize int64) error { +func (m *singleTaskMetaMgr) InitTask(_ context.Context, tikvSourceSize, tiflashSourceSize int64) error { m.tikvSourceBytes = uint64(tikvSourceSize) m.tiflashSourceBytes = uint64(tiflashSourceSize) m.initialized = true return nil } -func (m *singleTaskMetaMgr) CheckTasksExclusively(ctx context.Context, action func(tasks []taskMeta) ([]taskMeta, error)) error { +func (m *singleTaskMetaMgr) CheckTasksExclusively(_ context.Context, action func(tasks []taskMeta) ([]taskMeta, error)) error { newTasks, err := action([]taskMeta{ { taskID: m.taskID, @@ -1143,25 +1145,25 @@ func (m *singleTaskMetaMgr) CanPauseSchedulerByKeyRange() bool { return m.pd.CanPauseSchedulerByKeyRange() } -func (m *singleTaskMetaMgr) CheckTaskExist(ctx context.Context) (bool, error) { +func (m *singleTaskMetaMgr) CheckTaskExist(_ context.Context) (bool, error) { return m.initialized, nil } -func (m *singleTaskMetaMgr) CheckAndFinishRestore(context.Context, bool) (shouldSwitchBack bool, shouldCleanupMeta bool, err error) { +func (*singleTaskMetaMgr) CheckAndFinishRestore(context.Context, bool) (shouldSwitchBack bool, shouldCleanupMeta bool, err error) { return true, true, nil } -func (m *singleTaskMetaMgr) Cleanup(ctx context.Context) error { +func (*singleTaskMetaMgr) Cleanup(_ context.Context) error { return nil } -func (m *singleTaskMetaMgr) CleanupTask(ctx context.Context) error { +func (*singleTaskMetaMgr) CleanupTask(_ context.Context) error { return nil } -func (m *singleTaskMetaMgr) CleanupAllMetas(ctx context.Context) error { +func (*singleTaskMetaMgr) CleanupAllMetas(_ context.Context) error { return nil } -func (m *singleTaskMetaMgr) Close() { +func (*singleTaskMetaMgr) Close() { } diff --git a/br/pkg/lightning/importer/mock/mock.go b/br/pkg/lightning/importer/mock/mock.go index 9d521e9e71853..6b0809729e1ef 100644 --- a/br/pkg/lightning/importer/mock/mock.go +++ b/br/pkg/lightning/importer/mock/mock.go @@ -214,7 +214,7 @@ func (t *TargetInfo) SetTableInfo(schemaName string, tableName string, tblInfo * // FetchRemoteTableModels fetches the table structures from the remote target. // It implements the TargetInfoGetter interface. -func (t *TargetInfo) FetchRemoteTableModels(ctx context.Context, schemaName string) ([]*model.TableInfo, error) { +func (t *TargetInfo) FetchRemoteTableModels(_ context.Context, schemaName string) ([]*model.TableInfo, error) { resultInfos := []*model.TableInfo{} tblMap, ok := t.dbTblInfoMap[schemaName] if !ok { @@ -229,7 +229,7 @@ func (t *TargetInfo) FetchRemoteTableModels(ctx context.Context, schemaName stri // GetTargetSysVariablesForImport gets some important systam variables for importing on the target. // It implements the TargetInfoGetter interface. -func (t *TargetInfo) GetTargetSysVariablesForImport(ctx context.Context, _ ...ropts.GetPreInfoOption) map[string]string { +func (t *TargetInfo) GetTargetSysVariablesForImport(_ context.Context, _ ...ropts.GetPreInfoOption) map[string]string { result := make(map[string]string) for k, v := range t.sysVarMap { result[k] = v @@ -239,7 +239,7 @@ func (t *TargetInfo) GetTargetSysVariablesForImport(ctx context.Context, _ ...ro // GetReplicationConfig gets the replication config on the target. // It implements the TargetInfoGetter interface. 
-func (t *TargetInfo) GetReplicationConfig(ctx context.Context) (*pdtypes.ReplicationConfig, error) { +func (t *TargetInfo) GetReplicationConfig(_ context.Context) (*pdtypes.ReplicationConfig, error) { replCount := t.MaxReplicasPerRegion if replCount <= 0 { replCount = 1 @@ -251,7 +251,7 @@ func (t *TargetInfo) GetReplicationConfig(ctx context.Context) (*pdtypes.Replica // GetStorageInfo gets the storage information on the target. // It implements the TargetInfoGetter interface. -func (t *TargetInfo) GetStorageInfo(ctx context.Context) (*pdtypes.StoresInfo, error) { +func (t *TargetInfo) GetStorageInfo(_ context.Context) (*pdtypes.StoresInfo, error) { resultStoreInfos := make([]*pdtypes.StoreInfo, len(t.StorageInfos)) for i, storeInfo := range t.StorageInfos { resultStoreInfos[i] = &pdtypes.StoreInfo{ @@ -277,7 +277,7 @@ func (t *TargetInfo) GetStorageInfo(ctx context.Context) (*pdtypes.StoresInfo, e // GetEmptyRegionsInfo gets the region information of all the empty regions on the target. // It implements the TargetInfoGetter interface. -func (t *TargetInfo) GetEmptyRegionsInfo(ctx context.Context) (*pdtypes.RegionsInfo, error) { +func (t *TargetInfo) GetEmptyRegionsInfo(_ context.Context) (*pdtypes.RegionsInfo, error) { totalEmptyRegions := []pdtypes.RegionInfo{} totalEmptyRegionCount := 0 for storeID, storeEmptyRegionCount := range t.EmptyRegionCountMap { @@ -304,7 +304,7 @@ func (t *TargetInfo) GetEmptyRegionsInfo(ctx context.Context) (*pdtypes.RegionsI // IsTableEmpty checks whether the specified table on the target DB contains data or not. // It implements the TargetInfoGetter interface. -func (t *TargetInfo) IsTableEmpty(ctx context.Context, schemaName string, tableName string) (*bool, error) { +func (t *TargetInfo) IsTableEmpty(_ context.Context, schemaName string, tableName string) (*bool, error) { var result bool tblInfoMap, ok := t.dbTblInfoMap[schemaName] if !ok { @@ -316,12 +316,12 @@ func (t *TargetInfo) IsTableEmpty(ctx context.Context, schemaName string, tableN result = true return &result, nil } - result = (tblInfo.RowCount == 0) + result = tblInfo.RowCount == 0 return &result, nil } // CheckVersionRequirements performs the check whether the target satisfies the version requirements. // It implements the TargetInfoGetter interface. -func (t *TargetInfo) CheckVersionRequirements(ctx context.Context) error { +func (*TargetInfo) CheckVersionRequirements(_ context.Context) error { return nil } diff --git a/br/pkg/lightning/importer/precheck_impl.go b/br/pkg/lightning/importer/precheck_impl.go index 43f4b71fd619f..93a9ece0b84a5 100644 --- a/br/pkg/lightning/importer/precheck_impl.go +++ b/br/pkg/lightning/importer/precheck_impl.go @@ -15,7 +15,6 @@ package importer import ( "context" - "encoding/json" "fmt" "path/filepath" "reflect" @@ -64,7 +63,7 @@ func NewClusterResourceCheckItem(preInfoGetter PreImportInfoGetter) PrecheckItem } // GetCheckItemID implements PrecheckItem.GetCheckItemID. -func (ci *clusterResourceCheckItem) GetCheckItemID() CheckItemID { +func (*clusterResourceCheckItem) GetCheckItemID() CheckItemID { return CheckTargetClusterSize } @@ -199,7 +198,7 @@ func NewClusterVersionCheckItem(preInfoGetter PreImportInfoGetter, dbMetas []*my } // GetCheckItemID implements PrecheckItem.GetCheckItemID. 
-func (ci *clusterVersionCheckItem) GetCheckItemID() CheckItemID { +func (*clusterVersionCheckItem) GetCheckItemID() CheckItemID { return CheckTargetClusterVersion } @@ -234,7 +233,7 @@ func NewEmptyRegionCheckItem(preInfoGetter PreImportInfoGetter, dbMetas []*mydum } // GetCheckItemID implements PrecheckItem.GetCheckItemID. -func (ci *emptyRegionCheckItem) GetCheckItemID() CheckItemID { +func (*emptyRegionCheckItem) GetCheckItemID() CheckItemID { return CheckTargetClusterEmptyRegion } @@ -331,7 +330,7 @@ func NewRegionDistributionCheckItem(preInfoGetter PreImportInfoGetter, dbMetas [ } // GetCheckItemID implements PrecheckItem.GetCheckItemID. -func (ci *regionDistributionCheckItem) GetCheckItemID() CheckItemID { +func (*regionDistributionCheckItem) GetCheckItemID() CheckItemID { return CheckTargetClusterRegionDist } @@ -409,7 +408,7 @@ func NewStoragePermissionCheckItem(cfg *config.Config) PrecheckItem { } // GetCheckItemID implements PrecheckItem.GetCheckItemID. -func (ci *storagePermissionCheckItem) GetCheckItemID() CheckItemID { +func (*storagePermissionCheckItem) GetCheckItemID() CheckItemID { return CheckSourcePermission } @@ -453,12 +452,12 @@ func NewLargeFileCheckItem(cfg *config.Config, dbMetas []*mydump.MDDatabaseMeta) } // GetCheckItemID implements PrecheckItem.GetCheckItemID. -func (ci *largeFileCheckItem) GetCheckItemID() CheckItemID { +func (*largeFileCheckItem) GetCheckItemID() CheckItemID { return CheckLargeDataFile } // Check implements PrecheckItem.Check. -func (ci *largeFileCheckItem) Check(ctx context.Context) (*CheckResult, error) { +func (ci *largeFileCheckItem) Check(_ context.Context) (*CheckResult, error) { theResult := &CheckResult{ Item: ci.GetCheckItemID(), Severity: Warn, @@ -495,12 +494,12 @@ func NewLocalDiskPlacementCheckItem(cfg *config.Config) PrecheckItem { } // GetCheckItemID implements PrecheckItem.GetCheckItemID. -func (ci *localDiskPlacementCheckItem) GetCheckItemID() CheckItemID { +func (*localDiskPlacementCheckItem) GetCheckItemID() CheckItemID { return CheckLocalDiskPlacement } // Check implements PrecheckItem.Check. -func (ci *localDiskPlacementCheckItem) Check(ctx context.Context) (*CheckResult, error) { +func (ci *localDiskPlacementCheckItem) Check(_ context.Context) (*CheckResult, error) { theResult := &CheckResult{ Item: ci.GetCheckItemID(), Severity: Warn, @@ -536,7 +535,7 @@ func NewLocalTempKVDirCheckItem(cfg *config.Config, preInfoGetter PreImportInfoG } // GetCheckItemID implements PrecheckItem.GetCheckItemID. -func (ci *localTempKVDirCheckItem) GetCheckItemID() CheckItemID { +func (*localTempKVDirCheckItem) GetCheckItemID() CheckItemID { return CheckLocalTempKVDir } @@ -618,7 +617,7 @@ func NewCheckpointCheckItem(cfg *config.Config, preInfoGetter PreImportInfoGette } // GetCheckItemID implements PrecheckItem.GetCheckItemID. -func (ci *checkpointCheckItem) GetCheckItemID() CheckItemID { +func (*checkpointCheckItem) GetCheckItemID() CheckItemID { return CheckCheckpoints } @@ -761,7 +760,7 @@ func NewCDCPITRCheckItem(cfg *config.Config) PrecheckItem { } // GetCheckItemID implements PrecheckItem interface. 
-func (ci *CDCPITRCheckItem) GetCheckItemID() CheckItemID { +func (*CDCPITRCheckItem) GetCheckItemID() CheckItemID { return CheckTargetUsingCDCPITR } @@ -849,24 +848,6 @@ type onlyState struct { State string `json:"state"` } -func isActiveCDCChangefeed(jsonBytes []byte) bool { - s := onlyState{} - err := json.Unmarshal(jsonBytes, &s) - if err != nil { - // maybe a compatible issue, skip this key - log.L().Error("unmarshal etcd value failed when check CDC changefeed, will skip this key", - zap.ByteString("value", jsonBytes), - zap.Error(err)) - return false - } - switch s.State { - case "normal", "stopped", "error": - return true - default: - return false - } -} - type schemaCheckItem struct { cfg *config.Config preInfoGetter PreImportInfoGetter @@ -885,7 +866,7 @@ func NewSchemaCheckItem(cfg *config.Config, preInfoGetter PreImportInfoGetter, d } // GetCheckItemID implements PrecheckItem interface. -func (ci *schemaCheckItem) GetCheckItemID() CheckItemID { +func (*schemaCheckItem) GetCheckItemID() CheckItemID { return CheckSourceSchemaValid } @@ -1123,7 +1104,7 @@ func NewCSVHeaderCheckItem(cfg *config.Config, preInfoGetter PreImportInfoGetter } // GetCheckItemID implements PrecheckItem interface. -func (ci *csvHeaderCheckItem) GetCheckItemID() CheckItemID { +func (*csvHeaderCheckItem) GetCheckItemID() CheckItemID { return CheckCSVHeader } @@ -1341,7 +1322,7 @@ func NewTableEmptyCheckItem(cfg *config.Config, preInfoGetter PreImportInfoGette } // GetCheckItemID implements PrecheckItem interface -func (ci *tableEmptyCheckItem) GetCheckItemID() CheckItemID { +func (*tableEmptyCheckItem) GetCheckItemID() CheckItemID { return CheckTargetTableEmpty } diff --git a/br/pkg/lightning/importer/table_import.go b/br/pkg/lightning/importer/table_import.go index 793816d709971..0fc9f15283ebd 100644 --- a/br/pkg/lightning/importer/table_import.go +++ b/br/pkg/lightning/importer/table_import.go @@ -269,7 +269,7 @@ func (tr *TableImporter) populateChunks(ctx context.Context, rc *Controller, cp } // RebaseChunkRowIDs rebase the row id of the chunks. -func (tr *TableImporter) RebaseChunkRowIDs(cp *checkpoints.TableCheckpoint, rowIDBase int64) { +func (*TableImporter) RebaseChunkRowIDs(cp *checkpoints.TableCheckpoint, rowIDBase int64) { if rowIDBase == 0 { return } @@ -1297,7 +1297,7 @@ func (tr *TableImporter) addIndexes(ctx context.Context, db *sql.DB) (retErr err return nil } -func (tr *TableImporter) executeDDL( +func (*TableImporter) executeDDL( ctx context.Context, db *sql.DB, ddl string, diff --git a/br/pkg/lightning/importer/tidb.go b/br/pkg/lightning/importer/tidb.go index 2679f3cee05b5..3249e25be2e97 100644 --- a/br/pkg/lightning/importer/tidb.go +++ b/br/pkg/lightning/importer/tidb.go @@ -103,7 +103,7 @@ func DBFromConfig(ctx context.Context, dsn config.DBStore) (*sql.DB, error) { } // NewTiDBManager creates a new TiDB manager. 
-func NewTiDBManager(ctx context.Context, dsn config.DBStore, tls *common.TLS) (*TiDBManager, error) { +func NewTiDBManager(ctx context.Context, dsn config.DBStore, _ *common.TLS) (*TiDBManager, error) { db, err := DBFromConfig(ctx, dsn) if err != nil { return nil, errors.Trace(err) diff --git a/build/nogo_config.json b/build/nogo_config.json index 13a7712f14234..16f7a293e3f02 100644 --- a/build/nogo_config.json +++ b/build/nogo_config.json @@ -358,8 +358,7 @@ ".*_generated\\.go$": "ignore generated code" }, "only_files": { - "br/pkg/lightning/mydump/": "br/pkg/lightning/mydump/", - "br/pkg/lightning/importer/opts": "br/pkg/lightning/importer/opts", + "br/pkg/lightning/": "br/pkg/lightning/", "executor/aggregate.go": "executor/aggregate.go", "types/json_binary_functions.go": "types/json_binary_functions.go", "types/json_binary_test.go": "types/json_binary_test.go", From 3625fc95bd0dfc99ade0912a9d17cf38b5d3ff97 Mon Sep 17 00:00:00 2001 From: Weizhen Wang Date: Thu, 6 Apr 2023 13:00:58 +0800 Subject: [PATCH 09/12] *: upgrade go 1.20.3 (#42820) ref pingcap/tidb#42824 --- WORKSPACE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/WORKSPACE b/WORKSPACE index ba96f0ae22630..c9eb43995fae5 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -47,7 +47,7 @@ go_download_sdk( "https://mirrors.aliyun.com/golang/{}", "https://dl.google.com/go/{}", ], - version = "1.20.2", + version = "1.20.3", ) go_register_toolchains( From e942ab61488f2b84b40326bbc1422a42920e3f8d Mon Sep 17 00:00:00 2001 From: Weizhen Wang Date: Thu, 6 Apr 2023 15:46:58 +0800 Subject: [PATCH 10/12] kazel: support auto-config shard_count (#42751) close pingcap/tidb#42827 --- cmd/ddltest/BUILD.bazel | 1 + config/BUILD.bazel | 2 +- ddl/label/BUILD.bazel | 1 + ddl/placement/BUILD.bazel | 1 + ddl/schematracker/BUILD.bazel | 1 + ddl/tiflashtest/BUILD.bazel | 2 +- distsql/BUILD.bazel | 2 +- disttask/framework/dispatcher/BUILD.bazel | 2 +- disttask/framework/scheduler/BUILD.bazel | 1 + disttask/loaddata/BUILD.bazel | 1 + domain/BUILD.bazel | 2 +- domain/infosync/BUILD.bazel | 2 +- executor/aggfuncs/BUILD.bazel | 2 +- executor/asyncloaddata/BUILD.bazel | 2 +- executor/autoidtest/BUILD.bazel | 2 +- executor/fktest/BUILD.bazel | 2 +- executor/importer/BUILD.bazel | 1 + executor/loaddatatest/BUILD.bazel | 2 +- executor/seqtest/BUILD.bazel | 2 +- executor/simpletest/BUILD.bazel | 2 +- expression/aggregation/BUILD.bazel | 1 + expression/casetest/BUILD.bazel | 1 - extension/BUILD.bazel | 1 + infoschema/perfschema/BUILD.bazel | 1 + kv/BUILD.bazel | 1 + meta/BUILD.bazel | 1 + meta/autoid/BUILD.bazel | 1 + metrics/BUILD.bazel | 1 + owner/BUILD.bazel | 1 + parser/BUILD.bazel | 1 + parser/ast/BUILD.bazel | 1 + parser/auth/BUILD.bazel | 1 + parser/charset/BUILD.bazel | 1 + parser/format/BUILD.bazel | 1 + parser/model/BUILD.bazel | 1 + parser/mysql/BUILD.bazel | 1 + parser/terror/BUILD.bazel | 1 + parser/types/BUILD.bazel | 2 +- planner/cascades/BUILD.bazel | 2 +- planner/funcdep/BUILD.bazel | 2 +- planner/memo/BUILD.bazel | 1 + plugin/BUILD.bazel | 1 + resourcemanager/pool/spool/BUILD.bazel | 2 +- sessionctx/binloginfo/BUILD.bazel | 1 + sessionctx/sessionstates/BUILD.bazel | 2 +- sessionctx/stmtctx/BUILD.bazel | 1 + sessiontxn/BUILD.bazel | 2 +- sessiontxn/isolation/BUILD.bazel | 2 +- sessiontxn/staleread/BUILD.bazel | 1 + statistics/handle/BUILD.bazel | 2 +- statistics/handle/updatetest/BUILD.bazel | 2 +- store/copr/BUILD.bazel | 2 +- store/driver/BUILD.bazel | 1 + store/driver/txn/BUILD.bazel | 1 + store/gcworker/BUILD.bazel | 1 + 
store/helper/BUILD.bazel | 1 + store/mockstore/mockcopr/BUILD.bazel | 1 + store/mockstore/unistore/BUILD.bazel | 1 + .../mockstore/unistore/cophandler/BUILD.bazel | 1 + .../mockstore/unistore/lockstore/BUILD.bazel | 1 + store/mockstore/unistore/tikv/BUILD.bazel | 1 + structure/BUILD.bazel | 1 + table/BUILD.bazel | 2 +- table/temptable/BUILD.bazel | 1 + tablecodec/BUILD.bazel | 2 +- telemetry/BUILD.bazel | 2 +- tidb-binlog/node/BUILD.bazel | 1 + tools/tazel/BUILD.bazel | 2 + tools/tazel/ast.go | 74 +++++++++++++++++++ tools/tazel/main.go | 28 +++++-- tools/tazel/util.go | 8 ++ ttl/cache/BUILD.bazel | 2 +- ttl/client/BUILD.bazel | 1 - ttl/metrics/BUILD.bazel | 1 - ttl/session/BUILD.bazel | 2 +- 75 files changed, 173 insertions(+), 38 deletions(-) create mode 100644 tools/tazel/ast.go diff --git a/cmd/ddltest/BUILD.bazel b/cmd/ddltest/BUILD.bazel index 39ac3c9bdb7b4..07882d4e3c403 100644 --- a/cmd/ddltest/BUILD.bazel +++ b/cmd/ddltest/BUILD.bazel @@ -12,6 +12,7 @@ go_test( ], flaky = True, race = "on", + shard_count = 6, deps = [ "//config", "//domain", diff --git a/config/BUILD.bazel b/config/BUILD.bazel index e3034c60caff6..db2e165331bca 100644 --- a/config/BUILD.bazel +++ b/config/BUILD.bazel @@ -37,7 +37,7 @@ go_test( data = glob(["**"]), embed = [":config"], flaky = True, - shard_count = 2, + shard_count = 23, deps = [ "//testkit/testsetup", "//util/logutil", diff --git a/ddl/label/BUILD.bazel b/ddl/label/BUILD.bazel index db9c3f836e954..e691b79edb004 100644 --- a/ddl/label/BUILD.bazel +++ b/ddl/label/BUILD.bazel @@ -28,6 +28,7 @@ go_test( ], embed = [":label"], flaky = True, + shard_count = 8, deps = [ "//parser/ast", "//testkit/testsetup", diff --git a/ddl/placement/BUILD.bazel b/ddl/placement/BUILD.bazel index 0f695612ca9d8..761250c9c9064 100644 --- a/ddl/placement/BUILD.bazel +++ b/ddl/placement/BUILD.bazel @@ -36,6 +36,7 @@ go_test( embed = [":placement"], flaky = True, race = "on", + shard_count = 24, deps = [ "//kv", "//meta", diff --git a/ddl/schematracker/BUILD.bazel b/ddl/schematracker/BUILD.bazel index 7da4f6b6996b0..1b4c75cfea9b3 100644 --- a/ddl/schematracker/BUILD.bazel +++ b/ddl/schematracker/BUILD.bazel @@ -44,6 +44,7 @@ go_test( ], embed = [":schematracker"], flaky = True, + shard_count = 14, deps = [ "//executor", "//infoschema", diff --git a/ddl/tiflashtest/BUILD.bazel b/ddl/tiflashtest/BUILD.bazel index 4e24ea074d7f4..b1bae2dd615d8 100644 --- a/ddl/tiflashtest/BUILD.bazel +++ b/ddl/tiflashtest/BUILD.bazel @@ -8,7 +8,7 @@ go_test( "main_test.go", ], flaky = True, - shard_count = 30, + shard_count = 32, deps = [ "//config", "//ddl", diff --git a/distsql/BUILD.bazel b/distsql/BUILD.bazel index 31c1f3e8ad95e..3485b21011984 100644 --- a/distsql/BUILD.bazel +++ b/distsql/BUILD.bazel @@ -66,7 +66,7 @@ go_test( embed = [":distsql"], flaky = True, race = "on", - shard_count = 2, + shard_count = 23, deps = [ "//kv", "//parser/charset", diff --git a/disttask/framework/dispatcher/BUILD.bazel b/disttask/framework/dispatcher/BUILD.bazel index 804c62db8b6ed..c2b44f291e868 100644 --- a/disttask/framework/dispatcher/BUILD.bazel +++ b/disttask/framework/dispatcher/BUILD.bazel @@ -36,7 +36,7 @@ go_test( embed = [":dispatcher"], flaky = True, race = "on", - shard_count = 6, + shard_count = 5, deps = [ "//disttask/framework/proto", "//disttask/framework/storage", diff --git a/disttask/framework/scheduler/BUILD.bazel b/disttask/framework/scheduler/BUILD.bazel index 36f3b0dc6d47a..3309750e3a644 100644 --- a/disttask/framework/scheduler/BUILD.bazel +++ 
b/disttask/framework/scheduler/BUILD.bazel @@ -32,6 +32,7 @@ go_test( ], embed = [":scheduler"], flaky = True, + shard_count = 8, deps = [ "//disttask/framework/proto", "//resourcemanager/pool/spool", diff --git a/disttask/loaddata/BUILD.bazel b/disttask/loaddata/BUILD.bazel index b2e342c22207a..4747a24879863 100644 --- a/disttask/loaddata/BUILD.bazel +++ b/disttask/loaddata/BUILD.bazel @@ -37,6 +37,7 @@ go_test( data = glob(["testdata/**"]), embed = [":loaddata"], flaky = True, + shard_count = 4, deps = [ "//br/pkg/lightning/config", "//br/pkg/lightning/mydump", diff --git a/domain/BUILD.bazel b/domain/BUILD.bazel index b0ef87f4ab7e1..548a6f382f612 100644 --- a/domain/BUILD.bazel +++ b/domain/BUILD.bazel @@ -116,7 +116,7 @@ go_test( ], embed = [":domain"], flaky = True, - shard_count = 25, + shard_count = 22, deps = [ "//config", "//ddl", diff --git a/domain/infosync/BUILD.bazel b/domain/infosync/BUILD.bazel index f050873ee128f..60039abcd6a7a 100644 --- a/domain/infosync/BUILD.bazel +++ b/domain/infosync/BUILD.bazel @@ -60,7 +60,7 @@ go_test( srcs = ["info_test.go"], embed = [":infosync"], flaky = True, - shard_count = 50, + shard_count = 3, deps = [ "//ddl/placement", "//ddl/util", diff --git a/executor/aggfuncs/BUILD.bazel b/executor/aggfuncs/BUILD.bazel index e3958fae6a0e1..5b34257948f23 100644 --- a/executor/aggfuncs/BUILD.bazel +++ b/executor/aggfuncs/BUILD.bazel @@ -89,7 +89,7 @@ go_test( embed = [":aggfuncs"], flaky = True, race = "on", - shard_count = 50, + shard_count = 48, deps = [ "//expression", "//expression/aggregation", diff --git a/executor/asyncloaddata/BUILD.bazel b/executor/asyncloaddata/BUILD.bazel index f9394b981f43b..a084d50343ea4 100644 --- a/executor/asyncloaddata/BUILD.bazel +++ b/executor/asyncloaddata/BUILD.bazel @@ -35,7 +35,7 @@ go_test( embed = [":asyncloaddata"], flaky = True, race = "on", - shard_count = 6, + shard_count = 7, deps = [ "//br/pkg/lightning/config", "//executor", diff --git a/executor/autoidtest/BUILD.bazel b/executor/autoidtest/BUILD.bazel index aea16482d1dd7..e6d04505022cb 100644 --- a/executor/autoidtest/BUILD.bazel +++ b/executor/autoidtest/BUILD.bazel @@ -9,7 +9,7 @@ go_test( ], flaky = True, race = "on", - shard_count = 11, + shard_count = 10, deps = [ "//autoid_service", "//config", diff --git a/executor/fktest/BUILD.bazel b/executor/fktest/BUILD.bazel index bb7f6d363db7c..9cdadb056be9e 100644 --- a/executor/fktest/BUILD.bazel +++ b/executor/fktest/BUILD.bazel @@ -8,7 +8,7 @@ go_test( "main_test.go", ], flaky = True, - shard_count = 30, + shard_count = 38, deps = [ "//config", "//executor", diff --git a/executor/importer/BUILD.bazel b/executor/importer/BUILD.bazel index 0d63c585f4ded..2e8d59d303be8 100644 --- a/executor/importer/BUILD.bazel +++ b/executor/importer/BUILD.bazel @@ -39,6 +39,7 @@ go_test( embed = [":importer"], flaky = True, race = "on", + shard_count = 4, deps = [ "//br/pkg/errors", "//br/pkg/lightning/config", diff --git a/executor/loaddatatest/BUILD.bazel b/executor/loaddatatest/BUILD.bazel index 3e8f9f151c7f5..0f2a18a7e83f6 100644 --- a/executor/loaddatatest/BUILD.bazel +++ b/executor/loaddatatest/BUILD.bazel @@ -9,7 +9,7 @@ go_test( ], flaky = True, race = "on", - shard_count = 11, + shard_count = 10, deps = [ "//br/pkg/lightning/mydump", "//config", diff --git a/executor/seqtest/BUILD.bazel b/executor/seqtest/BUILD.bazel index efd24f0e2f473..606b3e4abf74b 100644 --- a/executor/seqtest/BUILD.bazel +++ b/executor/seqtest/BUILD.bazel @@ -10,7 +10,7 @@ go_test( ], flaky = True, race = "on", - shard_count = 45, + 
shard_count = 40, deps = [ "//config", "//ddl/testutil", diff --git a/executor/simpletest/BUILD.bazel b/executor/simpletest/BUILD.bazel index 3188479359a87..2d0d1e46c8cb0 100644 --- a/executor/simpletest/BUILD.bazel +++ b/executor/simpletest/BUILD.bazel @@ -10,7 +10,7 @@ go_test( ], flaky = True, race = "on", - shard_count = 50, + shard_count = 33, deps = [ "//config", "//parser/auth", diff --git a/expression/aggregation/BUILD.bazel b/expression/aggregation/BUILD.bazel index 139cc56284b29..b66495b0c2217 100644 --- a/expression/aggregation/BUILD.bazel +++ b/expression/aggregation/BUILD.bazel @@ -58,6 +58,7 @@ go_test( ], embed = [":aggregation"], flaky = True, + shard_count = 14, deps = [ "//expression", "//kv", diff --git a/expression/casetest/BUILD.bazel b/expression/casetest/BUILD.bazel index e52b9489d1fae..ee76a6ea2fdd1 100644 --- a/expression/casetest/BUILD.bazel +++ b/expression/casetest/BUILD.bazel @@ -10,7 +10,6 @@ go_test( ], data = glob(["testdata/**"]), flaky = True, - shard_count = 2, deps = [ "//config", "//testkit", diff --git a/extension/BUILD.bazel b/extension/BUILD.bazel index f180dcf7a9955..578b3eca306a1 100644 --- a/extension/BUILD.bazel +++ b/extension/BUILD.bazel @@ -39,6 +39,7 @@ go_test( ], embed = [":extension"], flaky = True, + shard_count = 14, deps = [ "//expression", "//parser/ast", diff --git a/infoschema/perfschema/BUILD.bazel b/infoschema/perfschema/BUILD.bazel index defe5bdaf68d8..9cc533bf1dab5 100644 --- a/infoschema/perfschema/BUILD.bazel +++ b/infoschema/perfschema/BUILD.bazel @@ -42,6 +42,7 @@ go_test( data = glob(["testdata/**"]), embed = [":perfschema"], flaky = True, + shard_count = 4, deps = [ "//kv", "//parser/terror", diff --git a/kv/BUILD.bazel b/kv/BUILD.bazel index fbd8e7c5466ed..ebfe5252110b8 100644 --- a/kv/BUILD.bazel +++ b/kv/BUILD.bazel @@ -73,6 +73,7 @@ go_test( ], embed = [":kv"], flaky = True, + shard_count = 19, deps = [ "//parser/model", "//parser/mysql", diff --git a/meta/BUILD.bazel b/meta/BUILD.bazel index c6c796a9771c1..1746e2c205714 100644 --- a/meta/BUILD.bazel +++ b/meta/BUILD.bazel @@ -30,6 +30,7 @@ go_test( ], embed = [":meta"], flaky = True, + shard_count = 12, deps = [ "//kv", "//parser/model", diff --git a/meta/autoid/BUILD.bazel b/meta/autoid/BUILD.bazel index 0eb6034820160..e653a904ce7ca 100644 --- a/meta/autoid/BUILD.bazel +++ b/meta/autoid/BUILD.bazel @@ -48,6 +48,7 @@ go_test( "seq_autoid_test.go", ], flaky = True, + shard_count = 10, deps = [ ":autoid", "//kv", diff --git a/metrics/BUILD.bazel b/metrics/BUILD.bazel index 3457da50b9a08..f6b729f68e9ef 100644 --- a/metrics/BUILD.bazel +++ b/metrics/BUILD.bazel @@ -47,6 +47,7 @@ go_test( ], embed = [":metrics"], flaky = True, + shard_count = 4, deps = [ "//parser/terror", "//testkit/testsetup", diff --git a/owner/BUILD.bazel b/owner/BUILD.bazel index 0a3024113369a..b304c3ef473f1 100644 --- a/owner/BUILD.bazel +++ b/owner/BUILD.bazel @@ -32,6 +32,7 @@ go_test( ], embed = [":owner"], flaky = True, + shard_count = 3, deps = [ "//ddl", "//infoschema", diff --git a/parser/BUILD.bazel b/parser/BUILD.bazel index ec67a5c141849..c1472b8d9a883 100644 --- a/parser/BUILD.bazel +++ b/parser/BUILD.bazel @@ -43,6 +43,7 @@ go_test( data = glob(["**"]), embed = [":parser"], flaky = True, + shard_count = 50, deps = [ "//parser/ast", "//parser/charset", diff --git a/parser/ast/BUILD.bazel b/parser/ast/BUILD.bazel index 94831343ca13f..7d2671d40f016 100644 --- a/parser/ast/BUILD.bazel +++ b/parser/ast/BUILD.bazel @@ -47,6 +47,7 @@ go_test( ], embed = [":ast"], flaky = True, + shard_count = 50, 
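For context: these shard_count values are generated, not hand-tuned. The tools/tazel changes later in this same commit parse each package's *_test.go files, count the top-level TestXxx functions (methods and TestMain are skipped), write min(count, 50) as shard_count, and delete the attribute when a package has two or fewer tests. A standalone sketch of that counting step, with an invented countTests helper rather than the real tazel code:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"os"
	"path/filepath"
	"strings"
)

// countTests parses every *_test.go file in pkgDir and counts top-level
// TestXxx functions, skipping methods and TestMain, mirroring tools/tazel.
func countTests(pkgDir string) (int, error) {
	entries, err := os.ReadDir(pkgDir)
	if err != nil {
		return 0, err
	}
	fset := token.NewFileSet()
	n := 0
	for _, e := range entries {
		if e.IsDir() || !strings.HasSuffix(e.Name(), "_test.go") {
			continue
		}
		f, err := parser.ParseFile(fset, filepath.Join(pkgDir, e.Name()), nil, parser.AllErrors)
		if err != nil {
			return 0, err
		}
		for _, d := range f.Decls {
			if fn, ok := d.(*ast.FuncDecl); ok && fn.Recv == nil &&
				strings.HasPrefix(fn.Name.Name, "Test") && fn.Name.Name != "TestMain" {
				n++
			}
		}
	}
	return n, nil
}

func main() {
	cnt, err := countTests(".")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// The patch caps the value at 50 and omits shard_count entirely when a
	// package has two or fewer tests.
	if cnt > 50 {
		cnt = 50
	}
	fmt.Println("suggested shard_count:", cnt)
}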
deps = [ "//parser", "//parser/auth", diff --git a/parser/auth/BUILD.bazel b/parser/auth/BUILD.bazel index 422f99c488a4f..f31fe83ce1fca 100644 --- a/parser/auth/BUILD.bazel +++ b/parser/auth/BUILD.bazel @@ -28,6 +28,7 @@ go_test( ], embed = [":auth"], flaky = True, + shard_count = 16, deps = [ "//parser/mysql", "@com_github_stretchr_testify//require", diff --git a/parser/charset/BUILD.bazel b/parser/charset/BUILD.bazel index 5337fcefd44a9..f8fa92e88dc82 100644 --- a/parser/charset/BUILD.bazel +++ b/parser/charset/BUILD.bazel @@ -42,6 +42,7 @@ go_test( ], embed = [":charset"], flaky = True, + shard_count = 7, deps = [ "@com_github_stretchr_testify//require", "@org_golang_x_text//transform", diff --git a/parser/format/BUILD.bazel b/parser/format/BUILD.bazel index 7df5fe10ac9f6..b7c41426d3a93 100644 --- a/parser/format/BUILD.bazel +++ b/parser/format/BUILD.bazel @@ -13,6 +13,7 @@ go_test( srcs = ["format_test.go"], embed = [":format"], flaky = True, + shard_count = 3, deps = [ "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//require", diff --git a/parser/model/BUILD.bazel b/parser/model/BUILD.bazel index 39046731496d9..a96a1dae04f26 100644 --- a/parser/model/BUILD.bazel +++ b/parser/model/BUILD.bazel @@ -30,6 +30,7 @@ go_test( ], embed = [":model"], flaky = True, + shard_count = 20, deps = [ "//parser/charset", "//parser/mysql", diff --git a/parser/mysql/BUILD.bazel b/parser/mysql/BUILD.bazel index d610c6847eb25..46490b56224ee 100644 --- a/parser/mysql/BUILD.bazel +++ b/parser/mysql/BUILD.bazel @@ -33,5 +33,6 @@ go_test( ], embed = [":mysql"], flaky = True, + shard_count = 8, deps = ["@com_github_stretchr_testify//require"], ) diff --git a/parser/terror/BUILD.bazel b/parser/terror/BUILD.bazel index 3a49216b5e2ba..eec345033fa08 100644 --- a/parser/terror/BUILD.bazel +++ b/parser/terror/BUILD.bazel @@ -19,6 +19,7 @@ go_test( srcs = ["terror_test.go"], embed = [":terror"], flaky = True, + shard_count = 6, deps = [ "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//require", diff --git a/parser/types/BUILD.bazel b/parser/types/BUILD.bazel index df3346c496a3f..fa695269f8dd6 100644 --- a/parser/types/BUILD.bazel +++ b/parser/types/BUILD.bazel @@ -27,7 +27,7 @@ go_test( ], embed = [":types"], flaky = True, - shard_count = 50, + shard_count = 5, deps = [ "//parser", "//parser/ast", diff --git a/planner/cascades/BUILD.bazel b/planner/cascades/BUILD.bazel index a149c3ad1fd01..d08db52f77032 100644 --- a/planner/cascades/BUILD.bazel +++ b/planner/cascades/BUILD.bazel @@ -43,7 +43,7 @@ go_test( data = glob(["testdata/**"]), embed = [":cascades"], flaky = True, - shard_count = 5, + shard_count = 40, deps = [ "//domain", "//expression", diff --git a/planner/funcdep/BUILD.bazel b/planner/funcdep/BUILD.bazel index 323fb2c0b8011..e5f8591c1b675 100644 --- a/planner/funcdep/BUILD.bazel +++ b/planner/funcdep/BUILD.bazel @@ -28,7 +28,7 @@ go_test( ], embed = [":funcdep"], flaky = True, - shard_count = 4, + shard_count = 16, deps = [ "//domain", "//infoschema", diff --git a/planner/memo/BUILD.bazel b/planner/memo/BUILD.bazel index 6006c9e86edf4..273981a6a68b8 100644 --- a/planner/memo/BUILD.bazel +++ b/planner/memo/BUILD.bazel @@ -30,6 +30,7 @@ go_test( ], embed = [":memo"], flaky = True, + shard_count = 22, deps = [ "//domain", "//expression", diff --git a/plugin/BUILD.bazel b/plugin/BUILD.bazel index 6dd39d9af8c92..69c18887fc4cb 100644 --- a/plugin/BUILD.bazel +++ b/plugin/BUILD.bazel @@ -38,6 +38,7 @@ go_test( ], embed = [":plugin"], flaky = True, + shard_count = 10, deps 
= [ "//kv", "//parser/mysql", diff --git a/resourcemanager/pool/spool/BUILD.bazel b/resourcemanager/pool/spool/BUILD.bazel index c8f6788d824f5..7c4b6d4f2caa0 100644 --- a/resourcemanager/pool/spool/BUILD.bazel +++ b/resourcemanager/pool/spool/BUILD.bazel @@ -32,7 +32,7 @@ go_test( embed = [":spool"], flaky = True, race = "on", - shard_count = 2, + shard_count = 5, deps = [ "//resourcemanager/pool", "//resourcemanager/util", diff --git a/sessionctx/binloginfo/BUILD.bazel b/sessionctx/binloginfo/BUILD.bazel index 3c345cd043469..51e43939b3bff 100644 --- a/sessionctx/binloginfo/BUILD.bazel +++ b/sessionctx/binloginfo/BUILD.bazel @@ -32,6 +32,7 @@ go_test( ], embed = [":binloginfo"], flaky = True, + shard_count = 11, deps = [ "//autoid_service", "//ddl", diff --git a/sessionctx/sessionstates/BUILD.bazel b/sessionctx/sessionstates/BUILD.bazel index 76330477475d2..2348abae1ea6d 100644 --- a/sessionctx/sessionstates/BUILD.bazel +++ b/sessionctx/sessionstates/BUILD.bazel @@ -30,7 +30,7 @@ go_test( ], embed = [":sessionstates"], flaky = True, - shard_count = 13, + shard_count = 14, deps = [ "//config", "//errno", diff --git a/sessionctx/stmtctx/BUILD.bazel b/sessionctx/stmtctx/BUILD.bazel index d0f9d6a08ea23..8785f2a09b3f5 100644 --- a/sessionctx/stmtctx/BUILD.bazel +++ b/sessionctx/stmtctx/BUILD.bazel @@ -36,6 +36,7 @@ go_test( ], embed = [":stmtctx"], flaky = True, + shard_count = 4, deps = [ "//kv", "//sessionctx/variable", diff --git a/sessiontxn/BUILD.bazel b/sessiontxn/BUILD.bazel index 51a54724b33ec..4a2e57f093c37 100644 --- a/sessiontxn/BUILD.bazel +++ b/sessiontxn/BUILD.bazel @@ -27,7 +27,7 @@ go_test( "txn_rc_tso_optimize_test.go", ], flaky = True, - shard_count = 50, + shard_count = 25, deps = [ ":sessiontxn", "//domain", diff --git a/sessiontxn/isolation/BUILD.bazel b/sessiontxn/isolation/BUILD.bazel index f4fe40db4cfce..dc4f2673578cb 100644 --- a/sessiontxn/isolation/BUILD.bazel +++ b/sessiontxn/isolation/BUILD.bazel @@ -47,7 +47,7 @@ go_test( "serializable_test.go", ], flaky = True, - shard_count = 28, + shard_count = 27, deps = [ ":isolation", "//config", diff --git a/sessiontxn/staleread/BUILD.bazel b/sessiontxn/staleread/BUILD.bazel index 9c1e11823e32a..ffc0d36b661cd 100644 --- a/sessiontxn/staleread/BUILD.bazel +++ b/sessiontxn/staleread/BUILD.bazel @@ -43,6 +43,7 @@ go_test( "provider_test.go", ], flaky = True, + shard_count = 10, deps = [ ":staleread", "//domain", diff --git a/statistics/handle/BUILD.bazel b/statistics/handle/BUILD.bazel index 50f011f8e648e..b27d0e28e7a75 100644 --- a/statistics/handle/BUILD.bazel +++ b/statistics/handle/BUILD.bazel @@ -72,7 +72,7 @@ go_test( embed = [":handle"], flaky = True, race = "on", - shard_count = 50, + shard_count = 33, deps = [ "//config", "//domain", diff --git a/statistics/handle/updatetest/BUILD.bazel b/statistics/handle/updatetest/BUILD.bazel index 6ac60d66afc3d..c4d6a5ba05a09 100644 --- a/statistics/handle/updatetest/BUILD.bazel +++ b/statistics/handle/updatetest/BUILD.bazel @@ -8,7 +8,7 @@ go_test( "update_test.go", ], flaky = True, - shard_count = 40, + shard_count = 44, deps = [ "//metrics", "//parser/model", diff --git a/store/copr/BUILD.bazel b/store/copr/BUILD.bazel index 7a3f81b7d0235..aa137de8c9df3 100644 --- a/store/copr/BUILD.bazel +++ b/store/copr/BUILD.bazel @@ -78,7 +78,7 @@ go_test( embed = [":copr"], flaky = True, race = "on", - shard_count = 33, + shard_count = 28, deps = [ "//kv", "//store/driver/backoff", diff --git a/store/driver/BUILD.bazel b/store/driver/BUILD.bazel index e56417cc419ce..36151cda74d68 100644 --- 
a/store/driver/BUILD.bazel +++ b/store/driver/BUILD.bazel @@ -41,6 +41,7 @@ go_test( ], embed = [":driver"], flaky = True, + shard_count = 7, deps = [ "//domain", "//kv", diff --git a/store/driver/txn/BUILD.bazel b/store/driver/txn/BUILD.bazel index 437fd464b8c81..c2dca28ea46b1 100644 --- a/store/driver/txn/BUILD.bazel +++ b/store/driver/txn/BUILD.bazel @@ -56,6 +56,7 @@ go_test( ], embed = [":txn"], flaky = True, + shard_count = 5, deps = [ "//kv", "//testkit/testsetup", diff --git a/store/gcworker/BUILD.bazel b/store/gcworker/BUILD.bazel index c2c4bd73ea9cb..805a4e625c9c6 100644 --- a/store/gcworker/BUILD.bazel +++ b/store/gcworker/BUILD.bazel @@ -54,6 +54,7 @@ go_test( ], embed = [":gcworker"], flaky = True, + shard_count = 30, deps = [ "//ddl/placement", "//ddl/util", diff --git a/store/helper/BUILD.bazel b/store/helper/BUILD.bazel index 1afca88ba31ba..3a90615abd2b0 100644 --- a/store/helper/BUILD.bazel +++ b/store/helper/BUILD.bazel @@ -37,6 +37,7 @@ go_test( ], embed = [":helper"], flaky = True, + shard_count = 6, deps = [ "//parser/model", "//store/mockstore", diff --git a/store/mockstore/mockcopr/BUILD.bazel b/store/mockstore/mockcopr/BUILD.bazel index f881d161790a5..a386c0b20a513 100644 --- a/store/mockstore/mockcopr/BUILD.bazel +++ b/store/mockstore/mockcopr/BUILD.bazel @@ -55,6 +55,7 @@ go_test( ], embed = [":mockcopr"], flaky = True, + shard_count = 3, deps = [ "//domain", "//kv", diff --git a/store/mockstore/unistore/BUILD.bazel b/store/mockstore/unistore/BUILD.bazel index 77821e16b18f5..2712a21e5764b 100644 --- a/store/mockstore/unistore/BUILD.bazel +++ b/store/mockstore/unistore/BUILD.bazel @@ -52,6 +52,7 @@ go_test( ], embed = [":unistore"], flaky = True, + shard_count = 4, deps = [ "//testkit/testsetup", "@com_github_pingcap_kvproto//pkg/kvrpcpb", diff --git a/store/mockstore/unistore/cophandler/BUILD.bazel b/store/mockstore/unistore/cophandler/BUILD.bazel index 91dc48a88798c..c1e32c7bab2a8 100644 --- a/store/mockstore/unistore/cophandler/BUILD.bazel +++ b/store/mockstore/unistore/cophandler/BUILD.bazel @@ -62,6 +62,7 @@ go_test( ], embed = [":cophandler"], flaky = True, + shard_count = 4, deps = [ "//expression", "//kv", diff --git a/store/mockstore/unistore/lockstore/BUILD.bazel b/store/mockstore/unistore/lockstore/BUILD.bazel index c222cd16c4f28..fb8e8f534f0e2 100644 --- a/store/mockstore/unistore/lockstore/BUILD.bazel +++ b/store/mockstore/unistore/lockstore/BUILD.bazel @@ -26,6 +26,7 @@ go_test( ], embed = [":lockstore"], flaky = True, + shard_count = 4, deps = [ "//testkit/testsetup", "@com_github_stretchr_testify//require", diff --git a/store/mockstore/unistore/tikv/BUILD.bazel b/store/mockstore/unistore/tikv/BUILD.bazel index bc7ecacf382fc..52578cacbc78a 100644 --- a/store/mockstore/unistore/tikv/BUILD.bazel +++ b/store/mockstore/unistore/tikv/BUILD.bazel @@ -70,6 +70,7 @@ go_test( ], embed = [":tikv"], flaky = True, + shard_count = 28, deps = [ "//store/mockstore/unistore/config", "//store/mockstore/unistore/lockstore", diff --git a/structure/BUILD.bazel b/structure/BUILD.bazel index f99645353e16a..0fcf53831a8fa 100644 --- a/structure/BUILD.bazel +++ b/structure/BUILD.bazel @@ -29,6 +29,7 @@ go_test( ], embed = [":structure"], flaky = True, + shard_count = 4, deps = [ "//kv", "//parser/mysql", diff --git a/table/BUILD.bazel b/table/BUILD.bazel index e1cf80e5e90fa..4aea7a7feeeb9 100644 --- a/table/BUILD.bazel +++ b/table/BUILD.bazel @@ -46,7 +46,7 @@ go_test( embed = [":table"], flaky = True, race = "on", - shard_count = 50, + shard_count = 9, deps = [ "//errno", 
"//expression", diff --git a/table/temptable/BUILD.bazel b/table/temptable/BUILD.bazel index 0d606bbe02d1f..6b37476e145f5 100644 --- a/table/temptable/BUILD.bazel +++ b/table/temptable/BUILD.bazel @@ -39,6 +39,7 @@ go_test( ], embed = [":temptable"], flaky = True, + shard_count = 17, deps = [ "//infoschema", "//kv", diff --git a/tablecodec/BUILD.bazel b/tablecodec/BUILD.bazel index 9e0515d8fda23..2a5f82acb90fb 100644 --- a/tablecodec/BUILD.bazel +++ b/tablecodec/BUILD.bazel @@ -36,7 +36,7 @@ go_test( ], embed = [":tablecodec"], flaky = True, - shard_count = 30, + shard_count = 23, deps = [ "//kv", "//parser/mysql", diff --git a/telemetry/BUILD.bazel b/telemetry/BUILD.bazel index 12c9f049675da..d8abecb84a89c 100644 --- a/telemetry/BUILD.bazel +++ b/telemetry/BUILD.bazel @@ -62,7 +62,7 @@ go_test( ], embed = [":telemetry"], flaky = True, - shard_count = 30, + shard_count = 36, deps = [ "//autoid_service", "//config", diff --git a/tidb-binlog/node/BUILD.bazel b/tidb-binlog/node/BUILD.bazel index 01c2669bbb640..31853fec390c1 100644 --- a/tidb-binlog/node/BUILD.bazel +++ b/tidb-binlog/node/BUILD.bazel @@ -24,6 +24,7 @@ go_test( srcs = ["registry_test.go"], embed = [":node"], flaky = True, + shard_count = 3, deps = [ "//util/etcd", "@com_github_stretchr_testify//require", diff --git a/tools/tazel/BUILD.bazel b/tools/tazel/BUILD.bazel index c17f1d013e45f..2a909468cf1da 100644 --- a/tools/tazel/BUILD.bazel +++ b/tools/tazel/BUILD.bazel @@ -3,12 +3,14 @@ load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library") go_library( name = "tazel_lib", srcs = [ + "ast.go", "main.go", "util.go", ], importpath = "github.com/pingcap/tidb/tools/tazel", visibility = ["//visibility:private"], deps = [ + "//util/mathutil", "//util/set", "@com_github_bazelbuild_buildtools//build:go_default_library", "@com_github_pingcap_log//:log", diff --git a/tools/tazel/ast.go b/tools/tazel/ast.go new file mode 100644 index 0000000000000..91b4c661c6c06 --- /dev/null +++ b/tools/tazel/ast.go @@ -0,0 +1,74 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "go/ast" + "go/parser" + "go/token" + "io/fs" + "path/filepath" + "strings" + + "github.com/pingcap/log" + "go.uber.org/zap" +) + +var testMap map[string]uint32 + +func initCount() { + testMap = make(map[string]uint32) +} + +func addTestMap(path string) { + if _, ok := testMap[path]; !ok { + testMap[path] = 0 + } + testMap[path]++ +} + +func walk() { + err := filepath.Walk(".", func(path string, d fs.FileInfo, _ error) error { + if d.IsDir() || !strings.HasSuffix(d.Name(), "_test.go") { + return nil + } + return scan(path) + }) + if err != nil { + log.Fatal("fail to walk", zap.Error(err)) + } +} + +func scan(path string) error { + fset := token.NewFileSet() + path, err := filepath.Abs(path) + if err != nil { + return err + } + f, err := parser.ParseFile(fset, path, nil, parser.AllErrors) + if err != nil { + return err + } + for _, n := range f.Decls { + funcDecl, ok := n.(*ast.FuncDecl) + if ok { + if strings.HasPrefix(funcDecl.Name.Name, "Test") && funcDecl.Recv == nil && + funcDecl.Name.Name != "TestMain" { + addTestMap(filepath.Dir(path)) + } + } + } + return nil +} diff --git a/tools/tazel/main.go b/tools/tazel/main.go index 3d012ac97cf2b..420e5833b06e7 100644 --- a/tools/tazel/main.go +++ b/tools/tazel/main.go @@ -19,13 +19,19 @@ import ( "io/fs" "os" "path/filepath" + "strconv" "github.com/bazelbuild/buildtools/build" "github.com/pingcap/log" + "github.com/pingcap/tidb/util/mathutil" "go.uber.org/zap" ) +const maxShardCount = 50 + func main() { + initCount() + walk() if _, err := os.Stat("WORKSPACE"); errors.Is(err, os.ErrNotExist) { log.Fatal("It should run from the project root") } @@ -42,21 +48,29 @@ func main() { log.Fatal("fail to parser BUILD.bazel", zap.Error(err), zap.String("path", path)) } gotest := buildfile.Rules("go_test") - toWrite := false if len(gotest) != 0 { if gotest[0].AttrString("timeout") == "" { gotest[0].SetAttr("timeout", &build.StringExpr{Value: "short"}) - toWrite = true } if !skipFlaky(path) && gotest[0].AttrLiteral("flaky") == "" { gotest[0].SetAttr("flaky", &build.LiteralExpr{Token: "True"}) - toWrite = true + } + if !skipShardCount(path) { + abspath, err := filepath.Abs(path) + if err != nil { + return err + } + if cnt, ok := testMap[filepath.Dir(abspath)]; ok { + if cnt > 2 { + gotest[0].SetAttr("shard_count", + &build.LiteralExpr{Token: strconv.FormatUint(uint64(mathutil.Min(cnt, maxShardCount)), 10)}) + } else { + gotest[0].DelAttr("shard_count") + } + } } } - if toWrite { - log.Info("write file", zap.String("path", path)) - write(path, buildfile) - } + write(path, buildfile) return nil }) if err != nil { diff --git a/tools/tazel/util.go b/tools/tazel/util.go index f09ebace433c2..d2fc9b71ffc10 100644 --- a/tools/tazel/util.go +++ b/tools/tazel/util.go @@ -16,6 +16,7 @@ package main import ( "os" + "strings" "github.com/bazelbuild/buildtools/build" "github.com/pingcap/tidb/util/set" @@ -38,3 +39,10 @@ func skipTazel(path string) bool { pmap.Insert("build/BUILD.bazel") return pmap.Exist(path) } + +func skipShardCount(path string) bool { + return strings.HasPrefix(path, "br") || + strings.HasPrefix(path, "tests") || + strings.HasPrefix(path, "dumpling") || + strings.HasPrefix(path, "util") +} diff --git a/ttl/cache/BUILD.bazel b/ttl/cache/BUILD.bazel index 1e448cbeccf3f..6b91715c72635 100644 --- a/ttl/cache/BUILD.bazel +++ b/ttl/cache/BUILD.bazel @@ -47,7 +47,7 @@ go_test( ], embed = [":cache"], flaky = True, - shard_count = 50, + shard_count = 13, deps = [ "//infoschema", "//kv", diff --git a/ttl/client/BUILD.bazel 
b/ttl/client/BUILD.bazel index 6c045a6e08fe9..8c558b724abd6 100644 --- a/ttl/client/BUILD.bazel +++ b/ttl/client/BUILD.bazel @@ -24,7 +24,6 @@ go_test( srcs = ["command_test.go"], embed = [":client"], flaky = True, - shard_count = 5, deps = [ "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//require", diff --git a/ttl/metrics/BUILD.bazel b/ttl/metrics/BUILD.bazel index 5945532cfda66..85e31cf8dff57 100644 --- a/ttl/metrics/BUILD.bazel +++ b/ttl/metrics/BUILD.bazel @@ -17,6 +17,5 @@ go_test( srcs = ["metrics_test.go"], embed = [":metrics"], flaky = True, - shard_count = 5, deps = ["@com_github_stretchr_testify//require"], ) diff --git a/ttl/session/BUILD.bazel b/ttl/session/BUILD.bazel index a98067cff9854..ced8b7727f994 100644 --- a/ttl/session/BUILD.bazel +++ b/ttl/session/BUILD.bazel @@ -28,7 +28,7 @@ go_test( "sysvar_test.go", ], flaky = True, - shard_count = 5, + shard_count = 6, deps = [ ":session", "//sessionctx/variable", From 575b652c7a7cd005ef922f8f0c3eb96cd7483785 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E8=B6=85?= Date: Thu, 6 Apr 2023 16:58:58 +0800 Subject: [PATCH 11/12] disttask: `TaskMananger` uses pool to get a session (#42832) close pingcap/tidb#42831 --- disttask/framework/dispatcher/BUILD.bazel | 1 + .../framework/dispatcher/dispatcher_test.go | 27 +++- disttask/framework/storage/BUILD.bazel | 2 + disttask/framework/storage/table_test.go | 15 ++- disttask/framework/storage/task_table.go | 126 ++++++++---------- domain/domain.go | 12 +- 6 files changed, 96 insertions(+), 87 deletions(-) diff --git a/disttask/framework/dispatcher/BUILD.bazel b/disttask/framework/dispatcher/BUILD.bazel index c2b44f291e868..1c3dd3ca3505d 100644 --- a/disttask/framework/dispatcher/BUILD.bazel +++ b/disttask/framework/dispatcher/BUILD.bazel @@ -45,6 +45,7 @@ go_test( "//testkit", "//testkit/testsetup", "//util/logutil", + "@com_github_ngaut_pools//:pools", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", "@com_github_stretchr_testify//require", diff --git a/disttask/framework/dispatcher/dispatcher_test.go b/disttask/framework/dispatcher/dispatcher_test.go index f915cad317cba..d3a06a0044155 100644 --- a/disttask/framework/dispatcher/dispatcher_test.go +++ b/disttask/framework/dispatcher/dispatcher_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/ngaut/pools" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/disttask/framework/dispatcher" @@ -34,15 +35,13 @@ import ( "github.com/tikv/client-go/v2/util" ) -func MockDispatcher(t *testing.T) (dispatcher.Dispatch, *storage.TaskManager, kv.Storage) { - store := testkit.CreateMockStore(t) - gtk := testkit.NewTestKit(t, store) +func MockDispatcher(t *testing.T, pool *pools.ResourcePool) (dispatcher.Dispatch, *storage.TaskManager) { ctx := context.Background() - mgr := storage.NewTaskManager(util.WithInternalSourceType(ctx, "taskManager"), gtk.Session()) + mgr := storage.NewTaskManager(util.WithInternalSourceType(ctx, "taskManager"), pool) storage.SetTaskManager(mgr) dsp, err := dispatcher.NewDispatcher(util.WithInternalSourceType(ctx, "dispatcher"), mgr) require.NoError(t, err) - return dsp, mgr, store + return dsp, mgr } func deleteTasks(t *testing.T, store kv.Storage, taskID int64) { @@ -52,7 +51,14 @@ func deleteTasks(t *testing.T, store kv.Storage, taskID int64) { func TestGetInstance(t *testing.T) { ctx := context.Background() - dsp, mgr, _ := MockDispatcher(t) + store := testkit.CreateMockStore(t) + gtk := testkit.NewTestKit(t, store) 
+ pool := pools.NewResourcePool(func() (pools.Resource, error) { + return gtk.Session(), nil + }, 1, 1, time.Second) + defer pool.Close() + + dsp, mgr := MockDispatcher(t, pool) makeFailpointRes := func(v interface{}) string { bytes, err := json.Marshal(v) @@ -139,7 +145,14 @@ func checkDispatch(t *testing.T, taskCnt int, isSucc bool) { dispatcher.DefaultDispatchConcurrency = 1 } - dsp, mgr, store := MockDispatcher(t) + store := testkit.CreateMockStore(t) + gtk := testkit.NewTestKit(t, store) + pool := pools.NewResourcePool(func() (pools.Resource, error) { + return gtk.Session(), nil + }, 1, 1, time.Second) + defer pool.Close() + + dsp, mgr := MockDispatcher(t, pool) dsp.Start() defer func() { dsp.Stop() diff --git a/disttask/framework/storage/BUILD.bazel b/disttask/framework/storage/BUILD.bazel index 08a526f31595a..b1db15854e1bb 100644 --- a/disttask/framework/storage/BUILD.bazel +++ b/disttask/framework/storage/BUILD.bazel @@ -12,6 +12,7 @@ go_library( "//util/chunk", "//util/logutil", "//util/sqlexec", + "@com_github_ngaut_pools//:pools", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", "@com_github_tikv_client_go_v2//util", @@ -29,6 +30,7 @@ go_test( "//disttask/framework/proto", "//testkit", "//testkit/testsetup", + "@com_github_ngaut_pools//:pools", "@com_github_stretchr_testify//require", "@org_uber_go_goleak//:goleak", ], diff --git a/disttask/framework/storage/table_test.go b/disttask/framework/storage/table_test.go index ca3058fb0ba89..a8e199e4cb947 100644 --- a/disttask/framework/storage/table_test.go +++ b/disttask/framework/storage/table_test.go @@ -19,6 +19,7 @@ import ( "testing" "time" + "github.com/ngaut/pools" "github.com/pingcap/tidb/disttask/framework/proto" "github.com/pingcap/tidb/disttask/framework/storage" "github.com/pingcap/tidb/testkit" @@ -41,8 +42,11 @@ func TestGlobalTaskTable(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) - - gm := storage.NewTaskManager(context.Background(), tk.Session()) + pool := pools.NewResourcePool(func() (pools.Resource, error) { + return tk.Session(), nil + }, 1, 1, time.Second) + defer pool.Close() + gm := storage.NewTaskManager(context.Background(), pool) storage.SetTaskManager(gm) gm, err := storage.GetTaskManager() @@ -97,8 +101,11 @@ func TestSubTaskTable(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) - - sm := storage.NewTaskManager(context.Background(), tk.Session()) + pool := pools.NewResourcePool(func() (pools.Resource, error) { + return tk.Session(), nil + }, 1, 1, time.Second) + defer pool.Close() + sm := storage.NewTaskManager(context.Background(), pool) storage.SetTaskManager(sm) sm, err := storage.GetTaskManager() diff --git a/disttask/framework/storage/task_table.go b/disttask/framework/storage/task_table.go index 42208a9972b48..2ebacc675ed1b 100644 --- a/disttask/framework/storage/task_table.go +++ b/disttask/framework/storage/task_table.go @@ -18,10 +18,10 @@ import ( "context" "strconv" "strings" - "sync" "sync/atomic" "time" + "github.com/ngaut/pools" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/disttask/framework/proto" @@ -36,19 +36,18 @@ import ( // TaskManager is the manager of global/sub task. type TaskManager struct { - ctx context.Context - se sessionctx.Context - mu sync.Mutex + ctx context.Context + sePool *pools.ResourcePool } var taskManagerInstance atomic.Pointer[TaskManager] // NewTaskManager creates a new task manager. 
-func NewTaskManager(ctx context.Context, se sessionctx.Context) *TaskManager { +func NewTaskManager(ctx context.Context, sePool *pools.ResourcePool) *TaskManager { ctx = util.WithInternalSourceType(ctx, kv.InternalDistTask) return &TaskManager{ - ctx: ctx, - se: se, + ctx: ctx, + sePool: sePool, } } @@ -105,30 +104,58 @@ func row2GlobeTask(r chunk.Row) *proto.Task { return task } -// AddNewGlobalTask adds a new task to global task table. -func (stm *TaskManager) AddNewGlobalTask(key, tp string, concurrency int, meta []byte) (int64, error) { - stm.mu.Lock() - defer stm.mu.Unlock() +func (stm *TaskManager) withNewSession(fn func(se sessionctx.Context) error) error { + se, err := stm.sePool.Get() + if err != nil { + return err + } + defer stm.sePool.Put(se) + return fn(se.(sessionctx.Context)) +} + +func (stm *TaskManager) executeSQLWithNewSession(ctx context.Context, sql string, args ...interface{}) (rs []chunk.Row, err error) { + err = stm.withNewSession(func(se sessionctx.Context) error { + rs, err = execSQL(ctx, se, sql, args...) + return err + }) - _, err := execSQL(stm.ctx, stm.se, "insert into mysql.tidb_global_task(task_key, type, state, concurrency, meta, state_update_time) values (%?, %?, %?, %?, %?, %?)", key, tp, proto.TaskStatePending, concurrency, meta, time.Now().UTC().String()) if err != nil { - return 0, err + return nil, err } - rs, err := execSQL(stm.ctx, stm.se, "select @@last_insert_id") + return +} + +// AddNewGlobalTask adds a new task to global task table. +func (stm *TaskManager) AddNewGlobalTask(key, tp string, concurrency int, meta []byte) (taskID int64, err error) { + err = stm.withNewSession(func(se sessionctx.Context) error { + _, err = execSQL(stm.ctx, se, "insert into mysql.tidb_global_task(task_key, type, state, concurrency, meta, state_update_time) values (%?, %?, %?, %?, %?, %?)", key, tp, proto.TaskStatePending, concurrency, meta, time.Now().UTC().String()) + if err != nil { + return err + } + + rs, err := execSQL(stm.ctx, se, "select @@last_insert_id") + if err != nil { + return err + } + + taskID, err = strconv.ParseInt(rs[0].GetString(0), 10, 64) + if err != nil { + return err + } + + return nil + }) + if err != nil { return 0, err } - - return strconv.ParseInt(rs[0].GetString(0), 10, 64) + return } // GetNewGlobalTask get a new task from global task table, it's used by dispatcher only. func (stm *TaskManager) GetNewGlobalTask() (task *proto.Task, err error) { - stm.mu.Lock() - defer stm.mu.Unlock() - - rs, err := execSQL(stm.ctx, stm.se, "select id, task_key, type, dispatcher_id, state, start_time, state_update_time, meta, concurrency, step from mysql.tidb_global_task where state = %? limit 1", proto.TaskStatePending) + rs, err := stm.executeSQLWithNewSession(stm.ctx, "select id, task_key, type, dispatcher_id, state, start_time, state_update_time, meta, concurrency, step from mysql.tidb_global_task where state = %? limit 1", proto.TaskStatePending) if err != nil { return task, err } @@ -147,10 +174,8 @@ func (stm *TaskManager) UpdateGlobalTask(task *proto.Task) error { failpoint.Return(errors.New("updateTaskErr")) } }) - stm.mu.Lock() - defer stm.mu.Unlock() - _, err := execSQL(stm.ctx, stm.se, "update mysql.tidb_global_task set state = %?, dispatcher_id = %?, step = %?, state_update_time = %?, concurrency = %? where id = %?", + _, err := stm.executeSQLWithNewSession(stm.ctx, "update mysql.tidb_global_task set state = %?, dispatcher_id = %?, step = %?, state_update_time = %?, concurrency = %? 
where id = %?", task.State, task.DispatcherID, task.Step, task.StateUpdateTime.UTC().String(), task.Concurrency, task.ID) if err != nil { return err @@ -161,14 +186,11 @@ func (stm *TaskManager) UpdateGlobalTask(task *proto.Task) error { // GetGlobalTasksInStates gets the tasks in the states. func (stm *TaskManager) GetGlobalTasksInStates(states ...interface{}) (task []*proto.Task, err error) { - stm.mu.Lock() - defer stm.mu.Unlock() - if len(states) == 0 { return task, nil } - rs, err := execSQL(stm.ctx, stm.se, "select id, task_key, type, dispatcher_id, state, start_time, state_update_time, meta, concurrency, step from mysql.tidb_global_task where state in ("+strings.Repeat("%?,", len(states)-1)+"%?)", states...) + rs, err := stm.executeSQLWithNewSession(stm.ctx, "select id, task_key, type, dispatcher_id, state, start_time, state_update_time, meta, concurrency, step from mysql.tidb_global_task where state in ("+strings.Repeat("%?,", len(states)-1)+"%?)", states...) if err != nil { return task, err } @@ -181,10 +203,7 @@ func (stm *TaskManager) GetGlobalTasksInStates(states ...interface{}) (task []*p // GetGlobalTaskByID gets the task by the global task ID. func (stm *TaskManager) GetGlobalTaskByID(taskID int64) (task *proto.Task, err error) { - stm.mu.Lock() - defer stm.mu.Unlock() - - rs, err := execSQL(stm.ctx, stm.se, "select id, task_key, type, dispatcher_id, state, start_time, state_update_time, meta, concurrency, step from mysql.tidb_global_task where id = %?", taskID) + rs, err := stm.executeSQLWithNewSession(stm.ctx, "select id, task_key, type, dispatcher_id, state, start_time, state_update_time, meta, concurrency, step from mysql.tidb_global_task where id = %?", taskID) if err != nil { return task, err } @@ -197,10 +216,7 @@ func (stm *TaskManager) GetGlobalTaskByID(taskID int64) (task *proto.Task, err e // GetGlobalTaskByKey gets the task by the task key func (stm *TaskManager) GetGlobalTaskByKey(key string) (task *proto.Task, err error) { - stm.mu.Lock() - defer stm.mu.Unlock() - - rs, err := execSQL(stm.ctx, stm.se, "select id, task_key, type, dispatcher_id, state, start_time, state_update_time, meta, concurrency, step from mysql.tidb_global_task where task_key = %?", key) + rs, err := stm.executeSQLWithNewSession(stm.ctx, "select id, task_key, type, dispatcher_id, state, start_time, state_update_time, meta, concurrency, step from mysql.tidb_global_task where task_key = %?", key) if err != nil { return task, err } @@ -231,15 +247,12 @@ func row2SubTask(r chunk.Row) *proto.Subtask { // AddNewSubTask adds a new task to subtask table. func (stm *TaskManager) AddNewSubTask(globalTaskID int64, designatedTiDBID string, meta []byte, tp string, isRevert bool) error { - stm.mu.Lock() - defer stm.mu.Unlock() - st := proto.TaskStatePending if isRevert { st = proto.TaskStateRevertPending } - _, err := execSQL(stm.ctx, stm.se, "insert into mysql.tidb_background_subtask(task_key, exec_id, meta, state, type, checkpoint) values (%?, %?, %?, %?, %?, %?)", globalTaskID, designatedTiDBID, meta, st, proto.Type2Int(tp), []byte{}) + _, err := stm.executeSQLWithNewSession(stm.ctx, "insert into mysql.tidb_background_subtask(task_key, exec_id, meta, state, type, checkpoint) values (%?, %?, %?, %?, %?, %?)", globalTaskID, designatedTiDBID, meta, st, proto.Type2Int(tp), []byte{}) if err != nil { return err } @@ -249,12 +262,9 @@ func (stm *TaskManager) AddNewSubTask(globalTaskID int64, designatedTiDBID strin // GetSubtaskInStates gets the subtask in the states. 
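One pattern worth calling out in these queries: the set of states is variable, so the IN list is built by repeating the internal "%?" placeholder, strings.Repeat("%?,", len(states)-1) plus a trailing "%?", while the states themselves travel as ordinary arguments to execSQL. A small sketch of just that string construction, using an invented buildInClause helper:

package main

import (
	"fmt"
	"strings"
)

// buildInClause returns "<column> in (%?,%?,...)" with n placeholders.
// n is assumed to be at least 1, matching how the callers above always pass
// at least one state.
func buildInClause(column string, n int) string {
	return column + " in (" + strings.Repeat("%?,", n-1) + "%?)"
}

func main() {
	states := []interface{}{"pending", "running", "reverting"}
	sql := "select id from mysql.tidb_global_task where " + buildInClause("state", len(states))
	fmt.Println(sql)    // ... where state in (%?,%?,%?)
	fmt.Println(states) // the states are passed separately as query arguments
}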
func (stm *TaskManager) GetSubtaskInStates(tidbID string, taskID int64, states ...interface{}) (*proto.Subtask, error) { - stm.mu.Lock() - defer stm.mu.Unlock() - args := []interface{}{tidbID, taskID} args = append(args, states...) - rs, err := execSQL(stm.ctx, stm.se, "select * from mysql.tidb_background_subtask where exec_id = %? and task_key = %? and state in ("+strings.Repeat("%?,", len(states)-1)+"%?)", args...) + rs, err := stm.executeSQLWithNewSession(stm.ctx, "select * from mysql.tidb_background_subtask where exec_id = %? and task_key = %? and state in ("+strings.Repeat("%?,", len(states)-1)+"%?)", args...) if err != nil { return nil, err } @@ -267,12 +277,9 @@ func (stm *TaskManager) GetSubtaskInStates(tidbID string, taskID int64, states . // GetSubtaskInStatesCnt gets the subtask count in the states. func (stm *TaskManager) GetSubtaskInStatesCnt(taskID int64, states ...interface{}) (int64, error) { - stm.mu.Lock() - defer stm.mu.Unlock() - args := []interface{}{taskID} args = append(args, states...) - rs, err := execSQL(stm.ctx, stm.se, "select count(*) from mysql.tidb_background_subtask where task_key = %? and state in ("+strings.Repeat("%?,", len(states)-1)+"%?)", args...) + rs, err := stm.executeSQLWithNewSession(stm.ctx, "select count(*) from mysql.tidb_background_subtask where task_key = %? and state in ("+strings.Repeat("%?,", len(states)-1)+"%?)", args...) if err != nil { return 0, err } @@ -282,12 +289,9 @@ func (stm *TaskManager) GetSubtaskInStatesCnt(taskID int64, states ...interface{ // HasSubtasksInStates checks if there are subtasks in the states. func (stm *TaskManager) HasSubtasksInStates(tidbID string, taskID int64, states ...interface{}) (bool, error) { - stm.mu.Lock() - defer stm.mu.Unlock() - args := []interface{}{tidbID, taskID} args = append(args, states...) - rs, err := execSQL(stm.ctx, stm.se, "select 1 from mysql.tidb_background_subtask where exec_id = %? and task_key = %? and state in ("+strings.Repeat("%?,", len(states)-1)+"%?) limit 1", args...) + rs, err := stm.executeSQLWithNewSession(stm.ctx, "select 1 from mysql.tidb_background_subtask where exec_id = %? and task_key = %? and state in ("+strings.Repeat("%?,", len(states)-1)+"%?) limit 1", args...) if err != nil { return false, err } @@ -297,28 +301,19 @@ func (stm *TaskManager) HasSubtasksInStates(tidbID string, taskID int64, states // UpdateSubtaskState updates the subtask state. func (stm *TaskManager) UpdateSubtaskState(id int64, state string) error { - stm.mu.Lock() - defer stm.mu.Unlock() - - _, err := execSQL(stm.ctx, stm.se, "update mysql.tidb_background_subtask set state = %? where id = %?", state, id) + _, err := stm.executeSQLWithNewSession(stm.ctx, "update mysql.tidb_background_subtask set state = %? where id = %?", state, id) return err } // UpdateSubtaskHeartbeat updates the heartbeat of the subtask. func (stm *TaskManager) UpdateSubtaskHeartbeat(instanceID string, taskID int64, heartbeat time.Time) error { - stm.mu.Lock() - defer stm.mu.Unlock() - - _, err := execSQL(stm.ctx, stm.se, "update mysql.tidb_background_subtask set exec_expired = %? where exec_id = %? and task_key = %?", heartbeat.String(), instanceID, taskID) + _, err := stm.executeSQLWithNewSession(stm.ctx, "update mysql.tidb_background_subtask set exec_expired = %? where exec_id = %? and task_key = %?", heartbeat.String(), instanceID, taskID) return err } // DeleteSubtasksByTaskID deletes the subtask of the given global task ID. 
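The overall shape of this commit: instead of owning a single sessionctx.Context guarded by a mutex, TaskManager now borrows a session from a pools.ResourcePool for every statement; withNewSession gets a resource, defers Put, and runs the callback, so the per-manager lock disappears and concurrency is bounded by the pool capacity instead. A minimal runnable sketch of that borrow-per-call pattern against the same github.com/ngaut/pools API; fakeSession is a stand-in for sessionctx.Context, not real TiDB code:

package main

import (
	"fmt"
	"time"

	"github.com/ngaut/pools"
)

// fakeSession stands in for sessionctx.Context; only Close is needed to
// satisfy pools.Resource.
type fakeSession struct{ id int }

func (*fakeSession) Close() {}

func main() {
	next := 0
	pool := pools.NewResourcePool(func() (pools.Resource, error) {
		next++
		return &fakeSession{id: next}, nil
	}, 2, 2, time.Minute) // capacity, max capacity, idle timeout
	defer pool.Close()

	// withSession mirrors TaskManager.withNewSession: borrow, defer Put, run.
	withSession := func(fn func(se *fakeSession) error) error {
		r, err := pool.Get()
		if err != nil {
			return err
		}
		defer pool.Put(r)
		return fn(r.(*fakeSession))
	}

	_ = withSession(func(se *fakeSession) error {
		fmt.Println("using pooled session", se.id)
		return nil
	})
}

The test changes earlier in this commit follow the same shape, wrapping a single testkit session in a pool of capacity 1 and closing the pool when the test finishes.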
 func (stm *TaskManager) DeleteSubtasksByTaskID(taskID int64) error {
-	stm.mu.Lock()
-	defer stm.mu.Unlock()
-
-	_, err := execSQL(stm.ctx, stm.se, "delete from mysql.tidb_background_subtask where task_key = %?", taskID)
+	_, err := stm.executeSQLWithNewSession(stm.ctx, "delete from mysql.tidb_background_subtask where task_key = %?", taskID)
 	if err != nil {
 		return err
 	}
@@ -328,10 +323,7 @@ func (stm *TaskManager) DeleteSubtasksByTaskID(taskID int64) error {
 
 // GetSchedulerIDsByTaskID gets the scheduler IDs of the given global task ID.
 func (stm *TaskManager) GetSchedulerIDsByTaskID(taskID int64) ([]string, error) {
-	stm.mu.Lock()
-	defer stm.mu.Unlock()
-
-	rs, err := execSQL(stm.ctx, stm.se, "select distinct(exec_id) from mysql.tidb_background_subtask where task_key = %?", taskID)
+	rs, err := stm.executeSQLWithNewSession(stm.ctx, "select distinct(exec_id) from mysql.tidb_background_subtask where task_key = %?", taskID)
 	if err != nil {
 		return nil, err
 	}
diff --git a/domain/domain.go b/domain/domain.go
index 360f55d9763ec..791c9fe62e212 100644
--- a/domain/domain.go
+++ b/domain/domain.go
@@ -1155,7 +1155,7 @@ func (do *Domain) Init(
 		return err
 	}
 
-	if err = do.initDistTaskLoop(ctx); err != nil {
+	if err = do.initDistTaskLoop(ctx, sysCtxPool); err != nil {
 		return err
 	}
 	// step 3: start the ddl after the domain reload, avoiding some internal sql running before infoSchema construction.
@@ -1345,21 +1345,16 @@ func (do *Domain) checkReplicaRead(ctx context.Context, pdClient pd.Client) erro
 	return nil
 }
 
-func (do *Domain) initDistTaskLoop(ctx context.Context) error {
+func (do *Domain) initDistTaskLoop(ctx context.Context, sePool *pools.ResourcePool) error {
 	failpoint.Inject("MockDisableDistTask", func(val failpoint.Value) {
 		if val.(bool) {
 			failpoint.Return(nil)
 		}
 	})
-	se, err := do.sysExecutorFactory(do)
-	if err != nil {
-		return err
-	}
 
-	taskManager := storage.NewTaskManager(kv.WithInternalSourceType(ctx, kv.InternalDistTask), se.(sessionctx.Context))
+	taskManager := storage.NewTaskManager(ctx, sePool)
 	schedulerManager, err := scheduler.NewManagerBuilder().BuildManager(ctx, do.ddl.GetID(), taskManager)
 	if err != nil {
-		se.Close()
 		return err
 	}
 
@@ -1367,7 +1362,6 @@ func (do *Domain) initDistTaskLoop(ctx context.Context) error {
 	do.wg.Run(func() {
 		defer func() {
 			storage.SetTaskManager(nil)
-			se.Close()
 		}()
 		do.distTaskFrameworkLoop(ctx, taskManager, schedulerManager)
 	}, "distTaskFrameworkLoop")

From 915b39b8845ae78f39349f6ef6b7ed47222b1221 Mon Sep 17 00:00:00 2001
From: Ling Jin <7138436+3AceShowHand@users.noreply.github.com>
Date: Thu, 6 Apr 2023 17:44:57 +0800
Subject: [PATCH 12/12] ddl: DDL job add charset and collate when create the index (#42750)

close pingcap/tidb#42748
---
 ddl/ddl_api.go                        | 3 +++
 ddl/index.go                          | 3 +++
 executor/seqtest/seq_executor_test.go | 2 +-
 parser/model/ddl.go                   | 7 +++++++
 parser/model/ddl_test.go              | 2 +-
 5 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go
index 37cf2f1e44879..58cc5030d24b0 100644
--- a/ddl/ddl_api.go
+++ b/ddl/ddl_api.go
@@ -6703,6 +6703,7 @@ func (d *ddl) createIndex(ctx sessionctx.Context, ti ast.Ident, keyType ast.Inde
 	}
 
 	tzName, tzOffset := ddlutil.GetTimeZone(ctx)
+	charset, collate := ctx.GetSessionVars().GetCharsetInfo()
 	job := &model.Job{
 		SchemaID: schema.ID,
 		TableID:  t.Meta().ID,
@@ -6718,6 +6719,8 @@ func (d *ddl) createIndex(ctx sessionctx.Context, ti ast.Ident, keyType ast.Inde
 		},
 		Args:     []interface{}{unique, indexName, indexPartSpecifications, indexOption, hiddenCols, global},
 		Priority: ctx.GetSessionVars().DDLReorgPriority,
+		Charset:  charset,
+		Collate:  collate,
 	}
 
 	err = d.DoDDLJob(ctx, job)
diff --git a/ddl/index.go b/ddl/index.go
index b095bc7eed634..0acfd4fd79f6f 100644
--- a/ddl/index.go
+++ b/ddl/index.go
@@ -699,6 +699,9 @@ func (w *worker) onCreateIndex(d *ddlCtx, t *meta.Meta, job *model.Job, isPK boo
 		if job.ReorgMeta.ReorgTp == model.ReorgTypeLitMerge {
 			ingest.LitBackCtxMgr.Unregister(job.ID)
 		}
+		logutil.BgLogger().Info("[ddl] run add index job done",
+			zap.String("charset", job.Charset),
+			zap.String("collation", job.Collate))
 	default:
 		err = dbterror.ErrInvalidDDLState.GenWithStackByArgs("index", tblInfo.State)
 	}
diff --git a/executor/seqtest/seq_executor_test.go b/executor/seqtest/seq_executor_test.go
index 5af9e3dc18196..1ca8a92f6c958 100644
--- a/executor/seqtest/seq_executor_test.go
+++ b/executor/seqtest/seq_executor_test.go
@@ -940,7 +940,7 @@ func TestBatchInsertDelete(t *testing.T) {
 		kv.TxnTotalSizeLimit.Store(originLimit)
 	}()
 	// Set the limitation to a small value, make it easier to reach the limitation.
-	kv.TxnTotalSizeLimit.Store(5900)
+	kv.TxnTotalSizeLimit.Store(6000)
 
 	tk := testkit.NewTestKit(t, store)
 	tk.MustExec("use test")
diff --git a/parser/model/ddl.go b/parser/model/ddl.go
index 23638519e01d9..d6d8790962382 100644
--- a/parser/model/ddl.go
+++ b/parser/model/ddl.go
@@ -344,6 +344,8 @@ func (sub *SubJob) ToProxyJob(parentJob *Job) Job {
 		MultiSchemaInfo: &MultiSchemaInfo{Revertible: sub.Revertible},
 		Priority:        parentJob.Priority,
 		SeqNum:          parentJob.SeqNum,
+		Charset:         parentJob.Charset,
+		Collate:         parentJob.Collate,
 	}
 }
 
@@ -423,6 +425,11 @@ type Job struct {
 
 	// SeqNum is the total order in all DDLs, it's used to identify the order of DDL.
 	SeqNum uint64 `json:"seq_num"`
+
+	// Charset is the charset when the DDL Job is created.
+	Charset string `json:"charset"`
+	// Collate is the collation the DDL Job is created.
+	Collate string `json:"collate"`
 }
 
 // FinishTableJob is called when a job is finished.
diff --git a/parser/model/ddl_test.go b/parser/model/ddl_test.go
index 04d2992aed939..1b501327239cf 100644
--- a/parser/model/ddl_test.go
+++ b/parser/model/ddl_test.go
@@ -50,7 +50,7 @@ func TestJobSize(t *testing.T) {
 	- SubJob.ToProxyJob()
 	`
 	job := model.Job{}
-	require.Equal(t, 288, int(unsafe.Sizeof(job)), msg)
+	require.Equal(t, 320, int(unsafe.Sizeof(job)), msg)
 }
 
 func TestBackfillMetaCodec(t *testing.T) {
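
The net effect of PATCH 12/12 is easiest to see outside the diff: the charset and collation of the creating session are captured once in createIndex, stored on the DDL job, inherited by proxy sub-jobs, and later readable by the worker that runs the job. The following standalone Go sketch illustrates that flow; it uses hypothetical stand-in types (job, sessionVars) rather than TiDB's actual model.Job or sessionctx API, and is only a rough approximation of the shape of the change under those assumptions.

// Minimal, self-contained sketch of the charset/collation propagation idea
// from PATCH 12/12. All names here are hypothetical, not TiDB identifiers.
package main

import "fmt"

// job mirrors the two fields the patch adds to the DDL job.
type job struct {
	Type    string
	Charset string // charset of the creating session
	Collate string // collation of the creating session
}

// sessionVars stands in for the session state a DDL layer would consult.
type sessionVars struct {
	charset string
	collate string
}

// getCharsetInfo plays the role of reading the session's charset info.
func (s sessionVars) getCharsetInfo() (string, string) {
	return s.charset, s.collate
}

// newCreateIndexJob builds a job the way the patched createIndex does:
// the charset/collation are read once and stored on the job itself.
func newCreateIndexJob(s sessionVars) job {
	charset, collate := s.getCharsetInfo()
	return job{Type: "add index", Charset: charset, Collate: collate}
}

// toProxyJob mirrors the sub-job path: a derived job inherits both fields.
func toProxyJob(parent job) job {
	return job{Type: parent.Type, Charset: parent.Charset, Collate: parent.Collate}
}

func main() {
	s := sessionVars{charset: "utf8mb4", collate: "utf8mb4_bin"}
	j := newCreateIndexJob(s)
	sub := toProxyJob(j)
	// The worker side can now log or use the values, as onCreateIndex logs them.
	fmt.Printf("job: charset=%s collation=%s\n", j.Charset, j.Collate)
	fmt.Printf("proxy job: charset=%s collation=%s\n", sub.Charset, sub.Collate)
}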