Merge branch 'master' into join-reorder-dp
winoros committed Dec 26, 2018
2 parents 868a44d + 123aba2 · commit 70099c9
Showing 39 changed files with 646 additions and 225 deletions.
10 changes: 10 additions & 0 deletions CHANGELOG.md
@@ -1,6 +1,16 @@
# TiDB Changelog
All notable changes to this project will be documented in this file. See also [Release Notes](https://github.com/pingcap/docs/blob/master/releases/rn.md), [TiKV Changelog](https://github.com/tikv/tikv/blob/master/CHANGELOG.md) and [PD Changelog](https://github.com/pingcap/pd/blob/master/CHANGELOG.md).

## [2.1.2] 2018-12-21
* Make TiDB compatible with TiDB-Binlog of the Kafka version [#8747](https://github.com/pingcap/tidb/pull/8747)
* Improve the exit mechanism of TiDB in a rolling update [#8707](https://github.com/pingcap/tidb/pull/8707)
* Fix the panic issue caused by adding the index for the generated column in some cases [#8676](https://github.com/pingcap/tidb/pull/8676)
* Fix the issue that the optimizer cannot find the optimal query plan when `TIDB_SMJ Hint` exists in the SQL statement in some cases [#8729](https://github.com/pingcap/tidb/pull/8729)
* Fix the issue that `AntiSemiJoin` returns an incorrect result in some cases [#8730](https://github.com/pingcap/tidb/pull/8730)
* Improve the valid character check of the `utf8` character set [#8754](https://github.com/pingcap/tidb/pull/8754)
* Fix the issue that the field of the time type might return an incorrect result when the write operation is performed before the read operation in a transaction [#8746](https://github.com/pingcap/tidb/pull/8746)


## [2.1.1] 2018-12-12

### SQL Optimizer/Executor
50 changes: 25 additions & 25 deletions cmd/explaintest/r/explain_easy.result
@@ -124,7 +124,7 @@ Projection_12 10000.00 root eq(test.t1.c2, test.t2.c2)
└─Limit_21 1.00 root offset:0, count:1
└─IndexLookUp_43 1.00 root
├─Limit_42 1.00 cop offset:0, count:1
│ └─IndexScan_40 1.25 cop table:t2, index:c1, range: decided by [eq(test.t1.c1, test.t2.c1)], keep order:true, stats:pseudo
│ └─IndexScan_40 1.00 cop table:t2, index:c1, range: decided by [eq(test.t1.c1, test.t2.c1)], keep order:true, stats:pseudo
└─TableScan_41 1.00 cop table:t2, keep order:false
explain select * from t1 order by c1 desc limit 1;
id count task operator info
@@ -280,11 +280,11 @@ Projection_11 10000.00 root 9_aux_0
├─TableReader_15 10000.00 root data:TableScan_14
│ └─TableScan_14 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo
└─StreamAgg_20 1.00 root funcs:count(1)
└─IndexJoin_44 10000.00 root inner join, inner:TableReader_43, outer key:s.a, inner key:t1.a
└─IndexJoin_44 12.50 root inner join, inner:TableReader_43, outer key:s.a, inner key:t1.a
├─TableReader_52 1.00 root data:TableScan_51
│ └─TableScan_51 1.00 cop table:s, range: decided by [eq(s.a, test.t.a)], keep order:false, stats:pseudo
└─TableReader_43 8000.00 root data:Selection_42
└─Selection_42 8000.00 cop eq(t1.a, test.t.a)
└─TableReader_43 10.00 root data:Selection_42
└─Selection_42 10.00 cop eq(t1.a, test.t.a)
└─TableScan_41 10.00 cop table:t1, range: decided by [s.a], keep order:false, stats:pseudo
explain select t.c in (select count(*) from t s use index(idx), t t1 where s.b = t.a and s.a = t1.a) from t;
id count task operator info
@@ -293,9 +293,9 @@ Projection_11 10000.00 root 9_aux_0
├─TableReader_15 10000.00 root data:TableScan_14
│ └─TableScan_14 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo
└─StreamAgg_20 1.00 root funcs:count(1)
└─IndexJoin_32 10000.00 root inner join, inner:TableReader_31, outer key:s.a, inner key:t1.a
├─IndexReader_36 10000.00 root index:IndexScan_35
│ └─IndexScan_35 10000.00 cop table:s, index:b, range: decided by [eq(s.b, test.t.a)], keep order:false, stats:pseudo
└─IndexJoin_32 12.50 root inner join, inner:TableReader_31, outer key:s.a, inner key:t1.a
├─IndexReader_36 10.00 root index:IndexScan_35
│ └─IndexScan_35 10.00 cop table:s, index:b, range: decided by [eq(s.b, test.t.a)], keep order:false, stats:pseudo
└─TableReader_31 10.00 root data:TableScan_30
└─TableScan_30 10.00 cop table:t1, range: decided by [s.a], keep order:false, stats:pseudo
explain select t.c in (select count(*) from t s use index(idx), t t1 where s.b = t.a and s.c = t1.a) from t;
@@ -305,10 +305,10 @@ Projection_11 10000.00 root 9_aux_0
├─TableReader_15 10000.00 root data:TableScan_14
│ └─TableScan_14 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo
└─StreamAgg_20 1.00 root funcs:count(1)
└─IndexJoin_33 10000.00 root inner join, inner:TableReader_32, outer key:s.c, inner key:t1.a
├─IndexLookUp_38 10000.00 root
│ ├─IndexScan_36 10000.00 cop table:s, index:b, range: decided by [eq(s.b, test.t.a)], keep order:false, stats:pseudo
│ └─TableScan_37 10000.00 cop table:t, keep order:false, stats:pseudo
└─IndexJoin_33 12.50 root inner join, inner:TableReader_32, outer key:s.c, inner key:t1.a
├─IndexLookUp_38 10.00 root
│ ├─IndexScan_36 10.00 cop table:s, index:b, range: decided by [eq(s.b, test.t.a)], keep order:false, stats:pseudo
│ └─TableScan_37 10.00 cop table:t, keep order:false, stats:pseudo
└─TableReader_32 10.00 root data:TableScan_31
└─TableScan_31 10.00 cop table:t1, range: decided by [s.c], keep order:false, stats:pseudo
drop table if exists t;
@@ -479,14 +479,14 @@ Projection_12 10000.00 root 9_aux_0
├─TableReader_16 10000.00 root data:TableScan_15
│ └─TableScan_15 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo
└─HashAgg_19 1.00 root funcs:count(join_agg_0)
└─HashLeftJoin_20 8000.00 root inner join, inner:HashAgg_30, equal:[eq(s.a, t1.a)]
├─TableReader_24 8000.00 root data:Selection_23
│ └─Selection_23 8000.00 cop eq(s.a, test.t.a)
└─HashLeftJoin_20 10.00 root inner join, inner:HashAgg_30, equal:[eq(s.a, t1.a)]
├─TableReader_24 10.00 root data:Selection_23
│ └─Selection_23 10.00 cop eq(s.a, test.t.a)
│ └─TableScan_22 10000.00 cop table:s, range:[-inf,+inf], keep order:false, stats:pseudo
└─HashAgg_30 6400.00 root group by:col_2, funcs:count(col_0), firstrow(col_1)
└─TableReader_31 6400.00 root data:HashAgg_25
└─HashAgg_25 6400.00 cop group by:t1.a, funcs:count(1), firstrow(t1.a)
└─Selection_29 8000.00 cop eq(t1.a, test.t.a)
└─HashAgg_30 8.00 root group by:col_2, funcs:count(col_0), firstrow(col_1)
└─TableReader_31 8.00 root data:HashAgg_25
└─HashAgg_25 8.00 cop group by:t1.a, funcs:count(1), firstrow(t1.a)
└─Selection_29 10.00 cop eq(t1.a, test.t.a)
└─TableScan_28 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
explain select * from t ta left outer join t tb on ta.nb = tb.nb and ta.a > 1 where ifnull(tb.a, 1) or tb.a is null;
id count task operator info
@@ -513,14 +513,14 @@ Projection_14 10000.00 root 9_aux_0
│ └─TableReader_19 10000.00 root data:TableScan_18
│ └─TableScan_18 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo
└─HashAgg_23 1.00 root funcs:count(join_agg_0)
└─HashLeftJoin_24 8000.00 root inner join, inner:HashAgg_34, equal:[eq(s.a, t1.a)]
├─TableReader_28 8000.00 root data:Selection_27
│ └─Selection_27 8000.00 cop eq(s.a, test.t.a)
└─HashLeftJoin_24 10.00 root inner join, inner:HashAgg_34, equal:[eq(s.a, t1.a)]
├─TableReader_28 10.00 root data:Selection_27
│ └─Selection_27 10.00 cop eq(s.a, test.t.a)
│ └─TableScan_26 10000.00 cop table:s, range:[-inf,+inf], keep order:false, stats:pseudo
└─HashAgg_34 6400.00 root group by:col_2, funcs:count(col_0), firstrow(col_1)
└─TableReader_35 6400.00 root data:HashAgg_29
└─HashAgg_29 6400.00 cop group by:t1.a, funcs:count(1), firstrow(t1.a)
└─Selection_33 8000.00 cop eq(t1.a, test.t.a)
└─HashAgg_34 8.00 root group by:col_2, funcs:count(col_0), firstrow(col_1)
└─TableReader_35 8.00 root data:HashAgg_29
└─HashAgg_29 8.00 cop group by:t1.a, funcs:count(1), firstrow(t1.a)
└─Selection_33 10.00 cop eq(t1.a, test.t.a)
└─TableScan_32 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo
drop table if exists t;
create table t(a int);
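
Most of the estimate changes in this result file (for example, the eq(t1.a, test.t.a) selections dropping from 8000.00 to 10.00) look consistent with pseudo-statistics estimation, under which an equality predicate is assumed to keep about 1/1000 of the rows. A minimal sketch of that arithmetic, using illustrative constant names rather than TiDB's actual identifiers:

package main

import "fmt"

// Illustrative defaults under stats:pseudo; the names are assumptions,
// not TiDB's real constants.
const (
	pseudoRowCount  = 10000.0 // default row count for a table without statistics
	pseudoEqualRate = 1000.0  // an equality predicate keeps ~1/1000 of the rows
)

func main() {
	rows := pseudoRowCount / pseudoEqualRate
	fmt.Printf("estimated rows after eq filter: %.2f\n", rows) // 10.00
}
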
2 changes: 1 addition & 1 deletion ddl/db_integration_test.go
@@ -69,7 +69,7 @@ func (s *testIntegrationSuite) TearDownTest(c *C) {
func (s *testIntegrationSuite) SetUpSuite(c *C) {
var err error
testleak.BeforeTest()
s.lease = 200 * time.Millisecond
s.lease = 50 * time.Millisecond

s.cluster = mocktikv.NewCluster()
mocktikv.BootstrapWithSingleStore(s.cluster)
4 changes: 2 additions & 2 deletions ddl/db_partition_test.go
@@ -1021,6 +1021,8 @@ func (s *testIntegrationSuite) TestPartitionCancelAddIndex(c *C) {
var c3IdxInfo *model.IndexInfo
hook := &ddl.TestDDLCallback{}
hook.OnJobUpdatedExported, c3IdxInfo, checkErr = backgroundExecOnJobUpdatedExported(c, s.store, s.ctx, hook)
originHook := s.dom.DDL().GetHook()
defer s.dom.DDL().(ddl.DDLForTest).SetHook(originHook)
s.dom.DDL().(ddl.DDLForTest).SetHook(hook)
done := make(chan error, 1)
go backgroundExec(s.store, "create index c3_index on t1 (c3)", done)
@@ -1064,8 +1066,6 @@ LOOP:
checkDelRangeDone(c, s.ctx, idx)

tk.MustExec("drop table t1")
callback := &ddl.TestDDLCallback{}
s.dom.DDL().(ddl.DDLForTest).SetHook(callback)
}

func backgroundExecOnJobUpdatedExported(c *C, store kv.Storage, ctx sessionctx.Context, hook *ddl.TestDDLCallback) (func(*model.Job), *model.IndexInfo, error) {
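
The two added lines above switch this test to a save/restore pattern for the DDL hook, instead of installing a fresh callback at the end of the test. A self-contained sketch of the idiom, with stand-in types for the real ddl package:

package main

import "fmt"

// Callback is a stand-in for ddl.Callback; illustrative only.
type Callback interface {
	OnJobRunBefore(jobID int64)
}

type defaultCallback struct{}

func (defaultCallback) OnJobRunBefore(int64) {}

type testCallback struct{}

func (testCallback) OnJobRunBefore(id int64) { fmt.Println("intercepting job", id) }

// ddlHandle mimics the GetHook/SetHook pair used in the diff.
type ddlHandle struct{ hook Callback }

func (d *ddlHandle) GetHook() Callback  { return d.hook }
func (d *ddlHandle) SetHook(h Callback) { d.hook = h }

func runTest(d *ddlHandle) {
	// Save the original hook and restore it on exit, so a failing test
	// cannot leak its hook into tests that run afterwards.
	originHook := d.GetHook()
	defer d.SetHook(originHook)
	d.SetHook(testCallback{})
	d.GetHook().OnJobRunBefore(42)
}

func main() {
	d := &ddlHandle{hook: defaultCallback{}}
	runTest(d)
	fmt.Printf("restored hook: %T\n", d.GetHook()) // main.defaultCallback
}
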
98 changes: 91 additions & 7 deletions ddl/db_test.go
@@ -397,7 +397,6 @@ func (s *testDBSuite) TestCancelDropIndex(c *C) {
for i := 0; i < 5; i++ {
s.mustExec(c, "insert into t values (?, ?)", i, i)
}

testCases := []struct {
needAddIndex bool
jobState model.JobState
@@ -410,7 +409,6 @@
{false, model.JobStateRunning, model.StateDeleteOnly, false},
{true, model.JobStateRunning, model.StateDeleteReorganization, false},
}

var checkErr error
hook := &ddl.TestDDLCallback{}
var jobID int64
@@ -436,12 +434,10 @@
checkErr = errors.Trace(err)
return
}

if errs[0] != nil {
checkErr = errors.Trace(errs[0])
return
}

checkErr = txn.Commit(context.Background())
}
}
@@ -456,22 +452,19 @@
if rs != nil {
rs.Close()
}

t := s.testGetTable(c, "t")
indexInfo := schemautil.FindIndexByName("idx_c2", t.Meta().Indices)
if testCase.cancelSucc {
c.Assert(checkErr, IsNil)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[ddl:12]cancelled DDL job")

c.Assert(indexInfo, NotNil)
c.Assert(indexInfo.State, Equals, model.StatePublic)
} else {
err1 := admin.ErrCannotCancelDDLJob.GenWithStackByArgs(jobID)
c.Assert(err, IsNil)
c.Assert(checkErr, NotNil)
c.Assert(checkErr.Error(), Equals, err1.Error())

c.Assert(indexInfo, IsNil)
}
}
@@ -480,6 +473,97 @@
s.mustExec(c, "alter table t drop index idx_c2")
}

// TestCancelDropTableAndSchema tests canceling DDL jobs whose type is drop table or drop schema.
func (s *testDBSuite) TestCancelDropTableAndSchema(c *C) {
s.tk = testkit.NewTestKit(c, s.store)
testCases := []struct {
needAddTableOrDB bool
action model.ActionType
jobState model.JobState
JobSchemaState model.SchemaState
cancelSucc bool
}{
// Check drop table.
// model.JobStateNone means the job is canceled before the first run.
{true, model.ActionDropTable, model.JobStateNone, model.StateNone, true},
{false, model.ActionDropTable, model.JobStateRunning, model.StateWriteOnly, false},
{true, model.ActionDropTable, model.JobStateRunning, model.StateDeleteOnly, false},

// Check drop database.
{true, model.ActionDropSchema, model.JobStateNone, model.StateNone, true},
{false, model.ActionDropSchema, model.JobStateRunning, model.StateWriteOnly, false},
{true, model.ActionDropSchema, model.JobStateRunning, model.StateDeleteOnly, false},
}
var checkErr error
oldReorgWaitTimeout := ddl.ReorgWaitTimeout
ddl.ReorgWaitTimeout = 50 * time.Millisecond
defer func() { ddl.ReorgWaitTimeout = oldReorgWaitTimeout }()
hook := &ddl.TestDDLCallback{}
var jobID int64
testCase := &testCases[0]
hook.OnJobRunBeforeExported = func(job *model.Job) {
if job.Type == testCase.action && job.State == testCase.jobState && job.SchemaState == testCase.JobSchemaState {
jobIDs := []int64{job.ID}
jobID = job.ID
hookCtx := mock.NewContext()
hookCtx.Store = s.store
err := hookCtx.NewTxn(context.TODO())
if err != nil {
checkErr = errors.Trace(err)
return
}
txn, err := hookCtx.Txn(true)
if err != nil {
checkErr = errors.Trace(err)
return
}
errs, err := admin.CancelJobs(txn, jobIDs)
if err != nil {
checkErr = errors.Trace(err)
return
}
if errs[0] != nil {
checkErr = errors.Trace(errs[0])
return
}
checkErr = txn.Commit(context.Background())
}
}
originHook := s.dom.DDL().GetHook()
defer s.dom.DDL().(ddl.DDLForTest).SetHook(originHook)
s.dom.DDL().(ddl.DDLForTest).SetHook(hook)
var err error
sql := ""
for i := range testCases {
testCase = &testCases[i]
if testCase.needAddTableOrDB {
s.mustExec(c, "create database if not exists test_drop_db")
s.mustExec(c, "use test_drop_db")
s.mustExec(c, "create table if not exists t(c1 int, c2 int)")
}

if testCase.action == model.ActionDropTable {
sql = "drop table t;"
} else if testCase.action == model.ActionDropSchema {
sql = "drop database test_drop_db;"
}

_, err = s.tk.Exec(sql)
if testCase.cancelSucc {
c.Assert(checkErr, IsNil)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[ddl:12]cancelled DDL job")
s.mustExec(c, "insert into t values (?, ?)", i, i)
} else {
c.Assert(err, IsNil)
c.Assert(checkErr, NotNil)
c.Assert(checkErr.Error(), Equals, admin.ErrCannotCancelDDLJob.GenWithStackByArgs(jobID).Error())
_, err = s.tk.Exec("insert into t values (?, ?)", i, i)
c.Assert(err, NotNil)
}
}
}

func (s *testDBSuite) TestAddAnonymousIndex(c *C) {
s.tk = testkit.NewTestKit(c, s.store)
s.tk.MustExec("use " + s.schemaName)
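
The case table in TestCancelDropTableAndSchema encodes a single rule: a drop-table or drop-schema job is cancelable only before it starts mutating schema state, and the attempt fails once the object has left the public state. A sketch of that rule as a predicate — the function name and shape are assumptions, not the ddl package's code:

package main

import "fmt"

// SchemaState is a stand-in for model.SchemaState.
type SchemaState string

const (
	StateNone       SchemaState = "none"
	StatePublic     SchemaState = "public"
	StateWriteOnly  SchemaState = "write only"
	StateDeleteOnly SchemaState = "delete only"
)

// canCancelDrop sketches the rule the test cases encode: once a drop job
// has moved the object past public, it can no longer be rolled back.
func canCancelDrop(s SchemaState) bool {
	return s == StateNone || s == StatePublic
}

func main() {
	for _, s := range []SchemaState{StateNone, StateWriteOnly, StateDeleteOnly} {
		fmt.Printf("cancel during %q succeeds: %v\n", s, canCancelDrop(s))
	}
	// true for "none", false for the two in-flight states, matching
	// the cancelSucc column of the test table above.
}
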
4 changes: 0 additions & 4 deletions ddl/ddl_api.go
@@ -989,10 +989,6 @@ func (d *ddl) CreateTable(ctx sessionctx.Context, s *ast.CreateTableStmt) (err error) {
if err != nil {
return errors.Trace(err)
}
if len(tbInfo.Charset) != 0 && strings.ToLower(tbInfo.Charset) != mysql.DefaultCharset {
ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf(`TiDB only supports the "utf8mb4" character set, so "%s" does not take effect.`, tbInfo.Charset))
}

err = d.doDDLJob(ctx, job)
if err == nil {
if tbInfo.AutoIncID > 1 {
25 changes: 21 additions & 4 deletions ddl/ddl_worker_test.go
@@ -328,7 +328,7 @@ func checkCancelState(txn kv.Transaction, job *model.Job, test *testCancelJob) error {
// If the action is adding an index and the state is write reorganization, it tests the case of canceling the job while backfilling indexes.
// When the job satisfies this addIndexFirstReorg case, the worker hasn't started to backfill indexes yet.
if test.cancelState == job.SchemaState && !addIndexFirstReorg {
if job.SchemaState == model.StateNone && job.State != model.JobStateDone {
if job.SchemaState == model.StateNone && job.State != model.JobStateDone && job.Type != model.ActionCreateTable && job.Type != model.ActionCreateSchema {
// If the schema state is none, we only test that the job is finished.
} else {
errs, err := admin.CancelJobs(txn, test.jobIDs)
@@ -365,13 +365,15 @@ func buildCancelJobTests(firstID int64) []testCancelJob {

// Test cancel drop index job, see TestCancelDropIndex.

// TODO: add create table back after we fix the cancel bug.
//{act: model.ActionCreateTable, jobIDs: []int64{firstID + 9}, cancelRetErrs: noErrs, cancelState: model.StatePublic, ddlRetErr: err},

{act: model.ActionAddColumn, jobIDs: []int64{firstID + 5}, cancelRetErrs: noErrs, cancelState: model.StateDeleteOnly, ddlRetErr: err},
{act: model.ActionAddColumn, jobIDs: []int64{firstID + 6}, cancelRetErrs: noErrs, cancelState: model.StateWriteOnly, ddlRetErr: err},
{act: model.ActionAddColumn, jobIDs: []int64{firstID + 7}, cancelRetErrs: noErrs, cancelState: model.StateWriteReorganization, ddlRetErr: err},
{act: model.ActionAddColumn, jobIDs: []int64{firstID + 8}, cancelRetErrs: []error{admin.ErrCancelFinishedDDLJob.GenWithStackByArgs(firstID + 8)}, cancelState: model.StatePublic, ddlRetErr: err},

// Test create table. Watch out: allocating the table ID consumes a global ID.
{act: model.ActionCreateTable, jobIDs: []int64{firstID + 10}, cancelRetErrs: noErrs, cancelState: model.StateNone, ddlRetErr: err},
// Test create database. Watch out: allocating the database ID consumes a global ID.
{act: model.ActionCreateSchema, jobIDs: []int64{firstID + 12}, cancelRetErrs: noErrs, cancelState: model.StateNone, ddlRetErr: err},
}

return tests
@@ -524,6 +526,21 @@ func (s *testDDLSuite) TestCancelJob(c *C) {
testAddColumn(c, ctx, d, dbInfo, tblInfo, addColumnArgs)
c.Check(errors.ErrorStack(checkErr), Equals, "")
s.checkAddColumn(c, d, dbInfo.ID, tblInfo.ID, addingColName, true)

// for create table
tblInfo1 := testTableInfo(c, d, "t1", 2)
test = &tests[8]
doDDLJobErrWithSchemaState(ctx, d, c, dbInfo.ID, tblInfo1.ID, model.ActionCreateTable, []interface{}{tblInfo1}, &cancelState)
c.Check(checkErr, IsNil)
testCheckTableState(c, d, dbInfo, tblInfo1, model.StateNone)

// for create database
dbInfo1 := testSchemaInfo(c, d, "test_cancel_job1")
test = &tests[9]
doDDLJobErrWithSchemaState(ctx, d, c, dbInfo1.ID, 0, model.ActionCreateSchema, []interface{}{dbInfo1}, &cancelState)
c.Check(checkErr, IsNil)
testCheckSchemaState(c, d, dbInfo1, model.StateNone)
}

func (s *testDDLSuite) TestIgnorableSpec(c *C) {
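
The new entries jump to firstID + 10 and firstID + 12 (skipping + 9 and + 11) because, as the in-line comments warn, building the table or database for the test consumes one global ID for the object itself before the DDL job allocates its own. A toy model of that single shared counter — an assumption about the allocation order, not the meta layer's code:

package main

import "fmt"

// globalID models one monotonic counter shared by schema objects and DDL
// jobs, which is why creating an object "uses up" a would-be job ID.
type globalID struct{ n int64 }

func (g *globalID) next() int64 { g.n++; return g.n }

func main() {
	alloc := &globalID{n: 8}        // pretend firstID+8 was just consumed
	tableID := alloc.next()         // firstID + 9: table ID for "create table"
	createTableJob := alloc.next()  // firstID + 10: the create-table job
	dbID := alloc.next()            // firstID + 11: database ID for "create schema"
	createSchemaJob := alloc.next() // firstID + 12: the create-schema job
	fmt.Println(tableID, createTableJob, dbID, createSchemaJob) // 9 10 11 12
}
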
2 changes: 2 additions & 0 deletions ddl/rollingback.go
@@ -184,6 +184,8 @@ func convertJob2RollbackJob(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job)
ver, err = rollingbackAddindex(w, d, t, job)
case model.ActionDropIndex:
ver, err = rollingbackDropIndex(t, job)
case model.ActionDropTable, model.ActionDropSchema:
job.State = model.JobStateRollingback
default:
job.State = model.JobStateCancelled
err = errCancelledDDLJob
11 changes: 10 additions & 1 deletion ddl/schema.go
@@ -80,11 +80,20 @@ func onDropSchema(t *meta.Meta, job *model.Job) (ver int64, _ error) {
return ver, infoschema.ErrDatabaseDropExists.GenWithStackByArgs("")
}

if job.IsRollingback() {
// To simplify the rollback logic, the job cannot be canceled after it starts to run.
// Normally we won't reach here, because cancelability is checked when canceling DDL jobs; see isJobRollbackable.
if dbInfo.State == model.StatePublic {
job.State = model.JobStateCancelled
return ver, errCancelledDDLJob
}
job.State = model.JobStateRunning
}

ver, err = updateSchemaVersion(t, job)
if err != nil {
return ver, errors.Trace(err)
}

switch dbInfo.State {
case model.StatePublic:
// public -> write only
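
Read together with the rollingback.go hunk, the flow appears to be: a cancel request flips a running drop-table or drop-schema job into JobStateRollingback, and onDropSchema then honors the cancellation only while the database is still public; past that point the drop resumes. A compressed sketch of that branch, with simplified types standing in for the ddl package:

package main

import (
	"errors"
	"fmt"
)

type SchemaState string

const (
	StatePublic     SchemaState = "public"
	StateWriteOnly  SchemaState = "write only"
	StateDeleteOnly SchemaState = "delete only"
)

var errCancelledDDLJob = errors.New("[ddl:12]cancelled DDL job")

// onDropSchemaRollback sketches the new IsRollingback branch: cancellation
// is honored only while the database is still public; after that the drop
// is past the point of no return and keeps running.
func onDropSchemaRollback(dbState SchemaState) (jobState string, err error) {
	if dbState == StatePublic {
		return "JobStateCancelled", errCancelledDDLJob
	}
	return "JobStateRunning", nil // too late to roll back; finish the drop
}

func main() {
	for _, s := range []SchemaState{StatePublic, StateWriteOnly, StateDeleteOnly} {
		jobState, err := onDropSchemaRollback(s)
		fmt.Printf("db %-11s -> %s, err=%v\n", s, jobState, err)
	}
}
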