diff --git a/cmd/explaintest/r/explain_complex.result b/cmd/explaintest/r/explain_complex.result index 6f273706d1e94..eeb6051e261e8 100644 --- a/cmd/explaintest/r/explain_complex.result +++ b/cmd/explaintest/r/explain_complex.result @@ -126,7 +126,7 @@ Projection_13 1.00 root test.gad.id, test.dd.id, test.gad.aid, test.gad.cm, test └─IndexLookUp_33 3.33 root ├─IndexScan_30 3333.33 cop table:gad, index:t, range:(1478143908,+inf], keep order:false, stats:pseudo └─Selection_32 3.33 cop eq(test.gad.pt, "android"), not(isnull(test.gad.ip)) - └─TableScan_31 3333.33 cop table:st, keep order:false, stats:pseudo + └─TableScan_31 3333.33 cop table:gad, keep order:false, stats:pseudo explain select gad.id as gid,sdk.id as sid,gad.aid as aid,gad.cm as cm,sdk.dic as dic,sdk.ip as ip, sdk.t as t, gad.p1 as p1, gad.p2 as p2, gad.p3 as p3, gad.p4 as p4, gad.p5 as p5, gad.p6_md5 as p6, gad.p7_md5 as p7, gad.ext as ext from st gad join dd sdk on gad.aid = sdk.aid and gad.dic = sdk.mac and gad.t < sdk.t where gad.t > 1477971479 and gad.bm = 0 and gad.pt = 'ios' and gad.dit = 'mac' and sdk.t > 1477971479 and sdk.bm = 0 and sdk.pt = 'ios' limit 3000; id count task operator info Projection_10 0.00 root test.gad.id, test.sdk.id, test.gad.aid, test.gad.cm, test.sdk.dic, test.sdk.ip, test.sdk.t, test.gad.p1, test.gad.p2, test.gad.p3, test.gad.p4, test.gad.p5, test.gad.p6_md5, test.gad.p7_md5, test.gad.ext @@ -135,11 +135,11 @@ Projection_10 0.00 root test.gad.id, test.sdk.id, test.gad.aid, test.gad.cm, tes ├─IndexLookUp_27 0.00 root │ ├─IndexScan_24 3333.33 cop table:gad, index:t, range:(1477971479,+inf], keep order:false, stats:pseudo │ └─Selection_26 0.00 cop eq(test.gad.bm, 0), eq(test.gad.dit, "mac"), eq(test.gad.pt, "ios"), not(isnull(test.gad.dic)) - │ └─TableScan_25 3333.33 cop table:st, keep order:false, stats:pseudo + │ └─TableScan_25 3333.33 cop table:gad, keep order:false, stats:pseudo └─IndexLookUp_17 0.00 root ├─IndexScan_14 10.00 cop table:sdk, index:aid, dic, range: decided by [eq(test.sdk.aid, test.gad.aid)], keep order:false, stats:pseudo └─Selection_16 0.00 cop eq(test.sdk.bm, 0), eq(test.sdk.pt, "ios"), gt(test.sdk.t, 1477971479), not(isnull(test.sdk.mac)), not(isnull(test.sdk.t)) - └─TableScan_15 10.00 cop table:dd, keep order:false, stats:pseudo + └─TableScan_15 10.00 cop table:sdk, keep order:false, stats:pseudo explain SELECT cm, p1, p2, p3, p4, p5, p6_md5, p7_md5, count(1) as click_pv, count(DISTINCT ip) as click_ip FROM st WHERE (t between 1478188800 and 1478275200) and aid='cn.sbkcq' and pt='android' GROUP BY cm, p1, p2, p3, p4, p5, p6_md5, p7_md5; id count task operator info Projection_5 1.00 root test.st.cm, test.st.p1, test.st.p2, test.st.p3, test.st.p4, test.st.p5, test.st.p6_md5, test.st.p7_md5, 3_col_0, 3_col_1 @@ -250,12 +250,12 @@ Sort_10 1.00 root test.d.left_value:asc │ ├─IndexLookUp_45 0.01 root │ │ ├─IndexScan_42 10.00 cop table:d, index:ctx, range:[1,1], keep order:false, stats:pseudo │ │ └─Selection_44 0.01 cop eq(test.d.status, 1000) - │ │ └─TableScan_43 10.00 cop table:org_department, keep order:false, stats:pseudo + │ │ └─TableScan_43 10.00 cop table:d, keep order:false, stats:pseudo │ └─IndexLookUp_28 0.01 root │ ├─Selection_26 9.99 cop not(isnull(test.p.department_id)) │ │ └─IndexScan_24 10.00 cop table:p, index:department_id, range: decided by [eq(test.p.department_id, test.d.id)], keep order:false, stats:pseudo │ └─Selection_27 0.01 cop eq(test.p.status, 1000) - │ └─TableScan_25 9.99 cop table:org_position, keep order:false, stats:pseudo + │ └─TableScan_25 9.99 cop 
table:p, keep order:false, stats:pseudo └─TableReader_55 9.99 root data:Selection_54 └─Selection_54 9.99 cop eq(test.ep.status, 1000), not(isnull(test.ep.position_id)) └─TableScan_53 10000.00 cop table:ep, range:[-inf,+inf], keep order:false, stats:pseudo diff --git a/cmd/explaintest/r/explain_complex_stats.result b/cmd/explaintest/r/explain_complex_stats.result index 419d1ba3834d8..2217d2bed2adb 100644 --- a/cmd/explaintest/r/explain_complex_stats.result +++ b/cmd/explaintest/r/explain_complex_stats.result @@ -147,7 +147,7 @@ Projection_10 170.34 root test.gad.id, test.sdk.id, test.gad.aid, test.gad.cm, t └─IndexLookUp_17 0.25 root ├─IndexScan_14 1.00 cop table:sdk, index:aid, dic, range: decided by [eq(test.sdk.aid, test.gad.aid)], keep order:false └─Selection_16 0.25 cop eq(test.sdk.bm, 0), eq(test.sdk.pt, "ios"), gt(test.sdk.t, 1477971479), not(isnull(test.sdk.mac)), not(isnull(test.sdk.t)) - └─TableScan_15 1.00 cop table:dd, keep order:false + └─TableScan_15 1.00 cop table:sdk, keep order:false explain SELECT cm, p1, p2, p3, p4, p5, p6_md5, p7_md5, count(1) as click_pv, count(DISTINCT ip) as click_ip FROM st WHERE (t between 1478188800 and 1478275200) and aid='cn.sbkcq' and pt='android' GROUP BY cm, p1, p2, p3, p4, p5, p6_md5, p7_md5; id count task operator info Projection_5 39.28 root test.st.cm, test.st.p1, test.st.p2, test.st.p3, test.st.p4, test.st.p5, test.st.p6_md5, test.st.p7_md5, 3_col_0, 3_col_1 diff --git a/cmd/explaintest/r/explain_easy.result b/cmd/explaintest/r/explain_easy.result index 7c54ef28088a3..f970810be57fa 100644 --- a/cmd/explaintest/r/explain_easy.result +++ b/cmd/explaintest/r/explain_easy.result @@ -345,7 +345,7 @@ Projection_11 10000.00 root 9_aux_0 ├─IndexLookUp_29 9.99 root │ ├─IndexScan_26 10.00 cop table:s, index:b, range: decided by [eq(test.s.b, test.t.a)], keep order:false, stats:pseudo │ └─Selection_28 9.99 cop not(isnull(test.s.c)) - │ └─TableScan_27 10.00 cop table:t, keep order:false, stats:pseudo + │ └─TableScan_27 10.00 cop table:s, keep order:false, stats:pseudo └─TableReader_33 1.00 root data:TableScan_32 └─TableScan_32 1.00 cop table:t1, range: decided by [test.s.c], keep order:false, stats:pseudo insert into t values(1, 1, 1), (2, 2 ,2), (3, 3, 3), (4, 3, 4),(5,3,5); @@ -684,7 +684,7 @@ Projection_8 8320.83 root test.t.a, test.t1.a └─UnionScan_14 6656.67 root not(and(ge(test.t1.a, 1), le(test.t1.a, 2))), not(isnull(test.t1.a)) └─TableReader_17 6656.67 root data:Selection_16 └─Selection_16 6656.67 cop not(isnull(test.t1.a)), or(lt(test.t1.a, 1), gt(test.t1.a, 2)) - └─TableScan_15 10000.00 cop table:t, range:[-inf,+inf], keep order:false, stats:pseudo + └─TableScan_15 10000.00 cop table:t1, range:[-inf,+inf], keep order:false, stats:pseudo rollback; drop table if exists t; create table t(a time, b date); diff --git a/cmd/explaintest/r/topn_push_down.result b/cmd/explaintest/r/topn_push_down.result index 9deb403ecceca..4aba3b9758704 100644 --- a/cmd/explaintest/r/topn_push_down.result +++ b/cmd/explaintest/r/topn_push_down.result @@ -240,19 +240,19 @@ explain select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 on t1.a = t2.a limi id count task operator info Limit_11 5.00 root offset:0, count:5 └─MergeJoin_12 5.00 root inner join, left key:test.t1.a, right key:test.t2.a - ├─IndexReader_14 4.00 root index:IndexScan_13 - │ └─IndexScan_13 4.00 cop table:t1, index:a, range:[NULL,+inf], keep order:true, stats:pseudo - └─IndexReader_16 4.00 root index:IndexScan_15 - └─IndexScan_15 4.00 cop table:t2, index:a, range:[NULL,+inf], keep order:true, 
stats:pseudo + ├─IndexReader_15 4.00 root index:IndexScan_14 + │ └─IndexScan_14 4.00 cop table:t1, index:a, range:[NULL,+inf], keep order:true, stats:pseudo + └─IndexReader_17 4.00 root index:IndexScan_16 + └─IndexScan_16 4.00 cop table:t2, index:a, range:[NULL,+inf], keep order:true, stats:pseudo explain select /*+ TIDB_SMJ(t1, t2) */ * from t t1 left join t t2 on t1.a = t2.a where t2.a is null limit 5; id count task operator info Limit_12 5.00 root offset:0, count:5 └─Selection_13 5.00 root isnull(test.t2.a) └─MergeJoin_14 5.00 root left outer join, left key:test.t1.a, right key:test.t2.a - ├─IndexReader_16 4.00 root index:IndexScan_15 - │ └─IndexScan_15 4.00 cop table:t1, index:a, range:[NULL,+inf], keep order:true, stats:pseudo - └─IndexReader_18 4.00 root index:IndexScan_17 - └─IndexScan_17 4.00 cop table:t2, index:a, range:[NULL,+inf], keep order:true, stats:pseudo + ├─IndexReader_17 4.00 root index:IndexScan_16 + │ └─IndexScan_16 4.00 cop table:t1, index:a, range:[NULL,+inf], keep order:true, stats:pseudo + └─IndexReader_19 4.00 root index:IndexScan_18 + └─IndexScan_18 4.00 cop table:t2, index:a, range:[NULL,+inf], keep order:true, stats:pseudo explain select /*+ TIDB_HJ(t1, t2) */ * from t t1 join t t2 on t1.a = t2.a limit 5; id count task operator info Limit_11 5.00 root offset:0, count:5 diff --git a/cmd/explaintest/r/tpch.result b/cmd/explaintest/r/tpch.result index 434610bf692cf..34dc2a1920320 100644 --- a/cmd/explaintest/r/tpch.result +++ b/cmd/explaintest/r/tpch.result @@ -1241,11 +1241,11 @@ Projection_25 1.00 root tpch.supplier.s_name, 17_col_0 │ │ └─TableScan_59 1.00 cop table:orders, range: decided by [tpch.l1.l_orderkey], keep order:false │ └─IndexLookUp_55 1.00 root │ ├─IndexScan_53 1.00 cop table:l2, index:L_ORDERKEY, L_LINENUMBER, range: decided by [eq(tpch.l2.l_orderkey, tpch.l1.l_orderkey)], keep order:false - │ └─TableScan_54 1.00 cop table:lineitem, keep order:false + │ └─TableScan_54 1.00 cop table:l2, keep order:false └─IndexLookUp_39 0.80 root ├─IndexScan_36 1.00 cop table:l3, index:L_ORDERKEY, L_LINENUMBER, range: decided by [eq(tpch.l3.l_orderkey, tpch.l1.l_orderkey)], keep order:false └─Selection_38 0.80 cop gt(tpch.l3.l_receiptdate, tpch.l3.l_commitdate) - └─TableScan_37 1.00 cop table:lineitem, keep order:false + └─TableScan_37 1.00 cop table:l3, keep order:false /* Q22 Global Sales Opportunity Query The Global Sales Opportunity Query identifies geographies where there are customers who may be likely to make a diff --git a/executor/index_lookup_join_test.go b/executor/index_lookup_join_test.go index 8f281c8497961..098e0cfded8c4 100644 --- a/executor/index_lookup_join_test.go +++ b/executor/index_lookup_join_test.go @@ -131,17 +131,17 @@ func (s *testSuite1) TestInapplicableIndexJoinHint(c *C) { tk.MustExec(`create table t1(a bigint, b bigint);`) tk.MustExec(`create table t2(a bigint, b bigint);`) tk.MustQuery(`select /*+ TIDB_INLJ(t1, t2) */ * from t1, t2;`).Check(testkit.Rows()) - tk.MustQuery(`show warnings;`).Check(testkit.Rows(`Warning 1815 Optimizer Hint /*+ TIDB_INLJ(t1, t2) */ is inapplicable without column equal ON condition`)) + tk.MustQuery(`show warnings;`).Check(testkit.Rows(`Warning 1815 Optimizer Hint /*+ INL_JOIN(t1, t2) */ or /*+ TIDB_INLJ(t1, t2) */ is inapplicable without column equal ON condition`)) tk.MustQuery(`select /*+ TIDB_INLJ(t1, t2) */ * from t1 join t2 on t1.a=t2.a;`).Check(testkit.Rows()) - tk.MustQuery(`show warnings;`).Check(testkit.Rows(`Warning 1815 Optimizer Hint /*+ TIDB_INLJ(t1, t2) */ is inapplicable`)) + 
tk.MustQuery(`show warnings;`).Check(testkit.Rows(`Warning 1815 Optimizer Hint /*+ INL_JOIN(t1, t2) */ or /*+ TIDB_INLJ(t1, t2) */ is inapplicable`)) tk.MustExec(`drop table if exists t1, t2;`) tk.MustExec(`create table t1(a bigint, b bigint, index idx_a(a));`) tk.MustExec(`create table t2(a bigint, b bigint);`) tk.MustQuery(`select /*+ TIDB_INLJ(t1) */ * from t1 left join t2 on t1.a=t2.a;`).Check(testkit.Rows()) - tk.MustQuery(`show warnings;`).Check(testkit.Rows(`Warning 1815 Optimizer Hint /*+ TIDB_INLJ(t1) */ is inapplicable`)) + tk.MustQuery(`show warnings;`).Check(testkit.Rows(`Warning 1815 Optimizer Hint /*+ INL_JOIN(t1) */ or /*+ TIDB_INLJ(t1) */ is inapplicable`)) tk.MustQuery(`select /*+ TIDB_INLJ(t2) */ * from t1 right join t2 on t1.a=t2.a;`).Check(testkit.Rows()) - tk.MustQuery(`show warnings;`).Check(testkit.Rows(`Warning 1815 Optimizer Hint /*+ TIDB_INLJ(t2) */ is inapplicable`)) + tk.MustQuery(`show warnings;`).Check(testkit.Rows(`Warning 1815 Optimizer Hint /*+ INL_JOIN(t2) */ or /*+ TIDB_INLJ(t2) */ is inapplicable`)) } func (s *testSuite) TestIndexJoinOverflow(c *C) { diff --git a/executor/join_test.go b/executor/join_test.go index 6c2211538be29..68cf928cf6dcd 100644 --- a/executor/join_test.go +++ b/executor/join_test.go @@ -143,13 +143,13 @@ func (s *testSuite2) TestJoin(c *C) { tk.MustQuery("select /*+ TIDB_INLJ(t1) */ * from t right outer join t1 on t.a=t1.a").Check(testkit.Rows("1 1 1 2", "1 1 1 3", "1 1 1 4", "3 3 3 4", " 4 5")) tk.MustQuery("select /*+ TIDB_INLJ(t) */ avg(t.b) from t right outer join t1 on t.a=t1.a").Check(testkit.Rows("1.5000")) - // Test that two conflict hints will return error. - err := tk.ExecToErr("select /*+ TIDB_INLJ(t) TIDB_SMJ(t) */ * from t join t1 on t.a=t1.a") - c.Assert(err, NotNil) - err = tk.ExecToErr("select /*+ TIDB_INLJ(t) TIDB_HJ(t) */ from t join t1 on t.a=t1.a") - c.Assert(err, NotNil) - err = tk.ExecToErr("select /*+ TIDB_SMJ(t) TIDB_HJ(t) */ from t join t1 on t.a=t1.a") - c.Assert(err, NotNil) + // Test that two conflict hints will return warning. 
+ tk.MustExec("select /*+ TIDB_INLJ(t) TIDB_SMJ(t) */ * from t join t1 on t.a=t1.a") + c.Assert(tk.Se.GetSessionVars().StmtCtx.GetWarnings(), HasLen, 1) + tk.MustExec("select /*+ TIDB_INLJ(t) TIDB_HJ(t) */ * from t join t1 on t.a=t1.a") + c.Assert(tk.Se.GetSessionVars().StmtCtx.GetWarnings(), HasLen, 1) + tk.MustExec("select /*+ TIDB_SMJ(t) TIDB_HJ(t) */ * from t join t1 on t.a=t1.a") + c.Assert(tk.Se.GetSessionVars().StmtCtx.GetWarnings(), HasLen, 1) tk.MustExec("drop table if exists t") tk.MustExec("create table t(a int)") @@ -888,11 +888,11 @@ func (s *testSuite2) TestMergejoinOrder(c *C) { tk.MustExec("insert into t2 select a*100, b*100 from t1;") tk.MustQuery("explain select /*+ TIDB_SMJ(t2) */ * from t1 left outer join t2 on t1.a=t2.a and t1.a!=3 order by t1.a;").Check(testkit.Rows( - "MergeJoin_15 10000.00 root left outer join, left key:test.t1.a, right key:test.t2.a, left cond:[ne(test.t1.a, 3)]", - "├─TableReader_11 10000.00 root data:TableScan_10", - "│ └─TableScan_10 10000.00 cop table:t1, range:[-inf,+inf], keep order:true, stats:pseudo", - "└─TableReader_13 6666.67 root data:TableScan_12", - " └─TableScan_12 6666.67 cop table:t2, range:[-inf,3), (3,+inf], keep order:true, stats:pseudo", + "MergeJoin_20 10000.00 root left outer join, left key:test.t1.a, right key:test.t2.a, left cond:[ne(test.t1.a, 3)]", + "├─TableReader_12 10000.00 root data:TableScan_11", + "│ └─TableScan_11 10000.00 cop table:t1, range:[-inf,+inf], keep order:true, stats:pseudo", + "└─TableReader_14 6666.67 root data:TableScan_13", + " └─TableScan_13 6666.67 cop table:t2, range:[-inf,3), (3,+inf], keep order:true, stats:pseudo", )) tk.MustExec("set @@tidb_init_chunk_size=1") diff --git a/executor/show.go b/executor/show.go index b96864c9811eb..e922fa9750f2c 100644 --- a/executor/show.go +++ b/executor/show.go @@ -383,7 +383,7 @@ func (e *ShowExec) fetchShowColumns(ctx context.Context) error { if tb.Meta().IsView() { // Because view's undertable's column could change or recreate, so view's column type may change overtime. // To avoid this situation we need to generate a logical plan and extract current column types from Schema. 
- planBuilder := plannercore.NewPlanBuilder(e.ctx, e.is) + planBuilder := plannercore.NewPlanBuilder(e.ctx, e.is, &plannercore.BlockHintProcessor{}) viewLogicalPlan, err := planBuilder.BuildDataSourceFromView(ctx, e.DBName, tb.Meta()) if err != nil { return err diff --git a/go.mod b/go.mod index 14a28dfa99a0e..3f05885591ebd 100644 --- a/go.mod +++ b/go.mod @@ -38,7 +38,7 @@ require ( github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e github.com/pingcap/kvproto v0.0.0-20191106014506-c5d88d699a8d github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd - github.com/pingcap/parser v0.0.0-20191120072812-9dc33a611210 + github.com/pingcap/parser v0.0.0-20191121045207-8b5639e42f59 github.com/pingcap/pd v1.1.0-beta.0.20190912093418-dc03c839debd github.com/pingcap/tidb-tools v3.0.6-0.20191119150227-ff0a3c6e5763+incompatible github.com/pingcap/tipb v0.0.0-20191120045257-1b9900292ab6 diff --git a/go.sum b/go.sum index 88b1617d02a93..f7dff2f684b9c 100644 --- a/go.sum +++ b/go.sum @@ -153,8 +153,8 @@ github.com/pingcap/kvproto v0.0.0-20191106014506-c5d88d699a8d h1:zTHgLr8+0LTEJmj github.com/pingcap/kvproto v0.0.0-20191106014506-c5d88d699a8d/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY= github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd h1:hWDol43WY5PGhsh3+8794bFHY1bPrmu6bTalpssCrGg= github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw= -github.com/pingcap/parser v0.0.0-20191120072812-9dc33a611210 h1:RtNufGeP4yfSgjN0e9TSiNwq1eI4f5YKwNVNqX2OIXM= -github.com/pingcap/parser v0.0.0-20191120072812-9dc33a611210/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= +github.com/pingcap/parser v0.0.0-20191121045207-8b5639e42f59 h1:D422KNsb0XuoDX0XZtmO0FYJ/KT4opcXHEc44hgYNQo= +github.com/pingcap/parser v0.0.0-20191121045207-8b5639e42f59/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= github.com/pingcap/pd v1.1.0-beta.0.20190912093418-dc03c839debd h1:bKj6hodu/ro78B0oN2yicdGn0t4yd9XjnyoW95qmWic= github.com/pingcap/pd v1.1.0-beta.0.20190912093418-dc03c839debd/go.mod h1:I7TEby5BHTYIxgHszfsOJSBsk8b2Qt8QrSIgdv5n5QQ= github.com/pingcap/tidb-tools v3.0.6-0.20191119150227-ff0a3c6e5763+incompatible h1:I8HirWsu1MZp6t9G/g8yKCEjJJxtHooKakEgccvdJ4M= diff --git a/planner/core/cbo_test.go b/planner/core/cbo_test.go index fff0ef7c17037..3bc9622e56e74 100644 --- a/planner/core/cbo_test.go +++ b/planner/core/cbo_test.go @@ -724,7 +724,7 @@ func (s *testAnalyzeSuite) TestCorrelatedEstimation(c *C) { " └─IndexLookUp_21 0.10 root ", " ├─IndexScan_18 1.00 cop table:t1, index:c, range: decided by [eq(test.t1.c, test.t.c)], keep order:false", " └─Selection_20 0.10 cop eq(test.t1.a, test.t.a)", - " └─TableScan_19 1.00 cop table:t, keep order:false", + " └─TableScan_19 1.00 cop table:t1, keep order:false", )) } diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go index 25e6de5922a76..cdb887aa8fa0d 100644 --- a/planner/core/exhaust_physical_plans.go +++ b/planner/core/exhaust_physical_plans.go @@ -118,7 +118,7 @@ func (p *PhysicalMergeJoin) tryToGetChildReqProp(prop *property.PhysicalProperty } func (p *LogicalJoin) getMergeJoin(prop *property.PhysicalProperty) []PhysicalPlan { - joins := make([]PhysicalPlan, 0, len(p.leftProperties)) + joins := make([]PhysicalPlan, 0, len(p.leftProperties)+1) // The leftProperties caches all the possible properties that are provided by its children. 
for _, lhsChildProperty := range p.leftProperties { offsets := getMaxSortPrefix(lhsChildProperty, p.LeftJoinKeys) @@ -159,10 +159,10 @@ func (p *LogicalJoin) getMergeJoin(prop *property.PhysicalProperty) []PhysicalPl joins = append(joins, mergeJoin) } } - // If TiDB_SMJ hint is existed && no join keys in children property, - // it should to enforce merge join. - if len(joins) == 0 && (p.preferJoinType&preferMergeJoin) > 0 { - return p.getEnforcedMergeJoin(prop) + // If the TiDB_SMJ hint exists, we should still consider the enforced merge join, + // because we can't trust lhsChildProperty completely. + if (p.preferJoinType & preferMergeJoin) > 0 { + joins = append(joins, p.getEnforcedMergeJoin(prop)...) } return joins @@ -569,6 +569,7 @@ func (p *LogicalJoin) constructInnerIndexScan(ds *DataSource, idx *model.IndexIn ts := PhysicalTableScan{ Columns: ds.Columns, Table: is.Table, + TableAsName: ds.TableAsName, isPartition: ds.isPartition, physicalTableID: ds.physicalTableID, }.Init(ds.ctx) @@ -971,9 +972,11 @@ func (p *LogicalJoin) tryToGetIndexJoin(prop *property.PhysicalProperty) (indexJ defer func() { if !forced && hasIndexJoinHint { // Construct warning message prefix. - errMsg := "Optimizer Hint TIDB_INLJ is inapplicable" + errMsg := "Optimizer Hint INL_JOIN or TIDB_INLJ is inapplicable" if p.hintInfo != nil { - errMsg = fmt.Sprintf("Optimizer Hint %s is inapplicable", restore2JoinHint(TiDBIndexNestedLoopJoin, p.hintInfo.indexNestedLoopJoinTables)) + errMsg = fmt.Sprintf("Optimizer Hint %s or %s is inapplicable", + restore2JoinHint(HintINLJ, p.hintInfo.indexNestedLoopJoinTables), + restore2JoinHint(TiDBIndexNestedLoopJoin, p.hintInfo.indexNestedLoopJoinTables)) } // Append inapplicable reason. @@ -1178,11 +1181,50 @@ func (p *baseLogicalPlan) exhaustPhysicalPlans(_ *property.PhysicalProperty) []P panic("baseLogicalPlan.exhaustPhysicalPlans() should never be called.") } +func (la *LogicalAggregation) canPushToCop() bool { + // At present, only Aggregation, Limit and TopN can be pushed to a cop task; Projection will be supported in the future. + // When we push a task to the coprocessor, finishCopTask will close the cop task and create a root task in the current implementation. + // Thus, we can't push two different tasks to the coprocessor now, and can only push a task to the coprocessor when the child is a DataSource. + + // TODO: develop this function further once pushing several tasks to the coprocessor and pushing Projection down are supported. + _, ok := la.children[0].(*DataSource) + return ok +} + +func (la *LogicalAggregation) getEnforcedStreamAggs(prop *property.PhysicalProperty) []PhysicalPlan { + _, desc := prop.AllSameOrder() + enforcedAggs := make([]PhysicalPlan, 0, len(wholeTaskTypes)) + childProp := &property.PhysicalProperty{ + ExpectedCnt: math.Max(prop.ExpectedCnt*la.inputCount/la.stats.RowCount, prop.ExpectedCnt), + Enforced: true, + Items: property.ItemsFromCols(la.groupByCols, desc), + } + + taskTypes := []property.TaskType{property.CopSingleReadTaskType, property.CopDoubleReadTaskType} + if !la.aggHints.preferAggToCop { + taskTypes = append(taskTypes, property.RootTaskType) + } + for _, taskTp := range taskTypes { + copiedChildProperty := new(property.PhysicalProperty) + *copiedChildProperty = *childProp // It's ok to not deep copy the "cols" field.
+ copiedChildProperty.TaskTp = taskTp + + agg := basePhysicalAgg{ + GroupByItems: la.GroupByItems, + AggFuncs: la.AggFuncs, + }.initForStream(la.ctx, la.stats.ScaleByExpectCnt(prop.ExpectedCnt), copiedChildProperty) + agg.SetSchema(la.schema.Clone()) + enforcedAggs = append(enforcedAggs, agg) + } + return enforcedAggs +} + +func (la *LogicalAggregation) getStreamAggs(prop *property.PhysicalProperty) []PhysicalPlan { all, desc := prop.AllSameOrder() - if len(la.possibleProperties) == 0 || !all { + if !all { return nil } + for _, aggFunc := range la.AggFuncs { if aggFunc.Mode == aggregation.FinalMode { return nil @@ -1193,7 +1235,7 @@ func (la *LogicalAggregation) getStreamAggs(prop *property.PhysicalProperty) []P return nil } - streamAggs := make([]PhysicalPlan, 0, len(la.possibleProperties)*(len(wholeTaskTypes)-1)) + streamAggs := make([]PhysicalPlan, 0, len(la.possibleProperties)*(len(wholeTaskTypes)-1)+len(wholeTaskTypes)) childProp := &property.PhysicalProperty{ ExpectedCnt: math.Max(prop.ExpectedCnt*la.inputCount/la.stats.RowCount, prop.ExpectedCnt), } @@ -1211,7 +1253,11 @@ func (la *LogicalAggregation) getStreamAggs(prop *property.PhysicalProperty) []P // The table read of "CopDoubleReadTaskType" can't promises the sort // property that the stream aggregation required, no need to consider. - for _, taskTp := range []property.TaskType{property.CopSingleReadTaskType, property.RootTaskType} { + taskTypes := []property.TaskType{property.CopSingleReadTaskType} + if !la.aggHints.preferAggToCop { + taskTypes = append(taskTypes, property.RootTaskType) + } + for _, taskTp := range taskTypes { copiedChildProperty := new(property.PhysicalProperty) *copiedChildProperty = *childProp // It's ok to not deep copy the "cols" field. copiedChildProperty.TaskTp = taskTp @@ -1224,6 +1270,11 @@ func (la *LogicalAggregation) getStreamAggs(prop *property.PhysicalProperty) []P streamAggs = append(streamAggs, agg) } } + // If the STREAM_AGG hint exists, we should still consider the enforced stream aggregation, + // because we can't trust possibleChildProperty completely. + if (la.aggHints.preferAggType & preferStreamAgg) > 0 { + streamAggs = append(streamAggs, la.getEnforcedStreamAggs(prop)...) + } return streamAggs } @@ -1232,7 +1283,11 @@ func (la *LogicalAggregation) getHashAggs(prop *property.PhysicalProperty) []Phy return nil } hashAggs := make([]PhysicalPlan, 0, len(wholeTaskTypes)) - for _, taskTp := range wholeTaskTypes { + taskTypes := []property.TaskType{property.CopSingleReadTaskType, property.CopDoubleReadTaskType} + if !la.aggHints.preferAggToCop { + taskTypes = append(taskTypes, property.RootTaskType) + } + for _, taskTp := range taskTypes { agg := basePhysicalAgg{ GroupByItems: la.GroupByItems, AggFuncs: la.AggFuncs, @@ -1244,9 +1299,44 @@ func (la *LogicalAggregation) getHashAggs(prop *property.PhysicalProperty) []Phy } func (la *LogicalAggregation) exhaustPhysicalPlans(prop *property.PhysicalProperty) []PhysicalPlan { - aggs := make([]PhysicalPlan, 0, len(la.possibleProperties)+1) - aggs = append(aggs, la.getHashAggs(prop)...) - aggs = append(aggs, la.getStreamAggs(prop)...)
+ if la.aggHints.preferAggToCop { + if !la.canPushToCop() { + errMsg := "Optimizer Hint AGG_TO_COP is inapplicable" + warning := ErrInternal.GenWithStack(errMsg) + la.ctx.GetSessionVars().StmtCtx.AppendWarning(warning) + la.aggHints.preferAggToCop = false + } + } + + preferHash := (la.aggHints.preferAggType & preferHashAgg) > 0 + preferStream := (la.aggHints.preferAggType & preferStreamAgg) > 0 + if preferHash && preferStream { + errMsg := "Optimizer aggregation hints are conflicted" + warning := ErrInternal.GenWithStack(errMsg) + la.ctx.GetSessionVars().StmtCtx.AppendWarning(warning) + la.aggHints.preferAggType = 0 + preferHash, preferStream = false, false + } + + hashAggs := la.getHashAggs(prop) + if hashAggs != nil && preferHash { + return hashAggs + } + + streamAggs := la.getStreamAggs(prop) + if streamAggs != nil && preferStream { + return streamAggs + } + + if streamAggs == nil && preferStream { + errMsg := "Optimizer Hint STREAM_AGG is inapplicable" + warning := ErrInternal.GenWithStack(errMsg) + la.ctx.GetSessionVars().StmtCtx.AppendWarning(warning) + } + + aggs := make([]PhysicalPlan, 0, len(hashAggs)+len(streamAggs)) + aggs = append(aggs, hashAggs...) + aggs = append(aggs, streamAggs...) return aggs } diff --git a/planner/core/expression_rewriter.go b/planner/core/expression_rewriter.go index 2f7cbea61fa24..1c13d6e348500 100644 --- a/planner/core/expression_rewriter.go +++ b/planner/core/expression_rewriter.go @@ -44,8 +44,9 @@ func evalAstExpr(sctx sessionctx.Context, expr ast.ExprNode) (types.Datum, error return val.Datum, nil } b := &PlanBuilder{ - ctx: sctx, - colMapper: make(map[*ast.ColumnNameExpr]int), + ctx: sctx, + colMapper: make(map[*ast.ColumnNameExpr]int), + hintProcessor: &BlockHintProcessor{}, } if sctx.GetSessionVars().TxnCtx.InfoSchema != nil { b.is = sctx.GetSessionVars().TxnCtx.InfoSchema.(infoschema.InfoSchema) @@ -441,6 +442,9 @@ func (er *expressionRewriter) handleCompareSubquery(ctx context.Context, v *ast. // it will be rewrote to t.id < (select max(s.id) from s). func (er *expressionRewriter) handleOtherComparableSubq(lexpr, rexpr expression.Expression, np LogicalPlan, useMin bool, cmpFunc string, all bool) { plan4Agg := LogicalAggregation{}.Init(er.sctx) + if hint := er.b.TableHints(); hint != nil { + plan4Agg.aggHints = hint.aggHints + } plan4Agg.SetChildren(np) // Create a "max" or "min" aggregation. @@ -567,6 +571,9 @@ func (er *expressionRewriter) handleNEAny(lexpr, rexpr expression.Expression, np plan4Agg := LogicalAggregation{ AggFuncs: []*aggregation.AggFuncDesc{firstRowFunc, countFunc}, }.Init(er.sctx) + if hint := er.b.TableHints(); hint != nil { + plan4Agg.aggHints = hint.aggHints + } plan4Agg.SetChildren(np) firstRowResultCol := &expression.Column{ ColName: model.NewCIStr("col_firstRow"), @@ -601,6 +608,9 @@ func (er *expressionRewriter) handleEQAll(lexpr, rexpr expression.Expression, np plan4Agg := LogicalAggregation{ AggFuncs: []*aggregation.AggFuncDesc{firstRowFunc, countFunc}, }.Init(er.sctx) + if hint := er.b.TableHints(); hint != nil { + plan4Agg.aggHints = hint.aggHints + } plan4Agg.SetChildren(np) firstRowResultCol := &expression.Column{ ColName: model.NewCIStr("col_firstRow"), @@ -758,10 +768,7 @@ func (er *expressionRewriter) handleInSubquery(ctx context.Context, v *ast.Patte join.attachOnConds(expression.SplitCNFItems(checkCondition)) // Set join hint for this join. 
if er.b.TableHints() != nil { - er.err = join.setPreferredJoinType(er.b.TableHints()) - if er.err != nil { - return v, true - } + join.setPreferredJoinType(er.b.TableHints()) } er.p = join } else { diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go index 0030510e1e31d..dff5343d88b8e 100644 --- a/planner/core/find_best_task.go +++ b/planner/core/find_best_task.go @@ -519,6 +519,7 @@ func (ds *DataSource) convertToIndexScan(prop *property.PhysicalProperty, candid ts := PhysicalTableScan{ Columns: ds.Columns, Table: is.Table, + TableAsName: ds.TableAsName, isPartition: ds.isPartition, physicalTableID: ds.physicalTableID, }.Init(ds.ctx) diff --git a/planner/core/hints.go b/planner/core/hints.go new file mode 100644 index 0000000000000..8b88473f78ec7 --- /dev/null +++ b/planner/core/hints.go @@ -0,0 +1,156 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "fmt" + "strconv" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/parser/ast" + "github.com/pingcap/parser/format" + "github.com/pingcap/parser/model" + "github.com/pingcap/tidb/sessionctx" +) + +// BlockHintProcessor processes hints at different level of sql statement. +type BlockHintProcessor struct { + QbNameMap map[string]int // Map from query block name to select stmt offset. + QbHints map[int][]*ast.TableOptimizerHint // Group all hints at same query block. + Ctx sessionctx.Context + selectStmtOffset int +} + +// Enter implements Visitor interface. +func (p *BlockHintProcessor) Enter(in ast.Node) (ast.Node, bool) { + switch node := in.(type) { + case *ast.UpdateStmt: + p.checkQueryBlockHints(node.TableHints, 0) + case *ast.DeleteStmt: + p.checkQueryBlockHints(node.TableHints, 0) + case *ast.SelectStmt: + p.selectStmtOffset++ + node.QueryBlockOffset = p.selectStmtOffset + p.checkQueryBlockHints(node.TableHints, node.QueryBlockOffset) + } + return in, false +} + +// Leave implements Visitor interface. +func (p *BlockHintProcessor) Leave(in ast.Node) (ast.Node, bool) { + return in, true +} + +const hintQBName = "qb_name" + +// checkQueryBlockHints checks the validity of query blocks and records the map of query block name to select offset. 
+func (p *BlockHintProcessor) checkQueryBlockHints(hints []*ast.TableOptimizerHint, offset int) { + var qbName string + for _, hint := range hints { + if hint.HintName.L != hintQBName { + continue + } + if qbName != "" { + p.Ctx.GetSessionVars().StmtCtx.AppendWarning(errors.New(fmt.Sprintf("There is more than one query block name in the same query block, using the first one %s", qbName))) + } else { + qbName = hint.QBName.L + } + } + if qbName == "" { + return + } + if p.QbNameMap == nil { + p.QbNameMap = make(map[string]int) + } + if _, ok := p.QbNameMap[qbName]; ok { + p.Ctx.GetSessionVars().StmtCtx.AppendWarning(errors.New(fmt.Sprintf("Duplicate query block name %s, only the first one is effective", qbName))) + } else { + p.QbNameMap[qbName] = offset + } +} + +const ( + defaultUpdateBlockName = "upd_1" + defaultDeleteBlockName = "del_1" + defaultSelectBlockPrefix = "sel_" +) + +type nodeType int + +const ( + typeUpdate nodeType = iota + typeDelete + typeSelect +) + +// getBlockOffset finds the offset of a query block by its name. It uses 0 as the offset for a top-level update or delete, +// and -1 for an invalid block name. +func (p *BlockHintProcessor) getBlockOffset(blockName model.CIStr, nodeType nodeType) int { + if p.QbNameMap != nil { + level, ok := p.QbNameMap[blockName.L] + if ok { + return level + } + } + // Handle the default query block name. + if nodeType == typeUpdate && blockName.L == defaultUpdateBlockName { + return 0 + } + if nodeType == typeDelete && blockName.L == defaultDeleteBlockName { + return 0 + } + if nodeType == typeSelect && strings.HasPrefix(blockName.L, defaultSelectBlockPrefix) { + suffix := blockName.L[len(defaultSelectBlockPrefix):] + level, err := strconv.ParseInt(suffix, 10, 64) + if err != nil || level > int64(p.selectStmtOffset) { + return -1 + } + return int(level) + } + return -1 +} + +// getHintOffset gets the offset of the stmt on which the hint takes effect. +func (p *BlockHintProcessor) getHintOffset(hint *ast.TableOptimizerHint, nodeType nodeType, currentOffset int) int { + if hint.QBName.L != "" { + return p.getBlockOffset(hint.QBName, nodeType) + } + return currentOffset +} + +// getCurrentStmtHints extracts all hints that take effect at the current stmt. +func (p *BlockHintProcessor) getCurrentStmtHints(hints []*ast.TableOptimizerHint, nodeType nodeType, currentOffset int) []*ast.TableOptimizerHint { + if p.QbHints == nil { + p.QbHints = make(map[int][]*ast.TableOptimizerHint) + } + for _, hint := range hints { + if hint.HintName.L == hintQBName { + continue + } + offset := p.getHintOffset(hint, nodeType, currentOffset) + if offset < 0 { + var sb strings.Builder + ctx := format.NewRestoreCtx(format.DefaultRestoreFlags, &sb) + err := hint.Restore(ctx) + // There won't be any error for optimizer hint. + if err == nil { + p.Ctx.GetSessionVars().StmtCtx.AppendWarning(errors.New(fmt.Sprintf("Hint %s is ignored due to unknown query block name", sb.String()))) + } + continue + } + p.QbHints[offset] = append(p.QbHints[offset], hint) + } + return p.QbHints[currentOffset] +} diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index b19fdbb1e1b1e..9ab5155031881 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -51,10 +51,26 @@ import ( const ( // TiDBMergeJoin is hint enforce merge join. TiDBMergeJoin = "tidb_smj" + // HintSMJ is hint enforce merge join. + HintSMJ = "sm_join" // TiDBIndexNestedLoopJoin is hint enforce index nested loop join.
TiDBIndexNestedLoopJoin = "tidb_inlj" + // HintINLJ is hint enforce index nested loop join. + HintINLJ = "inl_join" // TiDBHashJoin is hint enforce hash join. TiDBHashJoin = "tidb_hj" + // HintHJ is hint enforce hash join. + HintHJ = "hash_join" + // HintHashAgg is hint enforce hash aggregation. + HintHashAgg = "hash_agg" + // HintStreamAgg is hint enforce stream aggregation. + HintStreamAgg = "stream_agg" + // HintUseIndex is hint enforce using some indexes. + HintUseIndex = "use_index" + // HintIgnoreIndex is hint enforce ignoring some indexes. + HintIgnoreIndex = "ignore_index" + // HintAggToCop is hint enforce pushing aggregation to coprocessor. + HintAggToCop = "agg_to_cop" ) const ( @@ -86,6 +102,9 @@ func (b *PlanBuilder) buildAggregation(ctx context.Context, p LogicalPlan, aggFu b.optFlag = b.optFlag | flagEliminateProjection plan4Agg := LogicalAggregation{AggFuncs: make([]*aggregation.AggFuncDesc, 0, len(aggFuncList))}.Init(b.ctx) + if hint := b.TableHints(); hint != nil { + plan4Agg.aggHints = hint.aggHints + } schema4Agg := expression.NewSchema(make([]*expression.Column, 0, len(aggFuncList)+p.Schema().Len())...) // aggIdxMap maps the old index to new index after applying common aggregation functions elimination. aggIndexMap := make(map[int]int) @@ -152,7 +171,7 @@ func (b *PlanBuilder) buildResultSetNode(ctx context.Context, node ast.ResultSet case *ast.UnionStmt: p, err = b.buildUnion(ctx, v) case *ast.TableName: - p, err = b.buildDataSource(ctx, v) + p, err = b.buildDataSource(ctx, v, &x.AsName) default: err = ErrUnsupportedType.GenWithStackByArgs(v) } @@ -160,9 +179,6 @@ func (b *PlanBuilder) buildResultSetNode(ctx context.Context, node ast.ResultSet return nil, err } - if v, ok := p.(*DataSource); ok { - v.TableAsName = &x.AsName - } for _, col := range p.Schema().Columns { col.OrigTblName = col.TblName if x.AsName.L != "" { @@ -328,9 +344,9 @@ func extractTableAlias(p LogicalPlan) *model.CIStr { return nil } -func (p *LogicalJoin) setPreferredJoinType(hintInfo *tableHintInfo) error { +func (p *LogicalJoin) setPreferredJoinType(hintInfo *tableHintInfo) { if hintInfo == nil { - return nil + return } lhsAlias := extractTableAlias(p.children[0]) @@ -356,9 +372,10 @@ func (p *LogicalJoin) setPreferredJoinType(hintInfo *tableHintInfo) error { // If there're multiple join types and one of them is not index join hint, // then there is a conflict of join types. if bits.OnesCount(p.preferJoinType) > 1 && (p.preferJoinType^preferRightAsIndexInner^preferLeftAsIndexInner) > 0 { - return errors.New("Join hints are conflict, you can only specify one type of join") + errMsg := "Join hints are conflict, you can only specify one type of join" + warning := ErrInternal.GenWithStack(errMsg) + p.ctx.GetSessionVars().StmtCtx.AppendWarning(warning) } - return nil } func resetNotNullFlag(schema *expression.Schema, start, end int) { @@ -425,10 +442,7 @@ func (b *PlanBuilder) buildJoin(ctx context.Context, joinNode *ast.Join) (Logica joinPlan.redundantSchema = expression.MergeSchema(lRedundant, rRedundant) // Set preferred join algorithm if some join hints is specified by user. - err = joinPlan.setPreferredJoinType(b.TableHints()) - if err != nil { - return nil, err - } + joinPlan.setPreferredJoinType(b.TableHints()) // "NATURAL JOIN" doesn't have "ON" or "USING" conditions. 
// @@ -781,6 +795,9 @@ func (b *PlanBuilder) buildDistinct(child LogicalPlan, length int) (*LogicalAggr AggFuncs: make([]*aggregation.AggFuncDesc, 0, child.Schema().Len()), GroupByItems: expression.Column2Exprs(child.Schema().Clone().Columns[:length]), }.Init(b.ctx) + if hint := b.TableHints(); hint != nil { + plan4Agg.aggHints = hint.aggHints + } plan4Agg.collectGroupByColumns() for _, col := range child.Schema().Columns { aggDesc, err := aggregation.NewAggFuncDesc(b.ctx, ast.AggFuncFirstRow, []expression.Expression{col}, false) @@ -1941,25 +1958,60 @@ func (b *PlanBuilder) unfoldWildStar(p LogicalPlan, selectFields []*ast.SelectFi return resultList, nil } -func (b *PlanBuilder) pushTableHints(hints []*ast.TableOptimizerHint) bool { - var sortMergeTables, INLJTables, hashJoinTables []hintTableInfo +func (b *PlanBuilder) pushTableHints(hints []*ast.TableOptimizerHint, nodeType nodeType, currentLevel int) bool { + hints = b.hintProcessor.getCurrentStmtHints(hints, nodeType, currentLevel) + var ( + sortMergeTables, INLJTables, hashJoinTables []hintTableInfo + indexHintList []indexHintInfo + aggHints aggHintInfo + ) for _, hint := range hints { switch hint.HintName.L { - case TiDBMergeJoin: - sortMergeTables = tableNames2HintTableInfo(hint.Tables) - case TiDBIndexNestedLoopJoin: - INLJTables = tableNames2HintTableInfo(hint.Tables) - case TiDBHashJoin: - hashJoinTables = tableNames2HintTableInfo(hint.Tables) + case TiDBMergeJoin, HintSMJ: + sortMergeTables = append(sortMergeTables, tableNames2HintTableInfo(hint.Tables)...) + case TiDBIndexNestedLoopJoin, HintINLJ: + INLJTables = append(INLJTables, tableNames2HintTableInfo(hint.Tables)...) + case TiDBHashJoin, HintHJ: + hashJoinTables = append(hashJoinTables, tableNames2HintTableInfo(hint.Tables)...) + case HintHashAgg: + aggHints.preferAggType |= preferHashAgg + case HintStreamAgg: + aggHints.preferAggType |= preferStreamAgg + case HintAggToCop: + aggHints.preferAggToCop = true + case HintUseIndex: + if len(hint.Tables) != 0 { + indexHintList = append(indexHintList, indexHintInfo{ + tblName: hint.Tables[0].TableName, + indexHint: &ast.IndexHint{ + IndexNames: hint.Indexes, + HintType: ast.HintUse, + HintScope: ast.HintForScan, + }, + }) + } + case HintIgnoreIndex: + if len(hint.Tables) != 0 { + indexHintList = append(indexHintList, indexHintInfo{ + tblName: hint.Tables[0].TableName, + indexHint: &ast.IndexHint{ + IndexNames: hint.Indexes, + HintType: ast.HintIgnore, + HintScope: ast.HintForScan, + }, + }) + } default: // ignore hints that not implemented } } - if len(sortMergeTables)+len(INLJTables)+len(hashJoinTables) > 0 { + if len(sortMergeTables)+len(INLJTables)+len(hashJoinTables)+len(indexHintList) > 0 || aggHints.preferAggType != 0 || aggHints.preferAggToCop { b.tableHintInfo = append(b.tableHintInfo, tableHintInfo{ sortMergeJoinTables: sortMergeTables, indexNestedLoopJoinTables: INLJTables, hashJoinTables: hashJoinTables, + indexHintList: indexHintList, + aggHints: aggHints, }) return true } @@ -1968,19 +2020,19 @@ func (b *PlanBuilder) pushTableHints(hints []*ast.TableOptimizerHint) bool { func (b *PlanBuilder) popTableHints() { hintInfo := b.tableHintInfo[len(b.tableHintInfo)-1] - b.appendUnmatchedJoinHintWarning(TiDBIndexNestedLoopJoin, hintInfo.indexNestedLoopJoinTables) - b.appendUnmatchedJoinHintWarning(TiDBMergeJoin, hintInfo.sortMergeJoinTables) - b.appendUnmatchedJoinHintWarning(TiDBHashJoin, hintInfo.hashJoinTables) + b.appendUnmatchedJoinHintWarning(HintINLJ, TiDBIndexNestedLoopJoin, hintInfo.indexNestedLoopJoinTables) + 
b.appendUnmatchedJoinHintWarning(HintSMJ, TiDBMergeJoin, hintInfo.sortMergeJoinTables) + b.appendUnmatchedJoinHintWarning(HintHJ, TiDBHashJoin, hintInfo.hashJoinTables) b.tableHintInfo = b.tableHintInfo[:len(b.tableHintInfo)-1] } -func (b *PlanBuilder) appendUnmatchedJoinHintWarning(joinType string, hintTables []hintTableInfo) { +func (b *PlanBuilder) appendUnmatchedJoinHintWarning(joinType string, joinTypeAlias string, hintTables []hintTableInfo) { unMatchedTables := extractUnmatchedTables(hintTables) if len(unMatchedTables) == 0 { return } - errMsg := fmt.Sprintf("There are no matching table names for (%s) in optimizer hint %s. Maybe you can use the table alias name", - strings.Join(unMatchedTables, ", "), restore2JoinHint(joinType, hintTables)) + errMsg := fmt.Sprintf("There are no matching table names for (%s) in optimizer hint %s or %s. Maybe you can use the table alias name", + strings.Join(unMatchedTables, ", "), restore2JoinHint(joinType, hintTables), restore2JoinHint(joinTypeAlias, hintTables)) b.ctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack(errMsg)) } @@ -1993,10 +2045,11 @@ func (b *PlanBuilder) TableHints() *tableHintInfo { } func (b *PlanBuilder) buildSelect(ctx context.Context, sel *ast.SelectStmt) (p LogicalPlan, err error) { - if b.pushTableHints(sel.TableHints) { + if b.pushTableHints(sel.TableHints, typeSelect, sel.QueryBlockOffset) { // table hints are only visible in the current SELECT statement. defer b.popTableHints() } + if sel.SelectStmtOpts != nil { origin := b.inStraightJoin b.inStraightJoin = sel.SelectStmtOpts.StraightJoin @@ -2211,7 +2264,7 @@ func getStatsTable(ctx sessionctx.Context, tblInfo *model.TableInfo, pid int64) return statsTbl } -func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName) (LogicalPlan, error) { +func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, asName *model.CIStr) (LogicalPlan, error) { dbName := tn.Schema if dbName.L == "" { dbName = model.NewCIStr(b.ctx.GetSessionVars().CurrentDB) @@ -2246,7 +2299,11 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName) (L return nil, ErrPartitionClauseOnNonpartitioned } - possiblePaths, err := getPossibleAccessPaths(tn.IndexHints, tableInfo) + tblName := *asName + if tblName.L == "" { + tblName = tn.Name + } + possiblePaths, err := b.getPossibleAccessPaths(tn.IndexHints, tableInfo, tblName) if err != nil { return nil, err } @@ -2271,6 +2328,7 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName) (L ds := DataSource{ DBName: dbName, + TableAsName: asName, table: tbl, tableInfo: tableInfo, statisticTable: statisticTable, @@ -2574,7 +2632,7 @@ func (b *PlanBuilder) buildSemiJoin(outerPlan, innerPlan LogicalPlan, onConditio } func (b *PlanBuilder) buildUpdate(ctx context.Context, update *ast.UpdateStmt) (Plan, error) { - if b.pushTableHints(update.TableHints) { + if b.pushTableHints(update.TableHints, typeUpdate, 0) { // table hints are only visible in the current UPDATE statement. defer b.popTableHints() } @@ -2757,7 +2815,7 @@ func (b *PlanBuilder) buildUpdateLists(ctx context.Context, tableList []*ast.Tab } func (b *PlanBuilder) buildDelete(ctx context.Context, delete *ast.DeleteStmt) (Plan, error) { - if b.pushTableHints(delete.TableHints) { + if b.pushTableHints(delete.TableHints, typeDelete, 0) { // table hints are only visible in the current DELETE statement. 
defer b.popTableHints() } diff --git a/planner/core/logical_plan_test.go b/planner/core/logical_plan_test.go index 897a104097c8f..a5f06c68d1204 100644 --- a/planner/core/logical_plan_test.go +++ b/planner/core/logical_plan_test.go @@ -1803,9 +1803,10 @@ func (s *testPlanSuite) TestVisitInfo(c *C) { c.Assert(err, IsNil, comment) Preprocess(s.ctx, stmt, s.is) builder := &PlanBuilder{ - colMapper: make(map[*ast.ColumnNameExpr]int), - ctx: MockContext(), - is: s.is, + colMapper: make(map[*ast.ColumnNameExpr]int), + ctx: MockContext(), + is: s.is, + hintProcessor: &BlockHintProcessor{}, } builder.ctx.GetSessionVars().HashJoinConcurrency = 1 _, err = builder.Build(context.TODO(), stmt) @@ -1922,9 +1923,10 @@ func (s *testPlanSuite) TestUnion(c *C) { c.Assert(err, IsNil, comment) Preprocess(s.ctx, stmt, s.is) builder := &PlanBuilder{ - ctx: MockContext(), - is: s.is, - colMapper: make(map[*ast.ColumnNameExpr]int), + ctx: MockContext(), + is: s.is, + colMapper: make(map[*ast.ColumnNameExpr]int), + hintProcessor: &BlockHintProcessor{}, } plan, err := builder.Build(ctx, stmt) if tt.err { @@ -2055,9 +2057,10 @@ func (s *testPlanSuite) TestTopNPushDown(c *C) { c.Assert(err, IsNil, comment) Preprocess(s.ctx, stmt, s.is) builder := &PlanBuilder{ - ctx: MockContext(), - is: s.is, - colMapper: make(map[*ast.ColumnNameExpr]int), + ctx: MockContext(), + is: s.is, + colMapper: make(map[*ast.ColumnNameExpr]int), + hintProcessor: &BlockHintProcessor{}, } p, err := builder.Build(ctx, stmt) c.Assert(err, IsNil) @@ -2182,9 +2185,10 @@ func (s *testPlanSuite) TestOuterJoinEliminator(c *C) { c.Assert(err, IsNil, comment) Preprocess(s.ctx, stmt, s.is) builder := &PlanBuilder{ - ctx: MockContext(), - is: s.is, - colMapper: make(map[*ast.ColumnNameExpr]int), + ctx: MockContext(), + is: s.is, + colMapper: make(map[*ast.ColumnNameExpr]int), + hintProcessor: &BlockHintProcessor{}, } p, err := builder.Build(ctx, stmt) c.Assert(err, IsNil) @@ -2214,9 +2218,10 @@ func (s *testPlanSuite) TestSelectView(c *C) { c.Assert(err, IsNil, comment) Preprocess(s.ctx, stmt, s.is) builder := &PlanBuilder{ - ctx: MockContext(), - is: s.is, - colMapper: make(map[*ast.ColumnNameExpr]int), + ctx: MockContext(), + is: s.is, + colMapper: make(map[*ast.ColumnNameExpr]int), + hintProcessor: &BlockHintProcessor{}, } p, err := builder.Build(ctx, stmt) c.Assert(err, IsNil) @@ -2541,7 +2546,7 @@ func (s *testPlanSuite) optimize(ctx context.Context, sql string) (PhysicalPlan, if err != nil { return nil, nil, err } - builder := NewPlanBuilder(MockContext(), s.is) + builder := NewPlanBuilder(MockContext(), s.is, &BlockHintProcessor{}) p, err := builder.Build(ctx, stmt) if err != nil { return nil, nil, err @@ -2624,9 +2629,10 @@ func (s *testPlanSuite) TestSkylinePruning(c *C) { c.Assert(err, IsNil, comment) Preprocess(s.ctx, stmt, s.is) builder := &PlanBuilder{ - ctx: MockContext(), - is: s.is, - colMapper: make(map[*ast.ColumnNameExpr]int), + ctx: MockContext(), + is: s.is, + colMapper: make(map[*ast.ColumnNameExpr]int), + hintProcessor: &BlockHintProcessor{}, } p, err := builder.Build(ctx, stmt) if err != nil { diff --git a/planner/core/logical_plans.go b/planner/core/logical_plans.go index b9da02491ee04..00d52d75db3cd 100644 --- a/planner/core/logical_plans.go +++ b/planner/core/logical_plans.go @@ -98,6 +98,8 @@ const ( preferRightAsIndexInner preferHashJoin preferMergeJoin + preferHashAgg + preferStreamAgg ) // LogicalJoin is the logical join plan. 
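Editor's note, not part of the patch: the two constants added in the hunk above (preferHashAgg, preferStreamAgg) are bit flags stored in aggHintInfo.preferAggType, which is how a statement that carries both HASH_AGG() and STREAM_AGG() is later detected as a conflict in LogicalAggregation.exhaustPhysicalPlans. A minimal, self-contained Go sketch of that flag handling follows; the local constant values are illustrative stand-ins, not the patch itself.

package main

import "fmt"

// Local stand-ins for the preference bits introduced by this patch (illustrative only).
const (
	preferHashAgg uint = 1 << iota
	preferStreamAgg
)

func main() {
	var preferAggType uint
	preferAggType |= preferHashAgg   // set when a HASH_AGG() hint is seen
	preferAggType |= preferStreamAgg // set when a STREAM_AGG() hint is seen
	if preferAggType&preferHashAgg > 0 && preferAggType&preferStreamAgg > 0 {
		// Mirrors exhaustPhysicalPlans: report "Optimizer aggregation hints are conflicted"
		// as a warning and clear the preference so both implementations stay eligible.
		fmt.Println("conflicting aggregation hints, ignoring both")
		preferAggType = 0
	}
}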
@@ -247,6 +249,9 @@ type LogicalAggregation struct { // groupByCols stores the columns that are group-by items. groupByCols []*expression.Column + // aggHints stores aggregation hint information. + aggHints aggHintInfo + possibleProperties [][]*expression.Column inputCount float64 // inputCount is the input count of this plan. } diff --git a/planner/core/optimizer.go b/planner/core/optimizer.go index 04ac09e2be074..a2e76fbe03ac5 100644 --- a/planner/core/optimizer.go +++ b/planner/core/optimizer.go @@ -76,9 +76,10 @@ func BuildLogicalPlan(ctx context.Context, sctx sessionctx.Context, node ast.Nod sctx.GetSessionVars().PlanID = 0 sctx.GetSessionVars().PlanColumnID = 0 builder := &PlanBuilder{ - ctx: sctx, - is: is, - colMapper: make(map[*ast.ColumnNameExpr]int), + ctx: sctx, + is: is, + colMapper: make(map[*ast.ColumnNameExpr]int), + hintProcessor: &BlockHintProcessor{}, } p, err := builder.Build(ctx, node) if err != nil { diff --git a/planner/core/physical_plan_test.go b/planner/core/physical_plan_test.go index fc508d11935fb..2ed72b2442761 100644 --- a/planner/core/physical_plan_test.go +++ b/planner/core/physical_plan_test.go @@ -613,7 +613,7 @@ func (s *testPlanSuite) TestUnmatchedTableInHint(c *C) { } } -func (s *testPlanSuite) TestIndexJoinHint(c *C) { +func (s *testPlanSuite) TestJoinHints(c *C) { defer testleak.AfterTest(c)() store, dom, err := newStoreWithBootstrap() c.Assert(err, IsNil) @@ -661,3 +661,351 @@ func (s *testPlanSuite) TestIndexJoinHint(c *C) { } } } + +func (s *testPlanSuite) TestAggregationHints(c *C) { + defer testleak.AfterTest(c)() + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + defer func() { + dom.Close() + store.Close() + }() + se, err := session.CreateSession4Test(store) + c.Assert(err, IsNil) + _, err = se.Execute(context.Background(), "use test") + c.Assert(err, IsNil) + + tests := []struct { + sql string + best string + warning string + aggPushDown bool + }{ + // without Aggregation hints + { + sql: "select count(*) from t t1, t t2 where t1.a = t2.b", + best: "LeftHashJoin{IndexReader(Index(t.c_d_e)[[NULL,+inf]])->TableReader(Table(t))}(test.t1.a,test.t2.b)->StreamAgg", + }, + { + sql: "select count(t1.a) from t t1, t t2 where t1.a = t2.a*2 group by t1.a", + best: "LeftHashJoin{IndexReader(Index(t.c_d_e)[[NULL,+inf]])->IndexReader(Index(t.c_d_e)[[NULL,+inf]])->Projection}(test.t1.a,mul(test.t2.a, 2))->HashAgg", + }, + // with Aggregation hints + { + sql: "select /*+ HASH_AGG(), USE_INDEX(t1), USE_INDEX(t2) */ count(*) from t t1, t t2 where t1.a = t2.b", + best: "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t1.a,test.t2.b)->HashAgg", + }, + { + sql: "select /*+ STREAM_AGG(), USE_INDEX(t1), USE_INDEX(t2) */ count(t1.a) from t t1, t t2 where t1.a = t2.a*2 group by t1.a", + best: "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))->Projection}(test.t1.a,mul(test.t2.a, 2))->Sort->StreamAgg", + }, + // test conflict warning + { + sql: "select /*+ HASH_AGG(), STREAM_AGG(), USE_INDEX(t1), USE_INDEX(t2) */ count(*) from t t1, t t2 where t1.a = t2.b", + best: "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t1.a,test.t2.b)->StreamAgg", + warning: "[planner:1815]Optimizer aggregation hints are conflicted", + }, + // additional test + { + sql: "select /*+ STREAM_AGG(), USE_INDEX(t) */ distinct a from t", + best: "TableReader(Table(t)->StreamAgg)->StreamAgg", + }, + { + sql: "select /*+ HASH_AGG(), USE_INDEX(t1) */ t1.a from t t1 where t1.a < any(select t2.b from t t2)", + best: 
"LeftHashJoin{TableReader(Table(t)->Sel([if(isnull(test.t1.a), , 1)]))->TableReader(Table(t)->HashAgg)->HashAgg->Sel([ne(agg_col_cnt, 0)])}->Projection->Projection", + }, + { + sql: "select /*+ hash_agg(), USE_INDEX(t1) */ t1.a from t t1 where t1.a != any(select t2.b from t t2)", + best: "LeftHashJoin{TableReader(Table(t)->Sel([if(isnull(test.t1.a), , 1)]))->TableReader(Table(t))->Projection->HashAgg->Sel([ne(agg_col_cnt, 0)])}->Projection->Projection", + }, + { + sql: "select /*+ hash_agg(), USE_INDEX(t1) */ t1.a from t t1 where t1.a = all(select t2.b from t t2)", + best: "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))->Projection->HashAgg}->Projection->Projection", + }, + { + sql: "select /*+ STREAM_AGG(), USE_INDEX(t1), USE_INDEX(t2) */ sum(t1.a) from t t1 join t t2 on t1.b = t2.b group by t1.b", + best: "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))->Sort->Projection->StreamAgg}(test.t2.b,test.t1.b)->HashAgg", + warning: "[planner:1815]Optimizer Hint STREAM_AGG is inapplicable", + aggPushDown: true, + }, + } + ctx := context.Background() + for i, test := range tests { + comment := Commentf("case:%v sql:%s", i, test) + se.GetSessionVars().StmtCtx.SetWarnings(nil) + se.GetSessionVars().AllowAggPushDown = test.aggPushDown + + stmt, err := s.ParseOneStmt(test.sql, "", "") + c.Assert(err, IsNil, comment) + + p, err := planner.Optimize(ctx, se, stmt, s.is) + c.Assert(err, IsNil) + c.Assert(core.ToString(p), Equals, test.best, comment) + + warnings := se.GetSessionVars().StmtCtx.GetWarnings() + if test.warning == "" { + c.Assert(len(warnings), Equals, 0, comment) + } else { + c.Assert(len(warnings), Equals, 1, comment) + c.Assert(warnings[0].Level, Equals, stmtctx.WarnLevelWarning, comment) + c.Assert(warnings[0].Err.Error(), Equals, test.warning, comment) + } + } +} + +func (s *testPlanSuite) TestAggToCopHint(c *C) { + defer testleak.AfterTest(c)() + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + defer func() { + dom.Close() + store.Close() + }() + se, err := session.CreateSession4Test(store) + c.Assert(err, IsNil) + _, err = se.Execute(context.Background(), "use test") + c.Assert(err, IsNil) + _, err = se.Execute(context.Background(), "insert into mysql.opt_rule_blacklist values(\"aggregation_eliminate\")") + c.Assert(err, IsNil) + _, err = se.Execute(context.Background(), "admin reload opt_rule_blacklist") + c.Assert(err, IsNil) + + tests := []struct { + sql string + best string + warning string + }{ + { + sql: "select /*+ AGG_TO_COP(), HASH_AGG(), USE_INDEX(t) */ sum(a) from t group by a", + best: "TableReader(Table(t)->HashAgg)->HashAgg", + }, + { + sql: "select /*+ AGG_TO_COP(), USE_INDEX(t) */ sum(b) from t group by b", + best: "TableReader(Table(t)->HashAgg)->HashAgg", + }, + { + sql: "select /*+ AGG_TO_COP(), HASH_AGG(), USE_INDEX(t) */ distinct a from t group by a", + best: "TableReader(Table(t)->HashAgg)->HashAgg->HashAgg", + warning: "[planner:1815]Optimizer Hint AGG_TO_COP is inapplicable", + }, + { + sql: "select /*+ AGG_TO_COP(), HASH_AGG(), HASH_JOIN(t1), USE_INDEX(t1), USE_INDEX(t2) */ sum(t1.a) from t t1, t t2 where t1.a = t2.b group by t1.a", + best: "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t1.a,test.t2.b)->Projection->HashAgg", + warning: "[planner:1815]Optimizer Hint AGG_TO_COP is inapplicable", + }, + } + ctx := context.Background() + for i, test := range tests { + comment := Commentf("case:%v sql:%s", i, test) + se.GetSessionVars().StmtCtx.SetWarnings(nil) + + stmt, err := s.ParseOneStmt(test.sql, "", 
"") + c.Assert(err, IsNil, comment) + + p, err := planner.Optimize(ctx, se, stmt, s.is) + c.Assert(err, IsNil) + c.Assert(core.ToString(p), Equals, test.best, comment) + + warnings := se.GetSessionVars().StmtCtx.GetWarnings() + if test.warning == "" { + c.Assert(len(warnings), Equals, 0, comment) + } else { + c.Assert(len(warnings), Equals, 1, comment) + c.Assert(warnings[0].Level, Equals, stmtctx.WarnLevelWarning, comment) + c.Assert(warnings[0].Err.Error(), Equals, test.warning, comment) + } + } +} + +func (s *testPlanSuite) TestHintAlias(c *C) { + defer testleak.AfterTest(c)() + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + defer func() { + dom.Close() + store.Close() + }() + se, err := session.CreateSession4Test(store) + c.Assert(err, IsNil) + _, err = se.Execute(context.Background(), "use test") + c.Assert(err, IsNil) + + tests := []struct { + sql1 string + sql2 string + }{ + { + sql1: "select /*+ TIDB_SMJ(t1) */ t1.a, t1.b from t t1, (select /*+ TIDB_INLJ(t3) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + sql2: "select /*+ SM_JOIN(t1) */ t1.a, t1.b from t t1, (select /*+ INL_JOIN(t3) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + }, + { + sql1: "select /*+ TIDB_HJ(t1) */ t1.a, t1.b from t t1, (select /*+ TIDB_SMJ(t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + sql2: "select /*+ HASH_JOIN(t1) */ t1.a, t1.b from t t1, (select /*+ SM_JOIN(t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + }, + { + sql1: "select /*+ TIDB_INLJ(t1) */ t1.a, t1.b from t t1, (select /*+ TIDB_HJ(t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + sql2: "select /*+ INL_JOIN(t1) */ t1.a, t1.b from t t1, (select /*+ HASH_JOIN(t2) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + }, + } + ctx := context.TODO() + for i, tt := range tests { + comment := Commentf("case:%v sql1:%s sql2:%s", i, tt.sql1, tt.sql2) + stmt1, err := s.ParseOneStmt(tt.sql1, "", "") + c.Assert(err, IsNil, comment) + stmt2, err := s.ParseOneStmt(tt.sql2, "", "") + c.Assert(err, IsNil, comment) + + p1, err := planner.Optimize(ctx, se, stmt1, s.is) + c.Assert(err, IsNil) + p2, err := planner.Optimize(ctx, se, stmt2, s.is) + c.Assert(err, IsNil) + + c.Assert(core.ToString(p1), Equals, core.ToString(p2)) + } +} + +func (s *testPlanSuite) TestIndexHint(c *C) { + defer testleak.AfterTest(c)() + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + defer func() { + dom.Close() + store.Close() + }() + se, err := session.CreateSession4Test(store) + c.Assert(err, IsNil) + _, err = se.Execute(context.Background(), "use test") + c.Assert(err, IsNil) + + tests := []struct { + sql string + best string + hasWarn bool + }{ + // simple case + { + sql: "select /*+ USE_INDEX(t, c_d_e) */ * from t", + best: "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", + hasWarn: false, + }, + { + sql: "select /*+ USE_INDEX(t, c_d_e) */ * from t t1", + best: "TableReader(Table(t))", + hasWarn: false, + }, + { + sql: "select /*+ USE_INDEX(t1, c_d_e) */ * from t t1", + best: "IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))", + hasWarn: false, + }, + { + sql: "select /*+ USE_INDEX(t1, c_d_e), USE_INDEX(t2, f) */ * from t t1, t t2 where t1.a = t2.b", + best: "LeftHashJoin{IndexLookUp(Index(t.c_d_e)[[NULL,+inf]], Table(t))->IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))}(test.t1.a,test.t2.b)", + hasWarn: false, + }, + // test multiple indexes + { + sql: "select /*+ USE_INDEX(t, c_d_e, f, g) */ * from t order by f", + best: 
"IndexLookUp(Index(t.f)[[NULL,+inf]], Table(t))", + hasWarn: false, + }, + // use TablePath when the hint only contains table. + { + sql: "select /*+ USE_INDEX(t) */ f from t where f > 10", + best: "TableReader(Table(t)->Sel([gt(test.t.f, 10)]))", + hasWarn: false, + }, + // there will be a warning instead of error when index not exist + { + sql: "select /*+ USE_INDEX(t, no_such_index) */ * from t", + best: "TableReader(Table(t))", + hasWarn: true, + }, + } + ctx := context.Background() + for i, test := range tests { + comment := Commentf("case:%v sql:%s", i, test.sql) + se.GetSessionVars().StmtCtx.SetWarnings(nil) + + stmt, err := s.ParseOneStmt(test.sql, "", "") + c.Assert(err, IsNil, comment) + + p, err := planner.Optimize(ctx, se, stmt, s.is) + c.Assert(err, IsNil) + c.Assert(core.ToString(p), Equals, test.best, comment) + + warnings := se.GetSessionVars().StmtCtx.GetWarnings() + if test.hasWarn { + c.Assert(warnings, HasLen, 1, comment) + } else { + c.Assert(warnings, HasLen, 0, comment) + } + } +} + +func (s *testPlanSuite) TestQueryBlockHint(c *C) { + defer testleak.AfterTest(c)() + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + defer func() { + dom.Close() + store.Close() + }() + se, err := session.CreateSession4Test(store) + c.Assert(err, IsNil) + _, err = se.Execute(context.Background(), "use test") + c.Assert(err, IsNil) + + tests := []struct { + sql string + plan string + }{ + { + sql: "select /*+ SM_JOIN(@sel_1 t1), INL_JOIN(@sel_2 t3) */ t1.a, t1.b from t t1, (select t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + plan: "MergeInnerJoin{TableReader(Table(t))->IndexJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t2.a,test.t3.c)}(test.t1.a,test.t2.a)->Projection", + }, + { + sql: "select /*+ SM_JOIN(@sel_1 t1), INL_JOIN(@qb t3) */ t1.a, t1.b from t t1, (select /*+ QB_NAME(qb) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + plan: "MergeInnerJoin{TableReader(Table(t))->IndexJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t2.a,test.t3.c)}(test.t1.a,test.t2.a)->Projection", + }, + { + sql: "select /*+ HASH_JOIN(@sel_1 t1), SM_JOIN(@sel_2 t2) */ t1.a, t1.b from t t1, (select t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + plan: "RightHashJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t2.a,test.t3.c)}(test.t1.a,test.t2.a)->Projection", + }, + { + sql: "select /*+ HASH_JOIN(@sel_1 t1), SM_JOIN(@qb t2) */ t1.a, t1.b from t t1, (select /*+ QB_NAME(qb) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + plan: "RightHashJoin{TableReader(Table(t))->MergeInnerJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t2.a,test.t3.c)}(test.t1.a,test.t2.a)->Projection", + }, + { + sql: "select /*+ INL_JOIN(@sel_1 t1), HASH_JOIN(@sel_2 t2) */ t1.a, t1.b from t t1, (select t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + plan: "IndexJoin{TableReader(Table(t))->LeftHashJoin{IndexReader(Index(t.c_d_e)[[NULL,+inf]])->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t2.a,test.t3.c)}(test.t2.a,test.t1.a)->Projection", + }, + { + sql: "select /*+ INL_JOIN(@sel_1 t1), HASH_JOIN(@qb t2) */ t1.a, t1.b from t t1, (select /*+ QB_NAME(qb) */ t2.a from t t2, t t3 where t2.a = t3.c) s where t1.a=s.a", + plan: 
"IndexJoin{TableReader(Table(t))->LeftHashJoin{IndexReader(Index(t.c_d_e)[[NULL,+inf]])->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t2.a,test.t3.c)}(test.t2.a,test.t1.a)->Projection", + }, + { + sql: "select /*+ HASH_AGG(@sel_1), STREAM_AGG(@sel_2) */ count(*) from t t1 where t1.a < (select count(*) from t t2 where t1.a > t2.a)", + plan: "Apply{IndexReader(Index(t.c_d_e)[[NULL,+inf]])->IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Sel([gt(test.t1.a, test.t2.a)])->StreamAgg)->StreamAgg->Sel([not(isnull(5_col_0))])}->HashAgg", + }, + { + sql: "select /*+ STREAM_AGG(@sel_1), HASH_AGG(@qb) */ count(*) from t t1 where t1.a < (select /*+ QB_NAME(qb) */ count(*) from t t2 where t1.a > t2.a)", + plan: "Apply{IndexReader(Index(t.c_d_e)[[NULL,+inf]])->IndexReader(Index(t.c_d_e)[[NULL,+inf]]->Sel([gt(test.t1.a, test.t2.a)])->HashAgg)->HashAgg->Sel([not(isnull(5_col_0))])}->StreamAgg", + }, + { + sql: "select /*+ HASH_AGG(@sel_2) */ a, (select count(*) from t t1 where t1.b > t.a) from t where b > (select b from t t2 where t2.b = t.a limit 1)", + plan: "Apply{Apply{TableReader(Table(t))->TableReader(Table(t)->Sel([eq(test.t2.b, test.t.a)])->Limit)->Limit}->TableReader(Table(t)->Sel([gt(test.t1.b, test.t.a)])->HashAgg)->HashAgg}->Projection", + }, + } + ctx := context.TODO() + for i, tt := range tests { + comment := Commentf("case:%v sql: %s", i, tt.sql) + stmt, err := s.ParseOneStmt(tt.sql, "", "") + c.Assert(err, IsNil, comment) + + p, err := planner.Optimize(ctx, se, stmt, s.is) + c.Assert(err, IsNil, comment) + + c.Assert(core.ToString(p), Equals, tt.plan, comment) + } +} diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index 479c1c2c99a61..ee7bb36f5240e 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -56,6 +56,8 @@ type tableHintInfo struct { indexNestedLoopJoinTables []hintTableInfo sortMergeJoinTables []hintTableInfo hashJoinTables []hintTableInfo + indexHintList []indexHintInfo + aggHints aggHintInfo } type hintTableInfo struct { @@ -63,15 +65,25 @@ type hintTableInfo struct { matched bool } -func tableNames2HintTableInfo(tableNames []model.CIStr) []hintTableInfo { - if len(tableNames) == 0 { +type indexHintInfo struct { + tblName model.CIStr + indexHint *ast.IndexHint +} + +type aggHintInfo struct { + preferAggType uint + preferAggToCop bool +} + +func tableNames2HintTableInfo(hintTables []ast.HintTable) []hintTableInfo { + if len(hintTables) == 0 { return nil } - hintTables := make([]hintTableInfo, 0, len(tableNames)) - for _, tableName := range tableNames { - hintTables = append(hintTables, hintTableInfo{name: tableName}) + hintTableInfos := make([]hintTableInfo, len(hintTables)) + for i, hintTable := range hintTables { + hintTableInfos[i] = hintTableInfo{name: hintTable.TableName} } - return hintTables + return hintTableInfos } func (info *tableHintInfo) ifPreferMergeJoin(tableNames ...*model.CIStr) bool { @@ -200,6 +212,8 @@ type PlanBuilder struct { inStraightJoin bool windowSpecs map[string]*ast.WindowSpec + + hintProcessor *BlockHintProcessor } // GetVisitInfo gets the visitInfo of the PlanBuilder. @@ -233,11 +247,12 @@ func (b *PlanBuilder) GetOptFlag() uint64 { } // NewPlanBuilder creates a new PlanBuilder. 
-func NewPlanBuilder(sctx sessionctx.Context, is infoschema.InfoSchema) *PlanBuilder { +func NewPlanBuilder(sctx sessionctx.Context, is infoschema.InfoSchema, processor *BlockHintProcessor) *PlanBuilder { return &PlanBuilder{ - ctx: sctx, - is: is, - colMapper: make(map[*ast.ColumnNameExpr]int), + ctx: sctx, + is: is, + colMapper: make(map[*ast.ColumnNameExpr]int), + hintProcessor: processor, } } @@ -470,7 +485,7 @@ func isPrimaryIndex(indexName model.CIStr) bool { return indexName.L == "primary" } -func getPossibleAccessPaths(indexHints []*ast.IndexHint, tblInfo *model.TableInfo) ([]*accessPath, error) { +func (b *PlanBuilder) getPossibleAccessPaths(indexHints []*ast.IndexHint, tblInfo *model.TableInfo, tblName model.CIStr) ([]*accessPath, error) { publicPaths := make([]*accessPath, 0, len(tblInfo.Indices)+1) publicPaths = append(publicPaths, &accessPath{isTablePath: true}) for _, index := range tblInfo.Indices { @@ -482,7 +497,18 @@ func getPossibleAccessPaths(indexHints []*ast.IndexHint, tblInfo *model.TableInf hasScanHint, hasUseOrForce := false, false available := make([]*accessPath, 0, len(publicPaths)) ignored := make([]*accessPath, 0, len(publicPaths)) - for _, hint := range indexHints { + + // Extract comment-style index hint like /*+ INDEX(t, idx1, idx2) */. + indexHintsLen := len(indexHints) + if hints := b.TableHints(); hints != nil { + for _, hint := range hints.indexHintList { + if hint.tblName == tblName { + indexHints = append(indexHints, hint.indexHint) + } + } + } + + for i, hint := range indexHints { if hint.HintScope != ast.HintForScan { continue } @@ -502,7 +528,13 @@ func getPossibleAccessPaths(indexHints []*ast.IndexHint, tblInfo *model.TableInf for _, idxName := range hint.IndexNames { path := getPathByIndexName(publicPaths, idxName, tblInfo) if path == nil { - return nil, ErrKeyDoesNotExist.GenWithStackByArgs(idxName, tblInfo.Name) + err := ErrKeyDoesNotExist.GenWithStackByArgs(idxName, tblInfo.Name) + // if hint is from comment-style sql hints, we should throw a warning instead of error. + if i < indexHintsLen { + return nil, err + } + b.ctx.GetSessionVars().StmtCtx.AppendWarning(err) + continue } if hint.HintType == ast.HintIgnore { // Collect all the ignored index hints. @@ -799,7 +831,7 @@ func (b *PlanBuilder) buildPhysicalIndexLookUpReader(ctx context.Context, dbName }.Init(b.ctx) is.stats = property.NewSimpleStats(0) // It's double read case. - ts := PhysicalTableScan{Columns: tblReaderCols, Table: is.Table}.Init(b.ctx) + ts := PhysicalTableScan{Columns: tblReaderCols, Table: is.Table, TableAsName: &tblInfo.Name}.Init(b.ctx) ts.SetSchema(tblSchema) if tbl.Meta().GetPartitionInfo() != nil { pid := tbl.(table.PhysicalTable).GetPhysicalID() diff --git a/planner/core/planbuilder_test.go b/planner/core/planbuilder_test.go index eca40338aa44a..54636648f2abd 100644 --- a/planner/core/planbuilder_test.go +++ b/planner/core/planbuilder_test.go @@ -97,7 +97,8 @@ func (s *testPlanBuilderSuite) TestGetPathByIndexName(c *C) { func (s *testPlanBuilderSuite) TestRewriterPool(c *C) { builder := &PlanBuilder{ - ctx: MockContext(), + ctx: MockContext(), + hintProcessor: &BlockHintProcessor{}, } // Make sure PlanBuilder.getExpressionRewriter() provides clean rewriter from pool. 
@@ -151,7 +152,7 @@ func (s *testPlanBuilderSuite) TestDisableFold(c *C) { stmt := st.(*ast.SelectStmt) expr := stmt.Fields.Fields[0].Expr - builder := &PlanBuilder{ctx: ctx} + builder := &PlanBuilder{ctx: ctx, hintProcessor: &BlockHintProcessor{}} builder.rewriterCounter++ rewriter := builder.getExpressionRewriter(context.TODO(), nil) c.Assert(rewriter, NotNil) diff --git a/planner/core/prepare_test.go b/planner/core/prepare_test.go index 829ec099fface..f468661bb005a 100644 --- a/planner/core/prepare_test.go +++ b/planner/core/prepare_test.go @@ -163,7 +163,7 @@ func (s *testPlanSuite) TestPrepareCacheDeferredFunction(c *C) { stmt, err := s.ParseOneStmt(sql1, "", "") c.Check(err, IsNil) is := tk.Se.GetSessionVars().TxnCtx.InfoSchema.(infoschema.InfoSchema) - builder := core.NewPlanBuilder(tk.Se, is) + builder := core.NewPlanBuilder(tk.Se, is, &core.BlockHintProcessor{}) p, err := builder.Build(ctx, stmt) c.Check(err, IsNil) execPlan, ok := p.(*core.Execute) diff --git a/planner/core/rule_aggregation_push_down.go b/planner/core/rule_aggregation_push_down.go index 6f7488667b0ac..caa6a8dd4fcfe 100644 --- a/planner/core/rule_aggregation_push_down.go +++ b/planner/core/rule_aggregation_push_down.go @@ -189,7 +189,7 @@ func (a *aggregationPushDownSolver) decompose(ctx sessionctx.Context, aggFunc *a // tryToPushDownAgg tries to push down an aggregate function into a join path. If all aggFuncs are first row, we won't // process it temporarily. If not, We will add additional group by columns and first row functions. We make a new aggregation operator. // If the pushed aggregation is grouped by unique key, it's no need to push it down. -func (a *aggregationPushDownSolver) tryToPushDownAgg(aggFuncs []*aggregation.AggFuncDesc, gbyCols []*expression.Column, join *LogicalJoin, childIdx int) (_ LogicalPlan, err error) { +func (a *aggregationPushDownSolver) tryToPushDownAgg(aggFuncs []*aggregation.AggFuncDesc, gbyCols []*expression.Column, join *LogicalJoin, childIdx int, aggHints aggHintInfo) (_ LogicalPlan, err error) { child := join.children[childIdx] if aggregation.IsAllFirstRow(aggFuncs) { return child, nil @@ -204,7 +204,7 @@ func (a *aggregationPushDownSolver) tryToPushDownAgg(aggFuncs []*aggregation.Agg return child, nil } } - agg, err := a.makeNewAgg(join.ctx, aggFuncs, gbyCols) + agg, err := a.makeNewAgg(join.ctx, aggFuncs, gbyCols, aggHints) if err != nil { return nil, err } @@ -247,10 +247,11 @@ func (a *aggregationPushDownSolver) checkAnyCountAndSum(aggFuncs []*aggregation. 
return false } -func (a *aggregationPushDownSolver) makeNewAgg(ctx sessionctx.Context, aggFuncs []*aggregation.AggFuncDesc, gbyCols []*expression.Column) (*LogicalAggregation, error) { +func (a *aggregationPushDownSolver) makeNewAgg(ctx sessionctx.Context, aggFuncs []*aggregation.AggFuncDesc, gbyCols []*expression.Column, aggHints aggHintInfo) (*LogicalAggregation, error) { agg := LogicalAggregation{ GroupByItems: expression.Column2Exprs(gbyCols), groupByCols: gbyCols, + aggHints: aggHints, }.Init(ctx) aggLen := len(aggFuncs) + len(gbyCols) newAggFuncDescs := make([]*aggregation.AggFuncDesc, 0, aggLen) @@ -284,6 +285,7 @@ func (a *aggregationPushDownSolver) pushAggCrossUnion(agg *LogicalAggregation, u newAgg := LogicalAggregation{ AggFuncs: make([]*aggregation.AggFuncDesc, 0, len(agg.AggFuncs)), GroupByItems: make([]expression.Expression, 0, len(agg.GroupByItems)), + aggHints: agg.aggHints, }.Init(ctx) newAgg.SetSchema(agg.schema.Clone()) for _, aggFunc := range agg.AggFuncs { @@ -340,7 +342,7 @@ func (a *aggregationPushDownSolver) aggPushDown(p LogicalPlan) (_ LogicalPlan, e if rightInvalid { rChild = join.children[1] } else { - rChild, err = a.tryToPushDownAgg(rightAggFuncs, rightGbyCols, join, 1) + rChild, err = a.tryToPushDownAgg(rightAggFuncs, rightGbyCols, join, 1, agg.aggHints) if err != nil { return nil, err } @@ -348,7 +350,7 @@ func (a *aggregationPushDownSolver) aggPushDown(p LogicalPlan) (_ LogicalPlan, e if leftInvalid { lChild = join.children[0] } else { - lChild, err = a.tryToPushDownAgg(leftAggFuncs, leftGbyCols, join, 0) + lChild, err = a.tryToPushDownAgg(leftAggFuncs, leftGbyCols, join, 0, agg.aggHints) if err != nil { return nil, err } @@ -380,7 +382,7 @@ func (a *aggregationPushDownSolver) aggPushDown(p LogicalPlan) (_ LogicalPlan, e } else if union, ok1 := child.(*LogicalUnionAll); ok1 { var gbyCols []*expression.Column gbyCols = expression.ExtractColumnsFromExpressions(gbyCols, agg.GroupByItems, nil) - pushedAgg, err := a.makeNewAgg(agg.ctx, agg.AggFuncs, gbyCols) + pushedAgg, err := a.makeNewAgg(agg.ctx, agg.AggFuncs, gbyCols, agg.aggHints) if err != nil { return nil, err } diff --git a/planner/core/testdata/plan_suite_in.json b/planner/core/testdata/plan_suite_in.json index 030fd3f91cc28..a292cab4be475 100644 --- a/planner/core/testdata/plan_suite_in.json +++ b/planner/core/testdata/plan_suite_in.json @@ -381,10 +381,10 @@ ] }, { - "name": "TestIndexJoinHint", + "name": "TestJoinHints", "cases": [ - "select /*+ TIDB_INLJ(t1) */ t1.a, t2.a, t3.a from t t1, t t2, t t3 where t1.a = t2.a and t2.a = t3.a;", - "select /*+ TIDB_INLJ(t1) */ t1.b, t2.a from t t1, t t2 where t1.b = t2.a;", + "select /*+ TIDB_INLJ(t1), USE_INDEX(t1), USE_INDEX(t2), USE_INDEX(t3) */ t1.a, t2.a, t3.a from t t1, t t2, t t3 where t1.a = t2.a and t2.a = t3.a;", + "select /*+ TIDB_INLJ(t1), USE_INDEX(t1), USE_INDEX(t2) */ t1.b, t2.a from t t1, t t2 where t1.b = t2.a;", "select /*+ TIDB_INLJ(t2) */ t1.b, t2.a from t2 t1, t2 t2 where t1.b=t2.b and t2.c=-1;" ] }, diff --git a/planner/core/testdata/plan_suite_out.json b/planner/core/testdata/plan_suite_out.json index 37f089b700b23..29e6b77127585 100644 --- a/planner/core/testdata/plan_suite_out.json +++ b/planner/core/testdata/plan_suite_out.json @@ -865,15 +865,15 @@ "Cases": [ { "SQL": "SELECT /*+ TIDB_SMJ(t3, t4) */ * from t t1, t t2 where t1.a = t2.a", - "Warning": "[planner:1815]There are no matching table names for (t3, t4) in optimizer hint /*+ TIDB_SMJ(t3, t4) */. 
Maybe you can use the table alias name" + "Warning": "[planner:1815]There are no matching table names for (t3, t4) in optimizer hint /*+ SM_JOIN(t3, t4) */ or /*+ TIDB_SMJ(t3, t4) */. Maybe you can use the table alias name" }, { "SQL": "SELECT /*+ TIDB_HJ(t3, t4) */ * from t t1, t t2 where t1.a = t2.a", - "Warning": "[planner:1815]There are no matching table names for (t3, t4) in optimizer hint /*+ TIDB_HJ(t3, t4) */. Maybe you can use the table alias name" + "Warning": "[planner:1815]There are no matching table names for (t3, t4) in optimizer hint /*+ HASH_JOIN(t3, t4) */ or /*+ TIDB_HJ(t3, t4) */. Maybe you can use the table alias name" }, { "SQL": "SELECT /*+ TIDB_INLJ(t3, t4) */ * from t t1, t t2 where t1.a = t2.a", - "Warning": "[planner:1815]There are no matching table names for (t3, t4) in optimizer hint /*+ TIDB_INLJ(t3, t4) */. Maybe you can use the table alias name" + "Warning": "[planner:1815]There are no matching table names for (t3, t4) in optimizer hint /*+ INL_JOIN(t3, t4) */ or /*+ TIDB_INLJ(t3, t4) */. Maybe you can use the table alias name" }, { "SQL": "SELECT /*+ TIDB_SMJ(t1, t2) */ * from t t1, t t2 where t1.a = t2.a", @@ -881,27 +881,27 @@ }, { "SQL": "SELECT /*+ TIDB_SMJ(t3, t4) */ * from t t1, t t2, t t3 where t1.a = t2.a and t2.a = t3.a", - "Warning": "[planner:1815]There are no matching table names for (t4) in optimizer hint /*+ TIDB_SMJ(t3, t4) */. Maybe you can use the table alias name" + "Warning": "[planner:1815]There are no matching table names for (t4) in optimizer hint /*+ SM_JOIN(t3, t4) */ or /*+ TIDB_SMJ(t3, t4) */. Maybe you can use the table alias name" } ] }, { - "Name": "TestIndexJoinHint", + "Name": "TestJoinHints", "Cases": [ { - "SQL": "select /*+ TIDB_INLJ(t1) */ t1.a, t2.a, t3.a from t t1, t t2, t t3 where t1.a = t2.a and t2.a = t3.a;", + "SQL": "select /*+ TIDB_INLJ(t1), USE_INDEX(t1), USE_INDEX(t2), USE_INDEX(t3) */ t1.a, t2.a, t3.a from t t1, t t2, t t3 where t1.a = t2.a and t2.a = t3.a;", "Best": "MergeInnerJoin{TableReader(Table(t))->IndexJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t2.a,test.t1.a)}(test.t3.a,test.t2.a)->Projection", "Warning": "" }, { - "SQL": "select /*+ TIDB_INLJ(t1) */ t1.b, t2.a from t t1, t t2 where t1.b = t2.a;", - "Best": "LeftHashJoin{TableReader(Table(t))->IndexReader(Index(t.c_d_e)[[NULL,+inf]])}(test.t1.b,test.t2.a)", - "Warning": "[planner:1815]Optimizer Hint /*+ TIDB_INLJ(t1) */ is inapplicable" + "SQL": "select /*+ TIDB_INLJ(t1), USE_INDEX(t1), USE_INDEX(t2) */ t1.b, t2.a from t t1, t t2 where t1.b = t2.a;", + "Best": "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t))}(test.t1.b,test.t2.a)", + "Warning": "[planner:1815]Optimizer Hint /*+ INL_JOIN(t1) */ or /*+ TIDB_INLJ(t1) */ is inapplicable" }, { "SQL": "select /*+ TIDB_INLJ(t2) */ t1.b, t2.a from t2 t1, t2 t2 where t1.b=t2.b and t2.c=-1;", "Best": "IndexJoin{IndexReader(Index(t2.b)[[NULL,+inf]])->IndexReader(Index(t2.b_c)[[NULL,+inf]]->Sel([eq(test.t2.c, -1)]))}(test.t2.b,test.t1.b)->Projection", - "Warning": "[planner:1815]Optimizer Hint /*+ TIDB_INLJ(t2) */ is inapplicable" + "Warning": "[planner:1815]Optimizer Hint /*+ INL_JOIN(t2) */ or /*+ TIDB_INLJ(t2) */ is inapplicable" } ] }, diff --git a/planner/optimize.go b/planner/optimize.go index a0f3008802dc0..c9da8c010641d 100644 --- a/planner/optimize.go +++ b/planner/optimize.go @@ -35,7 +35,9 @@ func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in // build logical plan sctx.GetSessionVars().PlanID = 0 sctx.GetSessionVars().PlanColumnID = 0 - builder := 
plannercore.NewPlanBuilder(sctx, is) + hintProcessor := &plannercore.BlockHintProcessor{Ctx: sctx} + node.Accept(hintProcessor) + builder := plannercore.NewPlanBuilder(sctx, is, hintProcessor) p, err := builder.Build(ctx, node) if err != nil { return nil, err diff --git a/store/tikv/region_cache_test.go b/store/tikv/region_cache_test.go index cd4f2213ba435..e98b94e644545 100644 --- a/store/tikv/region_cache_test.go +++ b/store/tikv/region_cache_test.go @@ -774,7 +774,7 @@ func (s *testRegionCacheSuite) TestReplaceNewAddrAndOldOfflineImmediately(c *C) s.cluster.ChangeLeader(s.region1, s.peer2) loc, err = client.regionCache.LocateKey(s.bo, testKey) c.Assert(err, IsNil) - fctx, err := client.regionCache.GetRPCContext(s.bo, loc.Region) + fctx, err := client.regionCache.GetRPCContext(s.bo, loc.Region, kv.ReplicaReadLeader, 0) c.Assert(err, IsNil) c.Assert(fctx.Store.storeID, Equals, s.store2) c.Assert(fctx.Addr, Equals, "store2")
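
A minimal sketch of the wiring this patch introduces in planner/optimize.go: the statement is walked once to collect comment-style block hints (QB_NAME, USE_INDEX, HASH_AGG/STREAM_AGG, AGG_TO_COP, and the SM_JOIN/HASH_JOIN/INL_JOIN aliases exercised by the tests above), and the resulting processor is handed to the plan builder so hints can be resolved against the query block they were written in. The function name buildWithHints is illustrative only; the identifiers inside it mirror the optimize.go hunk above, and the imports are assumed to match that file.

	// buildWithHints sketches the call order used by planner.Optimize in this
	// patch; it is an illustration of the wiring, not the exact upstream code.
	func buildWithHints(ctx context.Context, sctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema) (plannercore.Plan, error) {
		// BlockHintProcessor is passed to Accept, so it visits every query
		// block and records hints such as /*+ QB_NAME(qb) */ and
		// /*+ USE_INDEX(@qb t, idx) */ before planning starts.
		hintProcessor := &plannercore.BlockHintProcessor{Ctx: sctx}
		node.Accept(hintProcessor)
		// The builder now receives the processor, so getPossibleAccessPaths
		// and the aggregation rules can consult comment-style hints as well
		// as the classic USE INDEX / IGNORE INDEX table options.
		builder := plannercore.NewPlanBuilder(sctx, is, hintProcessor)
		return builder.Build(ctx, node)
	}

As the test cases show, a USE_INDEX hint naming a non-existent index now produces a [planner:1815] warning instead of an error, and inapplicable hints (for example STREAM_AGG when aggregation is pushed across a join, or AGG_TO_COP when the aggregation cannot be pushed to the coprocessor) likewise surface as warnings rather than failing the statement.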