planner: support a hint to force using an IndexMerge path (#12843)
hailanwhu authored and sre-bot committed Nov 27, 2019
1 parent e4bdb7b commit 596fb64
Showing 16 changed files with 247 additions and 22 deletions.
30 changes: 30 additions & 0 deletions cmd/explaintest/r/explain_indexmerge.result
@@ -81,3 +81,33 @@ label = "cop"
"IndexMerge_12" -> "Selection_11"
}

set session tidb_enable_index_merge = off;
explain select /*+ use_index_merge(t, tb, tc) */ * from t where b < 50 or c < 5000000;
id count task operator info
IndexMerge_8 5000000.00 root
├─IndexScan_5 49.00 cop[tikv] table:t, index:b, range:[-inf,50), keep order:false
├─IndexScan_6 4999999.00 cop[tikv] table:t, index:c, range:[-inf,5000000), keep order:false
└─TableScan_7 5000000.00 cop[tikv] table:t, keep order:false
explain select /*+ use_index_merge(t, tb, tc) */ * from t where (b < 10000 or c < 10000) and (a < 10 or d < 10) and f < 10;
id count task operator info
IndexMerge_9 0.00 root
├─IndexScan_5 9999.00 cop[tikv] table:t, index:b, range:[-inf,10000), keep order:false
├─IndexScan_6 9999.00 cop[tikv] table:t, index:c, range:[-inf,10000), keep order:false
└─Selection_8 0.00 cop[tikv] lt(Column#6, 10), or(lt(Column#1, 10), lt(Column#4, 10))
  └─TableScan_7 19998.00 cop[tikv] table:t, keep order:false
explain select /*+ use_index_merge(t, tb) */ * from t where b < 50 or c < 5000000;
id count task operator info
TableReader_7 4000000.00 root data:Selection_6
└─Selection_6 4000000.00 cop[tikv] or(lt(Column#2, 50), lt(Column#3, 5000000))
  └─TableScan_5 5000000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false
explain select /*+ no_index_merge(), use_index_merge(t, tb, tc) */ * from t where b < 50 or c < 5000000;
id count task operator info
TableReader_7 4000000.00 root data:Selection_6
└─Selection_6 4000000.00 cop[tikv] or(lt(Column#2, 50), lt(Column#3, 5000000))
  └─TableScan_5 5000000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false
explain select /*+ use_index_merge(t, primary, tb) */ * from t where a < 50 or b < 5000000;
id count task operator info
IndexMerge_8 5000000.00 root
├─TableScan_5 49.00 cop[tikv] table:t, range:[-inf,50), keep order:false
├─IndexScan_6 4999999.00 cop[tikv] table:t, index:b, range:[-inf,5000000), keep order:false
└─TableScan_7 5000000.00 cop[tikv] table:t, keep order:false
11 changes: 11 additions & 0 deletions cmd/explaintest/t/explain_indexmerge.test
@@ -3,8 +3,10 @@ create table t (a int primary key, b int, c int, d int, e int, f int);
create index tb on t (b);
create index tc on t (c);
create index td on t (d);
# generate a, b, c, d, e, f from 0 to 5000000 and a = b = c = d = e = f
load stats 's/explain_indexmerge_stats_t.json';
set session tidb_enable_index_merge = on;
# choose the best plan based on cost
explain select * from t where a < 50 or b < 50;
explain select * from t where (a < 50 or b < 50) and f > 100;
explain select * from t where a < 50 or b < 5000000;
@@ -13,3 +15,12 @@ explain select * from t where b < 50 or c < 5000000;
explain select * from t where a < 50 or b < 50 or c < 50;
explain select * from t where (b < 10000 or c < 10000) and (a < 10 or d < 10) and f < 10;
explain format="dot" select * from t where (a < 50 or b < 50) and f > 100;
set session tidb_enable_index_merge = off;
# the hint forces IndexMerge even though tidb_enable_index_merge is off
explain select /*+ use_index_merge(t, tb, tc) */ * from t where b < 50 or c < 5000000;
explain select /*+ use_index_merge(t, tb, tc) */ * from t where (b < 10000 or c < 10000) and (a < 10 or d < 10) and f < 10;
explain select /*+ use_index_merge(t, tb) */ * from t where b < 50 or c < 5000000;
# no_index_merge hint
explain select /*+ no_index_merge(), use_index_merge(t, tb, tc) */ * from t where b < 50 or c < 5000000;
# a TableScan can be a partial path used to fetch handles
explain select /*+ use_index_merge(t, primary, tb) */ * from t where a < 50 or b < 5000000;
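A warning accompanies the fallback case above (see planner/core/stats.go below); a minimal sketch of how the test could surface it, assuming the explaintest harness echoes SHOW WARNINGS output:

# the hinted index alone cannot cover `b < 50 or c < 5000000`, so a regular plan plus a warning is expected
explain select /*+ use_index_merge(t, tb) */ * from t where b < 50 or c < 5000000;
show warnings;
# expected to contain: IndexMerge is inapplicable or disabled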
3 changes: 1 addition & 2 deletions executor/executor.go
@@ -1468,8 +1468,7 @@ func handleStmtHints(hints []*ast.TableOptimizerHint) (stmtHints stmtctx.StmtHin
warn := errors.New("There are multiple NO_INDEX_MERGE hints, only the last one will take effect")
warns = append(warns, warn)
}
stmtHints.HasEnableIndexMergeHint = true
stmtHints.EnableIndexMerge = false
stmtHints.NoIndexMergeHint = true
}
// Handle READ_CONSISTENT_REPLICA
if readReplicaHintCnt != 0 {
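The retained duplicate-hint warning and the new NoIndexMergeHint flag can presumably be exercised as follows (query shape assumed; the warning text is taken from the code above, and the flag is consumed in planner/core/stats.go below):

select /*+ no_index_merge(), no_index_merge(), use_index_merge(t, tb, tc) */ * from t where b < 50 or c < 5000000;
# expected warning: There are multiple NO_INDEX_MERGE hints, only the last one will take effect
# the use_index_merge hint is then ignored because StmtCtx.NoIndexMergeHint is set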
2 changes: 2 additions & 0 deletions planner/core/find_best_task.go
@@ -408,6 +408,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty) (t task, err

t = invalidTask
candidates := ds.skylinePruning(prop)

for _, candidate := range candidates {
path := candidate.path
if path.partialIndexPaths != nil {
@@ -450,6 +451,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty) (t task, err
t = idxTask
}
}

return
}

20 changes: 20 additions & 0 deletions planner/core/hints.go
@@ -300,6 +300,26 @@ func genHintsFromPhysicalPlan(p PhysicalPlan, nodeType nodeType) (res []*ast.Tab
Tables: []ast.HintTable{{DBName: index.DBName, TableName: getTableName(index.Table.Name, index.TableAsName)}},
Indexes: []model.CIStr{index.Index.Name},
})
case *PhysicalIndexMergeReader:
indexes := make([]model.CIStr, 0, 2)
var tableName model.CIStr
var tableAsName *model.CIStr
for _, partialPlan := range pp.PartialPlans {
if index, ok := partialPlan[0].(*PhysicalIndexScan); ok {
indexes = append(indexes, index.Index.Name)
tableName = index.Table.Name
tableAsName = index.TableAsName
} else {
indexName := model.NewCIStr("PRIMARY")
indexes = append(indexes, indexName)
}
}
res = append(res, &ast.TableOptimizerHint{
QBName: generateQBName(nodeType, pp.blockOffset),
HintName: model.NewCIStr(HintIndexMerge),
Tables: []ast.HintTable{{TableName: getTableName(tableName, tableAsName)}},
Indexes: indexes,
})
case *PhysicalHashAgg:
res = append(res, &ast.TableOptimizerHint{
QBName: generateQBName(nodeType, pp.blockOffset),
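For the first forced plan in explain_indexmerge.result above, this case would round-trip to a use_index_merge hint listing the table and each partial index, with a table-scan partial path rendered as PRIMARY; the exact query-block prefix and quoting depend on generateQBName and the hint restorer and are assumed here:

/*+ USE_INDEX_MERGE(@`sel_1` `t` `tb`, `tc`) */
/*+ USE_INDEX_MERGE(@`sel_1` `t` `PRIMARY`, `tb`) */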
26 changes: 25 additions & 1 deletion planner/core/logical_plan_builder.go
@@ -81,6 +81,8 @@
HintTiFlash = "tiflash"
// HintTiKV is a label represents the tikv storage type.
HintTiKV = "tikv"
// HintIndexMerge is a hint to force using multiple indexes at the same time.
HintIndexMerge = "use_index_merge"
)

const (
@@ -2127,7 +2129,7 @@ func (b *PlanBuilder) pushTableHints(hints []*ast.TableOptimizerHint, nodeType n
hints = b.hintProcessor.getCurrentStmtHints(hints, nodeType, currentLevel)
var (
sortMergeTables, INLJTables, INLHJTables, INLMJTables, hashJoinTables []hintTableInfo
indexHintList []indexHintInfo
indexHintList, indexMergeHintList []indexHintInfo
tiflashTables, tikvTables []hintTableInfo
aggHints aggHintInfo
)
@@ -2188,6 +2190,17 @@ func (b *PlanBuilder) pushTableHints(hints []*ast.TableOptimizerHint, nodeType n
if hint.StoreType.L == HintTiKV {
tikvTables = tableNames2HintTableInfo(b.ctx, hint.Tables, b.hintProcessor, nodeType, currentLevel)
}
case HintIndexMerge:
if len(hint.Tables) != 0 {
indexMergeHintList = append(indexMergeHintList, indexHintInfo{
tblName: hint.Tables[0].TableName,
indexHint: &ast.IndexHint{
IndexNames: hint.Indexes,
HintType: ast.HintUse,
HintScope: ast.HintForScan,
},
})
}
default:
// ignore hints that are not implemented
}
@@ -2200,6 +2213,7 @@ func (b *PlanBuilder) pushTableHints(hints []*ast.TableOptimizerHint, nodeType n
tiflashTables: tiflashTables,
tikvTables: tikvTables,
aggHints: aggHints,
indexMergeHintList: indexMergeHintList,
})
}

@@ -2529,13 +2543,23 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as
statisticTable = getStatsTable(b.ctx, tbl.Meta(), tbl.Meta().ID)
}

// Extract the IndexMerge hints that target this table.
var indexMergeHints []*ast.IndexHint
if hints := b.TableHints(); hints != nil {
for _, hint := range hints.indexMergeHintList {
if hint.tblName.L == tblName.L {
indexMergeHints = append(indexMergeHints, hint.indexHint)
}
}
}
ds := DataSource{
DBName: dbName,
TableAsName: asName,
table: tbl,
tableInfo: tableInfo,
statisticTable: statisticTable,
indexHints: tn.IndexHints,
indexMergeHints: indexMergeHints,
possibleAccessPaths: possiblePaths,
Columns: make([]*model.ColumnInfo, 0, len(columns)),
partitionNames: tn.PartitionNames,
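Because only hint.Tables[0] is read from each hint, a query that wants IndexMerge on several tables repeats the hint once per table; a hypothetical example (table and index names assumed, not taken from the test schema):

explain select /*+ use_index_merge(t1, ia, ib), use_index_merge(t2, ic, id) */ * from t1, t2 where (t1.a < 1 or t1.b < 1) and (t2.c < 1 or t2.d < 1);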
3 changes: 2 additions & 1 deletion planner/core/logical_plans.go
@@ -388,7 +388,8 @@ type DataSource struct {
DBName model.CIStr

TableAsName *model.CIStr

// indexMergeHints are the hints for IndexMerge.
indexMergeHints []*ast.IndexHint
// pushedDownConds are the conditions that will be pushed down to coprocessor.
pushedDownConds []expression.Expression
// allConds contains all the filters on this table. For now it's maintained
50 changes: 50 additions & 0 deletions planner/core/physical_plan_test.go
@@ -21,6 +21,7 @@ import (
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/planner"
@@ -966,6 +967,55 @@ func (s *testPlanSuite) TestIndexHint(c *C) {
}
}

func (s *testPlanSuite) TestIndexMergeHint(c *C) {
defer testleak.AfterTest(c)()
store, dom, err := newStoreWithBootstrap()
c.Assert(err, IsNil)
defer func() {
dom.Close()
store.Close()
}()
se, err := session.CreateSession4Test(store)
c.Assert(err, IsNil)
_, err = se.Execute(context.Background(), "use test")
c.Assert(err, IsNil)

var input []string
var output []struct {
SQL string
Best string
HasWarn bool
Hints string
}
s.testData.GetTestCases(c, &input, &output)
ctx := context.Background()
for i, test := range input {
comment := Commentf("case:%v sql:%s", i, test)
se.GetSessionVars().StmtCtx.SetWarnings(nil)
stmt, err := s.ParseOneStmt(test, "", "")
c.Assert(err, IsNil, comment)
sctx := se.(sessionctx.Context)
err = executor.ResetContextOfStmt(sctx, stmt)
c.Assert(err, IsNil)
p, _, err := planner.Optimize(ctx, se, stmt, s.is)
c.Assert(err, IsNil)
s.testData.OnRecord(func() {
output[i].SQL = test
output[i].Best = core.ToString(p)
output[i].HasWarn = len(se.GetSessionVars().StmtCtx.GetWarnings()) > 0
output[i].Hints = core.GenHintsFromPhysicalPlan(p)
})
c.Assert(core.ToString(p), Equals, output[i].Best, comment)
warnings := se.GetSessionVars().StmtCtx.GetWarnings()
if output[i].HasWarn {
c.Assert(warnings, HasLen, 1, comment)
} else {
c.Assert(warnings, HasLen, 0, comment)
}
c.Assert(core.GenHintsFromPhysicalPlan(p), Equals, output[i].Hints, comment)
}
}

func (s *testPlanSuite) TestQueryBlockHint(c *C) {
defer testleak.AfterTest(c)()
store, dom, err := newStoreWithBootstrap()
1 change: 1 addition & 0 deletions planner/core/planbuilder.go
@@ -70,6 +70,7 @@ type tableHintInfo struct {
tiflashTables []hintTableInfo
tikvTables []hintTableInfo
aggHints aggHintInfo
indexMergeHintList []indexHintInfo
}

type hintTableInfo struct {
64 changes: 54 additions & 10 deletions planner/core/stats.go
@@ -16,6 +16,7 @@ package core
import (
"math"

"github.com/pingcap/errors"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/tidb/expression"
@@ -196,21 +197,42 @@ func (ds *DataSource) DeriveStats(childStats []*property.StatsInfo, selfSchema *
}
}
// Consider the IndexMergePath. Currently, we only generate `IndexMergePath` in the DNF case.
if len(ds.pushedDownConds) > 0 && len(ds.possibleAccessPaths) > 1 && ds.ctx.GetSessionVars().GetEnableIndexMerge() {
needConsiderIndexMerge := true
for i := 1; i < len(ds.possibleAccessPaths); i++ {
if len(ds.possibleAccessPaths[i].accessConds) != 0 {
needConsiderIndexMerge = false
break
}
}
if needConsiderIndexMerge {
ds.generateIndexMergeOrPaths()
isPossibleIdxMerge := len(ds.pushedDownConds) > 0 && len(ds.possibleAccessPaths) > 1
sessionAndStmtPermission := (ds.ctx.GetSessionVars().GetEnableIndexMerge() || ds.indexMergeHints != nil) && !ds.ctx.GetSessionVars().StmtCtx.NoIndexMergeHint
// If any index path already has non-empty access conditions, we currently do not consider `IndexMergePath`.
needConsiderIndexMerge := true
for i := 1; i < len(ds.possibleAccessPaths); i++ {
if len(ds.possibleAccessPaths[i].accessConds) != 0 {
needConsiderIndexMerge = false
break
}
}
if isPossibleIdxMerge && sessionAndStmtPermission && needConsiderIndexMerge {
ds.generateAndPruneIndexMergePath()
} else if ds.indexMergeHints != nil {
ds.indexMergeHints = nil
ds.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("IndexMerge is inapplicable or disabled"))
}
return ds.stats, nil
}

func (ds *DataSource) generateAndPruneIndexMergePath() {
regularPathCount := len(ds.possibleAccessPaths)
ds.generateIndexMergeOrPaths()
// Without hints, reaching here means `enableIndexMerge` is true, so keep all generated paths.
if ds.indexMergeHints == nil {
return
}
// With hints but no IndexMerge path generated, drop the hints and warn.
if regularPathCount == len(ds.possibleAccessPaths) {
ds.indexMergeHints = nil
ds.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("IndexMerge is inapplicable or disabled"))
return
}
// There is no need to consider the regular paths in find_best_task().
ds.possibleAccessPaths = ds.possibleAccessPaths[regularPathCount:]
}

// DeriveStats implement LogicalPlan DeriveStats interface.
func (ts *TableScan) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (_ *property.StatsInfo, err error) {
// PushDownNot here can convert query 'not (a != 1)' to 'a = 1'.
@@ -272,12 +294,31 @@ func (ds *DataSource) generateIndexMergeOrPaths() {
}
}

// isInIndexMergeHints checks whether the current index or primary key is in the IndexMerge hints.
func (ds *DataSource) isInIndexMergeHints(name string) bool {
if ds.indexMergeHints == nil ||
(len(ds.indexMergeHints) == 1 && ds.indexMergeHints[0].IndexNames == nil) {
return true
}
for _, hint := range ds.indexMergeHints {
for _, index := range hint.IndexNames {
if name == index.L {
return true
}
}
}
return false
}

// accessPathsForConds generates all possible index paths for conditions.
func (ds *DataSource) accessPathsForConds(conditions []expression.Expression, usedIndexCount int) []*accessPath {
var results = make([]*accessPath, 0, usedIndexCount)
for i := 0; i < usedIndexCount; i++ {
path := &accessPath{}
if ds.possibleAccessPaths[i].isTablePath {
if !ds.isInIndexMergeHints("primary") {
continue
}
path.isTablePath = true
noIntervalRanges, err := ds.deriveTablePathStats(path, conditions, true)
if err != nil {
@@ -292,6 +333,9 @@ func (ds *DataSource) accessPathsForConds(conditions []expression.Expression, us
}
} else {
path.index = ds.possibleAccessPaths[i].index
if !ds.isInIndexMergeHints(path.index.Name.L) {
continue
}
noIntervalRanges, err := ds.deriveIndexPathStats(path, conditions, true)
if err != nil {
logutil.BgLogger().Debug("can not derive statistics of a path", zap.Error(err))
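Taken together with isInIndexMergeHints, the gating in DeriveStats above implies the behaviour sketched below in explaintest style; the two statements with explicit index lists appear in the updated test file, while the bare use_index_merge(t) form is assumed to be accepted by the parser:

set session tidb_enable_index_merge = off;
# the hint alone enables IndexMerge for this statement
explain select /*+ use_index_merge(t, tb, tc) */ * from t where b < 50 or c < 5000000;
# with no index list, every candidate index and the primary key stay eligible
explain select /*+ use_index_merge(t) */ * from t where b < 50 or c < 5000000;
# NO_INDEX_MERGE wins over use_index_merge via StmtCtx.NoIndexMergeHint
explain select /*+ no_index_merge(), use_index_merge(t, tb, tc) */ * from t where b < 50 or c < 5000000;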
9 changes: 9 additions & 0 deletions planner/core/stringer.go
@@ -179,6 +179,15 @@ func toString(in Plan, strs []string, idxs []int) ([]string, []int) {
str = fmt.Sprintf("IndexReader(%s)", ToString(x.indexPlan))
case *PhysicalIndexLookUpReader:
str = fmt.Sprintf("IndexLookUp(%s, %s)", ToString(x.indexPlan), ToString(x.tablePlan))
case *PhysicalIndexMergeReader:
str = "IndexMergeReader(PartialPlans->["
for i, partialPlan := range x.partialPlans {
if i > 0 {
str += ", "
}
str += ToString(partialPlan)
}
str += "], TablePlan->" + ToString(x.tablePlan) + ")"
case *PhysicalUnionScan:
str = fmt.Sprintf("UnionScan(%s)", x.Conditions)
case *PhysicalIndexJoin:
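For a merge over t's tb and tc indexes plus the table plan, the string built by this case would read roughly as follows; the partial-plan rendering follows the existing Index/Table cases and is assumed:

IndexMergeReader(PartialPlans->[Index(t.tb)[[-inf,50)], Index(t.tc)[[-inf,5000000)]], TablePlan->Table(t))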
9 changes: 9 additions & 0 deletions planner/core/testdata/plan_suite_in.json
@@ -51,6 +51,15 @@
"select /*+ USE_INDEX(t, c_d_e, f), IGNORE_INDEX(t, c_d_e) */ c from t order by c"
]
},
{
"name": "TestIndexMergeHint",
"cases": [
"select /*+ USE_INDEX_MERGE(t, c_d_e, f_g) */ * from t where c < 1 or f > 2",
"select /*+ USE_INDEX_MERGE(t, primary, f_g) */ * from t where a < 1 or f > 2",
"select /*+ USE_INDEX_MERGE(t, primary, f_g, c_d_e) */ * from t where a < 1 or f > 2",
"select /*+ NO_INDEX_MERGE(), USE_INDEX_MERGE(t, primary, f_g, c_d_e) */ * from t where a < 1 or f > 2"
]
},
{
"name": "TestDAGPlanBuilderSimpleCase",
"cases":[