diff --git a/config/config.go b/config/config.go index 854546efda698..d6babffa84bb1 100644 --- a/config/config.go +++ b/config/config.go @@ -104,10 +104,11 @@ type Log struct { // File log config. File logutil.FileLogConfig `toml:"file" json:"file"` - SlowQueryFile string `toml:"slow-query-file" json:"slow-query-file"` - SlowThreshold uint64 `toml:"slow-threshold" json:"slow-threshold"` - ExpensiveThreshold uint `toml:"expensive-threshold" json:"expensive-threshold"` - QueryLogMaxLen uint64 `toml:"query-log-max-len" json:"query-log-max-len"` + SlowQueryFile string `toml:"slow-query-file" json:"slow-query-file"` + SlowThreshold uint64 `toml:"slow-threshold" json:"slow-threshold"` + ExpensiveThreshold uint `toml:"expensive-threshold" json:"expensive-threshold"` + QueryLogMaxLen uint64 `toml:"query-log-max-len" json:"query-log-max-len"` + RecordPlanInSlowLog uint32 `toml:"record-plan-in-slow-log" json:"record-plan-in-slow-log"` } // Security is the security section of the config. @@ -341,13 +342,14 @@ var defaultConf = Config{ }, LowerCaseTableNames: 2, Log: Log{ - Level: "info", - Format: "text", - File: logutil.NewFileLogConfig(true, logutil.DefaultLogMaxSize), - SlowQueryFile: "tidb-slow.log", - SlowThreshold: logutil.DefaultSlowThreshold, - ExpensiveThreshold: 10000, - QueryLogMaxLen: logutil.DefaultQueryLogMaxLen, + Level: "info", + Format: "text", + File: logutil.NewFileLogConfig(true, logutil.DefaultLogMaxSize), + SlowQueryFile: "tidb-slow.log", + SlowThreshold: logutil.DefaultSlowThreshold, + ExpensiveThreshold: 10000, + QueryLogMaxLen: logutil.DefaultQueryLogMaxLen, + RecordPlanInSlowLog: logutil.DefaultRecordPlanInSlowLog, }, Status: Status{ ReportStatus: true, diff --git a/config/config.toml.example b/config/config.toml.example index fbd375955719c..c56c56ff2e03c 100644 --- a/config/config.toml.example +++ b/config/config.toml.example @@ -73,6 +73,10 @@ slow-query-file = "tidb-slow.log" # Queries with execution time greater than this value will be logged. (Milliseconds) slow-threshold = 300 +# record-plan-in-slow-log is used to enable record query plan in slow log. +# 0 is disable. 1 is enable. +record-plan-in-slow-log = 1 + # Queries with internal result greater than this value will be logged. expensive-threshold = 10000 diff --git a/executor/adapter.go b/executor/adapter.go index 758ea8d0ef3a7..a2eb59239570f 100644 --- a/executor/adapter.go +++ b/executor/adapter.go @@ -687,6 +687,7 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool, hasMoreResults bool) { ExecDetail: execDetail, MemMax: memMax, Succ: succ, + Plan: getPlanTree(a.Plan), Prepared: a.isPreparedStmt, HasMoreResults: hasMoreResults, } @@ -722,6 +723,35 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool, hasMoreResults bool) { } } +// getPlanTree will try to get the select plan tree if the plan is select or the select plan of delete/update/insert statement. 
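+// It returns an empty string when record-plan-in-slow-log is disabled or when no select plan can be extracted from the statement.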
+func getPlanTree(p plannercore.Plan) string { + cfg := config.GetGlobalConfig() + if atomic.LoadUint32(&cfg.Log.RecordPlanInSlowLog) == 0 { + return "" + } + var selectPlan plannercore.PhysicalPlan + if physicalPlan, ok := p.(plannercore.PhysicalPlan); ok { + selectPlan = physicalPlan + } else { + switch x := p.(type) { + case *plannercore.Delete: + selectPlan = x.SelectPlan + case *plannercore.Update: + selectPlan = x.SelectPlan + case *plannercore.Insert: + selectPlan = x.SelectPlan + } + } + if selectPlan == nil { + return "" + } + planTree := plannercore.EncodePlan(selectPlan) + if len(planTree) == 0 { + return planTree + } + return variable.SlowLogPlanPrefix + planTree + variable.SlowLogPlanSuffix +} + // SummaryStmt collects statements for performance_schema.events_statements_summary_by_digest func (a *ExecStmt) SummaryStmt() { sessVars := a.Ctx.GetSessionVars() diff --git a/executor/set_test.go b/executor/set_test.go index 53529c6ef7f49..8f0f1d78cc03a 100644 --- a/executor/set_test.go +++ b/executor/set_test.go @@ -381,6 +381,11 @@ func (s *testSuite2) TestSetVar(c *C) { tk.MustExec("set @@tidb_expensive_query_time_threshold=70") tk.MustQuery("select @@tidb_expensive_query_time_threshold;").Check(testkit.Rows("70")) + + tk.MustExec("set @@tidb_record_plan_in_slow_log = 1") + tk.MustQuery("select @@tidb_record_plan_in_slow_log;").Check(testkit.Rows("1")) + tk.MustExec("set @@tidb_record_plan_in_slow_log = 0") + tk.MustQuery("select @@tidb_record_plan_in_slow_log;").Check(testkit.Rows("0")) } func (s *testSuite2) TestSetCharset(c *C) { diff --git a/expression/builtin.go b/expression/builtin.go index f779687d863f0..e86ec51d85614 100644 --- a/expression/builtin.go +++ b/expression/builtin.go @@ -415,7 +415,6 @@ var funcs = map[string]functionClass{ ast.Year: &yearFunctionClass{baseFunctionClass{ast.Year, 1, 1}}, ast.YearWeek: &yearWeekFunctionClass{baseFunctionClass{ast.YearWeek, 1, 2}}, ast.LastDay: &lastDayFunctionClass{baseFunctionClass{ast.LastDay, 1, 1}}, - ast.TiDBParseTso: &tidbParseTsoFunctionClass{baseFunctionClass{ast.TiDBParseTso, 1, 1}}, // string functions ast.ASCII: &asciiFunctionClass{baseFunctionClass{ast.ASCII, 1, 1}}, @@ -486,9 +485,6 @@ var funcs = map[string]functionClass{ ast.RowCount: &rowCountFunctionClass{baseFunctionClass{ast.RowCount, 0, 0}}, ast.SessionUser: &userFunctionClass{baseFunctionClass{ast.SessionUser, 0, 0}}, ast.SystemUser: &userFunctionClass{baseFunctionClass{ast.SystemUser, 0, 0}}, - // This function is used to show tidb-server version info. - ast.TiDBVersion: &tidbVersionFunctionClass{baseFunctionClass{ast.TiDBVersion, 0, 0}}, - ast.TiDBIsDDLOwner: &tidbIsDDLOwnerFunctionClass{baseFunctionClass{ast.TiDBIsDDLOwner, 0, 0}}, // control functions ast.If: &ifFunctionClass{baseFunctionClass{ast.If, 3, 3}}, @@ -600,4 +596,11 @@ var funcs = map[string]functionClass{ ast.JSONDepth: &jsonDepthFunctionClass{baseFunctionClass{ast.JSONDepth, 1, 1}}, ast.JSONKeys: &jsonKeysFunctionClass{baseFunctionClass{ast.JSONKeys, 1, 2}}, ast.JSONLength: &jsonLengthFunctionClass{baseFunctionClass{ast.JSONLength, 1, 2}}, + + // TiDB internal function. + // This function is used to show tidb-server version info. 
+ ast.TiDBVersion: &tidbVersionFunctionClass{baseFunctionClass{ast.TiDBVersion, 0, 0}}, + ast.TiDBIsDDLOwner: &tidbIsDDLOwnerFunctionClass{baseFunctionClass{ast.TiDBIsDDLOwner, 0, 0}}, + ast.TiDBParseTso: &tidbParseTsoFunctionClass{baseFunctionClass{ast.TiDBParseTso, 1, 1}}, + ast.TiDBDecodePlan: &tidbDecodePlanFunctionClass{baseFunctionClass{ast.TiDBDecodePlan, 1, 1}}, } diff --git a/expression/builtin_info.go b/expression/builtin_info.go index 2e6ab91c6b5c5..fa945845d3e82 100644 --- a/expression/builtin_info.go +++ b/expression/builtin_info.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/plancodec" "github.com/pingcap/tidb/util/printer" ) @@ -44,6 +45,7 @@ var ( _ functionClass = &rowCountFunctionClass{} _ functionClass = &tidbVersionFunctionClass{} _ functionClass = &tidbIsDDLOwnerFunctionClass{} + _ functionClass = &tidbDecodePlanFunctionClass{} ) var ( @@ -589,3 +591,35 @@ func (b *builtinRowCountSig) evalInt(_ chunk.Row) (res int64, isNull bool, err e res = int64(b.ctx.GetSessionVars().StmtCtx.PrevAffectedRows) return res, false, nil } + +type tidbDecodePlanFunctionClass struct { + baseFunctionClass +} + +func (c *tidbDecodePlanFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) { + if err := c.verifyArgs(args); err != nil { + return nil, err + } + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETString, types.ETString) + sig := &builtinTiDBDecodePlanSig{bf} + return sig, nil +} + +type builtinTiDBDecodePlanSig struct { + baseBuiltinFunc +} + +func (b *builtinTiDBDecodePlanSig) Clone() builtinFunc { + newSig := &builtinTiDBDecodePlanSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinTiDBDecodePlanSig) evalString(row chunk.Row) (string, bool, error) { + planString, isNull, err := b.args[0].EvalString(b.ctx, row) + if isNull || err != nil { + return "", isNull, err + } + planTree, err := plancodec.DecodePlan(planString) + return planTree, false, err +} diff --git a/expression/integration_test.go b/expression/integration_test.go index 40ea1c8df4bf3..b4219ec1db11b 100644 --- a/expression/integration_test.go +++ b/expression/integration_test.go @@ -4010,6 +4010,25 @@ func (s *testIntegrationSuite) testTiDBIsOwnerFunc(c *C) { result.Check(testkit.Rows(fmt.Sprintf("%v", ret))) } +func (s *testIntegrationSuite) TestTiDBDecodePlanFunc(c *C) { + tk := testkit.NewTestKit(c, s.store) + defer s.cleanEnv(c) + tk.MustQuery("select tidb_decode_plan('')").Check(testkit.Rows("")) + tk.MustQuery("select tidb_decode_plan('7APIMAk1XzEzCTAJMQlmdW5jczpjb3VudCgxKQoxCTE3XzE0CTAJMAlpbm5lciBqb2luLCBp" + + "AQyQOlRhYmxlUmVhZGVyXzIxLCBlcXVhbDpbZXEoQ29sdW1uIzEsIA0KCDkpIBkXADIVFywxMCldCjIJMzJfMTgFZXhkYXRhOlNlbGVjdGlvbl" + + "8xNwozCTFfMTcJMQkwCWx0HVlATlVMTCksIG5vdChpc251bGwVHAApUhcAUDIpKQo0CTEwXzE2CTEJMTAwMDAJdAHB2Dp0MSwgcmFuZ2U6Wy1p" + + "bmYsK2luZl0sIGtlZXAgb3JkZXI6ZmFsc2UsIHN0YXRzOnBzZXVkbwoFtgAyAZcEMAk6tgAEMjAFtgQyMDq2AAg5LCBmtgAAMFa3AAA5FbcAO" + + "T63AAAyzrcA')").Check(testkit.Rows("" + + "\tStreamAgg_13 \troot\t1 \tfuncs:count(1)\n" + + "\t└─HashLeftJoin_14 \troot\t0 \tinner join, inner:TableReader_21, equal:[eq(Column#1, Column#9) eq(Column#2, Column#10)]\n" + + "\t ├─TableReader_18 \troot\t0 \tdata:Selection_17\n" + + "\t │ └─Selection_17 \tcop \t0 \tlt(Column#1, NULL), not(isnull(Column#1)), not(isnull(Column#2))\n" + + "\t │ └─TableScan_16\tcop \t10000\ttable:t1, range:[-inf,+inf], keep order:false, stats:pseudo\n" 
+ + "\t └─TableReader_21 \troot\t0 \tdata:Selection_20\n" + + "\t └─Selection_20 \tcop \t0 \tlt(Column#9, NULL), not(isnull(Column#10)), not(isnull(Column#9))\n" + + "\t └─TableScan_19\tcop \t10000\ttable:t2, range:[-inf,+inf], keep order:false, stats:pseudo")) +} + func newStoreWithBootstrap() (kv.Storage, *domain.Domain, error) { store, err := mockstore.NewMockTikvStore() if err != nil { diff --git a/go.mod b/go.mod index 9585946cc4fa1..b93421c01fcc7 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4 github.com/gogo/protobuf v1.2.0 github.com/golang/protobuf v1.2.0 - github.com/golang/snappy v0.0.1 // indirect + github.com/golang/snappy v0.0.1 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c github.com/google/uuid v1.1.1 github.com/gorilla/context v1.1.1 // indirect @@ -42,7 +42,7 @@ require ( github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e github.com/pingcap/kvproto v0.0.0-20190918085321-44e3817e1f18 github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd - github.com/pingcap/parser v0.0.0-20190910041007-2a177b291004 + github.com/pingcap/parser v0.0.0-20191018040038-555b97093a2a github.com/pingcap/pd v1.1.0-beta.0.20190912093418-dc03c839debd github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible github.com/pingcap/tipb v0.0.0-20190428032612-535e1abaa330 diff --git a/go.sum b/go.sum index 9259bb3784545..9b40b36d696e7 100644 --- a/go.sum +++ b/go.sum @@ -161,8 +161,8 @@ github.com/pingcap/kvproto v0.0.0-20190918085321-44e3817e1f18 h1:5vQV8S/8B9nE+I+ github.com/pingcap/kvproto v0.0.0-20190918085321-44e3817e1f18/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY= github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd h1:hWDol43WY5PGhsh3+8794bFHY1bPrmu6bTalpssCrGg= github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw= -github.com/pingcap/parser v0.0.0-20190910041007-2a177b291004 h1:LaA55frHvXh8vTYcQj0xNsQiiPb8iU/JcU8cc2HA9Jg= -github.com/pingcap/parser v0.0.0-20190910041007-2a177b291004/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= +github.com/pingcap/parser v0.0.0-20191018040038-555b97093a2a h1:PMjYrxWKdVUlJ77+9YHbYVciDQCyqZ/noS9nIni76KQ= +github.com/pingcap/parser v0.0.0-20191018040038-555b97093a2a/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= github.com/pingcap/pd v1.1.0-beta.0.20190912093418-dc03c839debd h1:bKj6hodu/ro78B0oN2yicdGn0t4yd9XjnyoW95qmWic= github.com/pingcap/pd v1.1.0-beta.0.20190912093418-dc03c839debd/go.mod h1:I7TEby5BHTYIxgHszfsOJSBsk8b2Qt8QrSIgdv5n5QQ= github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible h1:MkWCxgZpJBgY2f4HtwWMMFzSBb3+JPzeJgF3VrXE/bU= diff --git a/infoschema/slow_log.go b/infoschema/slow_log.go index 52ff866b8e3ab..16facca104d6a 100644 --- a/infoschema/slow_log.go +++ b/infoschema/slow_log.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/hack" "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/plancodec" "go.uber.org/zap" ) @@ -61,6 +62,7 @@ var slowQueryCols = []columnInfo{ {variable.SlowLogCopWaitAddr, mysql.TypeVarchar, 64, 0, nil, nil}, {variable.SlowLogMemMax, mysql.TypeLonglong, 20, 0, nil, nil}, {variable.SlowLogSucc, mysql.TypeTiny, 1, 0, nil, nil}, + {variable.SlowLogPlan, mysql.TypeLongBlob, types.UnspecifiedLength, 0, nil, nil}, {variable.SlowLogPrevStmt, mysql.TypeLongBlob, types.UnspecifiedLength, 0, nil, nil}, 
{variable.SlowLogQuerySQLStr, mysql.TypeLongBlob, types.UnspecifiedLength, 0, nil, nil}, } @@ -205,6 +207,7 @@ type slowQueryTuple struct { sql string isInternal bool succ bool + plan string } func (st *slowQueryTuple) setFieldValue(tz *time.Location, field, value string) error { @@ -274,6 +277,8 @@ func (st *slowQueryTuple) setFieldValue(tz *time.Location, field, value string) st.memMax, err = strconv.ParseInt(value, 10, 64) case variable.SlowLogSucc: st.succ, err = strconv.ParseBool(value) + case variable.SlowLogPlan: + st.plan = value case variable.SlowLogQuerySQLStr: st.sql = value } @@ -320,11 +325,26 @@ func (st *slowQueryTuple) convertToDatumRow() []types.Datum { } else { record = append(record, types.NewIntDatum(0)) } + record = append(record, types.NewStringDatum(parsePlan(st.plan))) record = append(record, types.NewStringDatum(st.prevStmt)) record = append(record, types.NewStringDatum(st.sql)) return record } +func parsePlan(planString string) string { + if len(planString) <= len(variable.SlowLogPlanPrefix)+len(variable.SlowLogPlanSuffix) { + return planString + } + planString = planString[len(variable.SlowLogPlanPrefix) : len(planString)-len(variable.SlowLogPlanSuffix)] + decodePlanString, err := plancodec.DecodePlan(planString) + if err == nil { + planString = decodePlanString + } else { + logutil.Logger(context.Background()).Error("decode plan in slow log failed", zap.String("plan", planString), zap.Error(err)) + } + return planString +} + // ParseTime exports for testing. func ParseTime(s string) (time.Time, error) { t, err := time.Parse(logutil.SlowLogTimeFormat, s) diff --git a/infoschema/slow_log_test.go b/infoschema/slow_log_test.go index efb0d02f700fb..9a14b128f3726 100644 --- a/infoschema/slow_log_test.go +++ b/infoschema/slow_log_test.go @@ -55,7 +55,7 @@ select * from t;`) } recordString += str } - expectRecordString := "2019-04-28 15:24:04.309074,405888132465033227,,,0,0.216905,0.021,0,0,1,637,0,,,1,42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772,t1:1,t2:2,0.1,0.2,0.03,127.0.0.1:20160,0.05,0.6,0.8,0.0.0.0:20160,70724,0,update t set i = 1;,select * from t;" + expectRecordString := "2019-04-28 15:24:04.309074,405888132465033227,,,0,0.216905,0.021,0,0,1,637,0,,,1,42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772,t1:1,t2:2,0.1,0.2,0.03,127.0.0.1:20160,0.05,0.6,0.8,0.0.0.0:20160,70724,0,,update t set i = 1;,select * from t;" c.Assert(expectRecordString, Equals, recordString) // fix sql contain '# ' bug diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index 08da19bbfcd68..a92ca358d5c2f 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -458,6 +458,7 @@ func (s *testTableSuite) TestSlowQuery(c *C) { # Cop_wait_avg: 0.05 Cop_wait_p90: 0.6 Cop_wait_max: 0.8 Cop_wait_addr: 0.0.0.0:20160 # Mem_max: 70724 # Succ: true +# Plan: abcd # Prev_stmt: update t set i = 2; select * from t_slim;`)) c.Assert(f.Sync(), IsNil) @@ -467,10 +468,10 @@ select * from t_slim;`)) tk.MustExec("set time_zone = '+08:00';") re := tk.MustQuery("select * from information_schema.slow_query") re.Check(testutil.RowsWithSep("|", - "2019-02-12 19:33:56.571953|406315658548871171|root|127.0.0.1|6|4.895492|0.161|0.101|0.092|1|100001|100000|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|1|update t set i = 2;|select * from t_slim;")) + "2019-02-12 
19:33:56.571953|406315658548871171|root|127.0.0.1|6|4.895492|0.161|0.101|0.092|1|100001|100000|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|1|abcd|update t set i = 2;|select * from t_slim;")) tk.MustExec("set time_zone = '+00:00';") re = tk.MustQuery("select * from information_schema.slow_query") - re.Check(testutil.RowsWithSep("|", "2019-02-12 11:33:56.571953|406315658548871171|root|127.0.0.1|6|4.895492|0.161|0.101|0.092|1|100001|100000|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|1|update t set i = 2;|select * from t_slim;")) + re.Check(testutil.RowsWithSep("|", "2019-02-12 11:33:56.571953|406315658548871171|root|127.0.0.1|6|4.895492|0.161|0.101|0.092|1|100001|100000|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|1|abcd|update t set i = 2;|select * from t_slim;")) // Test for long query. _, err = f.Write([]byte(` diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index a04a0151f4105..94aa35d50cb43 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -34,6 +34,7 @@ import ( driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/kvcache" + "github.com/pingcap/tidb/util/plancodec" "github.com/pingcap/tidb/util/ranger" ) @@ -646,23 +647,6 @@ func (e *Explain) prepareOperatorInfo(p PhysicalPlan, taskType string, indent st e.Rows = append(e.Rows, row) } -const ( - // treeBody indicates the current operator sub-tree is not finished, still - // has child operators to be attached on. - treeBody = '│' - // treeMiddleNode indicates this operator is not the last child of the - // current sub-tree rooted by its parent. - treeMiddleNode = '├' - // treeLastNode indicates this operator is the last child of the current - // sub-tree rooted by its parent. - treeLastNode = '└' - // treeGap is used to represent the gap between the branches of the tree. - treeGap = ' ' - // treeNodeIdentifier is used to replace the treeGap once we need to attach - // a node to a sub-tree. - treeNodeIdentifier = '─' -) - func (e *Explain) prettyIdentifier(id, indent string, isLastChild bool) string { if len(indent) == 0 { return id @@ -670,44 +654,44 @@ func (e *Explain) prettyIdentifier(id, indent string, isLastChild bool) string { indentBytes := []rune(indent) for i := len(indentBytes) - 1; i >= 0; i-- { - if indentBytes[i] != treeBody { + if indentBytes[i] != plancodec.TreeBody { continue } // Here we attach a new node to the current sub-tree by changing - // the closest treeBody to a: - // 1. treeLastNode, if this operator is the last child. - // 2. treeMiddleNode, if this operator is not the last child.. + // the closest TreeBody to a: + // 1. TreeLastNode, if this operator is the last child. + // 2. TreeMiddleNode, if this operator is not the last child.. if isLastChild { - indentBytes[i] = treeLastNode + indentBytes[i] = plancodec.TreeLastNode } else { - indentBytes[i] = treeMiddleNode + indentBytes[i] = plancodec.TreeMiddleNode } break } - // Replace the treeGap between the treeBody and the node to a - // treeNodeIdentifier. - indentBytes[len(indentBytes)-1] = treeNodeIdentifier + // Replace the TreeGap between the TreeBody and the node to a + // TreeNodeIdentifier. 
+ indentBytes[len(indentBytes)-1] = plancodec.TreeNodeIdentifier return string(indentBytes) + id } func (e *Explain) getIndent4Child(indent string, isLastChild bool) string { if !isLastChild { - return string(append([]rune(indent), treeBody, treeGap)) + return string(append([]rune(indent), plancodec.TreeBody, plancodec.TreeGap)) } // If the current node is the last node of the current operator tree, we - // need to end this sub-tree by changing the closest treeBody to a treeGap. + // need to end this sub-tree by changing the closest TreeBody to a TreeGap. indentBytes := []rune(indent) for i := len(indentBytes) - 1; i >= 0; i-- { - if indentBytes[i] == treeBody { - indentBytes[i] = treeGap + if indentBytes[i] == plancodec.TreeBody { + indentBytes[i] = plancodec.TreeGap break } } - return string(append(indentBytes, treeBody, treeGap)) + return string(append(indentBytes, plancodec.TreeBody, plancodec.TreeGap)) } func (e *Explain) prepareDotInfo(p PhysicalPlan) { diff --git a/planner/core/encode.go b/planner/core/encode.go new file mode 100644 index 0000000000000..4abb3ed9b7cd9 --- /dev/null +++ b/planner/core/encode.go @@ -0,0 +1,68 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "bytes" + "sync" + + "github.com/pingcap/tidb/util/plancodec" +) + +var encoderPool = sync.Pool{ + New: func() interface{} { + return &planEncoder{} + }, +} + +type planEncoder struct { + buf bytes.Buffer + encodedPlans map[int]bool +} + +// EncodePlan is used to encodePlan the plan to the plan tree with compressing. +func EncodePlan(p PhysicalPlan) string { + pn := encoderPool.Get().(*planEncoder) + defer encoderPool.Put(pn) + return pn.encodePlanTree(p) +} + +func (pn *planEncoder) encodePlanTree(p PhysicalPlan) string { + pn.encodedPlans = make(map[int]bool) + pn.buf.Reset() + pn.encodePlan(p, true, 0) + return plancodec.Compress(pn.buf.Bytes()) +} + +func (pn *planEncoder) encodePlan(p PhysicalPlan, isRoot bool, depth int) { + plancodec.EncodePlanNode(depth, p.ID(), p.TP(), isRoot, p.statsInfo().RowCount, p.ExplainInfo(), &pn.buf) + pn.encodedPlans[p.ID()] = true + + depth++ + for _, child := range p.Children() { + if pn.encodedPlans[child.ID()] { + continue + } + pn.encodePlan(child.(PhysicalPlan), isRoot, depth) + } + switch copPlan := p.(type) { + case *PhysicalTableReader: + pn.encodePlan(copPlan.tablePlan, false, depth) + case *PhysicalIndexReader: + pn.encodePlan(copPlan.indexPlan, false, depth) + case *PhysicalIndexLookUpReader: + pn.encodePlan(copPlan.indexPlan, false, depth) + pn.encodePlan(copPlan.tablePlan, false, depth) + } +} diff --git a/planner/core/initialize.go b/planner/core/initialize.go index f0f1bddb393bd..95ae4e3f426ba 100644 --- a/planner/core/initialize.go +++ b/planner/core/initialize.go @@ -16,108 +16,42 @@ package core import ( "github.com/pingcap/tidb/planner/property" "github.com/pingcap/tidb/sessionctx" -) - -const ( - // TypeSel is the type of Selection. - TypeSel = "Selection" - // TypeSet is the type of Set. - TypeSet = "Set" - // TypeProj is the type of Projection. 
- TypeProj = "Projection" - // TypeAgg is the type of Aggregation. - TypeAgg = "Aggregation" - // TypeStreamAgg is the type of StreamAgg. - TypeStreamAgg = "StreamAgg" - // TypeHashAgg is the type of HashAgg. - TypeHashAgg = "HashAgg" - // TypeShow is the type of show. - TypeShow = "Show" - // TypeJoin is the type of Join. - TypeJoin = "Join" - // TypeUnion is the type of Union. - TypeUnion = "Union" - // TypeTableScan is the type of TableScan. - TypeTableScan = "TableScan" - // TypeMemTableScan is the type of TableScan. - TypeMemTableScan = "MemTableScan" - // TypeUnionScan is the type of UnionScan. - TypeUnionScan = "UnionScan" - // TypeIdxScan is the type of IndexScan. - TypeIdxScan = "IndexScan" - // TypeSort is the type of Sort. - TypeSort = "Sort" - // TypeTopN is the type of TopN. - TypeTopN = "TopN" - // TypeLimit is the type of Limit. - TypeLimit = "Limit" - // TypeHashLeftJoin is the type of left hash join. - TypeHashLeftJoin = "HashLeftJoin" - // TypeHashRightJoin is the type of right hash join. - TypeHashRightJoin = "HashRightJoin" - // TypeMergeJoin is the type of merge join. - TypeMergeJoin = "MergeJoin" - // TypeIndexJoin is the type of index look up join. - TypeIndexJoin = "IndexJoin" - // TypeApply is the type of Apply. - TypeApply = "Apply" - // TypeMaxOneRow is the type of MaxOneRow. - TypeMaxOneRow = "MaxOneRow" - // TypeExists is the type of Exists. - TypeExists = "Exists" - // TypeDual is the type of TableDual. - TypeDual = "TableDual" - // TypeLock is the type of SelectLock. - TypeLock = "SelectLock" - // TypeInsert is the type of Insert - TypeInsert = "Insert" - // TypeUpdate is the type of Update. - TypeUpdate = "Update" - // TypeDelete is the type of Delete. - TypeDelete = "Delete" - // TypeIndexLookUp is the type of IndexLookUp. - TypeIndexLookUp = "IndexLookUp" - // TypeTableReader is the type of TableReader. - TypeTableReader = "TableReader" - // TypeIndexReader is the type of IndexReader. - TypeIndexReader = "IndexReader" - // TypeWindow is the type of Window. - TypeWindow = "Window" + "github.com/pingcap/tidb/util/plancodec" ) // Init initializes LogicalAggregation. func (la LogicalAggregation) Init(ctx sessionctx.Context) *LogicalAggregation { - la.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeAgg, &la) + la.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeAgg, &la) return &la } // Init initializes LogicalJoin. func (p LogicalJoin) Init(ctx sessionctx.Context) *LogicalJoin { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeJoin, &p) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeJoin, &p) return &p } // Init initializes DataSource. func (ds DataSource) Init(ctx sessionctx.Context) *DataSource { - ds.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeTableScan, &ds) + ds.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeTableScan, &ds) return &ds } // Init initializes LogicalApply. func (la LogicalApply) Init(ctx sessionctx.Context) *LogicalApply { - la.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeApply, &la) + la.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeApply, &la) return &la } // Init initializes LogicalSelection. func (p LogicalSelection) Init(ctx sessionctx.Context) *LogicalSelection { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeSel, &p) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeSel, &p) return &p } // Init initializes PhysicalSelection. 
func (p PhysicalSelection) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalSelection { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeSel, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeSel, &p) p.childrenReqProps = props p.stats = stats return &p @@ -125,19 +59,19 @@ func (p PhysicalSelection) Init(ctx sessionctx.Context, stats *property.StatsInf // Init initializes LogicalUnionScan. func (p LogicalUnionScan) Init(ctx sessionctx.Context) *LogicalUnionScan { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeUnionScan, &p) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeUnionScan, &p) return &p } // Init initializes LogicalProjection. func (p LogicalProjection) Init(ctx sessionctx.Context) *LogicalProjection { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeProj, &p) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeProj, &p) return &p } // Init initializes PhysicalProjection. func (p PhysicalProjection) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalProjection { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeProj, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeProj, &p) p.childrenReqProps = props p.stats = stats return &p @@ -145,13 +79,13 @@ func (p PhysicalProjection) Init(ctx sessionctx.Context, stats *property.StatsIn // Init initializes LogicalUnionAll. func (p LogicalUnionAll) Init(ctx sessionctx.Context) *LogicalUnionAll { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeUnion, &p) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeUnion, &p) return &p } // Init initializes PhysicalUnionAll. func (p PhysicalUnionAll) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalUnionAll { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeUnion, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeUnion, &p) p.childrenReqProps = props p.stats = stats return &p @@ -159,13 +93,13 @@ func (p PhysicalUnionAll) Init(ctx sessionctx.Context, stats *property.StatsInfo // Init initializes LogicalSort. func (ls LogicalSort) Init(ctx sessionctx.Context) *LogicalSort { - ls.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeSort, &ls) + ls.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeSort, &ls) return &ls } // Init initializes PhysicalSort. func (p PhysicalSort) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalSort { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeSort, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeSort, &p) p.childrenReqProps = props p.stats = stats return &p @@ -173,20 +107,20 @@ func (p PhysicalSort) Init(ctx sessionctx.Context, stats *property.StatsInfo, pr // Init initializes NominalSort. func (p NominalSort) Init(ctx sessionctx.Context, props ...*property.PhysicalProperty) *NominalSort { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeSort, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeSort, &p) p.childrenReqProps = props return &p } // Init initializes LogicalTopN. func (lt LogicalTopN) Init(ctx sessionctx.Context) *LogicalTopN { - lt.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeTopN, <) + lt.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeTopN, <) return < } // Init initializes PhysicalTopN. 
func (p PhysicalTopN) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalTopN { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeTopN, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeTopN, &p) p.childrenReqProps = props p.stats = stats return &p @@ -194,13 +128,13 @@ func (p PhysicalTopN) Init(ctx sessionctx.Context, stats *property.StatsInfo, pr // Init initializes LogicalLimit. func (p LogicalLimit) Init(ctx sessionctx.Context) *LogicalLimit { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeLimit, &p) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeLimit, &p) return &p } // Init initializes PhysicalLimit. func (p PhysicalLimit) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalLimit { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeLimit, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeLimit, &p) p.childrenReqProps = props p.stats = stats return &p @@ -208,26 +142,26 @@ func (p PhysicalLimit) Init(ctx sessionctx.Context, stats *property.StatsInfo, p // Init initializes LogicalTableDual. func (p LogicalTableDual) Init(ctx sessionctx.Context) *LogicalTableDual { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeDual, &p) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeDual, &p) return &p } // Init initializes PhysicalTableDual. func (p PhysicalTableDual) Init(ctx sessionctx.Context, stats *property.StatsInfo) *PhysicalTableDual { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeDual, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeDual, &p) p.stats = stats return &p } // Init initializes LogicalMaxOneRow. func (p LogicalMaxOneRow) Init(ctx sessionctx.Context) *LogicalMaxOneRow { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeMaxOneRow, &p) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeMaxOneRow, &p) return &p } // Init initializes PhysicalMaxOneRow. func (p PhysicalMaxOneRow) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalMaxOneRow { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeMaxOneRow, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeMaxOneRow, &p) p.childrenReqProps = props p.stats = stats return &p @@ -235,13 +169,13 @@ func (p PhysicalMaxOneRow) Init(ctx sessionctx.Context, stats *property.StatsInf // Init initializes LogicalWindow. func (p LogicalWindow) Init(ctx sessionctx.Context) *LogicalWindow { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeWindow, &p) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeWindow, &p) return &p } // Init initializes PhysicalWindow. func (p PhysicalWindow) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalWindow { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeWindow, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeWindow, &p) p.childrenReqProps = props p.stats = stats return &p @@ -249,25 +183,25 @@ func (p PhysicalWindow) Init(ctx sessionctx.Context, stats *property.StatsInfo, // Init initializes Update. func (p Update) Init(ctx sessionctx.Context) *Update { - p.basePlan = newBasePlan(ctx, TypeUpdate) + p.basePlan = newBasePlan(ctx, plancodec.TypeUpdate) return &p } // Init initializes Delete. 
func (p Delete) Init(ctx sessionctx.Context) *Delete { - p.basePlan = newBasePlan(ctx, TypeDelete) + p.basePlan = newBasePlan(ctx, plancodec.TypeDelete) return &p } // Init initializes Insert. func (p Insert) Init(ctx sessionctx.Context) *Insert { - p.basePlan = newBasePlan(ctx, TypeInsert) + p.basePlan = newBasePlan(ctx, plancodec.TypeInsert) return &p } // Init initializes Show. func (p Show) Init(ctx sessionctx.Context) *Show { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeShow, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeShow, &p) // Just use pseudo stats to avoid panic. p.stats = &property.StatsInfo{RowCount: 1} return &p @@ -275,13 +209,13 @@ func (p Show) Init(ctx sessionctx.Context) *Show { // Init initializes LogicalLock. func (p LogicalLock) Init(ctx sessionctx.Context) *LogicalLock { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeLock, &p) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeLock, &p) return &p } // Init initializes PhysicalLock. func (p PhysicalLock) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalLock { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeLock, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeLock, &p) p.childrenReqProps = props p.stats = stats return &p @@ -289,28 +223,28 @@ func (p PhysicalLock) Init(ctx sessionctx.Context, stats *property.StatsInfo, pr // Init initializes PhysicalTableScan. func (p PhysicalTableScan) Init(ctx sessionctx.Context) *PhysicalTableScan { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeTableScan, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeTableScan, &p) return &p } // Init initializes PhysicalIndexScan. func (p PhysicalIndexScan) Init(ctx sessionctx.Context) *PhysicalIndexScan { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeIdxScan, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeIdxScan, &p) return &p } // Init initializes PhysicalMemTable. func (p PhysicalMemTable) Init(ctx sessionctx.Context, stats *property.StatsInfo) *PhysicalMemTable { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeMemTableScan, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeMemTableScan, &p) p.stats = stats return &p } // Init initializes PhysicalHashJoin. func (p PhysicalHashJoin) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalHashJoin { - tp := TypeHashRightJoin + tp := plancodec.TypeHashRightJoin if p.InnerChildIdx == 1 { - tp = TypeHashLeftJoin + tp = plancodec.TypeHashLeftJoin } p.basePhysicalPlan = newBasePhysicalPlan(ctx, tp, &p) p.childrenReqProps = props @@ -320,21 +254,21 @@ func (p PhysicalHashJoin) Init(ctx sessionctx.Context, stats *property.StatsInfo // Init initializes PhysicalMergeJoin. func (p PhysicalMergeJoin) Init(ctx sessionctx.Context, stats *property.StatsInfo) *PhysicalMergeJoin { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeMergeJoin, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeMergeJoin, &p) p.stats = stats return &p } // Init initializes basePhysicalAgg. 
func (base basePhysicalAgg) Init(ctx sessionctx.Context, stats *property.StatsInfo) *basePhysicalAgg { - base.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeHashAgg, &base) + base.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeHashAgg, &base) base.stats = stats return &base } func (base basePhysicalAgg) initForHash(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalHashAgg { p := &PhysicalHashAgg{base} - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeHashAgg, p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeHashAgg, p) p.childrenReqProps = props p.stats = stats return p @@ -342,7 +276,7 @@ func (base basePhysicalAgg) initForHash(ctx sessionctx.Context, stats *property. func (base basePhysicalAgg) initForStream(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalStreamAgg { p := &PhysicalStreamAgg{base} - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeStreamAgg, p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeStreamAgg, p) p.childrenReqProps = props p.stats = stats return p @@ -350,7 +284,7 @@ func (base basePhysicalAgg) initForStream(ctx sessionctx.Context, stats *propert // Init initializes PhysicalApply. func (p PhysicalApply) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalApply { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeApply, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeApply, &p) p.childrenReqProps = props p.stats = stats return &p @@ -358,7 +292,7 @@ func (p PhysicalApply) Init(ctx sessionctx.Context, stats *property.StatsInfo, p // Init initializes PhysicalUnionScan. func (p PhysicalUnionScan) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalUnionScan { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeUnionScan, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeUnionScan, &p) p.childrenReqProps = props p.stats = stats return &p @@ -366,7 +300,7 @@ func (p PhysicalUnionScan) Init(ctx sessionctx.Context, stats *property.StatsInf // Init initializes PhysicalIndexLookUpReader. func (p PhysicalIndexLookUpReader) Init(ctx sessionctx.Context) *PhysicalIndexLookUpReader { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeIndexLookUp, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeIndexLookUp, &p) p.TablePlans = flattenPushDownPlan(p.tablePlan) p.IndexPlans = flattenPushDownPlan(p.indexPlan) p.schema = p.tablePlan.Schema() @@ -375,7 +309,7 @@ func (p PhysicalIndexLookUpReader) Init(ctx sessionctx.Context) *PhysicalIndexLo // Init initializes PhysicalTableReader. func (p PhysicalTableReader) Init(ctx sessionctx.Context) *PhysicalTableReader { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeTableReader, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeTableReader, &p) p.TablePlans = flattenPushDownPlan(p.tablePlan) p.schema = p.tablePlan.Schema() return &p @@ -383,7 +317,7 @@ func (p PhysicalTableReader) Init(ctx sessionctx.Context) *PhysicalTableReader { // Init initializes PhysicalIndexReader. 
func (p PhysicalIndexReader) Init(ctx sessionctx.Context) *PhysicalIndexReader { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeIndexReader, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeIndexReader, &p) p.IndexPlans = flattenPushDownPlan(p.indexPlan) switch p.indexPlan.(type) { case *PhysicalHashAgg, *PhysicalStreamAgg: @@ -398,7 +332,7 @@ func (p PhysicalIndexReader) Init(ctx sessionctx.Context) *PhysicalIndexReader { // Init initializes PhysicalIndexJoin. func (p PhysicalIndexJoin) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalIndexJoin { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeIndexJoin, &p) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeIndexJoin, &p) p.childrenReqProps = props p.stats = stats return &p diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index 5ab28bd2e8e00..d838106a25b2e 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -45,6 +45,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/plancodec" ) const ( @@ -2484,7 +2485,7 @@ func (b *PlanBuilder) buildSemiApply(outerPlan, innerPlan LogicalPlan, condition } ap := &LogicalApply{LogicalJoin: *join} - ap.tp = TypeApply + ap.tp = plancodec.TypeApply ap.self = ap return ap, nil } diff --git a/planner/core/plan.go b/planner/core/plan.go index f2d90ddf648d6..35d8a96f7d306 100644 --- a/planner/core/plan.go +++ b/planner/core/plan.go @@ -35,6 +35,10 @@ type Plan interface { Schema() *expression.Schema // Get the ID. ID() int + + // TP get the plan type. + TP() string + // Get the ID in explain statement ExplainID() fmt.Stringer // replaceExprColumns replace all the column reference in the plan's expression node. @@ -272,6 +276,11 @@ func (p *basePlan) ExplainID() fmt.Stringer { }) } +// TP implements Plan interface. +func (p *basePlan) TP() string { + return p.tp +} + // Schema implements Plan Schema interface. 
func (p *baseLogicalPlan) Schema() *expression.Schema { return p.children[0].Schema() diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go index e91eaaa6ac456..1323171805f89 100644 --- a/planner/core/point_get_plan.go +++ b/planner/core/point_get_plan.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/types/parser_driver" + "github.com/pingcap/tidb/util/plancodec" "github.com/pingcap/tipb/go-tipb" ) @@ -283,7 +284,7 @@ func tryPointGetPlan(ctx sessionctx.Context, selStmt *ast.SelectStmt) *PointGetP func newPointGetPlan(ctx sessionctx.Context, dbName string, schema *expression.Schema, tbl *model.TableInfo) *PointGetPlan { p := &PointGetPlan{ - basePlan: newBasePlan(ctx, "Point_Get"), + basePlan: newBasePlan(ctx, plancodec.TypePointGet), dbName: dbName, schema: schema, TblInfo: tbl, diff --git a/planner/core/rule_partition_processor.go b/planner/core/rule_partition_processor.go index 488c8877a54c0..af1105c263cb7 100644 --- a/planner/core/rule_partition_processor.go +++ b/planner/core/rule_partition_processor.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/plancodec" "github.com/pingcap/tidb/util/ranger" ) @@ -130,7 +131,7 @@ func (s *partitionProcessor) prune(ds *DataSource) (LogicalPlan, error) { // Not a deep copy. newDataSource := *ds - newDataSource.baseLogicalPlan = newBaseLogicalPlan(ds.context(), TypeTableScan, &newDataSource) + newDataSource.baseLogicalPlan = newBaseLogicalPlan(ds.context(), plancodec.TypeTableScan, &newDataSource) newDataSource.isPartition = true newDataSource.physicalTableID = pi.Definitions[i].ID // There are many expression nodes in the plan tree use the original datasource diff --git a/planner/core/task.go b/planner/core/task.go index f19621dc2552a..8c210bc6c2336 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/tidb/expression/aggregation" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/plancodec" ) // task is a new version of `PhysicalPlanInfo`. It stores cost information for a task. @@ -488,7 +489,7 @@ func (p *basePhysicalAgg) newPartialAggregate() (partial, final PhysicalPlan) { } // Create physical "final" aggregation. - if p.tp == TypeStreamAgg { + if p.tp == plancodec.TypeStreamAgg { finalAgg := basePhysicalAgg{ AggFuncs: finalAggFuncs, GroupByItems: groupByItems, diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 42b40d435a4a8..349d0762c073d 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -789,6 +789,8 @@ func (s *SessionVars) SetSystemVar(name string, val string) error { atomic.StoreUint32(&ProcessGeneralLog, uint32(tidbOptPositiveInt32(val, DefTiDBGeneralLog))) case TiDBSlowLogThreshold: atomic.StoreUint64(&config.GetGlobalConfig().Log.SlowThreshold, uint64(tidbOptInt64(val, logutil.DefaultSlowThreshold))) + case TiDBRecordPlanInSlowLog: + atomic.StoreUint32(&config.GetGlobalConfig().Log.RecordPlanInSlowLog, uint32(tidbOptInt64(val, logutil.DefaultRecordPlanInSlowLog))) case TiDBDDLSlowOprThreshold: atomic.StoreUint32(&DDLSlowOprThreshold, uint32(tidbOptPositiveInt32(val, DefTiDBDDLSlowOprThreshold))) case TiDBQueryLogMaxLen: @@ -1036,6 +1038,12 @@ const ( SlowLogSucc = "Succ" // SlowLogPrevStmt is used to show the previous executed statement. 
SlowLogPrevStmt = "Prev_stmt" + // SlowLogPlan is used to record the query plan. + SlowLogPlan = "Plan" + // SlowLogPlanPrefix is the prefix of the plan value. + SlowLogPlanPrefix = ast.TiDBDecodePlan + "('" + // SlowLogPlanSuffix is the suffix of the plan value. + SlowLogPlanSuffix = "')" // SlowLogPrevStmtPrefix is the prefix of Prev_stmt in slow log file. SlowLogPrevStmtPrefix = SlowLogPrevStmt + SlowLogSpaceMarkStr ) @@ -1058,6 +1066,7 @@ type SlowQueryLogItems struct { Prepared bool HasMoreResults bool PrevStmt string + Plan string } // SlowLogFormat uses for formatting slow log. @@ -1161,6 +1170,9 @@ func (s *SessionVars) SlowLogFormat(logItems *SlowQueryLogItems) string { writeSlowLogItem(&buf, SlowLogPrepared, strconv.FormatBool(logItems.Prepared)) writeSlowLogItem(&buf, SlowLogHasMoreResults, strconv.FormatBool(logItems.HasMoreResults)) writeSlowLogItem(&buf, SlowLogSucc, strconv.FormatBool(logItems.Succ)) + if len(logItems.Plan) != 0 { + writeSlowLogItem(&buf, SlowLogPlan, logItems.Plan) + } if logItems.PrevStmt != "" { writeSlowLogItem(&buf, SlowLogPrevStmt, logItems.PrevStmt) diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index d869b621d4470..a6996b154349b 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -693,6 +693,7 @@ var defaultSysVars = []*SysVar{ /* The following variable is defined as session scope but is actually server scope. */ {ScopeSession, TiDBGeneralLog, strconv.Itoa(DefTiDBGeneralLog)}, {ScopeSession, TiDBSlowLogThreshold, strconv.Itoa(logutil.DefaultSlowThreshold)}, + {ScopeSession, TiDBRecordPlanInSlowLog, strconv.Itoa(logutil.DefaultRecordPlanInSlowLog)}, {ScopeSession, TiDBDDLSlowOprThreshold, strconv.Itoa(DefTiDBDDLSlowOprThreshold)}, {ScopeSession, TiDBQueryLogMaxLen, strconv.Itoa(logutil.DefaultQueryLogMaxLen)}, {ScopeSession, TiDBConfig, ""}, diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index 895adaea446cb..3d22e952775d3 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -108,6 +108,9 @@ const ( // tidb_slow_log_threshold is used to set the slow log threshold in the server. TiDBSlowLogThreshold = "tidb_slow_log_threshold" + // tidb_record_plan_in_slow_log is used to log the plan of the slow query. + TiDBRecordPlanInSlowLog = "tidb_record_plan_in_slow_log" + // tidb_query_log_max_len is used to set the max length of the query in the log. 
TiDBQueryLogMaxLen = "tidb_query_log_max_len" diff --git a/sessionctx/variable/varsutil.go b/sessionctx/variable/varsutil.go index fd30a689988c1..778c115cc017b 100644 --- a/sessionctx/variable/varsutil.go +++ b/sessionctx/variable/varsutil.go @@ -127,6 +127,8 @@ func GetSessionOnlySysVars(s *SessionVars, key string) (string, bool, error) { return mysql.Priority2Str[mysql.PriorityEnum(atomic.LoadInt32(&ForcePriority))], true, nil case TiDBSlowLogThreshold: return strconv.FormatUint(atomic.LoadUint64(&config.GetGlobalConfig().Log.SlowThreshold), 10), true, nil + case TiDBRecordPlanInSlowLog: + return strconv.FormatUint(uint64(atomic.LoadUint32(&config.GetGlobalConfig().Log.RecordPlanInSlowLog)), 10), true, nil case TiDBDDLSlowOprThreshold: return strconv.FormatUint(uint64(atomic.LoadUint32(&DDLSlowOprThreshold)), 10), true, nil case TiDBQueryLogMaxLen: @@ -394,7 +396,7 @@ func ValidateSetSystemVar(vars *SessionVars, name string, value string) (string, SQLWarnings, UniqueChecks, OldAlterTable, LogBinTrustFunctionCreators, SQLBigSelects, BinlogDirectNonTransactionalUpdates, SQLQuoteShowCreate, AutomaticSpPrivileges, RelayLogPurge, SQLAutoIsNull, QueryCacheWlockInvalidate, ValidatePasswordCheckUserName, - SuperReadOnly, BinlogOrderCommits, MasterVerifyChecksum, BinlogRowQueryLogEvents, LogSlowSlaveStatements, + SuperReadOnly, BinlogOrderCommits, MasterVerifyChecksum, BinlogRowQueryLogEvents, LogSlowSlaveStatements, TiDBRecordPlanInSlowLog, LogSlowAdminStatements, LogQueriesNotUsingIndexes, Profiling: if strings.EqualFold(value, "ON") { return "1", nil diff --git a/util/logutil/log.go b/util/logutil/log.go index 40061f20be11a..7b54fbc093a4b 100644 --- a/util/logutil/log.go +++ b/util/logutil/log.go @@ -42,6 +42,8 @@ const ( DefaultSlowThreshold = 300 // DefaultQueryLogMaxLen is the default max length of the query in the log. DefaultQueryLogMaxLen = 2048 + // DefaultRecordPlanInSlowLog is the default value for whether enable log query plan in the slow log. + DefaultRecordPlanInSlowLog = 1 ) // EmptyFileLogConfig is an empty FileLogConfig. diff --git a/util/plancodec/codec.go b/util/plancodec/codec.go new file mode 100644 index 0000000000000..a13a8490d24fd --- /dev/null +++ b/util/plancodec/codec.go @@ -0,0 +1,297 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package plancodec + +import ( + "bytes" + "encoding/base64" + "strconv" + "strings" + "sync" + + "github.com/golang/snappy" + "github.com/pingcap/errors" +) + +const ( + // TreeBody indicates the current operator sub-tree is not finished, still + // has child operators to be attached on. + TreeBody = '│' + // TreeMiddleNode indicates this operator is not the last child of the + // current sub-tree rooted by its parent. + TreeMiddleNode = '├' + // TreeLastNode indicates this operator is the last child of the current + // sub-tree rooted by its parent. + TreeLastNode = '└' + // TreeGap is used to represent the gap between the branches of the tree. + TreeGap = ' ' + // TreeNodeIdentifier is used to replace the TreeGap once we need to attach + // a node to a sub-tree. 
+ TreeNodeIdentifier = '─' +) + +const ( + rootTaskType = "0" + copTaskType = "1" +) + +const ( + idSeparator = "_" + lineBreaker = '\n' + lineBreakerStr = "\n" + separator = '\t' + separatorStr = "\t" +) + +var decoderPool = sync.Pool{ + New: func() interface{} { + return &planDecoder{} + }, +} + +// DecodePlan use to decode the string to plan tree. +func DecodePlan(planString string) (string, error) { + if len(planString) == 0 { + return "", nil + } + pd := decoderPool.Get().(*planDecoder) + defer decoderPool.Put(pd) + pd.buf.Reset() + return pd.decode(planString) +} + +type planDecoder struct { + buf bytes.Buffer + depths []int + indents [][]rune + planInfos []*planInfo +} + +type planInfo struct { + depth int + fields []string +} + +func (pd *planDecoder) decode(planString string) (string, error) { + str, err := decompress(planString) + if err != nil { + return "", err + } + + nodes := strings.Split(str, lineBreakerStr) + if len(pd.depths) < len(nodes) { + pd.depths = make([]int, 0, len(nodes)) + pd.planInfos = make([]*planInfo, 0, len(nodes)) + pd.indents = make([][]rune, 0, len(nodes)) + } + pd.depths = pd.depths[:0] + pd.planInfos = pd.planInfos[:0] + planInfos := pd.planInfos + for _, node := range nodes { + p, err := decodePlanInfo(node) + if err != nil { + return "", err + } + if p == nil { + continue + } + planInfos = append(planInfos, p) + pd.depths = append(pd.depths, p.depth) + } + + // Calculated indentation of plans. + pd.initPlanTreeIndents() + for i := 1; i < len(pd.depths); i++ { + parentIndex := pd.findParentIndex(i) + pd.fillIndent(parentIndex, i) + } + // Align the value of plan fields. + pd.alignFields(planInfos) + + for i, p := range planInfos { + if i > 0 { + pd.buf.WriteByte(lineBreaker) + } + // This is for alignment. + pd.buf.WriteByte(separator) + pd.buf.WriteString(string(pd.indents[i])) + for j := 0; j < len(p.fields); j++ { + if j > 0 { + pd.buf.WriteByte(separator) + } + pd.buf.WriteString(p.fields[j]) + } + } + return pd.buf.String(), nil +} + +func (pd *planDecoder) initPlanTreeIndents() { + pd.indents = pd.indents[:0] + for i := 0; i < len(pd.depths); i++ { + indent := make([]rune, 2*pd.depths[i]) + pd.indents = append(pd.indents, indent) + if len(indent) == 0 { + continue + } + for i := 0; i < len(indent)-2; i++ { + indent[i] = ' ' + } + indent[len(indent)-2] = TreeLastNode + indent[len(indent)-1] = TreeNodeIdentifier + } +} + +func (pd *planDecoder) findParentIndex(childIndex int) int { + for i := childIndex - 1; i > 0; i-- { + if pd.depths[i]+1 == pd.depths[childIndex] { + return i + } + } + return 0 +} +func (pd *planDecoder) fillIndent(parentIndex, childIndex int) { + depth := pd.depths[childIndex] + if depth == 0 { + return + } + idx := depth*2 - 2 + for i := childIndex - 1; i > parentIndex; i-- { + if pd.indents[i][idx] == TreeLastNode { + pd.indents[i][idx] = TreeMiddleNode + break + } + pd.indents[i][idx] = TreeBody + } +} + +func (pd *planDecoder) alignFields(planInfos []*planInfo) { + if len(planInfos) == 0 { + return + } + fieldsLen := len(planInfos[0].fields) + // Last field no need to align. 
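+	// The last column holds the operator's free-form explain info, so padding it would not improve alignment.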
+	fieldsLen--
+	for colIdx := 0; colIdx < fieldsLen; colIdx++ {
+		maxFieldLen := pd.getMaxFieldLength(colIdx, planInfos)
+		for rowIdx, p := range planInfos {
+			fillLen := maxFieldLen - pd.getPlanFieldLen(rowIdx, colIdx, p)
+			for i := 0; i < fillLen; i++ {
+				p.fields[colIdx] += " "
+			}
+		}
+	}
+}
+
+func (pd *planDecoder) getMaxFieldLength(idx int, planInfos []*planInfo) int {
+	maxLength := -1
+	for rowIdx, p := range planInfos {
+		l := pd.getPlanFieldLen(rowIdx, idx, p)
+		if l > maxLength {
+			maxLength = l
+		}
+	}
+	return maxLength
+}
+
+func (pd *planDecoder) getPlanFieldLen(rowIdx, colIdx int, p *planInfo) int {
+	if colIdx == 0 {
+		return len(p.fields[0]) + len(pd.indents[rowIdx])
+	}
+	return len(p.fields[colIdx])
+}
+
+func decodePlanInfo(str string) (*planInfo, error) {
+	values := strings.Split(str, separatorStr)
+	if len(values) < 2 {
+		return nil, nil
+	}
+
+	p := &planInfo{
+		fields: make([]string, 0, len(values)-1),
+	}
+	for i, v := range values {
+		switch i {
+		// depth
+		case 0:
+			depth, err := strconv.Atoi(v)
+			if err != nil {
+				return nil, errors.Errorf("decode plan: %v, depth: %v, error: %v", str, v, err)
+			}
+			p.depth = depth
+		// plan ID
+		case 1:
+			ids := strings.Split(v, idSeparator)
+			if len(ids) != 2 {
+				return nil, errors.Errorf("decode plan: %v error, invalid plan id: %v", str, v)
+			}
+			planID, err := strconv.Atoi(ids[0])
+			if err != nil {
+				return nil, errors.Errorf("decode plan: %v, plan id: %v, error: %v", str, v, err)
+			}
+			p.fields = append(p.fields, PhysicalIDToTypeString(planID)+idSeparator+ids[1])
+		// task type
+		case 2:
+			if v == rootTaskType {
+				p.fields = append(p.fields, "root")
+			} else {
+				p.fields = append(p.fields, "cop")
+			}
+		default:
+			p.fields = append(p.fields, v)
+		}
+	}
+	return p, nil
+}
+
+// EncodePlanNode is used to encode the plan to a string.
+func EncodePlanNode(depth, pid int, planType string, isRoot bool, rowCount float64, explainInfo string, buf *bytes.Buffer) {
+	buf.WriteString(strconv.Itoa(depth))
+	buf.WriteByte(separator)
+	buf.WriteString(encodeID(planType, pid))
+	buf.WriteByte(separator)
+	if isRoot {
+		buf.WriteString(rootTaskType)
+	} else {
+		buf.WriteString(copTaskType)
+	}
+	buf.WriteByte(separator)
+	buf.WriteString(strconv.FormatFloat(rowCount, 'f', -1, 64))
+	buf.WriteByte(separator)
+	buf.WriteString(explainInfo)
+	buf.WriteByte(lineBreaker)
+}
+
+func encodeID(planType string, id int) string {
+	planID := TypeStringToPhysicalID(planType)
+	return strconv.Itoa(planID) + idSeparator + strconv.Itoa(id)
+}
+
+// Compress compresses the input with snappy and then encodes it with base64.
+func Compress(input []byte) string {
+	compressBytes := snappy.Encode(nil, input)
+	return base64.StdEncoding.EncodeToString(compressBytes)
+}
+
+func decompress(str string) (string, error) {
+	decodeBytes, err := base64.StdEncoding.DecodeString(str)
+	if err != nil {
+		return "", err
+	}
+
+	bs, err := snappy.Decode(nil, decodeBytes)
+	if err != nil {
+		return "", err
+	}
+	return string(bs), nil
+}
diff --git a/util/plancodec/id.go b/util/plancodec/id.go
new file mode 100644
index 0000000000000..8369f7bfd0e59
--- /dev/null
+++ b/util/plancodec/id.go
@@ -0,0 +1,313 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package plancodec
+
+import "strconv"
+
+const (
+	// TypeSel is the type of Selection.
+	TypeSel = "Selection"
+	// TypeSet is the type of Set.
+	TypeSet = "Set"
+	// TypeProj is the type of Projection.
+	TypeProj = "Projection"
+	// TypeAgg is the type of Aggregation.
+	TypeAgg = "Aggregation"
+	// TypeStreamAgg is the type of StreamAgg.
+	TypeStreamAgg = "StreamAgg"
+	// TypeHashAgg is the type of HashAgg.
+	TypeHashAgg = "HashAgg"
+	// TypeShow is the type of show.
+	TypeShow = "Show"
+	// TypeJoin is the type of Join.
+	TypeJoin = "Join"
+	// TypeUnion is the type of Union.
+	TypeUnion = "Union"
+	// TypeTableScan is the type of TableScan.
+	TypeTableScan = "TableScan"
+	// TypeMemTableScan is the type of MemTableScan.
+	TypeMemTableScan = "MemTableScan"
+	// TypeUnionScan is the type of UnionScan.
+	TypeUnionScan = "UnionScan"
+	// TypeIdxScan is the type of IndexScan.
+	TypeIdxScan = "IndexScan"
+	// TypeSort is the type of Sort.
+	TypeSort = "Sort"
+	// TypeTopN is the type of TopN.
+	TypeTopN = "TopN"
+	// TypeLimit is the type of Limit.
+	TypeLimit = "Limit"
+	// TypeHashLeftJoin is the type of left hash join.
+	TypeHashLeftJoin = "HashLeftJoin"
+	// TypeHashRightJoin is the type of right hash join.
+	TypeHashRightJoin = "HashRightJoin"
+	// TypeMergeJoin is the type of merge join.
+	TypeMergeJoin = "MergeJoin"
+	// TypeIndexJoin is the type of index look up join.
+	TypeIndexJoin = "IndexJoin"
+	// TypeIndexMergeJoin is the type of index look up merge join.
+	TypeIndexMergeJoin = "IndexMergeJoin"
+	// TypeIndexHashJoin is the type of index nested loop hash join.
+	TypeIndexHashJoin = "IndexHashJoin"
+	// TypeApply is the type of Apply.
+	TypeApply = "Apply"
+	// TypeMaxOneRow is the type of MaxOneRow.
+	TypeMaxOneRow = "MaxOneRow"
+	// TypeExists is the type of Exists.
+	TypeExists = "Exists"
+	// TypeDual is the type of TableDual.
+	TypeDual = "TableDual"
+	// TypeLock is the type of SelectLock.
+	TypeLock = "SelectLock"
+	// TypeInsert is the type of Insert.
+	TypeInsert = "Insert"
+	// TypeUpdate is the type of Update.
+	TypeUpdate = "Update"
+	// TypeDelete is the type of Delete.
+	TypeDelete = "Delete"
+	// TypeIndexLookUp is the type of IndexLookUp.
+	TypeIndexLookUp = "IndexLookUp"
+	// TypeTableReader is the type of TableReader.
+	TypeTableReader = "TableReader"
+	// TypeIndexReader is the type of IndexReader.
+	TypeIndexReader = "IndexReader"
+	// TypeWindow is the type of Window.
+	TypeWindow = "Window"
+	// TypeTableGather is the type of TableGather.
+	TypeTableGather = "TableGather"
+	// TypeIndexMerge is the type of IndexMergeReader.
+	TypeIndexMerge = "IndexMerge"
+	// TypePointGet is the type of PointGetPlan.
+	TypePointGet = "Point_Get"
+	// TypeShowDDLJobs is the type of show ddl jobs.
+	TypeShowDDLJobs = "ShowDDLJobs"
+	// TypeBatchPointGet is the type of BatchPointGetPlan.
+	TypeBatchPointGet = "Batch_Point_Get"
+)
+
+// plan id.
+const ( + typeSelID int = iota + 1 + typeSetID + typeProjID + typeAggID + typeStreamAggID + typeHashAggID + typeShowID + typeJoinID + typeUnionID + typeTableScanID + typeMemTableScanID + typeUnionScanID + typeIdxScanID + typeSortID + typeTopNID + typeLimitID + typeHashLeftJoinID + typeHashRightJoinID + typeMergeJoinID + typeIndexJoinID + typeIndexMergeJoinID + typeIndexHashJoinID + typeApplyID + typeMaxOneRowID + typeExistsID + typeDualID + typeLockID + typeInsertID + typeUpdateID + typeDeleteID + typeIndexLookUpID + typeTableReaderID + typeIndexReaderID + typeWindowID + typeTableGatherID + typeIndexMergeID + typePointGet + typeShowDDLJobs + typeBatchPointGet +) + +// TypeStringToPhysicalID converts the plan type string to plan id. +func TypeStringToPhysicalID(tp string) int { + switch tp { + case TypeSel: + return typeSelID + case TypeSet: + return typeSetID + case TypeProj: + return typeProjID + case TypeAgg: + return typeAggID + case TypeStreamAgg: + return typeStreamAggID + case TypeHashAgg: + return typeHashAggID + case TypeShow: + return typeShowID + case TypeJoin: + return typeJoinID + case TypeUnion: + return typeUnionID + case TypeTableScan: + return typeTableScanID + case TypeMemTableScan: + return typeMemTableScanID + case TypeUnionScan: + return typeUnionScanID + case TypeIdxScan: + return typeIdxScanID + case TypeSort: + return typeSortID + case TypeTopN: + return typeTopNID + case TypeLimit: + return typeLimitID + case TypeHashLeftJoin: + return typeHashLeftJoinID + case TypeHashRightJoin: + return typeHashRightJoinID + case TypeMergeJoin: + return typeMergeJoinID + case TypeIndexJoin: + return typeIndexJoinID + case TypeIndexMergeJoin: + return typeIndexMergeJoinID + case TypeIndexHashJoin: + return typeIndexHashJoinID + case TypeApply: + return typeApplyID + case TypeMaxOneRow: + return typeMaxOneRowID + case TypeExists: + return typeExistsID + case TypeDual: + return typeDualID + case TypeLock: + return typeLockID + case TypeInsert: + return typeInsertID + case TypeUpdate: + return typeUpdateID + case TypeDelete: + return typeDeleteID + case TypeIndexLookUp: + return typeIndexLookUpID + case TypeTableReader: + return typeTableReaderID + case TypeIndexReader: + return typeIndexReaderID + case TypeWindow: + return typeWindowID + case TypeTableGather: + return typeTableGatherID + case TypeIndexMerge: + return typeIndexMergeID + case TypePointGet: + return typePointGet + case TypeShowDDLJobs: + return typeShowDDLJobs + case TypeBatchPointGet: + return typeBatchPointGet + } + // Should never reach here. + return 0 +} + +// PhysicalIDToTypeString converts the plan id to plan type string. 
+func PhysicalIDToTypeString(id int) string { + switch id { + case typeSelID: + return TypeSel + case typeSetID: + return TypeSet + case typeProjID: + return TypeProj + case typeAggID: + return TypeAgg + case typeStreamAggID: + return TypeStreamAgg + case typeHashAggID: + return TypeHashAgg + case typeShowID: + return TypeShow + case typeJoinID: + return TypeJoin + case typeUnionID: + return TypeUnion + case typeTableScanID: + return TypeTableScan + case typeMemTableScanID: + return TypeMemTableScan + case typeUnionScanID: + return TypeUnionScan + case typeIdxScanID: + return TypeIdxScan + case typeSortID: + return TypeSort + case typeTopNID: + return TypeTopN + case typeLimitID: + return TypeLimit + case typeHashLeftJoinID: + return TypeHashLeftJoin + case typeHashRightJoinID: + return TypeHashRightJoin + case typeMergeJoinID: + return TypeMergeJoin + case typeIndexJoinID: + return TypeIndexJoin + case typeIndexMergeJoinID: + return TypeIndexMergeJoin + case typeIndexHashJoinID: + return TypeIndexHashJoin + case typeApplyID: + return TypeApply + case typeMaxOneRowID: + return TypeMaxOneRow + case typeExistsID: + return TypeExists + case typeDualID: + return TypeDual + case typeLockID: + return TypeLock + case typeInsertID: + return TypeInsert + case typeUpdateID: + return TypeUpdate + case typeDeleteID: + return TypeDelete + case typeIndexLookUpID: + return TypeIndexLookUp + case typeTableReaderID: + return TypeTableReader + case typeIndexReaderID: + return TypeIndexReader + case typeWindowID: + return TypeWindow + case typeTableGatherID: + return TypeTableGather + case typeIndexMergeID: + return TypeIndexMerge + case typePointGet: + return TypePointGet + case typeShowDDLJobs: + return TypeShowDDLJobs + case typeBatchPointGet: + return TypeBatchPointGet + } + + // Should never reach here. + return "UnknownPlanID" + strconv.Itoa(id) +}
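A minimal sketch of how the new plancodec helpers fit together (assuming the usual github.com/pingcap/tidb import path; the plan depths, ids, row counts, and explain strings below are illustrative values, not output from a real query): EncodePlanNode writes one tab-separated line per plan node (depth, encoded id, task type, row count, explain info), Compress packs the buffer with snappy plus base64, and DecodePlan turns that string back into an indented, human-readable plan tree.

package main

import (
	"bytes"
	"fmt"

	"github.com/pingcap/tidb/util/plancodec"
)

func main() {
	// Encode a two-node plan: a root-task TableReader_4 reading from a cop-task TableScan_3.
	var buf bytes.Buffer
	plancodec.EncodePlanNode(0, 4, plancodec.TypeTableReader, true, 10, "data:TableScan_3", &buf)
	plancodec.EncodePlanNode(1, 3, plancodec.TypeTableScan, false, 10, "table:t, range:[-inf,+inf]", &buf)

	// Compress with snappy and base64, the same format DecodePlan expects.
	encoded := plancodec.Compress(buf.Bytes())

	// DecodePlan restores the tab-separated, tree-indented plan text.
	tree, err := plancodec.DecodePlan(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(tree)
}

Keeping only the compressed form in the slow log keeps each log line short, while the full tree can still be reconstructed later by anything that reads the log.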