From 28368fea42a0c3a425f0523abbf5b9540c4d03c0 Mon Sep 17 00:00:00 2001 From: crazycs Date: Thu, 17 Oct 2019 22:13:19 +0800 Subject: [PATCH] *: record and print the plan in slow log. (#12179) --- config/config.go | 26 +- config/config.toml.example | 4 + executor/adapter.go | 30 +++ executor/set_test.go | 5 + expression/builtin.go | 1 + expression/builtin_info.go | 34 +++ expression/integration_test.go | 19 ++ go.mod | 3 +- go.sum | 4 +- infoschema/slow_log.go | 20 ++ infoschema/slow_log_test.go | 2 +- infoschema/tables_test.go | 5 +- planner/core/common_plans.go | 46 ++-- planner/core/encode.go | 68 +++++ planner/core/initialize.go | 186 ++++---------- planner/core/logical_plan_builder.go | 3 +- planner/core/plan.go | 8 + planner/core/point_get_plan.go | 3 +- planner/core/rule_partition_processor.go | 3 +- planner/core/task.go | 3 +- sessionctx/variable/session.go | 12 + sessionctx/variable/sysvar.go | 1 + sessionctx/variable/tidb_vars.go | 3 + sessionctx/variable/varsutil.go | 4 +- util/logutil/log.go | 2 + util/plancodec/codec.go | 297 +++++++++++++++++++++ util/plancodec/id.go | 313 +++++++++++++++++++++++ 27 files changed, 920 insertions(+), 185 deletions(-) create mode 100644 planner/core/encode.go create mode 100644 util/plancodec/codec.go create mode 100644 util/plancodec/id.go diff --git a/config/config.go b/config/config.go index 61803441bb9e9..ddf6a37937b11 100644 --- a/config/config.go +++ b/config/config.go @@ -114,10 +114,11 @@ type Log struct { // File log config. 
File logutil.FileLogConfig `toml:"file" json:"file"` - SlowQueryFile string `toml:"slow-query-file" json:"slow-query-file"` - SlowThreshold uint64 `toml:"slow-threshold" json:"slow-threshold"` - ExpensiveThreshold uint `toml:"expensive-threshold" json:"expensive-threshold"` - QueryLogMaxLen uint64 `toml:"query-log-max-len" json:"query-log-max-len"` + SlowQueryFile string `toml:"slow-query-file" json:"slow-query-file"` + SlowThreshold uint64 `toml:"slow-threshold" json:"slow-threshold"` + ExpensiveThreshold uint `toml:"expensive-threshold" json:"expensive-threshold"` + QueryLogMaxLen uint64 `toml:"query-log-max-len" json:"query-log-max-len"` + RecordPlanInSlowLog uint32 `toml:"record-plan-in-slow-log" json:"record-plan-in-slow-log"` } // Security is the security section of the config. @@ -354,14 +355,15 @@ var defaultConf = Config{ }, LowerCaseTableNames: 2, Log: Log{ - Level: "info", - Format: "text", - File: logutil.NewFileLogConfig(logutil.DefaultLogMaxSize), - SlowQueryFile: "tidb-slow.log", - SlowThreshold: logutil.DefaultSlowThreshold, - ExpensiveThreshold: 10000, - DisableErrorStack: true, - QueryLogMaxLen: logutil.DefaultQueryLogMaxLen, + Level: "info", + Format: "text", + File: logutil.NewFileLogConfig(logutil.DefaultLogMaxSize), + SlowQueryFile: "tidb-slow.log", + SlowThreshold: logutil.DefaultSlowThreshold, + ExpensiveThreshold: 10000, + DisableErrorStack: true, + QueryLogMaxLen: logutil.DefaultQueryLogMaxLen, + RecordPlanInSlowLog: logutil.DefaultRecordPlanInSlowLog, }, Status: Status{ ReportStatus: true, diff --git a/config/config.toml.example b/config/config.toml.example index d68beb62c71e6..e98b19e14bcab 100644 --- a/config/config.toml.example +++ b/config/config.toml.example @@ -85,6 +85,10 @@ slow-query-file = "tidb-slow.log" # Queries with execution time greater than this value will be logged. (Milliseconds) slow-threshold = 300 +# record-plan-in-slow-log is used to enable record query plan in slow log. +# 0 is disable. 1 is enable. 
+record-plan-in-slow-log = 1 + # Queries with internal result greater than this value will be logged. expensive-threshold = 10000 diff --git a/executor/adapter.go b/executor/adapter.go index 79969e7af4dba..c81cd77341a93 100644 --- a/executor/adapter.go +++ b/executor/adapter.go @@ -786,6 +786,7 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool, hasMoreResults bool) { ExecDetail: execDetail, MemMax: memMax, Succ: succ, + Plan: getPlanTree(a.Plan), Prepared: a.isPreparedStmt, HasMoreResults: hasMoreResults, } @@ -821,6 +822,35 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool, hasMoreResults bool) { } } +// getPlanTree will try to get the select plan tree if the plan is select or the select plan of delete/update/insert statement. +func getPlanTree(p plannercore.Plan) string { + cfg := config.GetGlobalConfig() + if atomic.LoadUint32(&cfg.Log.RecordPlanInSlowLog) == 0 { + return "" + } + var selectPlan plannercore.PhysicalPlan + if physicalPlan, ok := p.(plannercore.PhysicalPlan); ok { + selectPlan = physicalPlan + } else { + switch x := p.(type) { + case *plannercore.Delete: + selectPlan = x.SelectPlan + case *plannercore.Update: + selectPlan = x.SelectPlan + case *plannercore.Insert: + selectPlan = x.SelectPlan + } + } + if selectPlan == nil { + return "" + } + planTree := plannercore.EncodePlan(selectPlan) + if len(planTree) == 0 { + return planTree + } + return variable.SlowLogPlanPrefix + planTree + variable.SlowLogPlanSuffix +} + // SummaryStmt collects statements for performance_schema.events_statements_summary_by_digest func (a *ExecStmt) SummaryStmt() { sessVars := a.Ctx.GetSessionVars() diff --git a/executor/set_test.go b/executor/set_test.go index 3ec8226f59c98..723d91dfa643b 100644 --- a/executor/set_test.go +++ b/executor/set_test.go @@ -383,6 +383,11 @@ func (s *testSuite2) TestSetVar(c *C) { tk.MustExec("set @@tidb_expensive_query_time_threshold=70") tk.MustQuery("select @@tidb_expensive_query_time_threshold;").Check(testkit.Rows("70")) 
+ + tk.MustExec("set @@tidb_record_plan_in_slow_log = 1") + tk.MustQuery("select @@tidb_record_plan_in_slow_log;").Check(testkit.Rows("1")) + tk.MustExec("set @@tidb_record_plan_in_slow_log = 0") + tk.MustQuery("select @@tidb_record_plan_in_slow_log;").Check(testkit.Rows("0")) } func (s *testSuite2) TestSetCharset(c *C) { diff --git a/expression/builtin.go b/expression/builtin.go index fb2755298e5b2..704ed544ccd8c 100644 --- a/expression/builtin.go +++ b/expression/builtin.go @@ -723,6 +723,7 @@ var funcs = map[string]functionClass{ ast.TiDBVersion: &tidbVersionFunctionClass{baseFunctionClass{ast.TiDBVersion, 0, 0}}, ast.TiDBIsDDLOwner: &tidbIsDDLOwnerFunctionClass{baseFunctionClass{ast.TiDBIsDDLOwner, 0, 0}}, ast.TiDBParseTso: &tidbParseTsoFunctionClass{baseFunctionClass{ast.TiDBParseTso, 1, 1}}, + ast.TiDBDecodePlan: &tidbDecodePlanFunctionClass{baseFunctionClass{ast.TiDBDecodePlan, 1, 1}}, } // IsFunctionSupported check if given function name is a builtin sql function. diff --git a/expression/builtin_info.go b/expression/builtin_info.go index 427250d39039c..96892b9c701ae 100644 --- a/expression/builtin_info.go +++ b/expression/builtin_info.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/plancodec" "github.com/pingcap/tidb/util/printer" ) @@ -49,6 +50,7 @@ var ( _ functionClass = &rowCountFunctionClass{} _ functionClass = &tidbVersionFunctionClass{} _ functionClass = &tidbIsDDLOwnerFunctionClass{} + _ functionClass = &tidbDecodePlanFunctionClass{} _ functionClass = &tidbDecodeKeyFunctionClass{} ) @@ -656,3 +658,35 @@ func decodeKey(ctx sessionctx.Context, s string) string { ctx.GetSessionVars().StmtCtx.AppendWarning(errors.Errorf("invalid record/index key: %X", key)) return s } + +type tidbDecodePlanFunctionClass struct { + baseFunctionClass +} + +func (c *tidbDecodePlanFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) 
(builtinFunc, error) { + if err := c.verifyArgs(args); err != nil { + return nil, err + } + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETString, types.ETString) + sig := &builtinTiDBDecodePlanSig{bf} + return sig, nil +} + +type builtinTiDBDecodePlanSig struct { + baseBuiltinFunc +} + +func (b *builtinTiDBDecodePlanSig) Clone() builtinFunc { + newSig := &builtinTiDBDecodePlanSig{} + newSig.cloneFrom(&b.baseBuiltinFunc) + return newSig +} + +func (b *builtinTiDBDecodePlanSig) evalString(row chunk.Row) (string, bool, error) { + planString, isNull, err := b.args[0].EvalString(b.ctx, row) + if isNull || err != nil { + return "", isNull, err + } + planTree, err := plancodec.DecodePlan(planString) + return planTree, false, err +} diff --git a/expression/integration_test.go b/expression/integration_test.go index 4e705413d9dbb..c2e9d97c6447b 100755 --- a/expression/integration_test.go +++ b/expression/integration_test.go @@ -4156,6 +4156,25 @@ func (s *testIntegrationSuite) testTiDBIsOwnerFunc(c *C) { result.Check(testkit.Rows(fmt.Sprintf("%v", ret))) } +func (s *testIntegrationSuite) TestTiDBDecodePlanFunc(c *C) { + tk := testkit.NewTestKit(c, s.store) + defer s.cleanEnv(c) + tk.MustQuery("select tidb_decode_plan('')").Check(testkit.Rows("")) + tk.MustQuery("select tidb_decode_plan('7APIMAk1XzEzCTAJMQlmdW5jczpjb3VudCgxKQoxCTE3XzE0CTAJMAlpbm5lciBqb2luLCBp" + + "AQyQOlRhYmxlUmVhZGVyXzIxLCBlcXVhbDpbZXEoQ29sdW1uIzEsIA0KCDkpIBkXADIVFywxMCldCjIJMzJfMTgFZXhkYXRhOlNlbGVjdGlvbl" + + "8xNwozCTFfMTcJMQkwCWx0HVlATlVMTCksIG5vdChpc251bGwVHAApUhcAUDIpKQo0CTEwXzE2CTEJMTAwMDAJdAHB2Dp0MSwgcmFuZ2U6Wy1p" + + "bmYsK2luZl0sIGtlZXAgb3JkZXI6ZmFsc2UsIHN0YXRzOnBzZXVkbwoFtgAyAZcEMAk6tgAEMjAFtgQyMDq2AAg5LCBmtgAAMFa3AAA5FbcAO" + + "T63AAAyzrcA')").Check(testkit.Rows("" + + "\tStreamAgg_13 \troot\t1 \tfuncs:count(1)\n" + + "\t└─HashLeftJoin_14 \troot\t0 \tinner join, inner:TableReader_21, equal:[eq(Column#1, Column#9) eq(Column#2, Column#10)]\n" + + "\t ├─TableReader_18 \troot\t0 
\tdata:Selection_17\n" + + "\t │ └─Selection_17 \tcop \t0 \tlt(Column#1, NULL), not(isnull(Column#1)), not(isnull(Column#2))\n" + + "\t │ └─TableScan_16\tcop \t10000\ttable:t1, range:[-inf,+inf], keep order:false, stats:pseudo\n" + + "\t └─TableReader_21 \troot\t0 \tdata:Selection_20\n" + + "\t └─Selection_20 \tcop \t0 \tlt(Column#9, NULL), not(isnull(Column#10)), not(isnull(Column#9))\n" + + "\t └─TableScan_19\tcop \t10000\ttable:t2, range:[-inf,+inf], keep order:false, stats:pseudo")) +} + func (s *testIntegrationSuite) TestTiDBInternalFunc(c *C) { tk := testkit.NewTestKit(c, s.store) defer s.cleanEnv(c) diff --git a/go.mod b/go.mod index d904b5bbac557..c5570f7d23b8a 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,7 @@ require ( github.com/gogo/protobuf v1.2.0 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect github.com/golang/protobuf v1.3.2 + github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db github.com/google/btree v1.0.0 github.com/google/uuid v1.1.1 github.com/gorilla/context v1.1.1 // indirect @@ -40,7 +41,7 @@ require ( github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e github.com/pingcap/kvproto v0.0.0-20190910074005-0e61b6f435c1 github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 - github.com/pingcap/parser v0.0.0-20191012071233-32876040fefb + github.com/pingcap/parser v0.0.0-20191014060455-5d0bf28eaa23 github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0 github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible github.com/pingcap/tipb v0.0.0-20191015023537-709b39e7f8bb diff --git a/go.sum b/go.sum index 95a0b13364936..f1ce43c37517d 100644 --- a/go.sum +++ b/go.sum @@ -166,8 +166,8 @@ github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd h1:hWDol43WY5PGhsh3+87 github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 
h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20191012071233-32876040fefb h1:okeNsbftvzQ8I9DseKukhZURRYJUHOpRSHwlSZC0g0g= -github.com/pingcap/parser v0.0.0-20191012071233-32876040fefb/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= +github.com/pingcap/parser v0.0.0-20191014060455-5d0bf28eaa23 h1:eYQVLfwQ7/8u6hpCs30EKGIUrFVxY8hoDwZdPVSgTlk= +github.com/pingcap/parser v0.0.0-20191014060455-5d0bf28eaa23/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0 h1:GIEq+wZfrl2bcJxpuSrEH4H7/nlf5YdmpS+dU9lNIt8= github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0/go.mod h1:G/6rJpnYwM0LKMec2rI82/5Kg6GaZMvlfB+e6/tvYmI= github.com/pingcap/tidb-tools v2.1.3-0.20190321065848-1e8b48f5c168+incompatible h1:MkWCxgZpJBgY2f4HtwWMMFzSBb3+JPzeJgF3VrXE/bU= diff --git a/infoschema/slow_log.go b/infoschema/slow_log.go index a47c219950cfc..eb76b2794a5e0 100644 --- a/infoschema/slow_log.go +++ b/infoschema/slow_log.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/hack" "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/plancodec" "go.uber.org/zap" ) @@ -60,6 +61,7 @@ var slowQueryCols = []columnInfo{ {variable.SlowLogCopWaitAddr, mysql.TypeVarchar, 64, 0, nil, nil}, {variable.SlowLogMemMax, mysql.TypeLonglong, 20, 0, nil, nil}, {variable.SlowLogSucc, mysql.TypeTiny, 1, 0, nil, nil}, + {variable.SlowLogPlan, mysql.TypeLongBlob, types.UnspecifiedLength, 0, nil, nil}, {variable.SlowLogPrevStmt, mysql.TypeLongBlob, types.UnspecifiedLength, 0, nil, nil}, {variable.SlowLogQuerySQLStr, mysql.TypeLongBlob, types.UnspecifiedLength, 0, nil, nil}, } @@ -204,6 +206,7 @@ type slowQueryTuple struct { sql string isInternal bool succ bool + plan string } func (st *slowQueryTuple) setFieldValue(tz 
*time.Location, field, value string) error { @@ -273,6 +276,8 @@ func (st *slowQueryTuple) setFieldValue(tz *time.Location, field, value string) st.memMax, err = strconv.ParseInt(value, 10, 64) case variable.SlowLogSucc: st.succ, err = strconv.ParseBool(value) + case variable.SlowLogPlan: + st.plan = value case variable.SlowLogQuerySQLStr: st.sql = value } @@ -319,11 +324,26 @@ func (st *slowQueryTuple) convertToDatumRow() []types.Datum { } else { record = append(record, types.NewIntDatum(0)) } + record = append(record, types.NewStringDatum(parsePlan(st.plan))) record = append(record, types.NewStringDatum(st.prevStmt)) record = append(record, types.NewStringDatum(st.sql)) return record } +func parsePlan(planString string) string { + if len(planString) <= len(variable.SlowLogPlanPrefix)+len(variable.SlowLogPlanSuffix) { + return planString + } + planString = planString[len(variable.SlowLogPlanPrefix) : len(planString)-len(variable.SlowLogPlanSuffix)] + decodePlanString, err := plancodec.DecodePlan(planString) + if err == nil { + planString = decodePlanString + } else { + logutil.BgLogger().Error("decode plan in slow log failed", zap.String("plan", planString), zap.Error(err)) + } + return planString +} + // ParseTime exports for testing. 
func ParseTime(s string) (time.Time, error) { t, err := time.Parse(logutil.SlowLogTimeFormat, s) diff --git a/infoschema/slow_log_test.go b/infoschema/slow_log_test.go index efb0d02f700fb..9a14b128f3726 100644 --- a/infoschema/slow_log_test.go +++ b/infoschema/slow_log_test.go @@ -55,7 +55,7 @@ select * from t;`) } recordString += str } - expectRecordString := "2019-04-28 15:24:04.309074,405888132465033227,,,0,0.216905,0.021,0,0,1,637,0,,,1,42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772,t1:1,t2:2,0.1,0.2,0.03,127.0.0.1:20160,0.05,0.6,0.8,0.0.0.0:20160,70724,0,update t set i = 1;,select * from t;" + expectRecordString := "2019-04-28 15:24:04.309074,405888132465033227,,,0,0.216905,0.021,0,0,1,637,0,,,1,42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772,t1:1,t2:2,0.1,0.2,0.03,127.0.0.1:20160,0.05,0.6,0.8,0.0.0.0:20160,70724,0,,update t set i = 1;,select * from t;" c.Assert(expectRecordString, Equals, recordString) // fix sql contain '# ' bug diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index 734b4ad56bd3c..cb302df773e6b 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -478,6 +478,7 @@ func (s *testTableSuite) TestSlowQuery(c *C) { # Cop_wait_avg: 0.05 Cop_wait_p90: 0.6 Cop_wait_max: 0.8 Cop_wait_addr: 0.0.0.0:20160 # Mem_max: 70724 # Succ: true +# Plan: abcd # Prev_stmt: update t set i = 2; select * from t_slim;`)) c.Assert(f.Sync(), IsNil) @@ -487,10 +488,10 @@ select * from t_slim;`)) tk.MustExec("set time_zone = '+08:00';") re := tk.MustQuery("select * from information_schema.slow_query") re.Check(testutil.RowsWithSep("|", - "2019-02-12 19:33:56.571953|406315658548871171|root|127.0.0.1|6|4.895492|0.161|0.101|0.092|1|100001|100000|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|1|update t set i = 2;|select * from t_slim;")) + "2019-02-12 
19:33:56.571953|406315658548871171|root|127.0.0.1|6|4.895492|0.161|0.101|0.092|1|100001|100000|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|1|abcd|update t set i = 2;|select * from t_slim;")) tk.MustExec("set time_zone = '+00:00';") re = tk.MustQuery("select * from information_schema.slow_query") - re.Check(testutil.RowsWithSep("|", "2019-02-12 11:33:56.571953|406315658548871171|root|127.0.0.1|6|4.895492|0.161|0.101|0.092|1|100001|100000|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|1|update t set i = 2;|select * from t_slim;")) + re.Check(testutil.RowsWithSep("|", "2019-02-12 11:33:56.571953|406315658548871171|root|127.0.0.1|6|4.895492|0.161|0.101|0.092|1|100001|100000|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|1|abcd|update t set i = 2;|select * from t_slim;")) // Test for long query. _, err = f.Write([]byte(` diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index b4fb0f7246ca5..46f611cc51e8e 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -35,6 +35,7 @@ import ( driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/kvcache" + "github.com/pingcap/tidb/util/plancodec" "github.com/pingcap/tidb/util/ranger" ) @@ -807,23 +808,6 @@ func (e *Explain) prepareOperatorInfo(p Plan, taskType string, indent string, is e.Rows = append(e.Rows, row) } -const ( - // treeBody indicates the current operator sub-tree is not finished, still - // has child operators to be attached on. - treeBody = '│' - // treeMiddleNode indicates this operator is not the last child of the - // current sub-tree rooted by its parent. 
- treeMiddleNode = '├' - // treeLastNode indicates this operator is the last child of the current - // sub-tree rooted by its parent. - treeLastNode = '└' - // treeGap is used to represent the gap between the branches of the tree. - treeGap = ' ' - // treeNodeIdentifier is used to replace the treeGap once we need to attach - // a node to a sub-tree. - treeNodeIdentifier = '─' -) - func (e *Explain) prettyIdentifier(id, indent string, isLastChild bool) string { if len(indent) == 0 { return id @@ -831,44 +815,44 @@ func (e *Explain) prettyIdentifier(id, indent string, isLastChild bool) string { indentBytes := []rune(indent) for i := len(indentBytes) - 1; i >= 0; i-- { - if indentBytes[i] != treeBody { + if indentBytes[i] != plancodec.TreeBody { continue } // Here we attach a new node to the current sub-tree by changing - // the closest treeBody to a: - // 1. treeLastNode, if this operator is the last child. - // 2. treeMiddleNode, if this operator is not the last child.. + // the closest TreeBody to a: + // 1. TreeLastNode, if this operator is the last child. + // 2. TreeMiddleNode, if this operator is not the last child.. if isLastChild { - indentBytes[i] = treeLastNode + indentBytes[i] = plancodec.TreeLastNode } else { - indentBytes[i] = treeMiddleNode + indentBytes[i] = plancodec.TreeMiddleNode } break } - // Replace the treeGap between the treeBody and the node to a - // treeNodeIdentifier. - indentBytes[len(indentBytes)-1] = treeNodeIdentifier + // Replace the TreeGap between the TreeBody and the node to a + // TreeNodeIdentifier. 
+ indentBytes[len(indentBytes)-1] = plancodec.TreeNodeIdentifier return string(indentBytes) + id } func (e *Explain) getIndent4Child(indent string, isLastChild bool) string { if !isLastChild { - return string(append([]rune(indent), treeBody, treeGap)) + return string(append([]rune(indent), plancodec.TreeBody, plancodec.TreeGap)) } // If the current node is the last node of the current operator tree, we - // need to end this sub-tree by changing the closest treeBody to a treeGap. + // need to end this sub-tree by changing the closest TreeBody to a TreeGap. indentBytes := []rune(indent) for i := len(indentBytes) - 1; i >= 0; i-- { - if indentBytes[i] == treeBody { - indentBytes[i] = treeGap + if indentBytes[i] == plancodec.TreeBody { + indentBytes[i] = plancodec.TreeGap break } } - return string(append(indentBytes, treeBody, treeGap)) + return string(append(indentBytes, plancodec.TreeBody, plancodec.TreeGap)) } func (e *Explain) prepareDotInfo(p PhysicalPlan) { diff --git a/planner/core/encode.go b/planner/core/encode.go new file mode 100644 index 0000000000000..4abb3ed9b7cd9 --- /dev/null +++ b/planner/core/encode.go @@ -0,0 +1,68 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package core + +import ( + "bytes" + "sync" + + "github.com/pingcap/tidb/util/plancodec" +) + +var encoderPool = sync.Pool{ + New: func() interface{} { + return &planEncoder{} + }, +} + +type planEncoder struct { + buf bytes.Buffer + encodedPlans map[int]bool +} + +// EncodePlan is used to encodePlan the plan to the plan tree with compressing. +func EncodePlan(p PhysicalPlan) string { + pn := encoderPool.Get().(*planEncoder) + defer encoderPool.Put(pn) + return pn.encodePlanTree(p) +} + +func (pn *planEncoder) encodePlanTree(p PhysicalPlan) string { + pn.encodedPlans = make(map[int]bool) + pn.buf.Reset() + pn.encodePlan(p, true, 0) + return plancodec.Compress(pn.buf.Bytes()) +} + +func (pn *planEncoder) encodePlan(p PhysicalPlan, isRoot bool, depth int) { + plancodec.EncodePlanNode(depth, p.ID(), p.TP(), isRoot, p.statsInfo().RowCount, p.ExplainInfo(), &pn.buf) + pn.encodedPlans[p.ID()] = true + + depth++ + for _, child := range p.Children() { + if pn.encodedPlans[child.ID()] { + continue + } + pn.encodePlan(child.(PhysicalPlan), isRoot, depth) + } + switch copPlan := p.(type) { + case *PhysicalTableReader: + pn.encodePlan(copPlan.tablePlan, false, depth) + case *PhysicalIndexReader: + pn.encodePlan(copPlan.indexPlan, false, depth) + case *PhysicalIndexLookUpReader: + pn.encodePlan(copPlan.indexPlan, false, depth) + pn.encodePlan(copPlan.tablePlan, false, depth) + } +} diff --git a/planner/core/initialize.go b/planner/core/initialize.go index a46e928e09dee..60ec2f6ed9585 100644 --- a/planner/core/initialize.go +++ b/planner/core/initialize.go @@ -18,130 +18,54 @@ import ( "github.com/pingcap/tidb/planner/property" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/types" -) - -const ( - // TypeSel is the type of Selection. - TypeSel = "Selection" - // TypeSet is the type of Set. - TypeSet = "Set" - // TypeProj is the type of Projection. - TypeProj = "Projection" - // TypeAgg is the type of Aggregation. 
- TypeAgg = "Aggregation" - // TypeStreamAgg is the type of StreamAgg. - TypeStreamAgg = "StreamAgg" - // TypeHashAgg is the type of HashAgg. - TypeHashAgg = "HashAgg" - // TypeShow is the type of show. - TypeShow = "Show" - // TypeJoin is the type of Join. - TypeJoin = "Join" - // TypeUnion is the type of Union. - TypeUnion = "Union" - // TypeTableScan is the type of TableScan. - TypeTableScan = "TableScan" - // TypeMemTableScan is the type of TableScan. - TypeMemTableScan = "MemTableScan" - // TypeUnionScan is the type of UnionScan. - TypeUnionScan = "UnionScan" - // TypeIdxScan is the type of IndexScan. - TypeIdxScan = "IndexScan" - // TypeSort is the type of Sort. - TypeSort = "Sort" - // TypeTopN is the type of TopN. - TypeTopN = "TopN" - // TypeLimit is the type of Limit. - TypeLimit = "Limit" - // TypeHashLeftJoin is the type of left hash join. - TypeHashLeftJoin = "HashLeftJoin" - // TypeHashRightJoin is the type of right hash join. - TypeHashRightJoin = "HashRightJoin" - // TypeMergeJoin is the type of merge join. - TypeMergeJoin = "MergeJoin" - // TypeIndexJoin is the type of index look up join. - TypeIndexJoin = "IndexJoin" - // TypeIndexMergeJoin is the type of index nested loop merge join. - TypeIndexMergeJoin = "IndexMergeJoin" - // TypeIndexHashJoin is the type of index nested loop hash join. - TypeIndexHashJoin = "IndexHashJoin" - // TypeApply is the type of Apply. - TypeApply = "Apply" - // TypeMaxOneRow is the type of MaxOneRow. - TypeMaxOneRow = "MaxOneRow" - // TypeExists is the type of Exists. - TypeExists = "Exists" - // TypeDual is the type of TableDual. - TypeDual = "TableDual" - // TypeLock is the type of SelectLock. - TypeLock = "SelectLock" - // TypeInsert is the type of Insert - TypeInsert = "Insert" - // TypeUpdate is the type of Update. - TypeUpdate = "Update" - // TypeDelete is the type of Delete. - TypeDelete = "Delete" - // TypeIndexLookUp is the type of IndexLookUp. 
- TypeIndexLookUp = "IndexLookUp" - // TypeTableReader is the type of TableReader. - TypeTableReader = "TableReader" - // TypeIndexReader is the type of IndexReader. - TypeIndexReader = "IndexReader" - // TypeWindow is the type of Window. - TypeWindow = "Window" - // TypeTableGather is the type of TableGather. - TypeTableGather = "TableGather" - // TypeIndexMerge is the type of IndexMergeReader - TypeIndexMerge = "IndexMerge" - // TypeShowDDLJobs is the type of show ddl jobs. - TypeShowDDLJobs = "ShowDDLJobs" + "github.com/pingcap/tidb/util/plancodec" ) // Init initializes LogicalAggregation. func (la LogicalAggregation) Init(ctx sessionctx.Context, offset int) *LogicalAggregation { - la.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeAgg, &la, offset) + la.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeAgg, &la, offset) return &la } // Init initializes LogicalJoin. func (p LogicalJoin) Init(ctx sessionctx.Context, offset int) *LogicalJoin { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeJoin, &p, offset) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeJoin, &p, offset) return &p } // Init initializes DataSource. func (ds DataSource) Init(ctx sessionctx.Context, offset int) *DataSource { - ds.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeTableScan, &ds, offset) + ds.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeTableScan, &ds, offset) return &ds } // Init initializes TableGather. func (tg TableGather) Init(ctx sessionctx.Context, offset int) *TableGather { - tg.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeTableGather, &tg, offset) + tg.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeTableGather, &tg, offset) return &tg } // Init initializes TableScan. 
func (ts TableScan) Init(ctx sessionctx.Context, offset int) *TableScan { - ts.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeTableScan, &ts, offset) + ts.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeTableScan, &ts, offset) return &ts } // Init initializes LogicalApply. func (la LogicalApply) Init(ctx sessionctx.Context, offset int) *LogicalApply { - la.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeApply, &la, offset) + la.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeApply, &la, offset) return &la } // Init initializes LogicalSelection. func (p LogicalSelection) Init(ctx sessionctx.Context, offset int) *LogicalSelection { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeSel, &p, offset) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeSel, &p, offset) return &p } // Init initializes PhysicalSelection. func (p PhysicalSelection) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int, props ...*property.PhysicalProperty) *PhysicalSelection { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeSel, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeSel, &p, offset) p.childrenReqProps = props p.stats = stats return &p @@ -149,19 +73,19 @@ func (p PhysicalSelection) Init(ctx sessionctx.Context, stats *property.StatsInf // Init initializes LogicalUnionScan. func (p LogicalUnionScan) Init(ctx sessionctx.Context, offset int) *LogicalUnionScan { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeUnionScan, &p, offset) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeUnionScan, &p, offset) return &p } // Init initializes LogicalProjection. func (p LogicalProjection) Init(ctx sessionctx.Context, offset int) *LogicalProjection { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeProj, &p, offset) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeProj, &p, offset) return &p } // Init initializes PhysicalProjection. 
func (p PhysicalProjection) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int, props ...*property.PhysicalProperty) *PhysicalProjection { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeProj, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeProj, &p, offset) p.childrenReqProps = props p.stats = stats return &p @@ -169,13 +93,13 @@ func (p PhysicalProjection) Init(ctx sessionctx.Context, stats *property.StatsIn // Init initializes LogicalUnionAll. func (p LogicalUnionAll) Init(ctx sessionctx.Context, offset int) *LogicalUnionAll { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeUnion, &p, offset) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeUnion, &p, offset) return &p } // Init initializes PhysicalUnionAll. func (p PhysicalUnionAll) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int, props ...*property.PhysicalProperty) *PhysicalUnionAll { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeUnion, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeUnion, &p, offset) p.childrenReqProps = props p.stats = stats return &p @@ -183,13 +107,13 @@ func (p PhysicalUnionAll) Init(ctx sessionctx.Context, stats *property.StatsInfo // Init initializes LogicalSort. func (ls LogicalSort) Init(ctx sessionctx.Context, offset int) *LogicalSort { - ls.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeSort, &ls, offset) + ls.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeSort, &ls, offset) return &ls } // Init initializes PhysicalSort. 
func (p PhysicalSort) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int, props ...*property.PhysicalProperty) *PhysicalSort { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeSort, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeSort, &p, offset) p.childrenReqProps = props p.stats = stats return &p @@ -197,20 +121,20 @@ func (p PhysicalSort) Init(ctx sessionctx.Context, stats *property.StatsInfo, of // Init initializes NominalSort. func (p NominalSort) Init(ctx sessionctx.Context, offset int, props ...*property.PhysicalProperty) *NominalSort { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeSort, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeSort, &p, offset) p.childrenReqProps = props return &p } // Init initializes LogicalTopN. func (lt LogicalTopN) Init(ctx sessionctx.Context, offset int) *LogicalTopN { - lt.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeTopN, <, offset) + lt.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeTopN, <, offset) return < } // Init initializes PhysicalTopN. func (p PhysicalTopN) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int, props ...*property.PhysicalProperty) *PhysicalTopN { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeTopN, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeTopN, &p, offset) p.childrenReqProps = props p.stats = stats return &p @@ -218,13 +142,13 @@ func (p PhysicalTopN) Init(ctx sessionctx.Context, stats *property.StatsInfo, of // Init initializes LogicalLimit. func (p LogicalLimit) Init(ctx sessionctx.Context, offset int) *LogicalLimit { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeLimit, &p, offset) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeLimit, &p, offset) return &p } // Init initializes PhysicalLimit. 
func (p PhysicalLimit) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int, props ...*property.PhysicalProperty) *PhysicalLimit { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeLimit, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeLimit, &p, offset) p.childrenReqProps = props p.stats = stats return &p @@ -232,26 +156,26 @@ func (p PhysicalLimit) Init(ctx sessionctx.Context, stats *property.StatsInfo, o // Init initializes LogicalTableDual. func (p LogicalTableDual) Init(ctx sessionctx.Context, offset int) *LogicalTableDual { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeDual, &p, offset) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeDual, &p, offset) return &p } // Init initializes PhysicalTableDual. func (p PhysicalTableDual) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int) *PhysicalTableDual { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeDual, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeDual, &p, offset) p.stats = stats return &p } // Init initializes LogicalMaxOneRow. func (p LogicalMaxOneRow) Init(ctx sessionctx.Context, offset int) *LogicalMaxOneRow { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeMaxOneRow, &p, offset) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeMaxOneRow, &p, offset) return &p } // Init initializes PhysicalMaxOneRow. func (p PhysicalMaxOneRow) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int, props ...*property.PhysicalProperty) *PhysicalMaxOneRow { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeMaxOneRow, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeMaxOneRow, &p, offset) p.childrenReqProps = props p.stats = stats return &p @@ -259,13 +183,13 @@ func (p PhysicalMaxOneRow) Init(ctx sessionctx.Context, stats *property.StatsInf // Init initializes LogicalWindow. 
func (p LogicalWindow) Init(ctx sessionctx.Context, offset int) *LogicalWindow { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeWindow, &p, offset) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeWindow, &p, offset) return &p } // Init initializes PhysicalWindow. func (p PhysicalWindow) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int, props ...*property.PhysicalProperty) *PhysicalWindow { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeWindow, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeWindow, &p, offset) p.childrenReqProps = props p.stats = stats return &p @@ -273,37 +197,37 @@ func (p PhysicalWindow) Init(ctx sessionctx.Context, stats *property.StatsInfo, // Init initializes Update. func (p Update) Init(ctx sessionctx.Context) *Update { - p.basePlan = newBasePlan(ctx, TypeUpdate, 0) + p.basePlan = newBasePlan(ctx, plancodec.TypeUpdate, 0) return &p } // Init initializes Delete. func (p Delete) Init(ctx sessionctx.Context) *Delete { - p.basePlan = newBasePlan(ctx, TypeDelete, 0) + p.basePlan = newBasePlan(ctx, plancodec.TypeDelete, 0) return &p } // Init initializes Insert. func (p Insert) Init(ctx sessionctx.Context) *Insert { - p.basePlan = newBasePlan(ctx, TypeInsert, 0) + p.basePlan = newBasePlan(ctx, plancodec.TypeInsert, 0) return &p } // Init initializes LogicalShow. func (p LogicalShow) Init(ctx sessionctx.Context) *LogicalShow { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeShow, &p, 0) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeShow, &p, 0) return &p } // Init initializes LogicalShowDDLJobs. func (p LogicalShowDDLJobs) Init(ctx sessionctx.Context) *LogicalShowDDLJobs { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeShowDDLJobs, &p, 0) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeShowDDLJobs, &p, 0) return &p } // Init initializes PhysicalShow. 
func (p PhysicalShow) Init(ctx sessionctx.Context) *PhysicalShow { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeShow, &p, 0) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeShow, &p, 0) // Just use pseudo stats to avoid panic. p.stats = &property.StatsInfo{RowCount: 1} return &p @@ -311,7 +235,7 @@ func (p PhysicalShow) Init(ctx sessionctx.Context) *PhysicalShow { // Init initializes PhysicalShowDDLJobs. func (p PhysicalShowDDLJobs) Init(ctx sessionctx.Context) *PhysicalShowDDLJobs { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeShowDDLJobs, &p, 0) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeShowDDLJobs, &p, 0) // Just use pseudo stats to avoid panic. p.stats = &property.StatsInfo{RowCount: 1} return &p @@ -319,13 +243,13 @@ func (p PhysicalShowDDLJobs) Init(ctx sessionctx.Context) *PhysicalShowDDLJobs { // Init initializes LogicalLock. func (p LogicalLock) Init(ctx sessionctx.Context) *LogicalLock { - p.baseLogicalPlan = newBaseLogicalPlan(ctx, TypeLock, &p, 0) + p.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeLock, &p, 0) return &p } // Init initializes PhysicalLock. func (p PhysicalLock) Init(ctx sessionctx.Context, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalLock { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeLock, &p, 0) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeLock, &p, 0) p.childrenReqProps = props p.stats = stats return &p @@ -333,28 +257,28 @@ func (p PhysicalLock) Init(ctx sessionctx.Context, stats *property.StatsInfo, pr // Init initializes PhysicalTableScan. func (p PhysicalTableScan) Init(ctx sessionctx.Context, offset int) *PhysicalTableScan { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeTableScan, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeTableScan, &p, offset) return &p } // Init initializes PhysicalIndexScan. 
func (p PhysicalIndexScan) Init(ctx sessionctx.Context, offset int) *PhysicalIndexScan { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeIdxScan, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeIdxScan, &p, offset) return &p } // Init initializes PhysicalMemTable. func (p PhysicalMemTable) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int) *PhysicalMemTable { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeMemTableScan, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeMemTableScan, &p, offset) p.stats = stats return &p } // Init initializes PhysicalHashJoin. func (p PhysicalHashJoin) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int, props ...*property.PhysicalProperty) *PhysicalHashJoin { - tp := TypeHashRightJoin + tp := plancodec.TypeHashRightJoin if p.InnerChildIdx == 1 { - tp = TypeHashLeftJoin + tp = plancodec.TypeHashLeftJoin } p.basePhysicalPlan = newBasePhysicalPlan(ctx, tp, &p, offset) p.childrenReqProps = props @@ -364,21 +288,21 @@ func (p PhysicalHashJoin) Init(ctx sessionctx.Context, stats *property.StatsInfo // Init initializes PhysicalMergeJoin. func (p PhysicalMergeJoin) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int) *PhysicalMergeJoin { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeMergeJoin, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeMergeJoin, &p, offset) p.stats = stats return &p } // Init initializes basePhysicalAgg. 
func (base basePhysicalAgg) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int) *basePhysicalAgg { - base.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeHashAgg, &base, offset) + base.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeHashAgg, &base, offset) base.stats = stats return &base } func (base basePhysicalAgg) initForHash(ctx sessionctx.Context, stats *property.StatsInfo, offset int, props ...*property.PhysicalProperty) *PhysicalHashAgg { p := &PhysicalHashAgg{base} - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeHashAgg, p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeHashAgg, p, offset) p.childrenReqProps = props p.stats = stats return p @@ -386,7 +310,7 @@ func (base basePhysicalAgg) initForHash(ctx sessionctx.Context, stats *property. func (base basePhysicalAgg) initForStream(ctx sessionctx.Context, stats *property.StatsInfo, offset int, props ...*property.PhysicalProperty) *PhysicalStreamAgg { p := &PhysicalStreamAgg{base} - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeStreamAgg, p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeStreamAgg, p, offset) p.childrenReqProps = props p.stats = stats return p @@ -394,7 +318,7 @@ func (base basePhysicalAgg) initForStream(ctx sessionctx.Context, stats *propert // Init initializes PhysicalApply. func (p PhysicalApply) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int, props ...*property.PhysicalProperty) *PhysicalApply { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeApply, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeApply, &p, offset) p.childrenReqProps = props p.stats = stats return &p @@ -402,7 +326,7 @@ func (p PhysicalApply) Init(ctx sessionctx.Context, stats *property.StatsInfo, o // Init initializes PhysicalUnionScan. 
func (p PhysicalUnionScan) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int, props ...*property.PhysicalProperty) *PhysicalUnionScan { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeUnionScan, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeUnionScan, &p, offset) p.childrenReqProps = props p.stats = stats return &p @@ -410,7 +334,7 @@ func (p PhysicalUnionScan) Init(ctx sessionctx.Context, stats *property.StatsInf // Init initializes PhysicalIndexLookUpReader. func (p PhysicalIndexLookUpReader) Init(ctx sessionctx.Context, offset int) *PhysicalIndexLookUpReader { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeIndexLookUp, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeIndexLookUp, &p, offset) p.TablePlans = flattenPushDownPlan(p.tablePlan) p.IndexPlans = flattenPushDownPlan(p.indexPlan) p.schema = p.tablePlan.Schema() @@ -419,7 +343,7 @@ func (p PhysicalIndexLookUpReader) Init(ctx sessionctx.Context, offset int) *Phy // Init initializes PhysicalIndexMergeReader. func (p PhysicalIndexMergeReader) Init(ctx sessionctx.Context, offset int) *PhysicalIndexMergeReader { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeIndexMerge, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeIndexMerge, &p, offset) if p.tablePlan != nil { p.stats = p.tablePlan.statsInfo() } else { @@ -452,7 +376,7 @@ func (p PhysicalIndexMergeReader) Init(ctx sessionctx.Context, offset int) *Phys // Init initializes PhysicalTableReader. 
func (p PhysicalTableReader) Init(ctx sessionctx.Context, offset int) *PhysicalTableReader { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeTableReader, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeTableReader, &p, offset) if p.tablePlan != nil { p.TablePlans = flattenPushDownPlan(p.tablePlan) p.schema = p.tablePlan.Schema() @@ -462,7 +386,7 @@ func (p PhysicalTableReader) Init(ctx sessionctx.Context, offset int) *PhysicalT // Init initializes PhysicalIndexReader. func (p PhysicalIndexReader) Init(ctx sessionctx.Context, offset int) *PhysicalIndexReader { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeIndexReader, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeIndexReader, &p, offset) p.IndexPlans = flattenPushDownPlan(p.indexPlan) switch p.indexPlan.(type) { case *PhysicalHashAgg, *PhysicalStreamAgg: @@ -477,7 +401,7 @@ func (p PhysicalIndexReader) Init(ctx sessionctx.Context, offset int) *PhysicalI // Init initializes PhysicalIndexJoin. func (p PhysicalIndexJoin) Init(ctx sessionctx.Context, stats *property.StatsInfo, offset int, props ...*property.PhysicalProperty) *PhysicalIndexJoin { - p.basePhysicalPlan = newBasePhysicalPlan(ctx, TypeIndexJoin, &p, offset) + p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeIndexJoin, &p, offset) p.childrenReqProps = props p.stats = stats return &p @@ -486,7 +410,7 @@ func (p PhysicalIndexJoin) Init(ctx sessionctx.Context, stats *property.StatsInf // Init initializes PhysicalIndexMergeJoin. func (p PhysicalIndexMergeJoin) Init(ctx sessionctx.Context) *PhysicalIndexMergeJoin { ctx.GetSessionVars().PlanID++ - p.tp = TypeIndexMergeJoin + p.tp = plancodec.TypeIndexMergeJoin p.id = ctx.GetSessionVars().PlanID p.ctx = ctx return &p @@ -495,7 +419,7 @@ func (p PhysicalIndexMergeJoin) Init(ctx sessionctx.Context) *PhysicalIndexMerge // Init initializes PhysicalIndexHashJoin. 
func (p PhysicalIndexHashJoin) Init(ctx sessionctx.Context) *PhysicalIndexHashJoin { ctx.GetSessionVars().PlanID++ - p.tp = TypeIndexHashJoin + p.tp = plancodec.TypeIndexHashJoin p.id = ctx.GetSessionVars().PlanID p.ctx = ctx return &p @@ -503,7 +427,7 @@ func (p PhysicalIndexHashJoin) Init(ctx sessionctx.Context) *PhysicalIndexHashJo // Init initializes BatchPointGetPlan. func (p BatchPointGetPlan) Init(ctx sessionctx.Context, stats *property.StatsInfo, schema *expression.Schema, names []*types.FieldName) *BatchPointGetPlan { - p.basePlan = newBasePlan(ctx, "Batch_Point_Get", 0) + p.basePlan = newBasePlan(ctx, plancodec.TypeBatchPointGet, 0) p.schema = schema p.names = names p.stats = stats diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index fc3472a41f247..c457300d544ba 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -45,6 +45,7 @@ import ( "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/plancodec" ) const ( @@ -2597,7 +2598,7 @@ func (b *PlanBuilder) buildSemiApply(outerPlan, innerPlan LogicalPlan, condition } ap := &LogicalApply{LogicalJoin: *join} - ap.tp = TypeApply + ap.tp = plancodec.TypeApply ap.self = ap return ap, nil } diff --git a/planner/core/plan.go b/planner/core/plan.go index 531d9bae11dfb..fe8383b65ba28 100644 --- a/planner/core/plan.go +++ b/planner/core/plan.go @@ -38,6 +38,9 @@ type Plan interface { // Get the ID. ID() int + // TP get the plan type. + TP() string + // Get the ID in explain statement ExplainID() fmt.Stringer @@ -299,6 +302,11 @@ func (p *basePlan) ExplainID() fmt.Stringer { }) } +// TP implements Plan interface. 
+func (p *basePlan) TP() string { + return p.tp +} + func (p *basePlan) SelectBlockOffset() int { return p.blockOffset } diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go index 78fff77ec95e8..70012d3b4c078 100644 --- a/planner/core/point_get_plan.go +++ b/planner/core/point_get_plan.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/types/parser_driver" + "github.com/pingcap/tidb/util/plancodec" "github.com/pingcap/tipb/go-tipb" ) @@ -599,7 +600,7 @@ func tryPointGetPlan(ctx sessionctx.Context, selStmt *ast.SelectStmt) *PointGetP func newPointGetPlan(ctx sessionctx.Context, dbName string, schema *expression.Schema, tbl *model.TableInfo, names []*types.FieldName) *PointGetPlan { p := &PointGetPlan{ - basePlan: newBasePlan(ctx, "Point_Get", 0), + basePlan: newBasePlan(ctx, plancodec.TypePointGet, 0), dbName: dbName, schema: schema, TblInfo: tbl, diff --git a/planner/core/rule_partition_processor.go b/planner/core/rule_partition_processor.go index 0a871371048fe..2b8f52918c801 100644 --- a/planner/core/rule_partition_processor.go +++ b/planner/core/rule_partition_processor.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/plancodec" "github.com/pingcap/tidb/util/ranger" ) @@ -156,7 +157,7 @@ func (s *partitionProcessor) prune(ds *DataSource) (LogicalPlan, error) { // Not a deep copy. 
newDataSource := *ds - newDataSource.baseLogicalPlan = newBaseLogicalPlan(ds.SCtx(), TypeTableScan, &newDataSource, ds.blockOffset) + newDataSource.baseLogicalPlan = newBaseLogicalPlan(ds.SCtx(), plancodec.TypeTableScan, &newDataSource, ds.blockOffset) newDataSource.isPartition = true newDataSource.physicalTableID = pi.Definitions[i].ID // There are many expression nodes in the plan tree use the original datasource diff --git a/planner/core/task.go b/planner/core/task.go index 1424441d83a14..bc4b19e2c6285 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/plancodec" ) // task is a new version of `PhysicalPlanInfo`. It stores cost information for a task. @@ -942,7 +943,7 @@ func (p *basePhysicalAgg) newPartialAggregate(copToFlash bool) (partial, final P p.removeUnnecessaryFirstRow(finalAggFuncs, groupByItems) // Create physical "final" aggregation. 
- if p.tp == TypeStreamAgg { + if p.tp == plancodec.TypeStreamAgg { finalAgg := basePhysicalAgg{ AggFuncs: finalAggFuncs, GroupByItems: groupByItems, diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 66284cd26bedb..2e0a8b22102a3 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -887,6 +887,8 @@ func (s *SessionVars) SetSystemVar(name string, val string) error { atomic.StoreUint32(&ProcessGeneralLog, uint32(tidbOptPositiveInt32(val, DefTiDBGeneralLog))) case TiDBSlowLogThreshold: atomic.StoreUint64(&config.GetGlobalConfig().Log.SlowThreshold, uint64(tidbOptInt64(val, logutil.DefaultSlowThreshold))) + case TiDBRecordPlanInSlowLog: + atomic.StoreUint32(&config.GetGlobalConfig().Log.RecordPlanInSlowLog, uint32(tidbOptInt64(val, logutil.DefaultRecordPlanInSlowLog))) case TiDBDDLSlowOprThreshold: atomic.StoreUint32(&DDLSlowOprThreshold, uint32(tidbOptPositiveInt32(val, DefTiDBDDLSlowOprThreshold))) case TiDBQueryLogMaxLen: @@ -1152,6 +1154,12 @@ const ( SlowLogSucc = "Succ" // SlowLogPrevStmt is used to show the previous executed statement. SlowLogPrevStmt = "Prev_stmt" + // SlowLogPlan is used to record the query plan. + SlowLogPlan = "Plan" + // SlowLogPlanPrefix is the prefix of the plan value. + SlowLogPlanPrefix = ast.TiDBDecodePlan + "('" + // SlowLogPlanSuffix is the suffix of the plan value. + SlowLogPlanSuffix = "')" // SlowLogPrevStmtPrefix is the prefix of Prev_stmt in slow log file. SlowLogPrevStmtPrefix = SlowLogPrevStmt + SlowLogSpaceMarkStr ) @@ -1174,6 +1182,7 @@ type SlowQueryLogItems struct { Prepared bool HasMoreResults bool PrevStmt string + Plan string } // SlowLogFormat uses for formatting slow log. 
@@ -1277,6 +1286,9 @@ func (s *SessionVars) SlowLogFormat(logItems *SlowQueryLogItems) string { writeSlowLogItem(&buf, SlowLogPrepared, strconv.FormatBool(logItems.Prepared)) writeSlowLogItem(&buf, SlowLogHasMoreResults, strconv.FormatBool(logItems.HasMoreResults)) writeSlowLogItem(&buf, SlowLogSucc, strconv.FormatBool(logItems.Succ)) + if len(logItems.Plan) != 0 { + writeSlowLogItem(&buf, SlowLogPlan, logItems.Plan) + } if logItems.PrevStmt != "" { writeSlowLogItem(&buf, SlowLogPrevStmt, logItems.PrevStmt) diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 95caf8a033dc2..6d38e0d1bc970 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -704,6 +704,7 @@ var defaultSysVars = []*SysVar{ /* The following variable is defined as session scope but is actually server scope. */ {ScopeSession, TiDBGeneralLog, strconv.Itoa(DefTiDBGeneralLog)}, {ScopeSession, TiDBSlowLogThreshold, strconv.Itoa(logutil.DefaultSlowThreshold)}, + {ScopeSession, TiDBRecordPlanInSlowLog, strconv.Itoa(logutil.DefaultRecordPlanInSlowLog)}, {ScopeSession, TiDBDDLSlowOprThreshold, strconv.Itoa(DefTiDBDDLSlowOprThreshold)}, {ScopeSession, TiDBQueryLogMaxLen, strconv.Itoa(logutil.DefaultQueryLogMaxLen)}, {ScopeSession, TiDBConfig, ""}, diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index 003685888e15c..fbc84496be4cc 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -109,6 +109,9 @@ const ( // tidb_slow_log_threshold is used to set the slow log threshold in the server. TiDBSlowLogThreshold = "tidb_slow_log_threshold" + // tidb_record_plan_in_slow_log is used to log the plan of the slow query. + TiDBRecordPlanInSlowLog = "tidb_record_plan_in_slow_log" + // tidb_query_log_max_len is used to set the max length of the query in the log. 
TiDBQueryLogMaxLen = "tidb_query_log_max_len" diff --git a/sessionctx/variable/varsutil.go b/sessionctx/variable/varsutil.go index de4d0ae4fab30..93375d989fc52 100644 --- a/sessionctx/variable/varsutil.go +++ b/sessionctx/variable/varsutil.go @@ -128,6 +128,8 @@ func GetSessionOnlySysVars(s *SessionVars, key string) (string, bool, error) { return mysql.Priority2Str[mysql.PriorityEnum(atomic.LoadInt32(&ForcePriority))], true, nil case TiDBSlowLogThreshold: return strconv.FormatUint(atomic.LoadUint64(&config.GetGlobalConfig().Log.SlowThreshold), 10), true, nil + case TiDBRecordPlanInSlowLog: + return strconv.FormatUint(uint64(atomic.LoadUint32(&config.GetGlobalConfig().Log.RecordPlanInSlowLog)), 10), true, nil case TiDBDDLSlowOprThreshold: return strconv.FormatUint(uint64(atomic.LoadUint32(&DDLSlowOprThreshold)), 10), true, nil case TiDBQueryLogMaxLen: @@ -393,7 +395,7 @@ func ValidateSetSystemVar(vars *SessionVars, name string, value string) (string, TiDBBatchInsert, TiDBDisableTxnAutoRetry, TiDBEnableStreaming, TiDBEnableArrow, TiDBBatchDelete, TiDBBatchCommit, TiDBEnableCascadesPlanner, TiDBEnableWindowFunction, TiDBCheckMb4ValueInUTF8, TiDBLowResolutionTSO, TiDBEnableIndexMerge, TiDBEnableNoopFuncs, - TiDBScatterRegion, TiDBGeneralLog, TiDBConstraintCheckInPlace, TiDBEnableVectorizedExpression: + TiDBScatterRegion, TiDBGeneralLog, TiDBConstraintCheckInPlace, TiDBEnableVectorizedExpression, TiDBRecordPlanInSlowLog: fallthrough case GeneralLog, AvoidTemporalUpgrade, BigTables, CheckProxyUsers, LogBin, CoreFile, EndMakersInJSON, SQLLogBin, OfflineMode, PseudoSlaveMode, LowPriorityUpdates, diff --git a/util/logutil/log.go b/util/logutil/log.go index 1f3f84b6ed64b..2f4fac7e6786e 100644 --- a/util/logutil/log.go +++ b/util/logutil/log.go @@ -45,6 +45,8 @@ const ( DefaultSlowThreshold = 300 // DefaultQueryLogMaxLen is the default max length of the query in the log. 
DefaultQueryLogMaxLen = 4096 + // DefaultRecordPlanInSlowLog is the default value for whether enable log query plan in the slow log. + DefaultRecordPlanInSlowLog = 1 ) // EmptyFileLogConfig is an empty FileLogConfig. diff --git a/util/plancodec/codec.go b/util/plancodec/codec.go new file mode 100644 index 0000000000000..a13a8490d24fd --- /dev/null +++ b/util/plancodec/codec.go @@ -0,0 +1,297 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package plancodec + +import ( + "bytes" + "encoding/base64" + "strconv" + "strings" + "sync" + + "github.com/golang/snappy" + "github.com/pingcap/errors" +) + +const ( + // TreeBody indicates the current operator sub-tree is not finished, still + // has child operators to be attached on. + TreeBody = '│' + // TreeMiddleNode indicates this operator is not the last child of the + // current sub-tree rooted by its parent. + TreeMiddleNode = '├' + // TreeLastNode indicates this operator is the last child of the current + // sub-tree rooted by its parent. + TreeLastNode = '└' + // TreeGap is used to represent the gap between the branches of the tree. + TreeGap = ' ' + // TreeNodeIdentifier is used to replace the TreeGap once we need to attach + // a node to a sub-tree. 
+ TreeNodeIdentifier = '─' +) + +const ( + rootTaskType = "0" + copTaskType = "1" +) + +const ( + idSeparator = "_" + lineBreaker = '\n' + lineBreakerStr = "\n" + separator = '\t' + separatorStr = "\t" +) + +var decoderPool = sync.Pool{ + New: func() interface{} { + return &planDecoder{} + }, +} + +// DecodePlan use to decode the string to plan tree. +func DecodePlan(planString string) (string, error) { + if len(planString) == 0 { + return "", nil + } + pd := decoderPool.Get().(*planDecoder) + defer decoderPool.Put(pd) + pd.buf.Reset() + return pd.decode(planString) +} + +type planDecoder struct { + buf bytes.Buffer + depths []int + indents [][]rune + planInfos []*planInfo +} + +type planInfo struct { + depth int + fields []string +} + +func (pd *planDecoder) decode(planString string) (string, error) { + str, err := decompress(planString) + if err != nil { + return "", err + } + + nodes := strings.Split(str, lineBreakerStr) + if len(pd.depths) < len(nodes) { + pd.depths = make([]int, 0, len(nodes)) + pd.planInfos = make([]*planInfo, 0, len(nodes)) + pd.indents = make([][]rune, 0, len(nodes)) + } + pd.depths = pd.depths[:0] + pd.planInfos = pd.planInfos[:0] + planInfos := pd.planInfos + for _, node := range nodes { + p, err := decodePlanInfo(node) + if err != nil { + return "", err + } + if p == nil { + continue + } + planInfos = append(planInfos, p) + pd.depths = append(pd.depths, p.depth) + } + + // Calculated indentation of plans. + pd.initPlanTreeIndents() + for i := 1; i < len(pd.depths); i++ { + parentIndex := pd.findParentIndex(i) + pd.fillIndent(parentIndex, i) + } + // Align the value of plan fields. + pd.alignFields(planInfos) + + for i, p := range planInfos { + if i > 0 { + pd.buf.WriteByte(lineBreaker) + } + // This is for alignment. 
+ pd.buf.WriteByte(separator) + pd.buf.WriteString(string(pd.indents[i])) + for j := 0; j < len(p.fields); j++ { + if j > 0 { + pd.buf.WriteByte(separator) + } + pd.buf.WriteString(p.fields[j]) + } + } + return pd.buf.String(), nil +} + +func (pd *planDecoder) initPlanTreeIndents() { + pd.indents = pd.indents[:0] + for i := 0; i < len(pd.depths); i++ { + indent := make([]rune, 2*pd.depths[i]) + pd.indents = append(pd.indents, indent) + if len(indent) == 0 { + continue + } + for i := 0; i < len(indent)-2; i++ { + indent[i] = ' ' + } + indent[len(indent)-2] = TreeLastNode + indent[len(indent)-1] = TreeNodeIdentifier + } +} + +func (pd *planDecoder) findParentIndex(childIndex int) int { + for i := childIndex - 1; i > 0; i-- { + if pd.depths[i]+1 == pd.depths[childIndex] { + return i + } + } + return 0 +} +func (pd *planDecoder) fillIndent(parentIndex, childIndex int) { + depth := pd.depths[childIndex] + if depth == 0 { + return + } + idx := depth*2 - 2 + for i := childIndex - 1; i > parentIndex; i-- { + if pd.indents[i][idx] == TreeLastNode { + pd.indents[i][idx] = TreeMiddleNode + break + } + pd.indents[i][idx] = TreeBody + } +} + +func (pd *planDecoder) alignFields(planInfos []*planInfo) { + if len(planInfos) == 0 { + return + } + fieldsLen := len(planInfos[0].fields) + // Last field no need to align. 
+	fieldsLen--
+	for colIdx := 0; colIdx < fieldsLen; colIdx++ {
+		maxFieldLen := pd.getMaxFieldLength(colIdx, planInfos)
+		for rowIdx, p := range planInfos {
+			fillLen := maxFieldLen - pd.getPlanFieldLen(rowIdx, colIdx, p)
+			for i := 0; i < fillLen; i++ {
+				p.fields[colIdx] += " "
+			}
+		}
+	}
+}
+
+func (pd *planDecoder) getMaxFieldLength(idx int, planInfos []*planInfo) int {
+	maxLength := -1
+	for rowIdx, p := range planInfos {
+		l := pd.getPlanFieldLen(rowIdx, idx, p)
+		if l > maxLength {
+			maxLength = l
+		}
+	}
+	return maxLength
+}
+
+func (pd *planDecoder) getPlanFieldLen(rowIdx, colIdx int, p *planInfo) int {
+	if colIdx == 0 {
+		return len(p.fields[0]) + len(pd.indents[rowIdx])
+	}
+	return len(p.fields[colIdx])
+}
+
+func decodePlanInfo(str string) (*planInfo, error) {
+	values := strings.Split(str, separatorStr)
+	if len(values) < 2 {
+		return nil, nil
+	}
+
+	p := &planInfo{
+		fields: make([]string, 0, len(values)-1),
+	}
+	for i, v := range values {
+		switch i {
+		// depth
+		case 0:
+			depth, err := strconv.Atoi(v)
+			if err != nil {
+				return nil, errors.Errorf("decode plan: %v, depth: %v, error: %v", str, v, err)
+			}
+			p.depth = depth
+		// plan ID
+		case 1:
+			ids := strings.Split(v, idSeparator)
+			if len(ids) != 2 {
+				return nil, errors.Errorf("decode plan: %v error, invalid plan id: %v", str, v)
+			}
+			planID, err := strconv.Atoi(ids[0])
+			if err != nil {
+				return nil, errors.Errorf("decode plan: %v, plan id: %v, error: %v", str, v, err)
+			}
+			p.fields = append(p.fields, PhysicalIDToTypeString(planID)+idSeparator+ids[1])
+		// task type
+		case 2:
+			if v == rootTaskType {
+				p.fields = append(p.fields, "root")
+			} else {
+				p.fields = append(p.fields, "cop")
+			}
+		default:
+			p.fields = append(p.fields, v)
+		}
+	}
+	return p, nil
+}
+
+// EncodePlanNode is used to encode the plan to a string. 
+func EncodePlanNode(depth, pid int, planType string, isRoot bool, rowCount float64, explainInfo string, buf *bytes.Buffer) {
+	buf.WriteString(strconv.Itoa(depth))
+	buf.WriteByte(separator)
+	buf.WriteString(encodeID(planType, pid))
+	buf.WriteByte(separator)
+	if isRoot {
+		buf.WriteString(rootTaskType)
+	} else {
+		buf.WriteString(copTaskType)
+	}
+	buf.WriteByte(separator)
+	buf.WriteString(strconv.FormatFloat(rowCount, 'f', -1, 64))
+	buf.WriteByte(separator)
+	buf.WriteString(explainInfo)
+	buf.WriteByte(lineBreaker)
+}
+
+func encodeID(planType string, id int) string {
+	planID := TypeStringToPhysicalID(planType)
+	return strconv.Itoa(planID) + idSeparator + strconv.Itoa(id)
+}
+
+// Compress is used to compress the input with snappy, then encode it with base64.
+func Compress(input []byte) string {
+	compressBytes := snappy.Encode(nil, input)
+	return base64.StdEncoding.EncodeToString(compressBytes)
+}
+
+func decompress(str string) (string, error) {
+	decodeBytes, err := base64.StdEncoding.DecodeString(str)
+	if err != nil {
+		return "", err
+	}
+
+	bs, err := snappy.Decode(nil, decodeBytes)
+	if err != nil {
+		return "", err
+	}
+	return string(bs), nil
+}
diff --git a/util/plancodec/id.go b/util/plancodec/id.go
new file mode 100644
index 0000000000000..8369f7bfd0e59
--- /dev/null
+++ b/util/plancodec/id.go
@@ -0,0 +1,313 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package plancodec
+
+import "strconv"
+
+const (
+	// TypeSel is the type of Selection.
+	TypeSel = "Selection"
+	// TypeSet is the type of Set. 
+ TypeSet = "Set" + // TypeProj is the type of Projection. + TypeProj = "Projection" + // TypeAgg is the type of Aggregation. + TypeAgg = "Aggregation" + // TypeStreamAgg is the type of StreamAgg. + TypeStreamAgg = "StreamAgg" + // TypeHashAgg is the type of HashAgg. + TypeHashAgg = "HashAgg" + // TypeShow is the type of show. + TypeShow = "Show" + // TypeJoin is the type of Join. + TypeJoin = "Join" + // TypeUnion is the type of Union. + TypeUnion = "Union" + // TypeTableScan is the type of TableScan. + TypeTableScan = "TableScan" + // TypeMemTableScan is the type of TableScan. + TypeMemTableScan = "MemTableScan" + // TypeUnionScan is the type of UnionScan. + TypeUnionScan = "UnionScan" + // TypeIdxScan is the type of IndexScan. + TypeIdxScan = "IndexScan" + // TypeSort is the type of Sort. + TypeSort = "Sort" + // TypeTopN is the type of TopN. + TypeTopN = "TopN" + // TypeLimit is the type of Limit. + TypeLimit = "Limit" + // TypeHashLeftJoin is the type of left hash join. + TypeHashLeftJoin = "HashLeftJoin" + // TypeHashRightJoin is the type of right hash join. + TypeHashRightJoin = "HashRightJoin" + // TypeMergeJoin is the type of merge join. + TypeMergeJoin = "MergeJoin" + // TypeIndexJoin is the type of index look up join. + TypeIndexJoin = "IndexJoin" + // TypeIndexMergeJoin is the type of index look up merge join. + TypeIndexMergeJoin = "IndexMergeJoin" + // TypeIndexHashJoin is the type of index nested loop hash join. + TypeIndexHashJoin = "IndexHashJoin" + // TypeApply is the type of Apply. + TypeApply = "Apply" + // TypeMaxOneRow is the type of MaxOneRow. + TypeMaxOneRow = "MaxOneRow" + // TypeExists is the type of Exists. + TypeExists = "Exists" + // TypeDual is the type of TableDual. + TypeDual = "TableDual" + // TypeLock is the type of SelectLock. + TypeLock = "SelectLock" + // TypeInsert is the type of Insert + TypeInsert = "Insert" + // TypeUpdate is the type of Update. + TypeUpdate = "Update" + // TypeDelete is the type of Delete. 
+	TypeDelete = "Delete"
+	// TypeIndexLookUp is the type of IndexLookUp.
+	TypeIndexLookUp = "IndexLookUp"
+	// TypeTableReader is the type of TableReader.
+	TypeTableReader = "TableReader"
+	// TypeIndexReader is the type of IndexReader.
+	TypeIndexReader = "IndexReader"
+	// TypeWindow is the type of Window.
+	TypeWindow = "Window"
+	// TypeTableGather is the type of TableGather.
+	TypeTableGather = "TableGather"
+	// TypeIndexMerge is the type of IndexMergeReader.
+	TypeIndexMerge = "IndexMerge"
+	// TypePointGet is the type of PointGetPlan.
+	TypePointGet = "Point_Get"
+	// TypeShowDDLJobs is the type of show ddl jobs.
+	TypeShowDDLJobs = "ShowDDLJobs"
+	// TypeBatchPointGet is the type of BatchPointGetPlan.
+	TypeBatchPointGet = "Batch_Point_Get"
+)
+
+// Plan ids used by the encoded plan; must stay in sync with the type strings above (0 is reserved for unknown).
+const (
+	typeSelID int = iota + 1
+	typeSetID
+	typeProjID
+	typeAggID
+	typeStreamAggID
+	typeHashAggID
+	typeShowID
+	typeJoinID
+	typeUnionID
+	typeTableScanID
+	typeMemTableScanID
+	typeUnionScanID
+	typeIdxScanID
+	typeSortID
+	typeTopNID
+	typeLimitID
+	typeHashLeftJoinID
+	typeHashRightJoinID
+	typeMergeJoinID
+	typeIndexJoinID
+	typeIndexMergeJoinID
+	typeIndexHashJoinID
+	typeApplyID
+	typeMaxOneRowID
+	typeExistsID
+	typeDualID
+	typeLockID
+	typeInsertID
+	typeUpdateID
+	typeDeleteID
+	typeIndexLookUpID
+	typeTableReaderID
+	typeIndexReaderID
+	typeWindowID
+	typeTableGatherID
+	typeIndexMergeID
+	typePointGet
+	typeShowDDLJobs
+	typeBatchPointGet
+)
+
+// TypeStringToPhysicalID converts the plan type string to plan id.
+func TypeStringToPhysicalID(tp string) int {
+	switch tp {
+	case TypeSel:
+		return typeSelID
+	case TypeSet:
+		return typeSetID
+	case TypeProj:
+		return typeProjID
+	case TypeAgg:
+		return typeAggID
+	case TypeStreamAgg:
+		return typeStreamAggID
+	case TypeHashAgg:
+		return typeHashAggID
+	case TypeShow:
+		return typeShowID
+	case TypeJoin:
+		return typeJoinID
+	case TypeUnion:
+		return typeUnionID
+	case TypeTableScan:
+		return typeTableScanID
+	case TypeMemTableScan:
+		return typeMemTableScanID
+	case TypeUnionScan:
+		return typeUnionScanID
+	case TypeIdxScan:
+		return typeIdxScanID
+	case TypeSort:
+		return typeSortID
+	case TypeTopN:
+		return typeTopNID
+	case TypeLimit:
+		return typeLimitID
+	case TypeHashLeftJoin:
+		return typeHashLeftJoinID
+	case TypeHashRightJoin:
+		return typeHashRightJoinID
+	case TypeMergeJoin:
+		return typeMergeJoinID
+	case TypeIndexJoin:
+		return typeIndexJoinID
+	case TypeIndexMergeJoin:
+		return typeIndexMergeJoinID
+	case TypeIndexHashJoin:
+		return typeIndexHashJoinID
+	case TypeApply:
+		return typeApplyID
+	case TypeMaxOneRow:
+		return typeMaxOneRowID
+	case TypeExists:
+		return typeExistsID
+	case TypeDual:
+		return typeDualID
+	case TypeLock:
+		return typeLockID
+	case TypeInsert:
+		return typeInsertID
+	case TypeUpdate:
+		return typeUpdateID
+	case TypeDelete:
+		return typeDeleteID
+	case TypeIndexLookUp:
+		return typeIndexLookUpID
+	case TypeTableReader:
+		return typeTableReaderID
+	case TypeIndexReader:
+		return typeIndexReaderID
+	case TypeWindow:
+		return typeWindowID
+	case TypeTableGather:
+		return typeTableGatherID
+	case TypeIndexMerge:
+		return typeIndexMergeID
+	case TypePointGet:
+		return typePointGet
+	case TypeShowDDLJobs:
+		return typeShowDDLJobs
+	case TypeBatchPointGet:
+		return typeBatchPointGet
+	}
+	// Should never reach here: 0 is reserved to mark an unknown plan type string.
+	return 0
+}
+
+// PhysicalIDToTypeString converts the plan id to plan type string.
+func PhysicalIDToTypeString(id int) string {
+	switch id {
+	case typeSelID:
+		return TypeSel
+	case typeSetID:
+		return TypeSet
+	case typeProjID:
+		return TypeProj
+	case typeAggID:
+		return TypeAgg
+	case typeStreamAggID:
+		return TypeStreamAgg
+	case typeHashAggID:
+		return TypeHashAgg
+	case typeShowID:
+		return TypeShow
+	case typeJoinID:
+		return TypeJoin
+	case typeUnionID:
+		return TypeUnion
+	case typeTableScanID:
+		return TypeTableScan
+	case typeMemTableScanID:
+		return TypeMemTableScan
+	case typeUnionScanID:
+		return TypeUnionScan
+	case typeIdxScanID:
+		return TypeIdxScan
+	case typeSortID:
+		return TypeSort
+	case typeTopNID:
+		return TypeTopN
+	case typeLimitID:
+		return TypeLimit
+	case typeHashLeftJoinID:
+		return TypeHashLeftJoin
+	case typeHashRightJoinID:
+		return TypeHashRightJoin
+	case typeMergeJoinID:
+		return TypeMergeJoin
+	case typeIndexJoinID:
+		return TypeIndexJoin
+	case typeIndexMergeJoinID:
+		return TypeIndexMergeJoin
+	case typeIndexHashJoinID:
+		return TypeIndexHashJoin
+	case typeApplyID:
+		return TypeApply
+	case typeMaxOneRowID:
+		return TypeMaxOneRow
+	case typeExistsID:
+		return TypeExists
+	case typeDualID:
+		return TypeDual
+	case typeLockID:
+		return TypeLock
+	case typeInsertID:
+		return TypeInsert
+	case typeUpdateID:
+		return TypeUpdate
+	case typeDeleteID:
+		return TypeDelete
+	case typeIndexLookUpID:
+		return TypeIndexLookUp
+	case typeTableReaderID:
+		return TypeTableReader
+	case typeIndexReaderID:
+		return TypeIndexReader
+	case typeWindowID:
+		return TypeWindow
+	case typeTableGatherID:
+		return TypeTableGather
+	case typeIndexMergeID:
+		return TypeIndexMerge
+	case typePointGet:
+		return TypePointGet
+	case typeShowDDLJobs:
+		return TypeShowDDLJobs
+	case typeBatchPointGet:
+		return TypeBatchPointGet
+	}
+
+	// Should never reach here: unknown ids decode to a sentinel string instead of failing.
+	return "UnknownPlanID" + strconv.Itoa(id)
+}