Merge branch 'master' into add_disk_usage_tb
crazycs520 authored Jun 1, 2020
2 parents a629ac1 + 8a164d2 commit d6f40c3
Showing 232 changed files with 4,855 additions and 1,637 deletions.
6 changes: 4 additions & 2 deletions .github/ISSUE_TEMPLATE/bug-report.md
@@ -14,10 +14,12 @@ Please answer these questions before submitting your issue. Thanks!

### 2. What did you expect to see? (Required)

### 3. Affected version (Required)
### 3. What did you see instead (Required)

### 4. Affected version (Required)

<!-- v3.0.0, v4.0.0, etc -->

### 4. Root Cause Analysis
### 5. Root Cause Analysis

<!-- should be filled by the investigator before it's closed -->
75 changes: 62 additions & 13 deletions bindinfo/bind_test.go
@@ -34,7 +34,6 @@ import (
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/store/mockstore"
"github.com/pingcap/tidb/store/mockstore/cluster"
"github.com/pingcap/tidb/store/mockstore/mocktikv"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/stmtsummary"
"github.com/pingcap/tidb/util/testkit"
@@ -67,15 +66,11 @@ func (s *testSuite) SetUpSuite(c *C) {
flag.Lookup("mockTikv")
useMockTikv := *mockTikv
if useMockTikv {
cluster := mocktikv.NewCluster()
mocktikv.BootstrapWithSingleStore(cluster)
s.cluster = cluster

mvccStore := mocktikv.MustNewMVCCStore()
cluster.SetMvccStore(mvccStore)
store, err := mockstore.NewMockTikvStore(
mockstore.WithCluster(cluster),
mockstore.WithMVCCStore(mvccStore),
store, err := mockstore.NewMockStore(
mockstore.WithClusterInspector(func(c cluster.Cluster) {
mockstore.BootstrapWithSingleStore(c)
s.cluster = c
}),
)
c.Assert(err, IsNil)
s.store = store
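
In isolation, the new test bootstrap pattern looks roughly like the sketch below: the mock cluster is prepared inside WithClusterInspector instead of being assembled up front with the removed mocktikv helpers. This is a minimal sketch based on the hunk above; the package and helper name are illustrative and not part of this diff.

package bindtest

import (
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/store/mockstore"
	"github.com/pingcap/tidb/store/mockstore/cluster"
)

// newMockStore mirrors the updated SetUpSuite: the cluster is bootstrapped
// inside the inspector callback rather than via mocktikv.NewCluster.
func newMockStore() (kv.Storage, cluster.Cluster, error) {
	var clu cluster.Cluster
	store, err := mockstore.NewMockStore(
		mockstore.WithClusterInspector(func(c cluster.Cluster) {
			mockstore.BootstrapWithSingleStore(c)
			clu = c
		}),
	)
	if err != nil {
		return nil, nil, err
	}
	return store, clu, nil
}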
@@ -123,8 +118,9 @@ func (s *testSuite) TestBindParse(c *C) {
status := "using"
charset := "utf8mb4"
collation := "utf8mb4_bin"
sql := fmt.Sprintf(`INSERT INTO mysql.bind_info(original_sql,bind_sql,default_db,status,create_time,update_time,charset,collation) VALUES ('%s', '%s', '%s', '%s', NOW(), NOW(),'%s', '%s')`,
originSQL, bindSQL, defaultDb, status, charset, collation)
source := bindinfo.Manual
sql := fmt.Sprintf(`INSERT INTO mysql.bind_info(original_sql,bind_sql,default_db,status,create_time,update_time,charset,collation,source) VALUES ('%s', '%s', '%s', '%s', NOW(), NOW(),'%s', '%s', '%s')`,
originSQL, bindSQL, defaultDb, status, charset, collation, source)
tk.MustExec(sql)
bindHandle := bindinfo.NewBindHandle(tk.Se)
err := bindHandle.Update(true)
@@ -863,7 +859,8 @@ func (s *testSuite) TestEvolveInvalidBindings(c *C) {
tk.MustExec("create table t(a int, b int, index idx_a(a))")
tk.MustExec("create global binding for select * from t where a > 10 using select /*+ USE_INDEX(t) */ * from t where a > 10")
// Manufacture a rejected binding by hacking mysql.bind_info.
tk.MustExec("insert into mysql.bind_info values('select * from t where a > ?', 'select /*+ USE_INDEX(t,idx_a) */ * from t where a > 10', 'test', 'rejected', '2000-01-01 09:00:00', '2000-01-01 09:00:00', '', '')")
tk.MustExec("insert into mysql.bind_info values('select * from t where a > ?', 'select /*+ USE_INDEX(t,idx_a) */ * from t where a > 10', 'test', 'rejected', '2000-01-01 09:00:00', '2000-01-01 09:00:00', '', '','" +
bindinfo.Manual + "')")
tk.MustQuery("select bind_sql, status from mysql.bind_info").Sort().Check(testkit.Rows(
"select /*+ USE_INDEX(t) */ * from t where a > 10 using",
"select /*+ USE_INDEX(t,idx_a) */ * from t where a > 10 rejected",
@@ -1170,3 +1167,55 @@ func (s *testSuite) TestInvisibleIndex(c *C) {

tk.MustExec("drop binding for select * from t")
}

func (s *testSuite) TestbindingSource(c *C) {
tk := testkit.NewTestKit(c, s.store)
s.cleanBindingEnv(tk)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, index idx_a(a))")

// Test Source for SQL created sql
tk.MustExec("create global binding for select * from t where a > 10 using select * from t ignore index(idx_a) where a > 10")
bindHandle := s.domain.BindHandle()
sql, hash := parser.NormalizeDigest("select * from t where a > ?")
bindData := bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select * from t where a > ?")
c.Assert(len(bindData.Bindings), Equals, 1)
bind := bindData.Bindings[0]
c.Assert(bind.Source, Equals, bindinfo.Manual)

// Test Source for evolved sql
tk.MustExec("set @@tidb_evolve_plan_baselines=1")
tk.MustQuery("select * from t where a > 10")
bindHandle.SaveEvolveTasksToStore()
sql, hash = parser.NormalizeDigest("select * from t where a > ?")
bindData = bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select * from t where a > ?")
c.Assert(len(bindData.Bindings), Equals, 2)
bind = bindData.Bindings[1]
c.Assert(bind.Source, Equals, bindinfo.Evolve)
tk.MustExec("set @@tidb_evolve_plan_baselines=0")

// Test Source for captured sqls
stmtsummary.StmtSummaryByDigestMap.Clear()
tk.MustExec("set @@tidb_capture_plan_baselines = on")
defer func() {
tk.MustExec("set @@tidb_capture_plan_baselines = off")
}()
tk.MustExec("use test")
c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil), IsTrue)
tk.MustExec("select * from t ignore index(idx_a) where a < 10")
tk.MustExec("select * from t ignore index(idx_a) where a < 10")
tk.MustExec("admin capture bindings")
bindHandle.CaptureBaselines()
sql, hash = parser.NormalizeDigest("select * from t where a < ?")
bindData = bindHandle.GetBindRecord(hash, sql, "test")
c.Check(bindData, NotNil)
c.Check(bindData.OriginalSQL, Equals, "select * from t where a < ?")
c.Assert(len(bindData.Bindings), Equals, 1)
bind = bindData.Bindings[0]
c.Assert(bind.Source, Equals, bindinfo.Capture)
}
7 changes: 7 additions & 0 deletions bindinfo/cache.go
@@ -36,6 +36,12 @@ const (
// Rejected means that the bind has been rejected after verify process.
// We can retry it after certain time has passed.
Rejected = "rejected"
// Manual indicates the binding is created by SQL like "create binding for ...".
Manual = "manual"
// Capture indicates the binding is captured by TiDB automatically.
Capture = "capture"
// Evolve indicates the binding is evolved by TiDB from old bindings.
Evolve = "evolve"
)

// Binding stores the basic bind hint info.
@@ -47,6 +53,7 @@ type Binding struct {
Status string
CreateTime types.Time
UpdateTime types.Time
Source string
Charset string
Collation string
// Hint is the parsed hints, it is used to bind hints to stmt node.
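
Taken together with the test above, the new Source field lets each binding record how it was created. Below is a minimal sketch of constructing a manually created record carrying the new field, assuming the Binding and BindRecord fields shown in this commit's hunks; the helper name and literal values are illustrative.

package example

import "github.com/pingcap/tidb/bindinfo"

// newManualRecord sketches a binding tagged with the new Source field.
// Field names follow the hunks above; values are placeholders.
func newManualRecord() *bindinfo.BindRecord {
	binding := bindinfo.Binding{
		BindSQL:   "select /*+ USE_INDEX(t, idx_a) */ * from t where a > 10",
		Status:    bindinfo.Using,
		Charset:   "utf8mb4",
		Collation: "utf8mb4_bin",
		Source:    bindinfo.Manual, // added in this commit; also Capture, Evolve
	}
	return &bindinfo.BindRecord{
		OriginalSQL: "select * from t where a > ?",
		Db:          "test",
		Bindings:    []bindinfo.Binding{binding},
	}
}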
7 changes: 5 additions & 2 deletions bindinfo/handle.go
@@ -125,7 +125,7 @@ func (h *BindHandle) Update(fullLoad bool) (err error) {
lastUpdateTime := h.bindInfo.lastUpdateTime
h.bindInfo.Unlock()

sql := "select original_sql, bind_sql, default_db, status, create_time, update_time, charset, collation from mysql.bind_info"
sql := "select original_sql, bind_sql, default_db, status, create_time, update_time, charset, collation, source from mysql.bind_info"
if !fullLoad {
sql += " where update_time > \"" + lastUpdateTime.String() + "\""
}
@@ -462,6 +462,7 @@ func (h *BindHandle) newBindRecord(row chunk.Row) (string, *BindRecord, error) {
UpdateTime: row.GetTime(5),
Charset: row.GetString(6),
Collation: row.GetString(7),
Source: row.GetString(8),
}
bindRecord := &BindRecord{
OriginalSQL: row.GetString(0),
@@ -567,7 +568,7 @@ func (h *BindHandle) deleteBindInfoSQL(normdOrigSQL, db, bindSQL string) string
}

func (h *BindHandle) insertBindInfoSQL(orignalSQL string, db string, info Binding) string {
return fmt.Sprintf(`INSERT INTO mysql.bind_info VALUES (%s, %s, %s, %s, %s, %s, %s, %s)`,
return fmt.Sprintf(`INSERT INTO mysql.bind_info VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)`,
expression.Quote(orignalSQL),
expression.Quote(info.BindSQL),
expression.Quote(db),
@@ -576,6 +577,7 @@ func (h *BindHandle) insertBindInfoSQL(orignalSQL string, db string, info Bindin
expression.Quote(info.UpdateTime.String()),
expression.Quote(info.Charset),
expression.Quote(info.Collation),
expression.Quote(info.Source),
)
}

@@ -628,6 +630,7 @@ func (h *BindHandle) CaptureBaselines() {
Status: Using,
Charset: charset,
Collation: collation,
Source: Capture,
}
// We don't need to pass the `sctx` because the BindSQL has been validated already.
err = h.AddBindRecord(nil, &BindRecord{OriginalSQL: normalizedSQL, Db: dbName, Bindings: []Binding{binding}})
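
With the extra source column, a mysql.bind_info row now carries nine values, and the loader reads the new column at index 8 (row.GetString(8) above). The sketch below shows the resulting INSERT layout, following the column order used by insertBindInfoSQL and the statement in TestBindParse; %q merely stands in for expression.Quote and NOW() for the stored timestamps, so this is illustrative only.

package example

import "fmt"

// insertBindInfoStmt sketches the nine-column mysql.bind_info layout after
// this commit: original_sql, bind_sql, default_db, status, create_time,
// update_time, charset, collation, source.
func insertBindInfoStmt(originalSQL, bindSQL, db, status, charset, collation, source string) string {
	return fmt.Sprintf(
		"INSERT INTO mysql.bind_info VALUES (%q, %q, %q, %q, NOW(), NOW(), %q, %q, %q)",
		originalSQL, bindSQL, db, status, charset, collation, source,
	)
}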
18 changes: 9 additions & 9 deletions cmd/explaintest/r/explain_easy.result
@@ -164,10 +164,10 @@ id estRows task access object operator info
Union_17 26000.00 root
├─HashAgg_21 16000.00 root group by:Column#10, funcs:firstrow(Column#12)->Column#10
│ └─Union_22 16000.00 root
│ ├─StreamAgg_27 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#12
│ ├─StreamAgg_27 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#12, funcs:firstrow(test.t2.c1)->Column#10
│ │ └─IndexReader_40 10000.00 root index:IndexFullScan_39
│ │ └─IndexFullScan_39 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
│ └─StreamAgg_45 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#12
│ └─StreamAgg_45 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#12, funcs:firstrow(test.t2.c1)->Column#10
│ └─IndexReader_58 10000.00 root index:IndexFullScan_57
│ └─IndexFullScan_57 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
└─IndexReader_63 10000.00 root index:IndexFullScan_62
@@ -176,13 +176,13 @@ explain select c1 from t2 union all select c1 from t2 union select c1 from t2;
id estRows task access object operator info
HashAgg_18 24000.00 root group by:Column#10, funcs:firstrow(Column#11)->Column#10
└─Union_19 24000.00 root
├─StreamAgg_24 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#11
├─StreamAgg_24 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#11, funcs:firstrow(test.t2.c1)->Column#10
│ └─IndexReader_37 10000.00 root index:IndexFullScan_36
│ └─IndexFullScan_36 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
├─StreamAgg_42 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#11
├─StreamAgg_42 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#11, funcs:firstrow(test.t2.c1)->Column#10
│ └─IndexReader_55 10000.00 root index:IndexFullScan_54
│ └─IndexFullScan_54 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
└─StreamAgg_60 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#11
└─StreamAgg_60 8000.00 root group by:test.t2.c1, funcs:firstrow(test.t2.c1)->Column#11, funcs:firstrow(test.t2.c1)->Column#10
└─IndexReader_73 10000.00 root index:IndexFullScan_72
└─IndexFullScan_72 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:true, stats:pseudo
select * from information_schema.tidb_indexes where table_name='t4';
@@ -669,17 +669,17 @@ id estRows task access object operator info
Sort_13 2.00 root Column#3
└─HashAgg_17 2.00 root group by:Column#3, funcs:firstrow(Column#6)->Column#3
└─Union_18 2.00 root
├─HashAgg_19 1.00 root group by:1, funcs:firstrow(0)->Column#6
├─HashAgg_19 1.00 root group by:1, funcs:firstrow(0)->Column#6, funcs:firstrow(0)->Column#3
│ └─TableDual_22 1.00 root rows:1
└─HashAgg_25 1.00 root group by:1, funcs:firstrow(1)->Column#6
└─HashAgg_25 1.00 root group by:1, funcs:firstrow(1)->Column#6, funcs:firstrow(1)->Column#3
└─TableDual_28 1.00 root rows:1
explain SELECT 0 AS a FROM dual UNION (SELECT 1 AS a FROM dual ORDER BY a);
id estRows task access object operator info
HashAgg_15 2.00 root group by:Column#3, funcs:firstrow(Column#6)->Column#3
└─Union_16 2.00 root
├─HashAgg_17 1.00 root group by:1, funcs:firstrow(0)->Column#6
├─HashAgg_17 1.00 root group by:1, funcs:firstrow(0)->Column#6, funcs:firstrow(0)->Column#3
│ └─TableDual_20 1.00 root rows:1
└─StreamAgg_27 1.00 root group by:Column#1, funcs:firstrow(Column#1)->Column#6
└─StreamAgg_27 1.00 root group by:Column#1, funcs:firstrow(Column#1)->Column#6, funcs:firstrow(Column#1)->Column#3
└─Projection_32 1.00 root 1->Column#1
└─TableDual_33 1.00 root rows:1
create table t (i int key, j int, unique key (i, j));
4 changes: 2 additions & 2 deletions cmd/explaintest/r/generated_columns.result
@@ -104,7 +104,7 @@ PARTITION p5 VALUES LESS THAN (6),
PARTITION max VALUES LESS THAN MAXVALUE);
EXPLAIN SELECT * FROM sgc3 WHERE a <= 1;
id estRows task access object operator info
Union_8 6646.67 root
PartitionUnion_8 6646.67 root
├─TableReader_11 3323.33 root data:Selection_10
│ └─Selection_10 3323.33 cop[tikv] le(test.sgc3.a, 1)
│ └─TableFullScan_9 10000.00 cop[tikv] table:sgc3, partition:p0 keep order:false, stats:pseudo
@@ -113,7 +113,7 @@ Union_8 6646.67 root
└─TableFullScan_12 10000.00 cop[tikv] table:sgc3, partition:p1 keep order:false, stats:pseudo
EXPLAIN SELECT * FROM sgc3 WHERE a < 7;
id estRows task access object operator info
Union_13 23263.33 root
PartitionUnion_13 23263.33 root
├─TableReader_16 3323.33 root data:Selection_15
│ └─Selection_15 3323.33 cop[tikv] lt(test.sgc3.a, 7)
│ └─TableFullScan_14 10000.00 cop[tikv] table:sgc3, partition:p0 keep order:false, stats:pseudo