diff --git a/.golangci.yml b/.golangci.yml index d262ed0e0457b..60670adf3c311 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,5 @@ run: - timeout: 7m + timeout: 10m linters: disable-all: true enable: diff --git a/bindinfo/bind_serial_test.go b/bindinfo/bind_serial_test.go index 7cf49c087278f..53b674a8ec715 100644 --- a/bindinfo/bind_serial_test.go +++ b/bindinfo/bind_serial_test.go @@ -16,7 +16,9 @@ package bindinfo_test import ( "context" + "crypto/tls" "fmt" + "strconv" "testing" "github.com/pingcap/tidb/bindinfo" @@ -26,10 +28,320 @@ import ( "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/terror" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/session/txninfo" "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/util" "github.com/stretchr/testify/require" ) +// mockSessionManager is a mocked session manager which is used for test. +type mockSessionManager1 struct { + PS []*util.ProcessInfo +} + +func (msm *mockSessionManager1) ShowTxnList() []*txninfo.TxnInfo { + return nil +} + +// ShowProcessList implements the SessionManager.ShowProcessList interface. +func (msm *mockSessionManager1) ShowProcessList() map[uint64]*util.ProcessInfo { + ret := make(map[uint64]*util.ProcessInfo) + for _, item := range msm.PS { + ret[item.ID] = item + } + return ret +} + +func (msm *mockSessionManager1) GetProcessInfo(id uint64) (*util.ProcessInfo, bool) { + for _, item := range msm.PS { + if item.ID == id { + return item, true + } + } + return &util.ProcessInfo{}, false +} + +// Kill implements the SessionManager.Kill interface. +func (msm *mockSessionManager1) Kill(cid uint64, query bool) { +} + +func (msm *mockSessionManager1) KillAllConnections() { +} + +func (msm *mockSessionManager1) UpdateTLSConfig(cfg *tls.Config) { +} + +func (msm *mockSessionManager1) ServerID() uint64 { + return 1 +} + +func TestPrepareCacheWithBinding(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1, t2") + tk.MustExec("create table t1(a int, b int, c int, key idx_b(b), key idx_c(c))") + tk.MustExec("create table t2(a int, b int, c int, key idx_b(b), key idx_c(c))") + + // TestDMLSQLBind + tk.MustExec("prepare stmt1 from 'delete from t1 where b = 1 and c > 1';") + tk.MustExec("execute stmt1;") + require.Equal(t, "t1:idx_b", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res := tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_b(b)"), res.Rows()) + tk.MustExec("execute stmt1;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for delete from t1 where b = 1 and c > 1 using delete /*+ use_index(t1,idx_c) */ from t1 where b = 1 and c > 1") + + tk.MustExec("execute stmt1;") + require.Equal(t, "t1:idx_c", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for 
connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_c(c)"), res.Rows()) + + tk.MustExec("prepare stmt2 from 'delete t1, t2 from t1 inner join t2 on t1.b = t2.b';") + tk.MustExec("execute stmt2;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "HashJoin"), res.Rows()) + tk.MustExec("execute stmt2;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for delete t1, t2 from t1 inner join t2 on t1.b = t2.b using delete /*+ inl_join(t1) */ t1, t2 from t1 inner join t2 on t1.b = t2.b") + + tk.MustExec("execute stmt2;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "IndexJoin"), res.Rows()) + + tk.MustExec("prepare stmt3 from 'update t1 set a = 1 where b = 1 and c > 1';") + tk.MustExec("execute stmt3;") + require.Equal(t, "t1:idx_b", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_b(b)"), res.Rows()) + tk.MustExec("execute stmt3;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for update t1 set a = 1 where b = 1 and c > 1 using update /*+ use_index(t1,idx_c) */ t1 set a = 1 where b = 1 and c > 1") + + tk.MustExec("execute stmt3;") + require.Equal(t, "t1:idx_c", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_c(c)"), res.Rows()) + + tk.MustExec("prepare stmt4 from 'update t1, t2 set t1.a = 1 where t1.b = t2.b';") + tk.MustExec("execute stmt4;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "HashJoin"), res.Rows()) + tk.MustExec("execute stmt4;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for update t1, t2 set t1.a = 1 where t1.b = t2.b using update /*+ inl_join(t1) */ t1, t2 set t1.a = 1 where t1.b = t2.b") + + tk.MustExec("execute stmt4;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "IndexJoin"), res.Rows()) + + tk.MustExec("prepare stmt5 from 'insert into t1 select * from t2 where t2.b = 2 and t2.c > 2';") + tk.MustExec("execute stmt5;") + require.Equal(t, "t2:idx_b", 
tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_b(b)"), res.Rows()) + tk.MustExec("execute stmt5;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for insert into t1 select * from t2 where t2.b = 1 and t2.c > 1 using insert /*+ use_index(t2,idx_c) */ into t1 select * from t2 where t2.b = 1 and t2.c > 1") + + tk.MustExec("execute stmt5;") + require.Equal(t, "t2:idx_b", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_b(b)"), res.Rows()) + + tk.MustExec("drop global binding for insert into t1 select * from t2 where t2.b = 1 and t2.c > 1") + tk.MustExec("create global binding for insert into t1 select * from t2 where t2.b = 1 and t2.c > 1 using insert into t1 select /*+ use_index(t2,idx_c) */ * from t2 where t2.b = 1 and t2.c > 1") + + tk.MustExec("execute stmt5;") + require.Equal(t, "t2:idx_c", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_c(c)"), res.Rows()) + + tk.MustExec("prepare stmt6 from 'replace into t1 select * from t2 where t2.b = 2 and t2.c > 2';") + tk.MustExec("execute stmt6;") + require.Equal(t, "t2:idx_b", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_b(b)"), res.Rows()) + tk.MustExec("execute stmt6;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for replace into t1 select * from t2 where t2.b = 1 and t2.c > 1 using replace into t1 select /*+ use_index(t2,idx_c) */ * from t2 where t2.b = 1 and t2.c > 1") + + tk.MustExec("execute stmt6;") + require.Equal(t, "t2:idx_c", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_c(c)"), res.Rows()) + + // TestExplain + tk.MustExec("drop table if exists t1") + tk.MustExec("drop table if exists t2") + tk.MustExec("create table t1(id int)") + tk.MustExec("create table t2(id int)") + + tk.MustExec("prepare stmt1 from 'SELECT * from t1,t2 where t1.id = t2.id';") + tk.MustExec("execute stmt1;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + 
strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "HashJoin")) + tk.MustExec("execute stmt1;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("prepare stmt2 from 'SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1,t2 where t1.id = t2.id';") + tk.MustExec("execute stmt2;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "MergeJoin")) + tk.MustExec("execute stmt2;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for SELECT * from t1,t2 where t1.id = t2.id using SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1,t2 where t1.id = t2.id") + + tk.MustExec("execute stmt1;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "MergeJoin")) + + tk.MustExec("drop global binding for SELECT * from t1,t2 where t1.id = t2.id") + + tk.MustExec("create index index_id on t1(id)") + tk.MustExec("prepare stmt1 from 'SELECT * from t1 use index(index_id)';") + tk.MustExec("execute stmt1;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "IndexReader")) + tk.MustExec("execute stmt1;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for SELECT * from t1 using SELECT * from t1 ignore index(index_id)") + tk.MustExec("execute stmt1;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.False(t, tk.HasPlan4ExplainFor(res, "IndexReader")) + tk.MustExec("execute stmt1;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + // Add test for SetOprStmt + tk.MustExec("prepare stmt1 from 'SELECT * from t1 union SELECT * from t1';") + tk.MustExec("execute stmt1;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.False(t, tk.HasPlan4ExplainFor(res, "IndexReader")) + tk.MustExec("execute stmt1;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("prepare stmt2 from 'SELECT * from t1 use index(index_id) union SELECT * from t1';") + tk.MustExec("execute stmt2;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "IndexReader")) + tk.MustExec("execute stmt2;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for SELECT * from t1 union SELECT * from t1 using SELECT * from t1 
use index(index_id) union SELECT * from t1") + + tk.MustExec("execute stmt1;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "IndexReader")) + + tk.MustExec("drop global binding for SELECT * from t1 union SELECT * from t1") + + // TestBindingSymbolList + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, b int, INDEX ia (a), INDEX ib (b));") + tk.MustExec("insert into t value(1, 1);") + tk.MustExec("prepare stmt1 from 'select a, b from t where a = 3 limit 1, 100';") + tk.MustExec("execute stmt1;") + require.Equal(t, "t:ia", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "ia(a)"), res.Rows()) + tk.MustExec("execute stmt1;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec(`create global binding for select a, b from t where a = 1 limit 0, 1 using select a, b from t use index (ib) where a = 1 limit 0, 1`) + + // after binding + tk.MustExec("execute stmt1;") + require.Equal(t, "t:ib", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "ib(b)"), res.Rows()) +} + func TestExplain(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() diff --git a/executor/admin_serial_test.go b/executor/admin_serial_test.go deleted file mode 100644 index c9a60228ac7f8..0000000000000 --- a/executor/admin_serial_test.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package executor_test - -import ( - "context" - "testing" - - "github.com/pingcap/tidb/executor" - "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/table/tables" - "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/types" - "github.com/pingcap/tidb/util/mock" - "github.com/stretchr/testify/require" -) - -func TestAdminCheckTableFailed(t *testing.T) { - store, domain, clean := testkit.CreateMockStoreAndDomain(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists admin_test") - tk.MustExec("create table admin_test (c1 int, c2 int, c3 varchar(255) default '1', primary key(c1), key(c3), unique key(c2), key(c2, c3))") - tk.MustExec("insert admin_test (c1, c2, c3) values (-10, -20, 'y'), (-1, -10, 'z'), (1, 11, 'a'), (2, 12, 'b'), (5, 15, 'c'), (10, 20, 'd'), (20, 30, 'e')") - - // Make some corrupted index. Build the index information. - ctx := mock.NewContext() - ctx.Store = store - is := domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") - tbl, err := is.TableByName(dbName, tblName) - require.NoError(t, err) - tblInfo := tbl.Meta() - idxInfo := tblInfo.Indices[1] - indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo) - sc := ctx.GetSessionVars().StmtCtx - tk.Session().GetSessionVars().IndexLookupSize = 3 - tk.Session().GetSessionVars().MaxChunkSize = 3 - - // Reduce one row of index. - // Table count > index count. - // Index c2 is missing 11. - txn, err := store.Begin() - require.NoError(t, err) - err = indexOpr.Delete(sc, txn, types.MakeDatums(-10), kv.IntHandle(-1)) - require.NoError(t, err) - err = txn.Commit(context.Background()) - require.NoError(t, err) - err = tk.ExecToErr("admin check table admin_test") - require.Error(t, err) - require.EqualError(t, err, "[executor:8003]admin_test err:[admin:8223]index: != record:&admin.RecordData{Handle:-1, Values:[]types.Datum{types.Datum{k:0x1, decimal:0x0, length:0x0, i:-10, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}}}") - require.True(t, executor.ErrAdminCheckTable.Equal(err)) - tk.MustExec("set @@tidb_redact_log=1;") - err = tk.ExecToErr("admin check table admin_test") - require.Error(t, err) - require.EqualError(t, err, "[executor:8003]admin_test err:[admin:8223]index:\"?\" != record:\"?\"") - tk.MustExec("set @@tidb_redact_log=0;") - r := tk.MustQuery("admin recover index admin_test c2") - r.Check(testkit.Rows("1 7")) - tk.MustExec("admin check table admin_test") - - // Add one row of index. - // Table count < index count. - // Index c2 has one more values than table data: 0, and the handle 0 hasn't correlative record. - txn, err = store.Begin() - require.NoError(t, err) - _, err = indexOpr.Create(ctx, txn, types.MakeDatums(0), kv.IntHandle(0), nil) - require.NoError(t, err) - err = txn.Commit(context.Background()) - require.NoError(t, err) - err = tk.ExecToErr("admin check table admin_test") - require.Error(t, err) - require.EqualError(t, err, "[executor:8133]handle 0, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:0, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:") - tk.MustExec("set @@tidb_redact_log=1;") - err = tk.ExecToErr("admin check table admin_test") - require.Error(t, err) - require.EqualError(t, err, "[executor:8133]handle \"?\", index:\"?\" != record:\"?\"") - tk.MustExec("set @@tidb_redact_log=0;") - - // Add one row of index. - // Table count < index count. 
- // Index c2 has two more values than table data: 10, 13, and these handles have correlative record. - txn, err = store.Begin() - require.NoError(t, err) - err = indexOpr.Delete(sc, txn, types.MakeDatums(0), kv.IntHandle(0)) - require.NoError(t, err) - // Make sure the index value "19" is smaller "21". Then we scan to "19" before "21". - _, err = indexOpr.Create(ctx, txn, types.MakeDatums(19), kv.IntHandle(10), nil) - require.NoError(t, err) - _, err = indexOpr.Create(ctx, txn, types.MakeDatums(13), kv.IntHandle(2), nil) - require.NoError(t, err) - err = txn.Commit(context.Background()) - require.NoError(t, err) - err = tk.ExecToErr("admin check table admin_test") - require.Error(t, err) - require.EqualError(t, err, "[executor:8134]col c2, handle 2, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:13, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:12, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:") - tk.MustExec("set @@tidb_redact_log=1;") - err = tk.ExecToErr("admin check table admin_test") - require.Error(t, err) - require.EqualError(t, err, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"") - tk.MustExec("set @@tidb_redact_log=0;") - - // Table count = index count. - // Two indices have the same handle. - txn, err = store.Begin() - require.NoError(t, err) - err = indexOpr.Delete(sc, txn, types.MakeDatums(13), kv.IntHandle(2)) - require.NoError(t, err) - err = indexOpr.Delete(sc, txn, types.MakeDatums(12), kv.IntHandle(2)) - require.NoError(t, err) - err = txn.Commit(context.Background()) - require.NoError(t, err) - err = tk.ExecToErr("admin check table admin_test") - require.Error(t, err) - require.EqualError(t, err, "[executor:8134]col c2, handle 10, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:19, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:20, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:") - tk.MustExec("set @@tidb_redact_log=1;") - err = tk.ExecToErr("admin check table admin_test") - require.Error(t, err) - require.EqualError(t, err, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"") - tk.MustExec("set @@tidb_redact_log=0;") - - // Table count = index count. - // Index c2 has one line of data is 19, the corresponding table data is 20. - txn, err = store.Begin() - require.NoError(t, err) - _, err = indexOpr.Create(ctx, txn, types.MakeDatums(12), kv.IntHandle(2), nil) - require.NoError(t, err) - err = indexOpr.Delete(sc, txn, types.MakeDatums(20), kv.IntHandle(10)) - require.NoError(t, err) - err = txn.Commit(context.Background()) - require.NoError(t, err) - err = tk.ExecToErr("admin check table admin_test") - require.Error(t, err) - require.EqualError(t, err, "[executor:8134]col c2, handle 10, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:19, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:20, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:") - tk.MustExec("set @@tidb_redact_log=1;") - err = tk.ExecToErr("admin check table admin_test") - require.Error(t, err) - require.EqualError(t, err, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"") - tk.MustExec("set @@tidb_redact_log=0;") - - // Recover records. 
-	txn, err = store.Begin()
-	require.NoError(t, err)
-	err = indexOpr.Delete(sc, txn, types.MakeDatums(19), kv.IntHandle(10))
-	require.NoError(t, err)
-	_, err = indexOpr.Create(ctx, txn, types.MakeDatums(20), kv.IntHandle(10), nil)
-	require.NoError(t, err)
-	err = txn.Commit(context.Background())
-	require.NoError(t, err)
-	tk.MustExec("admin check table admin_test")
-}
diff --git a/executor/admin_test.go b/executor/admin_test.go
index 2257060782b41..72aa436babe9b 100644
--- a/executor/admin_test.go
+++ b/executor/admin_test.go
@@ -1120,3 +1120,141 @@ func TestAdminCheckWithSnapshot(t *testing.T) {
 	tk.MustExec("admin check index admin_t_s a;")
 	tk.MustExec("drop table if exists admin_t_s")
 }
+
+func TestAdminCheckTableFailed(t *testing.T) {
+	store, domain, clean := testkit.CreateMockStoreAndDomain(t)
+	defer clean()
+
+	tk := testkit.NewTestKit(t, store)
+	tk.MustExec("use test")
+	tk.MustExec("drop table if exists admin_test")
+	tk.MustExec("create table admin_test (c1 int, c2 int, c3 varchar(255) default '1', primary key(c1), key(c3), unique key(c2), key(c2, c3))")
+	tk.MustExec("insert admin_test (c1, c2, c3) values (-10, -20, 'y'), (-1, -10, 'z'), (1, 11, 'a'), (2, 12, 'b'), (5, 15, 'c'), (10, 20, 'd'), (20, 30, 'e')")
+
+	// Make some corrupted index. Build the index information.
+	ctx := mock.NewContext()
+	ctx.Store = store
+	is := domain.InfoSchema()
+	dbName := model.NewCIStr("test")
+	tblName := model.NewCIStr("admin_test")
+	tbl, err := is.TableByName(dbName, tblName)
+	require.NoError(t, err)
+	tblInfo := tbl.Meta()
+	idxInfo := tblInfo.Indices[1]
+	indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo)
+	sc := ctx.GetSessionVars().StmtCtx
+	tk.Session().GetSessionVars().IndexLookupSize = 3
+	tk.Session().GetSessionVars().MaxChunkSize = 3
+
+	// Reduce one row of index.
+	// Table count > index count.
+	// Index c2 is missing 11.
+	txn, err := store.Begin()
+	require.NoError(t, err)
+	err = indexOpr.Delete(sc, txn, types.MakeDatums(-10), kv.IntHandle(-1))
+	require.NoError(t, err)
+	err = txn.Commit(context.Background())
+	require.NoError(t, err)
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8003]admin_test err:[admin:8223]index: != record:&admin.RecordData{Handle:-1, Values:[]types.Datum{types.Datum{k:0x1, decimal:0x0, length:0x0, i:-10, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}}}")
+	require.True(t, executor.ErrAdminCheckTable.Equal(err))
+	tk.MustExec("set @@tidb_redact_log=1;")
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8003]admin_test err:[admin:8223]index:\"?\" != record:\"?\"")
+	tk.MustExec("set @@tidb_redact_log=0;")
+	r := tk.MustQuery("admin recover index admin_test c2")
+	r.Check(testkit.Rows("1 7"))
+	tk.MustExec("admin check table admin_test")
+
+	// Add one row of index.
+	// Table count < index count.
+	// Index c2 has one more value than the table data: 0, and handle 0 has no corresponding record.
+	txn, err = store.Begin()
+	require.NoError(t, err)
+	_, err = indexOpr.Create(ctx, txn, types.MakeDatums(0), kv.IntHandle(0), nil)
+	require.NoError(t, err)
+	err = txn.Commit(context.Background())
+	require.NoError(t, err)
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8133]handle 0, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:0, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:")
+	tk.MustExec("set @@tidb_redact_log=1;")
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8133]handle \"?\", index:\"?\" != record:\"?\"")
+	tk.MustExec("set @@tidb_redact_log=0;")
+
+	// Add one row of index.
+	// Table count < index count.
+	// Index c2 has two more values than the table data: 10 and 13, and these handles have corresponding records.
+	txn, err = store.Begin()
+	require.NoError(t, err)
+	err = indexOpr.Delete(sc, txn, types.MakeDatums(0), kv.IntHandle(0))
+	require.NoError(t, err)
+	// Make sure the index value "19" is smaller than "21", so we scan to "19" before "21".
+	_, err = indexOpr.Create(ctx, txn, types.MakeDatums(19), kv.IntHandle(10), nil)
+	require.NoError(t, err)
+	_, err = indexOpr.Create(ctx, txn, types.MakeDatums(13), kv.IntHandle(2), nil)
+	require.NoError(t, err)
+	err = txn.Commit(context.Background())
+	require.NoError(t, err)
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8134]col c2, handle 2, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:13, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:12, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:")
+	tk.MustExec("set @@tidb_redact_log=1;")
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"")
+	tk.MustExec("set @@tidb_redact_log=0;")
+
+	// Table count = index count.
+	// Two indices have the same handle.
+	txn, err = store.Begin()
+	require.NoError(t, err)
+	err = indexOpr.Delete(sc, txn, types.MakeDatums(13), kv.IntHandle(2))
+	require.NoError(t, err)
+	err = indexOpr.Delete(sc, txn, types.MakeDatums(12), kv.IntHandle(2))
+	require.NoError(t, err)
+	err = txn.Commit(context.Background())
+	require.NoError(t, err)
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8134]col c2, handle 10, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:19, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:20, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:")
+	tk.MustExec("set @@tidb_redact_log=1;")
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"")
+	tk.MustExec("set @@tidb_redact_log=0;")
+
+	// Table count = index count.
+	// Index c2 has one row whose index value is 19, while the corresponding table value is 20.
+ txn, err = store.Begin() + require.NoError(t, err) + _, err = indexOpr.Create(ctx, txn, types.MakeDatums(12), kv.IntHandle(2), nil) + require.NoError(t, err) + err = indexOpr.Delete(sc, txn, types.MakeDatums(20), kv.IntHandle(10)) + require.NoError(t, err) + err = txn.Commit(context.Background()) + require.NoError(t, err) + err = tk.ExecToErr("admin check table admin_test") + require.Error(t, err) + require.EqualError(t, err, "[executor:8134]col c2, handle 10, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:19, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:20, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:") + tk.MustExec("set @@tidb_redact_log=1;") + err = tk.ExecToErr("admin check table admin_test") + require.Error(t, err) + require.EqualError(t, err, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"") + tk.MustExec("set @@tidb_redact_log=0;") + + // Recover records. + txn, err = store.Begin() + require.NoError(t, err) + err = indexOpr.Delete(sc, txn, types.MakeDatums(19), kv.IntHandle(10)) + require.NoError(t, err) + _, err = indexOpr.Create(ctx, txn, types.MakeDatums(20), kv.IntHandle(10), nil) + require.NoError(t, err) + err = txn.Commit(context.Background()) + require.NoError(t, err) + tk.MustExec("admin check table admin_test") +} diff --git a/executor/aggregate_serial_test.go b/executor/aggregate_serial_test.go deleted file mode 100644 index 114b444a91979..0000000000000 --- a/executor/aggregate_serial_test.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package executor_test - -import ( - "context" - "fmt" - "strings" - "testing" - - "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/session" - "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/util/sqlexec" - "github.com/stretchr/testify/require" -) - -func TestAggInDisk(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set tidb_hashagg_final_concurrency = 1;") - tk.MustExec("set tidb_hashagg_partial_concurrency = 1;") - tk.MustExec("set tidb_mem_quota_query = 4194304") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t(a int)") - sql := "insert into t values (0)" - for i := 1; i <= 200; i++ { - sql += fmt.Sprintf(",(%v)", i) - } - sql += ";" - tk.MustExec(sql) - rows := tk.MustQuery("desc analyze select /*+ HASH_AGG() */ avg(t1.a) from t t1 join t t2 group by t1.a, t2.a;").Rows() - for _, row := range rows { - length := len(row) - line := fmt.Sprintf("%v", row) - disk := fmt.Sprintf("%v", row[length-1]) - if strings.Contains(line, "HashAgg") { - require.False(t, strings.Contains(disk, "0 Bytes")) - require.True(t, strings.Contains(disk, "MB") || - strings.Contains(disk, "KB") || - strings.Contains(disk, "Bytes")) - } - } - - // Add code cover - // Test spill chunk. 
Add a line to avoid tmp spill chunk is always full. - tk.MustExec("insert into t values(0)") - tk.MustQuery("select sum(tt.b) from ( select /*+ HASH_AGG() */ avg(t1.a) as b from t t1 join t t2 group by t1.a, t2.a) as tt").Check( - testkit.Rows("4040100.0000")) - // Test no groupby and no data. - tk.MustExec("drop table t;") - tk.MustExec("create table t(c int, c1 int);") - tk.MustQuery("select /*+ HASH_AGG() */ count(c) from t;").Check(testkit.Rows("0")) - tk.MustQuery("select /*+ HASH_AGG() */ count(c) from t group by c1;").Check(testkit.Rows()) -} - -func TestRandomPanicAggConsume(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@tidb_max_chunk_size=32") - tk.MustExec("set @@tidb_init_chunk_size=1") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int)") - for i := 0; i <= 1000; i++ { - tk.MustExec(fmt.Sprintf("insert into t values(%v),(%v),(%v)", i, i, i)) - } - - fpName := "github.com/pingcap/tidb/executor/ConsumeRandomPanic" - require.NoError(t, failpoint.Enable(fpName, "5%panic(\"ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]\")")) - defer func() { - require.NoError(t, failpoint.Disable(fpName)) - }() - - // Test 10 times panic for each AggExec. - var res sqlexec.RecordSet - for i := 1; i <= 10; i++ { - var err error - for err == nil { - // Test paralleled hash agg. - res, err = tk.Exec("select /*+ HASH_AGG() */ count(a) from t group by a") - if err == nil { - _, err = session.GetRows4Test(context.Background(), tk.Session(), res) - require.NoError(t, res.Close()) - } - } - require.EqualError(t, err, "failpoint panic: ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]") - - err = nil - for err == nil { - // Test unparalleled hash agg. - res, err = tk.Exec("select /*+ HASH_AGG() */ count(distinct a) from t") - if err == nil { - _, err = session.GetRows4Test(context.Background(), tk.Session(), res) - require.NoError(t, res.Close()) - } - } - require.EqualError(t, err, "failpoint panic: ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]") - - err = nil - for err == nil { - // Test stream agg. - res, err = tk.Exec("select /*+ STREAM_AGG() */ count(a) from t") - if err == nil { - _, err = session.GetRows4Test(context.Background(), tk.Session(), res) - require.NoError(t, res.Close()) - } - } - require.EqualError(t, err, "failpoint panic: ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]") - } -} diff --git a/executor/aggregate_test.go b/executor/aggregate_test.go index 25b9bd96840e9..a99d7e71a69f4 100644 --- a/executor/aggregate_test.go +++ b/executor/aggregate_test.go @@ -15,6 +15,7 @@ package executor_test import ( + "context" "fmt" "math" "math/rand" @@ -26,12 +27,15 @@ import ( . 
"github.com/pingcap/check" "github.com/pingcap/errors" + "github.com/pingcap/failpoint" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/parser/terror" plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/testkit/testdata" + "github.com/pingcap/tidb/util/sqlexec" "github.com/stretchr/testify/require" ) @@ -1494,3 +1498,101 @@ func TestIssue23314(t *testing.T) { res := tk.MustQuery("select col1 from t1 group by col1") res.Check(testkit.Rows("16:40:20.01")) } + +func TestAggInDisk(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set tidb_hashagg_final_concurrency = 1;") + tk.MustExec("set tidb_hashagg_partial_concurrency = 1;") + tk.MustExec("set tidb_mem_quota_query = 4194304") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t(a int)") + sql := "insert into t values (0)" + for i := 1; i <= 200; i++ { + sql += fmt.Sprintf(",(%v)", i) + } + sql += ";" + tk.MustExec(sql) + rows := tk.MustQuery("desc analyze select /*+ HASH_AGG() */ avg(t1.a) from t t1 join t t2 group by t1.a, t2.a;").Rows() + for _, row := range rows { + length := len(row) + line := fmt.Sprintf("%v", row) + disk := fmt.Sprintf("%v", row[length-1]) + if strings.Contains(line, "HashAgg") { + require.False(t, strings.Contains(disk, "0 Bytes")) + require.True(t, strings.Contains(disk, "MB") || + strings.Contains(disk, "KB") || + strings.Contains(disk, "Bytes")) + } + } + + // Add code cover + // Test spill chunk. Add a line to avoid tmp spill chunk is always full. + tk.MustExec("insert into t values(0)") + tk.MustQuery("select sum(tt.b) from ( select /*+ HASH_AGG() */ avg(t1.a) as b from t t1 join t t2 group by t1.a, t2.a) as tt").Check( + testkit.Rows("4040100.0000")) + // Test no groupby and no data. + tk.MustExec("drop table t;") + tk.MustExec("create table t(c int, c1 int);") + tk.MustQuery("select /*+ HASH_AGG() */ count(c) from t;").Check(testkit.Rows("0")) + tk.MustQuery("select /*+ HASH_AGG() */ count(c) from t group by c1;").Check(testkit.Rows()) +} + +func TestRandomPanicAggConsume(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@tidb_max_chunk_size=32") + tk.MustExec("set @@tidb_init_chunk_size=1") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int)") + for i := 0; i <= 1000; i++ { + tk.MustExec(fmt.Sprintf("insert into t values(%v),(%v),(%v)", i, i, i)) + } + + fpName := "github.com/pingcap/tidb/executor/ConsumeRandomPanic" + require.NoError(t, failpoint.Enable(fpName, "5%panic(\"ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]\")")) + defer func() { + require.NoError(t, failpoint.Disable(fpName)) + }() + + // Test 10 times panic for each AggExec. + var res sqlexec.RecordSet + for i := 1; i <= 10; i++ { + var err error + for err == nil { + // Test paralleled hash agg. + res, err = tk.Exec("select /*+ HASH_AGG() */ count(a) from t group by a") + if err == nil { + _, err = session.GetRows4Test(context.Background(), tk.Session(), res) + require.NoError(t, res.Close()) + } + } + require.EqualError(t, err, "failpoint panic: ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]") + + err = nil + for err == nil { + // Test unparalleled hash agg. 
+ res, err = tk.Exec("select /*+ HASH_AGG() */ count(distinct a) from t") + if err == nil { + _, err = session.GetRows4Test(context.Background(), tk.Session(), res) + require.NoError(t, res.Close()) + } + } + require.EqualError(t, err, "failpoint panic: ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]") + + err = nil + for err == nil { + // Test stream agg. + res, err = tk.Exec("select /*+ STREAM_AGG() */ count(a) from t") + if err == nil { + _, err = session.GetRows4Test(context.Background(), tk.Session(), res) + require.NoError(t, res.Close()) + } + } + require.EqualError(t, err, "failpoint panic: ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]") + } +} diff --git a/executor/analyze_serial_test.go b/executor/analyze_serial_test.go deleted file mode 100644 index 3cb412c55b65a..0000000000000 --- a/executor/analyze_serial_test.go +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package executor_test - -import ( - "testing" - - "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/domain" - "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/session" - "github.com/pingcap/tidb/statistics" - "github.com/pingcap/tidb/statistics/handle" - "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/types" - "github.com/pingcap/tidb/util/codec" - "github.com/pingcap/tidb/util/collate" - "github.com/stretchr/testify/require" -) - -func TestFastAnalyze4GlobalStats(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec(`create database if not exists test_fast_gstats`) - tk.MustExec("use test_fast_gstats") - tk.MustExec("set @@session.tidb_enable_fast_analyze=1") - tk.MustExec("set @@session.tidb_build_stats_concurrency=1") - // test fast analyze in dynamic mode - tk.MustExec("set @@session.tidb_analyze_version = 2;") - tk.MustExec("set @@session.tidb_partition_prune_mode = 'dynamic';") - tk.MustExec("drop table if exists test_fast_gstats;") - tk.MustExec("create table test_fast_gstats(a int, b int) PARTITION BY HASH(a) PARTITIONS 2;") - tk.MustExec("insert into test_fast_gstats values(1,1),(3,3),(4,4),(2,2),(5,5);") - err := tk.ExecToErr("analyze table test_fast_gstats;") - require.EqualError(t, err, "Fast analyze hasn't reached General Availability and only support analyze version 1 currently.") -} - -func TestAnalyzeIndex(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1 (id int, v int, primary key(id), index k(v))") - tk.MustExec("insert into t1(id, v) values(1, 2), (2, 2), (3, 2), (4, 2), (5, 1), (6, 3), (7, 4)") - tk.MustExec("set @@tidb_analyze_version=1") - tk.MustExec("analyze table t1 index k") - require.Greater(t, len(tk.MustQuery("show stats_buckets where table_name = 't1' and column_name = 'k' and is_index = 1").Rows()), 0) - tk.MustExec("set 
@@tidb_analyze_version=default") - tk.MustExec("analyze table t1") - require.Greater(t, len(tk.MustQuery("show stats_topn where table_name = 't1' and column_name = 'k' and is_index = 1").Rows()), 0) - - func() { - defer tk.MustExec("set @@session.tidb_enable_fast_analyze=0") - tk.MustExec("drop stats t1") - tk.MustExec("set @@session.tidb_enable_fast_analyze=1") - tk.MustExec("set @@tidb_analyze_version=1") - tk.MustExec("analyze table t1 index k") - require.Greater(t, len(tk.MustQuery("show stats_buckets where table_name = 't1' and column_name = 'k' and is_index = 1").Rows()), 1) - }() -} - -func TestAnalyzeIncremental(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - dom, err := session.BootstrapSession(store) - require.NoError(t, err) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@tidb_analyze_version = 1") - tk.Session().GetSessionVars().EnableStreaming = false - testAnalyzeIncremental(tk, t, dom) -} - -func TestAnalyzeIncrementalStreaming(t *testing.T) { - t.Skip("unistore hasn't support streaming yet.") - store, clean := testkit.CreateMockStore(t) - dom, err := session.BootstrapSession(store) - require.NoError(t, err) - - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.Session().GetSessionVars().EnableStreaming = true - testAnalyzeIncremental(tk, t, dom) -} - -func testAnalyzeIncremental(tk *testkit.TestKit, t *testing.T, dom *domain.Domain) { - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int, b int, primary key(a), index idx(b))") - tk.MustExec("analyze incremental table t index") - tk.MustQuery("show stats_buckets").Check(testkit.Rows()) - tk.MustExec("insert into t values (1,1)") - tk.MustExec("analyze incremental table t index") - tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t idx 1 0 1 1 1 1 0")) - tk.MustExec("insert into t values (2,2)") - tk.MustExec("analyze incremental table t index") - tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 2 1 2 2 0", "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0")) - tk.MustExec("analyze incremental table t index") - // Result should not change. - tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 2 1 2 2 0", "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0")) - - // Test analyze incremental with feedback. 
- tk.MustExec("insert into t values (3,3)") - oriProbability := statistics.FeedbackProbability.Load() - oriMinLogCount := handle.MinLogScanCount.Load() - defer func() { - statistics.FeedbackProbability.Store(oriProbability) - handle.MinLogScanCount.Store(oriMinLogCount) - }() - statistics.FeedbackProbability.Store(1) - handle.MinLogScanCount.Store(0) - is := dom.InfoSchema() - table, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) - require.NoError(t, err) - tblInfo := table.Meta() - tk.MustQuery("select * from t use index(idx) where b = 3") - tk.MustQuery("select * from t where a > 1") - h := dom.StatsHandle() - require.NoError(t, h.DumpStatsDeltaToKV(handle.DumpAll)) - require.NoError(t, h.DumpStatsFeedbackToKV()) - require.NoError(t, h.HandleUpdateStats(is)) - require.NoError(t, h.Update(is)) - require.NoError(t, h.LoadNeededHistograms()) - tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 3 0 2 2147483647 0", "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0")) - tblStats := h.GetTableStats(tblInfo) - val, err := codec.EncodeKey(tk.Session().GetSessionVars().StmtCtx, nil, types.NewIntDatum(3)) - require.NoError(t, err) - require.Equal(t, uint64(1), tblStats.Indices[tblInfo.Indices[0].ID].QueryBytes(val)) - require.False(t, statistics.IsAnalyzed(tblStats.Indices[tblInfo.Indices[0].ID].Flag)) - require.False(t, statistics.IsAnalyzed(tblStats.Columns[tblInfo.Columns[0].ID].Flag)) - - tk.MustExec("analyze incremental table t index") - require.NoError(t, h.LoadNeededHistograms()) - tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 2 1 2 2 0", "test t a 0 2 3 1 3 3 0", - "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0", "test t idx 1 2 3 1 3 3 0")) - tblStats = h.GetTableStats(tblInfo) - require.Equal(t, uint64(1), tblStats.Indices[tblInfo.Indices[0].ID].QueryBytes(val)) - - // test analyzeIndexIncremental for global-level stats; - tk.MustExec("set @@session.tidb_analyze_version = 1;") - tk.MustQuery("select @@tidb_analyze_version").Check(testkit.Rows("1")) - tk.MustExec("set @@tidb_partition_prune_mode = 'static';") - tk.MustExec("drop table if exists t;") - tk.MustExec(`create table t (a int, b int, primary key(a), index idx(b)) partition by range (a) ( - partition p0 values less than (10), - partition p1 values less than (20), - partition p2 values less than (30) - );`) - tk.MustExec("analyze incremental table t index") - require.NoError(t, h.LoadNeededHistograms()) - tk.MustQuery("show stats_buckets").Check(testkit.Rows()) - tk.MustExec("insert into t values (1,1)") - tk.MustExec("analyze incremental table t index") - tk.MustQuery("show warnings").Check(testkit.Rows()) // no warning - require.NoError(t, h.LoadNeededHistograms()) - tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t p0 a 0 0 1 1 1 1 0", "test t p0 idx 1 0 1 1 1 1 0")) - tk.MustExec("insert into t values (2,2)") - tk.MustExec("analyze incremental table t index") - require.NoError(t, h.LoadNeededHistograms()) - tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t p0 a 0 0 1 1 1 1 0", "test t p0 a 0 1 2 1 2 2 0", "test t p0 idx 1 0 1 1 1 1 0", "test t p0 idx 1 1 2 1 2 2 0")) - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic';") - tk.MustExec("insert into t values (11,11)") - err = tk.ExecToErr("analyze incremental table t index") - require.Equal(t, "[stats]: global statistics for partitioned tables unavailable in ANALYZE INCREMENTAL", err.Error()) -} - -func TestIssue27429(t 
*testing.T) { - collate.SetNewCollationEnabledForTest(true) - defer collate.SetNewCollationEnabledForTest(false) - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table test.t(id int, value varchar(20) charset utf8mb4 collate utf8mb4_general_ci, value1 varchar(20) charset utf8mb4 collate utf8mb4_bin)") - tk.MustExec("insert into test.t values (1, 'abc', 'abc '),(4, 'Abc', 'abc'),(3,'def', 'def ');") - - tk.MustQuery("select upper(group_concat(distinct value order by 1)) from test.t;").Check(testkit.Rows("ABC,DEF")) - tk.MustQuery("select upper(group_concat(distinct value)) from test.t;").Check(testkit.Rows("ABC,DEF")) -} - -func TestIssue20874(t *testing.T) { - collate.SetNewCollationEnabledForTest(true) - defer collate.SetNewCollationEnabledForTest(false) - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("delete from mysql.stats_histograms") - tk.MustExec("create table t (a char(10) collate utf8mb4_unicode_ci not null, b char(20) collate utf8mb4_general_ci not null, key idxa(a), key idxb(b))") - tk.MustExec("insert into t values ('#', 'C'), ('$', 'c'), ('a', 'a')") - tk.MustExec("set @@tidb_analyze_version=1") - tk.MustExec("analyze table t") - tk.MustQuery("show stats_buckets where db_name = 'test' and table_name = 't'").Sort().Check(testkit.Rows( - "test t a 0 0 1 1 \x02\xd2 \x02\xd2 0", - "test t a 0 1 2 1 \x0e\x0f \x0e\x0f 0", - "test t a 0 2 3 1 \x0e3 \x0e3 0", - "test t b 0 0 1 1 \x00A \x00A 0", - "test t b 0 1 3 2 \x00C \x00C 0", - "test t idxa 1 0 1 1 \x02\xd2 \x02\xd2 0", - "test t idxa 1 1 2 1 \x0e\x0f \x0e\x0f 0", - "test t idxa 1 2 3 1 \x0e3 \x0e3 0", - "test t idxb 1 0 1 1 \x00A \x00A 0", - "test t idxb 1 1 3 2 \x00C \x00C 0", - )) - tk.MustQuery("select is_index, hist_id, distinct_count, null_count, tot_col_size, stats_ver, correlation from mysql.stats_histograms").Sort().Check(testkit.Rows( - "0 1 3 0 9 1 1", - "0 2 2 0 9 1 -0.5", - "1 1 3 0 0 1 0", - "1 2 2 0 0 1 0", - )) - tk.MustExec("set @@tidb_analyze_version=2") - tk.MustExec("analyze table t") - tk.MustQuery("show stats_topn where db_name = 'test' and table_name = 't'").Sort().Check(testkit.Rows( - "test t a 0 \x02\xd2 1", - "test t a 0 \x0e\x0f 1", - "test t a 0 \x0e3 1", - "test t b 0 \x00A 1", - "test t b 0 \x00C 2", - "test t idxa 1 \x02\xd2 1", - "test t idxa 1 \x0e\x0f 1", - "test t idxa 1 \x0e3 1", - "test t idxb 1 \x00A 1", - "test t idxb 1 \x00C 2", - )) - tk.MustQuery("select is_index, hist_id, distinct_count, null_count, tot_col_size, stats_ver, correlation from mysql.stats_histograms").Sort().Check(testkit.Rows( - "0 1 3 0 6 2 1", - "0 2 2 0 6 2 -0.5", - "1 1 3 0 6 2 0", - "1 2 2 0 6 2 0", - )) -} - -func TestAnalyzeClusteredIndexPrimary(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t0") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t0(a varchar(20), primary key(a) clustered)") - tk.MustExec("create table t1(a varchar(20), primary key(a))") - tk.MustExec("insert into t0 values('1111')") - tk.MustExec("insert into t1 values('1111')") - tk.MustExec("set @@session.tidb_analyze_version = 1") - tk.MustExec("analyze table t0 index primary") - tk.MustExec("analyze table t1 index primary") - 
tk.MustQuery("show stats_buckets").Check(testkit.Rows( - "test t0 PRIMARY 1 0 1 1 1111 1111 0", - "test t1 PRIMARY 1 0 1 1 1111 1111 0")) - tk.MustExec("set @@session.tidb_analyze_version = 2") - tk.MustExec("analyze table t0") - tk.MustExec("analyze table t1") - tk.MustQuery("show stats_topn").Sort().Check(testkit.Rows(""+ - "test t0 PRIMARY 1 1111 1", - "test t0 a 0 1111 1", - "test t1 PRIMARY 1 1111 1", - "test t1 a 0 1111 1")) -} - -func TestAnalyzeSamplingWorkPanic(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@session.tidb_analyze_version = 2") - tk.MustExec("create table t(a int)") - tk.MustExec("insert into t values(1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), (12)") - tk.MustExec("split table t between (-9223372036854775808) and (9223372036854775807) regions 12") - - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingBuildWorkerPanic", "return(1)")) - err := tk.ExecToErr("analyze table t") - require.NotNil(t, err) - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingBuildWorkerPanic")) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingMergeWorkerPanic", "return(1)")) - err = tk.ExecToErr("analyze table t") - require.NotNil(t, err) - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingMergeWorkerPanic")) -} diff --git a/executor/analyze_test.go b/executor/analyze_test.go index 29d2aca24ee68..3ee10fe9a3626 100644 --- a/executor/analyze_test.go +++ b/executor/analyze_test.go @@ -962,3 +962,274 @@ func TestAdjustSampleRateNote(t *testing.T) { tk.MustExec("analyze table t") tk.MustQuery("show warnings").Check(testkit.Rows("Note 1105 Analyze use auto adjusted sample rate 1.000000 for table test.t.")) } + +func TestFastAnalyze4GlobalStats(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec(`create database if not exists test_fast_gstats`) + tk.MustExec("use test_fast_gstats") + tk.MustExec("set @@session.tidb_enable_fast_analyze=1") + tk.MustExec("set @@session.tidb_build_stats_concurrency=1") + // test fast analyze in dynamic mode + tk.MustExec("set @@session.tidb_analyze_version = 2;") + tk.MustExec("set @@session.tidb_partition_prune_mode = 'dynamic';") + tk.MustExec("drop table if exists test_fast_gstats;") + tk.MustExec("create table test_fast_gstats(a int, b int) PARTITION BY HASH(a) PARTITIONS 2;") + tk.MustExec("insert into test_fast_gstats values(1,1),(3,3),(4,4),(2,2),(5,5);") + err := tk.ExecToErr("analyze table test_fast_gstats;") + require.EqualError(t, err, "Fast analyze hasn't reached General Availability and only support analyze version 1 currently.") +} + +func TestAnalyzeIndex(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1 (id int, v int, primary key(id), index k(v))") + tk.MustExec("insert into t1(id, v) values(1, 2), (2, 2), (3, 2), (4, 2), (5, 1), (6, 3), (7, 4)") + tk.MustExec("set @@tidb_analyze_version=1") + tk.MustExec("analyze table t1 index k") + require.Greater(t, len(tk.MustQuery("show stats_buckets where table_name = 't1' and column_name = 'k' and is_index = 1").Rows()), 0) + tk.MustExec("set @@tidb_analyze_version=default") + 
tk.MustExec("analyze table t1") + require.Greater(t, len(tk.MustQuery("show stats_topn where table_name = 't1' and column_name = 'k' and is_index = 1").Rows()), 0) + + func() { + defer tk.MustExec("set @@session.tidb_enable_fast_analyze=0") + tk.MustExec("drop stats t1") + tk.MustExec("set @@session.tidb_enable_fast_analyze=1") + tk.MustExec("set @@tidb_analyze_version=1") + tk.MustExec("analyze table t1 index k") + require.Greater(t, len(tk.MustQuery("show stats_buckets where table_name = 't1' and column_name = 'k' and is_index = 1").Rows()), 1) + }() +} + +func TestAnalyzeIncremental(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + dom, err := session.BootstrapSession(store) + require.NoError(t, err) + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@tidb_analyze_version = 1") + tk.Session().GetSessionVars().EnableStreaming = false + testAnalyzeIncremental(tk, t, dom) +} + +func TestAnalyzeIncrementalStreaming(t *testing.T) { + t.Skip("unistore hasn't support streaming yet.") + store, clean := testkit.CreateMockStore(t) + dom, err := session.BootstrapSession(store) + require.NoError(t, err) + + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.Session().GetSessionVars().EnableStreaming = true + testAnalyzeIncremental(tk, t, dom) +} + +func testAnalyzeIncremental(tk *testkit.TestKit, t *testing.T, dom *domain.Domain) { + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, b int, primary key(a), index idx(b))") + tk.MustExec("analyze incremental table t index") + tk.MustQuery("show stats_buckets").Check(testkit.Rows()) + tk.MustExec("insert into t values (1,1)") + tk.MustExec("analyze incremental table t index") + tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t idx 1 0 1 1 1 1 0")) + tk.MustExec("insert into t values (2,2)") + tk.MustExec("analyze incremental table t index") + tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 2 1 2 2 0", "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0")) + tk.MustExec("analyze incremental table t index") + // Result should not change. + tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 2 1 2 2 0", "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0")) + + // Test analyze incremental with feedback. 
+ tk.MustExec("insert into t values (3,3)") + oriProbability := statistics.FeedbackProbability.Load() + oriMinLogCount := handle.MinLogScanCount.Load() + defer func() { + statistics.FeedbackProbability.Store(oriProbability) + handle.MinLogScanCount.Store(oriMinLogCount) + }() + statistics.FeedbackProbability.Store(1) + handle.MinLogScanCount.Store(0) + is := dom.InfoSchema() + table, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + require.NoError(t, err) + tblInfo := table.Meta() + tk.MustQuery("select * from t use index(idx) where b = 3") + tk.MustQuery("select * from t where a > 1") + h := dom.StatsHandle() + require.NoError(t, h.DumpStatsDeltaToKV(handle.DumpAll)) + require.NoError(t, h.DumpStatsFeedbackToKV()) + require.NoError(t, h.HandleUpdateStats(is)) + require.NoError(t, h.Update(is)) + require.NoError(t, h.LoadNeededHistograms()) + tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 3 0 2 2147483647 0", "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0")) + tblStats := h.GetTableStats(tblInfo) + val, err := codec.EncodeKey(tk.Session().GetSessionVars().StmtCtx, nil, types.NewIntDatum(3)) + require.NoError(t, err) + require.Equal(t, uint64(1), tblStats.Indices[tblInfo.Indices[0].ID].QueryBytes(val)) + require.False(t, statistics.IsAnalyzed(tblStats.Indices[tblInfo.Indices[0].ID].Flag)) + require.False(t, statistics.IsAnalyzed(tblStats.Columns[tblInfo.Columns[0].ID].Flag)) + + tk.MustExec("analyze incremental table t index") + require.NoError(t, h.LoadNeededHistograms()) + tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 2 1 2 2 0", "test t a 0 2 3 1 3 3 0", + "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0", "test t idx 1 2 3 1 3 3 0")) + tblStats = h.GetTableStats(tblInfo) + require.Equal(t, uint64(1), tblStats.Indices[tblInfo.Indices[0].ID].QueryBytes(val)) + + // test analyzeIndexIncremental for global-level stats; + tk.MustExec("set @@session.tidb_analyze_version = 1;") + tk.MustQuery("select @@tidb_analyze_version").Check(testkit.Rows("1")) + tk.MustExec("set @@tidb_partition_prune_mode = 'static';") + tk.MustExec("drop table if exists t;") + tk.MustExec(`create table t (a int, b int, primary key(a), index idx(b)) partition by range (a) ( + partition p0 values less than (10), + partition p1 values less than (20), + partition p2 values less than (30) + );`) + tk.MustExec("analyze incremental table t index") + require.NoError(t, h.LoadNeededHistograms()) + tk.MustQuery("show stats_buckets").Check(testkit.Rows()) + tk.MustExec("insert into t values (1,1)") + tk.MustExec("analyze incremental table t index") + tk.MustQuery("show warnings").Check(testkit.Rows()) // no warning + require.NoError(t, h.LoadNeededHistograms()) + tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t p0 a 0 0 1 1 1 1 0", "test t p0 idx 1 0 1 1 1 1 0")) + tk.MustExec("insert into t values (2,2)") + tk.MustExec("analyze incremental table t index") + require.NoError(t, h.LoadNeededHistograms()) + tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t p0 a 0 0 1 1 1 1 0", "test t p0 a 0 1 2 1 2 2 0", "test t p0 idx 1 0 1 1 1 1 0", "test t p0 idx 1 1 2 1 2 2 0")) + tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic';") + tk.MustExec("insert into t values (11,11)") + err = tk.ExecToErr("analyze incremental table t index") + require.Equal(t, "[stats]: global statistics for partitioned tables unavailable in ANALYZE INCREMENTAL", err.Error()) +} + +func TestIssue27429(t 
*testing.T) { + collate.SetNewCollationEnabledForTest(true) + defer collate.SetNewCollationEnabledForTest(false) + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table test.t(id int, value varchar(20) charset utf8mb4 collate utf8mb4_general_ci, value1 varchar(20) charset utf8mb4 collate utf8mb4_bin)") + tk.MustExec("insert into test.t values (1, 'abc', 'abc '),(4, 'Abc', 'abc'),(3,'def', 'def ');") + + tk.MustQuery("select upper(group_concat(distinct value order by 1)) from test.t;").Check(testkit.Rows("ABC,DEF")) + tk.MustQuery("select upper(group_concat(distinct value)) from test.t;").Check(testkit.Rows("ABC,DEF")) +} + +func TestIssue20874(t *testing.T) { + collate.SetNewCollationEnabledForTest(true) + defer collate.SetNewCollationEnabledForTest(false) + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("delete from mysql.stats_histograms") + tk.MustExec("create table t (a char(10) collate utf8mb4_unicode_ci not null, b char(20) collate utf8mb4_general_ci not null, key idxa(a), key idxb(b))") + tk.MustExec("insert into t values ('#', 'C'), ('$', 'c'), ('a', 'a')") + tk.MustExec("set @@tidb_analyze_version=1") + tk.MustExec("analyze table t") + tk.MustQuery("show stats_buckets where db_name = 'test' and table_name = 't'").Sort().Check(testkit.Rows( + "test t a 0 0 1 1 \x02\xd2 \x02\xd2 0", + "test t a 0 1 2 1 \x0e\x0f \x0e\x0f 0", + "test t a 0 2 3 1 \x0e3 \x0e3 0", + "test t b 0 0 1 1 \x00A \x00A 0", + "test t b 0 1 3 2 \x00C \x00C 0", + "test t idxa 1 0 1 1 \x02\xd2 \x02\xd2 0", + "test t idxa 1 1 2 1 \x0e\x0f \x0e\x0f 0", + "test t idxa 1 2 3 1 \x0e3 \x0e3 0", + "test t idxb 1 0 1 1 \x00A \x00A 0", + "test t idxb 1 1 3 2 \x00C \x00C 0", + )) + tk.MustQuery("select is_index, hist_id, distinct_count, null_count, tot_col_size, stats_ver, correlation from mysql.stats_histograms").Sort().Check(testkit.Rows( + "0 1 3 0 9 1 1", + "0 2 2 0 9 1 -0.5", + "1 1 3 0 0 1 0", + "1 2 2 0 0 1 0", + )) + tk.MustExec("set @@tidb_analyze_version=2") + tk.MustExec("analyze table t") + tk.MustQuery("show stats_topn where db_name = 'test' and table_name = 't'").Sort().Check(testkit.Rows( + "test t a 0 \x02\xd2 1", + "test t a 0 \x0e\x0f 1", + "test t a 0 \x0e3 1", + "test t b 0 \x00A 1", + "test t b 0 \x00C 2", + "test t idxa 1 \x02\xd2 1", + "test t idxa 1 \x0e\x0f 1", + "test t idxa 1 \x0e3 1", + "test t idxb 1 \x00A 1", + "test t idxb 1 \x00C 2", + )) + tk.MustQuery("select is_index, hist_id, distinct_count, null_count, tot_col_size, stats_ver, correlation from mysql.stats_histograms").Sort().Check(testkit.Rows( + "0 1 3 0 6 2 1", + "0 2 2 0 6 2 -0.5", + "1 1 3 0 6 2 0", + "1 2 2 0 6 2 0", + )) +} + +func TestAnalyzeClusteredIndexPrimary(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t0") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t0(a varchar(20), primary key(a) clustered)") + tk.MustExec("create table t1(a varchar(20), primary key(a))") + tk.MustExec("insert into t0 values('1111')") + tk.MustExec("insert into t1 values('1111')") + tk.MustExec("set @@session.tidb_analyze_version = 1") + tk.MustExec("analyze table t0 index primary") + tk.MustExec("analyze table t1 index primary") + 
tk.MustQuery("show stats_buckets").Check(testkit.Rows( + "test t0 PRIMARY 1 0 1 1 1111 1111 0", + "test t1 PRIMARY 1 0 1 1 1111 1111 0")) + tk.MustExec("set @@session.tidb_analyze_version = 2") + tk.MustExec("analyze table t0") + tk.MustExec("analyze table t1") + tk.MustQuery("show stats_topn").Sort().Check(testkit.Rows(""+ + "test t0 PRIMARY 1 1111 1", + "test t0 a 0 1111 1", + "test t1 PRIMARY 1 1111 1", + "test t1 a 0 1111 1")) +} + +func TestAnalyzeSamplingWorkPanic(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@session.tidb_analyze_version = 2") + tk.MustExec("create table t(a int)") + tk.MustExec("insert into t values(1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), (12)") + tk.MustExec("split table t between (-9223372036854775808) and (9223372036854775807) regions 12") + + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingBuildWorkerPanic", "return(1)")) + err := tk.ExecToErr("analyze table t") + require.NotNil(t, err) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingBuildWorkerPanic")) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingMergeWorkerPanic", "return(1)")) + err = tk.ExecToErr("analyze table t") + require.NotNil(t, err) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingMergeWorkerPanic")) +} diff --git a/executor/batch_point_get_serial_test.go b/executor/batch_point_get_serial_test.go deleted file mode 100644 index 413af6863091e..0000000000000 --- a/executor/batch_point_get_serial_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package executor_test - -import ( - "testing" - - "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/testkit" - "github.com/stretchr/testify/require" -) - -func TestPointGetForTemporaryTable(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create global temporary table t1 (id int primary key, val int) on commit delete rows") - tk.MustExec("begin") - tk.MustExec("insert into t1 values (1,1)") - tk.MustQuery("explain format = 'brief' select * from t1 where id in (1, 2, 3)"). - Check(testkit.Rows("Batch_Point_Get 3.00 root table:t1 handle:[1 2 3], keep order:false, desc:false")) - - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/rpcServerBusy", "return(true)")) - defer func() { - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/rpcServerBusy")) - }() - - // Batch point get. - tk.MustQuery("select * from t1 where id in (1, 2, 3)").Check(testkit.Rows("1 1")) - tk.MustQuery("select * from t1 where id in (2, 3)").Check(testkit.Rows()) - - // Point get. 
- tk.MustQuery("select * from t1 where id = 1").Check(testkit.Rows("1 1")) - tk.MustQuery("select * from t1 where id = 2").Check(testkit.Rows()) -} diff --git a/executor/batch_point_get_test.go b/executor/batch_point_get_test.go index 27c035e52a433..44809a9211f90 100644 --- a/executor/batch_point_get_test.go +++ b/executor/batch_point_get_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/pingcap/failpoint" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx/variable" @@ -352,3 +353,30 @@ func TestCacheSnapShot(t *testing.T) { require.Equal(t, batchGet[string(keys[0])], []byte("1111")) require.Equal(t, batchGet[string(keys[1])], []byte("2222")) } + +func TestPointGetForTemporaryTable(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create global temporary table t1 (id int primary key, val int) on commit delete rows") + tk.MustExec("begin") + tk.MustExec("insert into t1 values (1,1)") + tk.MustQuery("explain format = 'brief' select * from t1 where id in (1, 2, 3)"). + Check(testkit.Rows("Batch_Point_Get 3.00 root table:t1 handle:[1 2 3], keep order:false, desc:false")) + + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/rpcServerBusy", "return(true)")) + defer func() { + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/rpcServerBusy")) + }() + + // Batch point get. + tk.MustQuery("select * from t1 where id in (1, 2, 3)").Check(testkit.Rows("1 1")) + tk.MustQuery("select * from t1 where id in (2, 3)").Check(testkit.Rows()) + + // Point get. + tk.MustQuery("select * from t1 where id = 1").Check(testkit.Rows("1 1")) + tk.MustQuery("select * from t1 where id = 2").Check(testkit.Rows()) +} diff --git a/executor/collation_serial_test.go b/executor/collation_test.go similarity index 100% rename from executor/collation_serial_test.go rename to executor/collation_test.go diff --git a/executor/cte_serial_test.go b/executor/cte_serial_test.go deleted file mode 100644 index b8b04551b0c6e..0000000000000 --- a/executor/cte_serial_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package executor_test - -import ( - "fmt" - "math/rand" - "sort" - "testing" - - "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/config" - "github.com/pingcap/tidb/testkit" - "github.com/stretchr/testify/require" -) - -func TestSpillToDisk(t *testing.T) { - defer config.RestoreFunc()() - config.UpdateGlobal(func(conf *config.Config) { - conf.OOMUseTmpStorage = true - }) - - store, close := testkit.CreateMockStore(t) - defer close() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test;") - - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/testCTEStorageSpill", "return(true)")) - defer func() { - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/testCTEStorageSpill")) - tk.MustExec("set tidb_mem_quota_query = 1073741824;") - }() - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill", "return(true)")) - defer func() { - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill")) - }() - - // Use duplicated rows to test UNION DISTINCT. - tk.MustExec("set tidb_mem_quota_query = 1073741824;") - insertStr := "insert into t1 values(0)" - rowNum := 1000 - vals := make([]int, rowNum) - vals[0] = 0 - for i := 1; i < rowNum; i++ { - v := rand.Intn(100) - vals[i] = v - insertStr += fmt.Sprintf(", (%d)", v) - } - tk.MustExec("drop table if exists t1;") - tk.MustExec("create table t1(c1 int);") - tk.MustExec(insertStr) - tk.MustExec("set tidb_mem_quota_query = 40000;") - tk.MustExec("set cte_max_recursion_depth = 500000;") - sql := fmt.Sprintf("with recursive cte1 as ( "+ - "select c1 from t1 "+ - "union "+ - "select c1 + 1 c1 from cte1 where c1 < %d) "+ - "select c1 from cte1 order by c1;", rowNum) - rows := tk.MustQuery(sql) - - memTracker := tk.Session().GetSessionVars().StmtCtx.MemTracker - diskTracker := tk.Session().GetSessionVars().StmtCtx.DiskTracker - require.Greater(t, memTracker.MaxConsumed(), int64(0)) - require.Greater(t, diskTracker.MaxConsumed(), int64(0)) - - sort.Ints(vals) - resRows := make([]string, 0, rowNum) - for i := vals[0]; i <= rowNum; i++ { - resRows = append(resRows, fmt.Sprintf("%d", i)) - } - rows.Check(testkit.Rows(resRows...)) -} diff --git a/executor/cte_test.go b/executor/cte_test.go index bf4ec37b99095..bf6d33ede4a42 100644 --- a/executor/cte_test.go +++ b/executor/cte_test.go @@ -16,8 +16,12 @@ package executor_test import ( "fmt" + "math/rand" + "sort" "testing" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/testkit" "github.com/stretchr/testify/require" ) @@ -346,3 +350,61 @@ func TestCTEWithLimit(t *testing.T) { rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 4) select * from cte1;") rows.Check(testkit.Rows("3", "4", "3", "4")) } + +func TestSpillToDisk(t *testing.T) { + defer config.RestoreFunc()() + config.UpdateGlobal(func(conf *config.Config) { + conf.OOMUseTmpStorage = true + }) + + store, close := testkit.CreateMockStore(t) + defer close() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test;") + + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/testCTEStorageSpill", "return(true)")) + defer func() { + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/testCTEStorageSpill")) + tk.MustExec("set tidb_mem_quota_query = 1073741824;") + }() + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill", 
"return(true)")) + defer func() { + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill")) + }() + + // Use duplicated rows to test UNION DISTINCT. + tk.MustExec("set tidb_mem_quota_query = 1073741824;") + insertStr := "insert into t1 values(0)" + rowNum := 1000 + vals := make([]int, rowNum) + vals[0] = 0 + for i := 1; i < rowNum; i++ { + v := rand.Intn(100) + vals[i] = v + insertStr += fmt.Sprintf(", (%d)", v) + } + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t1(c1 int);") + tk.MustExec(insertStr) + tk.MustExec("set tidb_mem_quota_query = 40000;") + tk.MustExec("set cte_max_recursion_depth = 500000;") + sql := fmt.Sprintf("with recursive cte1 as ( "+ + "select c1 from t1 "+ + "union "+ + "select c1 + 1 c1 from cte1 where c1 < %d) "+ + "select c1 from cte1 order by c1;", rowNum) + rows := tk.MustQuery(sql) + + memTracker := tk.Session().GetSessionVars().StmtCtx.MemTracker + diskTracker := tk.Session().GetSessionVars().StmtCtx.DiskTracker + require.Greater(t, memTracker.MaxConsumed(), int64(0)) + require.Greater(t, diskTracker.MaxConsumed(), int64(0)) + + sort.Ints(vals) + resRows := make([]string, 0, rowNum) + for i := vals[0]; i <= rowNum; i++ { + resRows = append(resRows, fmt.Sprintf("%d", i)) + } + rows.Check(testkit.Rows(resRows...)) +} diff --git a/executor/executor_pkg_serial_test.go b/executor/executor_pkg_serial_test.go deleted file mode 100644 index cc2b9923d791c..0000000000000 --- a/executor/executor_pkg_serial_test.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package executor - -import ( - "context" - "testing" - - "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/config" - "github.com/pingcap/tidb/expression" - "github.com/pingcap/tidb/parser/ast" - plannerutil "github.com/pingcap/tidb/planner/util" - "github.com/pingcap/tidb/sessionctx/variable" - "github.com/pingcap/tidb/util/memory" - "github.com/pingcap/tidb/util/mock" - "github.com/stretchr/testify/require" -) - -func TestLoadDataWithDifferentEscapeChar(t *testing.T) { - tests := []struct { - input string - escapeChar byte - expected []string - }{ - { - `"{""itemRangeType"":0,""itemContainType"":0,""shopRangeType"":1,""shopJson"":""[{\""id\"":\""A1234\"",\""shopName\"":\""AAAAAA\""}]""}"`, - byte(0), // escaped by '' - []string{`{"itemRangeType":0,"itemContainType":0,"shopRangeType":1,"shopJson":"[{\"id\":\"A1234\",\"shopName\":\"AAAAAA\"}]"}`}, - }, - } - - for _, test := range tests { - ldInfo := LoadDataInfo{ - FieldsInfo: &ast.FieldsClause{ - Enclosed: '"', - Terminated: ",", - Escaped: test.escapeChar, - }, - } - got, err := ldInfo.getFieldsFromLine([]byte(test.input)) - require.NoErrorf(t, err, "failed: %s", test.input) - assertEqualStrings(t, got, test.expected) - } -} - -func TestSortSpillDisk(t *testing.T) { - defer config.RestoreFunc()() - config.UpdateGlobal(func(conf *config.Config) { - conf.OOMUseTmpStorage = true - conf.MemQuotaQuery = 1 - }) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill", "return(true)")) - defer func() { - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill")) - }() - ctx := mock.NewContext() - ctx.GetSessionVars().InitChunkSize = variable.DefMaxChunkSize - ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize - ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, -1) - cas := &sortCase{rows: 2048, orderByIdx: []int{0, 1}, ndvs: []int{0, 0}, ctx: ctx} - opt := mockDataSourceParameters{ - schema: expression.NewSchema(cas.columns()...), - rows: cas.rows, - ctx: cas.ctx, - ndvs: cas.ndvs, - } - dataSource := buildMockDataSource(opt) - exec := &SortExec{ - baseExecutor: newBaseExecutor(cas.ctx, dataSource.schema, 0, dataSource), - ByItems: make([]*plannerutil.ByItems, 0, len(cas.orderByIdx)), - schema: dataSource.schema, - } - for _, idx := range cas.orderByIdx { - exec.ByItems = append(exec.ByItems, &plannerutil.ByItems{Expr: cas.columns()[idx]}) - } - tmpCtx := context.Background() - chk := newFirstChunk(exec) - dataSource.prepareChunks() - err := exec.Open(tmpCtx) - require.NoError(t, err) - for { - err = exec.Next(tmpCtx, chk) - require.NoError(t, err) - if chk.NumRows() == 0 { - break - } - } - // Test only 1 partition and all data in memory. - require.Len(t, exec.partitionList, 1) - require.Equal(t, false, exec.partitionList[0].AlreadySpilledSafeForTest()) - require.Equal(t, 2048, exec.partitionList[0].NumRow()) - err = exec.Close() - require.NoError(t, err) - - ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, 1) - dataSource.prepareChunks() - err = exec.Open(tmpCtx) - require.NoError(t, err) - for { - err = exec.Next(tmpCtx, chk) - require.NoError(t, err) - if chk.NumRows() == 0 { - break - } - } - // Test 2 partitions and all data in disk. - // Now spilling is in parallel. - // Maybe the second add() will called before spilling, depends on - // Golang goroutine scheduling. So the result has two possibilities. 
- if len(exec.partitionList) == 2 { - require.Len(t, exec.partitionList, 2) - require.Equal(t, true, exec.partitionList[0].AlreadySpilledSafeForTest()) - require.Equal(t, true, exec.partitionList[1].AlreadySpilledSafeForTest()) - require.Equal(t, 1024, exec.partitionList[0].NumRow()) - require.Equal(t, 1024, exec.partitionList[1].NumRow()) - } else { - require.Len(t, exec.partitionList, 1) - require.Equal(t, true, exec.partitionList[0].AlreadySpilledSafeForTest()) - require.Equal(t, 2048, exec.partitionList[0].NumRow()) - } - - err = exec.Close() - require.NoError(t, err) - - ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, 24000) - dataSource.prepareChunks() - err = exec.Open(tmpCtx) - require.NoError(t, err) - for { - err = exec.Next(tmpCtx, chk) - require.NoError(t, err) - if chk.NumRows() == 0 { - break - } - } - // Test only 1 partition but spill disk. - require.Len(t, exec.partitionList, 1) - require.Equal(t, true, exec.partitionList[0].AlreadySpilledSafeForTest()) - require.Equal(t, 2048, exec.partitionList[0].NumRow()) - err = exec.Close() - require.NoError(t, err) - - // Test partition nums. - ctx = mock.NewContext() - ctx.GetSessionVars().InitChunkSize = variable.DefMaxChunkSize - ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize - ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, 16864*50) - ctx.GetSessionVars().StmtCtx.MemTracker.Consume(16864 * 45) - cas = &sortCase{rows: 20480, orderByIdx: []int{0, 1}, ndvs: []int{0, 0}, ctx: ctx} - opt = mockDataSourceParameters{ - schema: expression.NewSchema(cas.columns()...), - rows: cas.rows, - ctx: cas.ctx, - ndvs: cas.ndvs, - } - dataSource = buildMockDataSource(opt) - exec = &SortExec{ - baseExecutor: newBaseExecutor(cas.ctx, dataSource.schema, 0, dataSource), - ByItems: make([]*plannerutil.ByItems, 0, len(cas.orderByIdx)), - schema: dataSource.schema, - } - for _, idx := range cas.orderByIdx { - exec.ByItems = append(exec.ByItems, &plannerutil.ByItems{Expr: cas.columns()[idx]}) - } - tmpCtx = context.Background() - chk = newFirstChunk(exec) - dataSource.prepareChunks() - err = exec.Open(tmpCtx) - require.NoError(t, err) - for { - err = exec.Next(tmpCtx, chk) - require.NoError(t, err) - if chk.NumRows() == 0 { - break - } - } - // Don't spill too many partitions. 
- require.True(t, len(exec.partitionList) <= 4) - err = exec.Close() - require.NoError(t, err) -} diff --git a/executor/executor_pkg_test.go b/executor/executor_pkg_test.go index 5cd983fccd5f0..1b449f5fed6a6 100644 --- a/executor/executor_pkg_test.go +++ b/executor/executor_pkg_test.go @@ -23,18 +23,22 @@ import ( "time" "unsafe" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/executor/aggfuncs" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/parser/mysql" + plannerutil "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/session/txninfo" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/tableutil" @@ -397,3 +401,167 @@ func SubTestFilterTemporaryTableKeys(t *testing.T) { res := filterTemporaryTableKeys(vars, []kv.Key{tablecodec.EncodeTablePrefix(tableID), tablecodec.EncodeTablePrefix(42)}) require.Len(t, res, 1) } + +func TestLoadDataWithDifferentEscapeChar(t *testing.T) { + tests := []struct { + input string + escapeChar byte + expected []string + }{ + { + `"{""itemRangeType"":0,""itemContainType"":0,""shopRangeType"":1,""shopJson"":""[{\""id\"":\""A1234\"",\""shopName\"":\""AAAAAA\""}]""}"`, + byte(0), // escaped by '' + []string{`{"itemRangeType":0,"itemContainType":0,"shopRangeType":1,"shopJson":"[{\"id\":\"A1234\",\"shopName\":\"AAAAAA\"}]"}`}, + }, + } + + for _, test := range tests { + ldInfo := LoadDataInfo{ + FieldsInfo: &ast.FieldsClause{ + Enclosed: '"', + Terminated: ",", + Escaped: test.escapeChar, + }, + } + got, err := ldInfo.getFieldsFromLine([]byte(test.input)) + require.NoErrorf(t, err, "failed: %s", test.input) + assertEqualStrings(t, got, test.expected) + } +} + +func TestSortSpillDisk(t *testing.T) { + defer config.RestoreFunc()() + config.UpdateGlobal(func(conf *config.Config) { + conf.OOMUseTmpStorage = true + conf.MemQuotaQuery = 1 + }) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill", "return(true)")) + defer func() { + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill")) + }() + ctx := mock.NewContext() + ctx.GetSessionVars().InitChunkSize = variable.DefMaxChunkSize + ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize + ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, -1) + cas := &sortCase{rows: 2048, orderByIdx: []int{0, 1}, ndvs: []int{0, 0}, ctx: ctx} + opt := mockDataSourceParameters{ + schema: expression.NewSchema(cas.columns()...), + rows: cas.rows, + ctx: cas.ctx, + ndvs: cas.ndvs, + } + dataSource := buildMockDataSource(opt) + exec := &SortExec{ + baseExecutor: newBaseExecutor(cas.ctx, dataSource.schema, 0, dataSource), + ByItems: make([]*plannerutil.ByItems, 0, len(cas.orderByIdx)), + schema: dataSource.schema, + } + for _, idx := range cas.orderByIdx { + exec.ByItems = append(exec.ByItems, &plannerutil.ByItems{Expr: cas.columns()[idx]}) + } + tmpCtx := context.Background() + chk := newFirstChunk(exec) + dataSource.prepareChunks() + err := exec.Open(tmpCtx) + require.NoError(t, err) + for { + err = exec.Next(tmpCtx, chk) + require.NoError(t, err) + if chk.NumRows() == 0 { + break + } + } 
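+	// The memory tracker above was created with a negative quota (no limit), so this first pass should complete without spilling.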
+ // Test only 1 partition and all data in memory. + require.Len(t, exec.partitionList, 1) + require.Equal(t, false, exec.partitionList[0].AlreadySpilledSafeForTest()) + require.Equal(t, 2048, exec.partitionList[0].NumRow()) + err = exec.Close() + require.NoError(t, err) + + ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, 1) + dataSource.prepareChunks() + err = exec.Open(tmpCtx) + require.NoError(t, err) + for { + err = exec.Next(tmpCtx, chk) + require.NoError(t, err) + if chk.NumRows() == 0 { + break + } + } + // Test 2 partitions and all data on disk. + // Now spilling is in parallel. + // Maybe the second add() will be called before spilling, depending on + // Go goroutine scheduling, so the result has two possibilities. + if len(exec.partitionList) == 2 { + require.Len(t, exec.partitionList, 2) + require.Equal(t, true, exec.partitionList[0].AlreadySpilledSafeForTest()) + require.Equal(t, true, exec.partitionList[1].AlreadySpilledSafeForTest()) + require.Equal(t, 1024, exec.partitionList[0].NumRow()) + require.Equal(t, 1024, exec.partitionList[1].NumRow()) + } else { + require.Len(t, exec.partitionList, 1) + require.Equal(t, true, exec.partitionList[0].AlreadySpilledSafeForTest()) + require.Equal(t, 2048, exec.partitionList[0].NumRow()) + } + + err = exec.Close() + require.NoError(t, err) + + ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, 24000) + dataSource.prepareChunks() + err = exec.Open(tmpCtx) + require.NoError(t, err) + for { + err = exec.Next(tmpCtx, chk) + require.NoError(t, err) + if chk.NumRows() == 0 { + break + } + } + // Test only 1 partition but spilled to disk. + require.Len(t, exec.partitionList, 1) + require.Equal(t, true, exec.partitionList[0].AlreadySpilledSafeForTest()) + require.Equal(t, 2048, exec.partitionList[0].NumRow()) + err = exec.Close() + require.NoError(t, err) + + // Test the number of partitions. + ctx = mock.NewContext() + ctx.GetSessionVars().InitChunkSize = variable.DefMaxChunkSize + ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize + ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, 16864*50) + ctx.GetSessionVars().StmtCtx.MemTracker.Consume(16864 * 45) + cas = &sortCase{rows: 20480, orderByIdx: []int{0, 1}, ndvs: []int{0, 0}, ctx: ctx} + opt = mockDataSourceParameters{ + schema: expression.NewSchema(cas.columns()...), + rows: cas.rows, + ctx: cas.ctx, + ndvs: cas.ndvs, + } + dataSource = buildMockDataSource(opt) + exec = &SortExec{ + baseExecutor: newBaseExecutor(cas.ctx, dataSource.schema, 0, dataSource), + ByItems: make([]*plannerutil.ByItems, 0, len(cas.orderByIdx)), + schema: dataSource.schema, + } + for _, idx := range cas.orderByIdx { + exec.ByItems = append(exec.ByItems, &plannerutil.ByItems{Expr: cas.columns()[idx]}) + } + tmpCtx = context.Background() + chk = newFirstChunk(exec) + dataSource.prepareChunks() + err = exec.Open(tmpCtx) + require.NoError(t, err) + for { + err = exec.Next(tmpCtx, chk) + require.NoError(t, err) + if chk.NumRows() == 0 { + break + } + } + // Don't spill too many partitions. 
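+	// The quota here is 16864*50 with 16864*45 already consumed, so the sorter may spill repeatedly, but it should not create more than a few partitions.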
+ require.True(t, len(exec.partitionList) <= 4) + err = exec.Close() + require.NoError(t, err) +} diff --git a/executor/explainfor_test.go b/executor/explainfor_test.go index e3dbfd42d3a52..5befb576c132a 100644 --- a/executor/explainfor_test.go +++ b/executor/explainfor_test.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/session" - txninfo "github.com/pingcap/tidb/session/txninfo" + "github.com/pingcap/tidb/session/txninfo" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/israce" @@ -1124,16 +1124,20 @@ func (s *testPrepareSerialSuite) TestSPM4PlanCache(c *C) { tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("1")) tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + // The bindSQL has changed, the previous cache is invalid. + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) tk.MustQuery("execute stmt;").Check(testkit.Rows()) tkProcess = tk.Se.ShowProcess() ps = []*util.ProcessInfo{tkProcess} tk.Se.SetSessionManager(&mockSessionManager1{PS: ps}) res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - // The binding does not take effect for caches that have been cached. - c.Assert(res.Rows()[0][0], Matches, ".*TableReader.*") - c.Assert(res.Rows()[1][0], Matches, ".*TableFullScan.*") - tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("0")) + // We can use the new binding. + c.Assert(res.Rows()[0][0], Matches, ".*IndexReader.*") + c.Assert(res.Rows()[1][0], Matches, ".*IndexFullScan.*") + tk.MustQuery("execute stmt;").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + tk.MustQuery("execute stmt;").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("1")) tk.MustExec("delete from mysql.bind_info where default_db='test';") tk.MustExec("admin reload bindings;") diff --git a/executor/hash_table_serial_test.go b/executor/hash_table_test.go similarity index 100% rename from executor/hash_table_serial_test.go rename to executor/hash_table_test.go diff --git a/executor/infoschema_reader_test.go b/executor/infoschema_reader_test.go index 0744372305cb6..53fd409f0fd2a 100644 --- a/executor/infoschema_reader_test.go +++ b/executor/infoschema_reader_test.go @@ -37,7 +37,7 @@ import ( "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/server" "github.com/pingcap/tidb/session" - txninfo "github.com/pingcap/tidb/session/txninfo" + "github.com/pingcap/tidb/session/txninfo" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/statistics/handle" diff --git a/executor/oomtest/oom_serial_test.go b/executor/oomtest/oom_test.go similarity index 100% rename from executor/oomtest/oom_serial_test.go rename to executor/oomtest/oom_test.go diff --git a/executor/partition_table_test.go b/executor/partition_table_test.go index 25c52e8c4954f..89e4311aaee09 100644 --- a/executor/partition_table_test.go +++ b/executor/partition_table_test.go @@ -2912,7 +2912,7 @@ type testOutput struct { func (s *testSuiteWithData) verifyPartitionResult(tk *testkit.TestKit, input []string, output []testOutput) { for i, tt := range input { - var isSelect bool = false + var isSelect = false if strings.HasPrefix(strings.ToLower(tt), "select ") { isSelect = true } diff --git 
a/executor/prepared.go b/executor/prepared.go index 3013aba0de9cd..82a030e76b6c1 100644 --- a/executor/prepared.go +++ b/executor/prepared.go @@ -314,8 +314,9 @@ func (e *DeallocateExec) Next(ctx context.Context, req *chunk.Chunk) error { prepared := preparedObj.PreparedAst delete(vars.PreparedStmtNameToID, e.Name) if plannercore.PreparedPlanCacheEnabled() { + bindSQL := planner.GetBindSQL4PlanCache(e.ctx, prepared.Stmt) e.ctx.PreparedPlanCache().Delete(plannercore.NewPSTMTPlanCacheKey( - vars, id, prepared.SchemaVersion, + vars, id, prepared.SchemaVersion, bindSQL, )) } vars.RemovePreparedStmt(id) diff --git a/executor/prepared_serial_test.go b/executor/prepared_serial_test.go deleted file mode 100644 index 8315e3dadc718..0000000000000 --- a/executor/prepared_serial_test.go +++ /dev/null @@ -1,1179 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package executor_test - -import ( - "fmt" - "strconv" - "strings" - "testing" - - "github.com/pingcap/tidb/domain" - "github.com/pingcap/tidb/parser/auth" - "github.com/pingcap/tidb/parser/model" - plannercore "github.com/pingcap/tidb/planner/core" - "github.com/pingcap/tidb/sessionctx/variable" - "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/testkit/testdata" - "github.com/pingcap/tidb/util" - "github.com/pingcap/tidb/util/israce" - "github.com/stretchr/testify/require" -) - -func TestIssue28064(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - tk.MustExec("use test") - tk.MustExec("drop table if exists t28064") - tk.MustExec("CREATE TABLE `t28064` (" + - "`a` decimal(10,0) DEFAULT NULL," + - "`b` decimal(10,0) DEFAULT NULL," + - "`c` decimal(10,0) DEFAULT NULL," + - "`d` decimal(10,0) DEFAULT NULL," + - "KEY `iabc` (`a`,`b`,`c`));") - tk.MustExec("set @a='123', @b='234', @c='345';") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("prepare stmt1 from 'select * from t28064 use index (iabc) where a = ? and b = ? 
and c = ?';") - - tk.MustExec("execute stmt1 using @a, @b, @c;") - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - rows := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - rows.Check(testkit.Rows("Selection_8 0.00 root eq(test.t28064.a, 123), eq(test.t28064.b, 234), eq(test.t28064.c, 345)", - "└─IndexLookUp_7 0.00 root ", - " ├─IndexRangeScan_5(Build) 0.00 cop[tikv] table:t28064, index:iabc(a, b, c) range:[123 234 345,123 234 345], keep order:false, stats:pseudo", - " └─TableRowIDScan_6(Probe) 0.00 cop[tikv] table:t28064 keep order:false, stats:pseudo")) - - tk.MustExec("execute stmt1 using @a, @b, @c;") - rows = tk.MustQuery("select @@last_plan_from_cache") - rows.Check(testkit.Rows("1")) - - tk.MustExec("execute stmt1 using @a, @b, @c;") - rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - rows.Check(testkit.Rows("Selection_8 0.00 root eq(test.t28064.a, 123), eq(test.t28064.b, 234), eq(test.t28064.c, 345)", - "└─IndexLookUp_7 0.00 root ", - " ├─IndexRangeScan_5(Build) 0.00 cop[tikv] table:t28064, index:iabc(a, b, c) range:[123 234 345,123 234 345], keep order:false, stats:pseudo", - " └─TableRowIDScan_6(Probe) 0.00 cop[tikv] table:t28064 keep order:false, stats:pseudo")) -} - -func TestPreparePlanCache4Blacklist(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - - // test the blacklist of optimization rules - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int);") - tk.MustExec("prepare stmt from 'select min(a) from t;';") - tk.MustExec("execute stmt;") - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - require.Contains(t, res.Rows()[1][0], "TopN") - - res = tk.MustQuery("explain format = 'brief' select min(a) from t") - require.Contains(t, res.Rows()[1][0], "TopN") - - tk.MustExec("INSERT INTO mysql.opt_rule_blacklist VALUES('max_min_eliminate');") - tk.MustExec("ADMIN reload opt_rule_blacklist;") - - tk.MustExec("execute stmt;") - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustExec("execute stmt;") - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - // Plans that have been cached will not be affected by the blacklist. 
- require.Contains(t, res.Rows()[1][0], "TopN") - - res = tk.MustQuery("explain format = 'brief' select min(a) from t") - require.Contains(t, res.Rows()[0][0], "StreamAgg") - - // test the blacklist of Expression Pushdown - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int);") - tk.MustExec("prepare stmt from 'SELECT * FROM t WHERE a < 2 and a > 2;';") - tk.MustExec("execute stmt;") - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - require.Equal(t, 3, len(res.Rows())) - require.Contains(t, res.Rows()[1][0], "Selection") - require.Equal(t, "gt(test.t.a, 2), lt(test.t.a, 2)", res.Rows()[1][4]) - - res = tk.MustQuery("explain format = 'brief' SELECT * FROM t WHERE a < 2 and a > 2;") - require.Equal(t, 3, len(res.Rows())) - require.Equal(t, "gt(test.t.a, 2), lt(test.t.a, 2)", res.Rows()[1][4]) - - tk.MustExec("INSERT INTO mysql.expr_pushdown_blacklist VALUES('<','tikv','');") - tk.MustExec("ADMIN reload expr_pushdown_blacklist;") - - tk.MustExec("execute stmt;") - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustExec("execute stmt;") - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - // The expressions can still be pushed down to tikv. - require.Equal(t, 3, len(res.Rows())) - require.Contains(t, res.Rows()[1][0], "Selection") - require.Equal(t, "gt(test.t.a, 2), lt(test.t.a, 2)", res.Rows()[1][4]) - - res = tk.MustQuery("explain format = 'brief' SELECT * FROM t WHERE a < 2 and a > 2;") - require.Equal(t, 4, len(res.Rows())) - require.Contains(t, res.Rows()[0][0], "Selection") - require.Equal(t, "lt(test.t.a, 2)", res.Rows()[0][4]) - require.Contains(t, res.Rows()[2][0], "Selection") - require.Equal(t, "gt(test.t.a, 2)", res.Rows()[2][4]) - - tk.MustExec("DELETE FROM mysql.expr_pushdown_blacklist;") - tk.MustExec("ADMIN reload expr_pushdown_blacklist;") -} - -func TestPlanCacheClusterIndex(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - tk.MustExec("use test") - tk.MustExec("drop table if exists t1") - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("create table t1(a varchar(20), b varchar(20), c varchar(20), primary key(a, b))") - tk.MustExec("insert into t1 values('1','1','111'),('2','2','222'),('3','3','333')") - - // For table scan - tk.MustExec(`prepare stmt1 from "select * from t1 where t1.a = ? 
and t1.b > ?"`) - tk.MustExec("set @v1 = '1'") - tk.MustExec("set @v2 = '0'") - tk.MustQuery("execute stmt1 using @v1,@v2").Check(testkit.Rows("1 1 111")) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) - tk.MustExec("set @v1 = '2'") - tk.MustExec("set @v2 = '1'") - tk.MustQuery("execute stmt1 using @v1,@v2").Check(testkit.Rows("2 2 222")) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) - tk.MustExec("set @v1 = '3'") - tk.MustExec("set @v2 = '2'") - tk.MustQuery("execute stmt1 using @v1,@v2").Check(testkit.Rows("3 3 333")) - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - rows := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() - require.Equal(t, 0, strings.Index(rows[len(rows)-1][4].(string), `range:("3" "2","3" +inf]`)) - // For point get - tk.MustExec(`prepare stmt2 from "select * from t1 where t1.a = ? and t1.b = ?"`) - tk.MustExec("set @v1 = '1'") - tk.MustExec("set @v2 = '1'") - tk.MustQuery("execute stmt2 using @v1,@v2").Check(testkit.Rows("1 1 111")) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) - tk.MustExec("set @v1 = '2'") - tk.MustExec("set @v2 = '2'") - tk.MustQuery("execute stmt2 using @v1,@v2").Check(testkit.Rows("2 2 222")) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) - tk.MustExec("set @v1 = '3'") - tk.MustExec("set @v2 = '3'") - tk.MustQuery("execute stmt2 using @v1,@v2").Check(testkit.Rows("3 3 333")) - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() - require.Equal(t, 0, strings.Index(rows[len(rows)-1][0].(string), `Point_Get`)) - // For CBO point get and batch point get - // case 1: - tk.MustExec(`drop table if exists ta, tb`) - tk.MustExec(`create table ta (a varchar(8) primary key, b int)`) - tk.MustExec(`insert ta values ('a', 1), ('b', 2)`) - tk.MustExec(`create table tb (a varchar(8) primary key, b int)`) - tk.MustExec(`insert tb values ('a', 1), ('b', 2)`) - tk.MustExec(`prepare stmt1 from "select * from ta, tb where ta.a = tb.a and ta.a = ?"`) - tk.MustExec(`set @v1 = 'a', @v2 = 'b'`) - tk.MustQuery(`execute stmt1 using @v1`).Check(testkit.Rows("a 1 a 1")) - tk.MustQuery(`execute stmt1 using @v2`).Check(testkit.Rows("b 2 b 2")) - - // case 2: - tk.MustExec(`drop table if exists ta, tb`) - tk.MustExec(`create table ta (a varchar(10) primary key, b int not null)`) - tk.MustExec(`insert ta values ('a', 1), ('b', 2)`) - tk.MustExec(`create table tb (b int primary key, c int)`) - tk.MustExec(`insert tb values (1, 1), (2, 2)`) - tk.MustExec(`prepare stmt1 from "select * from ta, tb where ta.b = tb.b and ta.a = ?"`) - tk.MustExec(`set @v1 = 'a', @v2 = 'b'`) - tk.MustQuery(`execute stmt1 using @v1`).Check(testkit.Rows("a 1 1 1")) - tk.MustQuery(`execute stmt1 using @v2`).Check(testkit.Rows("b 2 2 2")) - tk.MustQuery(`execute stmt1 using @v2`).Check(testkit.Rows("b 2 2 2")) - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() - require.True(t, strings.Contains(rows[3][0].(string), `TableRangeScan`)) - - // case 3: - tk.MustExec(`drop table if exists ta, tb`) - tk.MustExec(`create table ta (a 
varchar(10), b varchar(10), c int, primary key (a, b))`) - tk.MustExec(`insert ta values ('a', 'a', 1), ('b', 'b', 2), ('c', 'c', 3)`) - tk.MustExec(`create table tb (b int primary key, c int)`) - tk.MustExec(`insert tb values (1, 1), (2, 2), (3,3)`) - tk.MustExec(`prepare stmt1 from "select * from ta, tb where ta.c = tb.b and ta.a = ? and ta.b = ?"`) - tk.MustExec(`set @v1 = 'a', @v2 = 'b', @v3 = 'c'`) - tk.MustQuery(`execute stmt1 using @v1, @v1`).Check(testkit.Rows("a a 1 1 1")) - tk.MustQuery(`execute stmt1 using @v2, @v2`).Check(testkit.Rows("b b 2 2 2")) - tk.MustExec(`prepare stmt2 from "select * from ta, tb where ta.c = tb.b and (ta.a, ta.b) in ((?, ?), (?, ?))"`) - tk.MustQuery(`execute stmt2 using @v1, @v1, @v2, @v2`).Check(testkit.Rows("a a 1 1 1", "b b 2 2 2")) - tk.MustQuery(`execute stmt2 using @v2, @v2, @v3, @v3`).Check(testkit.Rows("b b 2 2 2", "c c 3 3 3")) - - // For issue 19002 - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - tk.MustExec(`drop table if exists t1`) - tk.MustExec(`create table t1(a int, b int, c int, primary key(a, b))`) - tk.MustExec(`insert into t1 values(1,1,111),(2,2,222),(3,3,333)`) - // Point Get: - tk.MustExec(`prepare stmt1 from "select * from t1 where t1.a = ? and t1.b = ?"`) - tk.MustExec(`set @v1=1, @v2=1`) - tk.MustQuery(`execute stmt1 using @v1,@v2`).Check(testkit.Rows("1 1 111")) - tk.MustExec(`set @v1=2, @v2=2`) - tk.MustQuery(`execute stmt1 using @v1,@v2`).Check(testkit.Rows("2 2 222")) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) - // Batch Point Get: - tk.MustExec(`prepare stmt2 from "select * from t1 where (t1.a,t1.b) in ((?,?),(?,?))"`) - tk.MustExec(`set @v1=1, @v2=1, @v3=2, @v4=2`) - tk.MustQuery(`execute stmt2 using @v1,@v2,@v3,@v4`).Check(testkit.Rows("1 1 111", "2 2 222")) - tk.MustExec(`set @v1=2, @v2=2, @v3=3, @v4=3`) - tk.MustQuery(`execute stmt2 using @v1,@v2,@v3,@v4`).Check(testkit.Rows("2 2 222", "3 3 333")) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) -} - -func TestPlanCacheWithDifferentVariableTypes(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - require.NoError(t, err) - - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("create table t1(a varchar(20), b int, c float, key(b, a))") - tk.MustExec("insert into t1 values('1',1,1.1),('2',2,222),('3',3,333)") - tk.MustExec("create table t2(a varchar(20), b int, c float, key(b, a))") - tk.MustExec("insert into t2 values('3',3,3.3),('2',2,222),('3',3,333)") - - var input []struct { - PrepareStmt string - Executes []struct { - Vars []struct { - Name string - Value string - } - ExecuteSQL string - } - } - var output []struct { - PrepareStmt string - Executes []struct { - SQL string - Vars []struct { - Name string - Value string - } - Plan []string - LastPlanUseCache string - Result []string - } - } - prepareMergeSuiteData.GetTestCases(t, &input, &output) - for i, tt := range input { - tk.MustExec(tt.PrepareStmt) - testdata.OnRecord(func() { - output[i].PrepareStmt = tt.PrepareStmt - output[i].Executes = make([]struct { - SQL string - Vars []struct { - Name string - Value string - } - 
Plan []string - LastPlanUseCache string - Result []string - }, len(tt.Executes)) - }) - require.Equal(t, tt.PrepareStmt, output[i].PrepareStmt) - for j, exec := range tt.Executes { - for _, v := range exec.Vars { - tk.MustExec(fmt.Sprintf(`set @%s = %s`, v.Name, v.Value)) - } - res := tk.MustQuery(exec.ExecuteSQL) - lastPlanUseCache := tk.MustQuery("select @@last_plan_from_cache").Rows()[0][0] - tk.MustQuery(exec.ExecuteSQL) - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - plan := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - testdata.OnRecord(func() { - output[i].Executes[j].SQL = exec.ExecuteSQL - output[i].Executes[j].Plan = testdata.ConvertRowsToStrings(plan.Rows()) - output[i].Executes[j].Vars = exec.Vars - output[i].Executes[j].LastPlanUseCache = lastPlanUseCache.(string) - output[i].Executes[j].Result = testdata.ConvertRowsToStrings(res.Rows()) - }) - - require.Equal(t, exec.ExecuteSQL, output[i].Executes[j].SQL) - plan.Check(testkit.Rows(output[i].Executes[j].Plan...)) - require.Equal(t, exec.Vars, output[i].Executes[j].Vars) - require.Equal(t, lastPlanUseCache.(string), output[i].Executes[j].LastPlanUseCache) - res.Check(testkit.Rows(output[i].Executes[j].Result...)) - } - } -} - -func TestPlanCacheOperators(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - type ExecCase struct { - Parameters []string - UseCache bool - } - type PrepCase struct { - PrepStmt string - ExecCases []ExecCase - } - - cases := []PrepCase{ - {"use test", nil}, - - // cases for TableReader on PK - {"create table t (a int, b int, primary key(a))", nil}, - {"insert into t values (1,1), (2,2), (3,3), (4,4), (5,5), (6,null)", nil}, - {"select a from t where a=?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"2"}, true}, - {[]string{"3"}, true}, - }}, - {"select a from t where a in (?,?,?)", []ExecCase{ - {[]string{"1", "1", "1"}, false}, - {[]string{"2", "3", "4"}, true}, - {[]string{"3", "5", "7"}, true}, - }}, - {"select a from t where a>? and a? and a? and a? and a? and a?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, true}, - {[]string{"5"}, true}, - }}, - {"select /*+ HASH_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t2.b>?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, true}, - {[]string{"5"}, true}, - }}, - {"select /*+ HASH_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t1.b>? and t2.b?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, true}, - {[]string{"5"}, true}, - }}, - {"select /*+ MERGE_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t2.b>?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, true}, - {[]string{"5"}, true}, - }}, - {"select /*+ MERGE_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t1.b>? and t2.b?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, true}, - {[]string{"5"}, true}, - }}, - {"select /*+ INL_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t2.b>?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, true}, - {[]string{"5"}, true}, - }}, - {"select /*+ INL_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t1.b>? and t2.b? 
and t1.a > (select min(t2.a) from t t2 where t2.b < t1.b)", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, false}, // plans with sub-queries cannot be cached, but the result must be correct - {[]string{"5"}, false}, - }}, - {"select * from t t1 where t1.a > (select min(t2.a) from t t2 where t2.b < t1.b+?)", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, false}, - {[]string{"5"}, false}, - }}, - {"select * from t t1 where t1.b>? and t1.a > (select min(t2.a) from t t2 where t2.b < t1.b+?)", []ExecCase{ - {[]string{"1", "1"}, false}, - {[]string{"3", "2"}, false}, - {[]string{"5", "3"}, false}, - }}, - {"drop table t", nil}, - - // cases for Window - {"create table t (name varchar(50), y int, sale decimal(14,2))", nil}, - {"insert into t values ('Bob',2016,2.4), ('Bob',2017,3.2), ('Bob',2018,2.1), ('Alice',2016,1.4), ('Alice',2017,2), ('Alice',2018,3.3), ('John',2016,4), ('John',2017,2.1), ('John',2018,5)", nil}, - {"select *, sum(sale) over (partition by y order by sale) total from t where sale>? order by y", []ExecCase{ - {[]string{"0.1"}, false}, - {[]string{"0.5"}, true}, - {[]string{"1.5"}, true}, - {[]string{"3.5"}, true}, - }}, - {"select *, sum(sale) over (partition by y order by sale+? rows 2 preceding) total from t order by y", []ExecCase{ - {[]string{"0.1"}, false}, - {[]string{"0.5"}, true}, - {[]string{"1.5"}, true}, - {[]string{"3.5"}, true}, - }}, - {"select *, rank() over (partition by y order by sale+? rows 2 preceding) total from t order by y", []ExecCase{ - {[]string{"0.1"}, false}, - {[]string{"0.5"}, true}, - {[]string{"1.5"}, true}, - {[]string{"3.5"}, true}, - }}, - {"select *, first_value(sale) over (partition by y order by sale+? rows 2 preceding) total from t order by y", []ExecCase{ - {[]string{"0.1"}, false}, - {[]string{"0.5"}, true}, - {[]string{"1.5"}, true}, - {[]string{"3.5"}, true}, - }}, - {"select *, first_value(sale) over (partition by y order by sale rows ? preceding) total from t order by y", []ExecCase{ - {[]string{"1"}, false}, // window plans with parameters in frame cannot be cached - {[]string{"2"}, false}, - {[]string{"3"}, false}, - {[]string{"4"}, false}, - }}, - {"drop table t", nil}, - - // cases for Limit - {"create table t (a int)", nil}, - {"insert into t values (1), (1), (2), (2), (3), (4), (5), (6), (7), (8), (9), (0), (0)", nil}, - {"select * from t limit ?", []ExecCase{ - {[]string{"20"}, false}, - {[]string{"30"}, false}, - }}, - {"select * from t limit 40, ?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"2"}, false}, - }}, - {"select * from t limit ?, 10", []ExecCase{ - {[]string{"20"}, false}, - {[]string{"30"}, false}, - }}, - {"select * from t limit ?, ?", []ExecCase{ - {[]string{"20", "20"}, false}, - {[]string{"20", "40"}, false}, - }}, - {"select * from t where a? order by mod(a, 3)", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"2"}, true}, - {[]string{"3"}, true}, - }}, - - // cases for topN - {"select * from t order by b limit ?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"2"}, false}, - }}, - {"select * from t order by b limit 10, ?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"2"}, false}, - }}, - {"select * from t order by ? limit 10", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"2"}, false}, - }}, - {"select * from t order by ? 
limit ?", []ExecCase{ - {[]string{"1", "10"}, false}, - {[]string{"2", "20"}, false}, - }}, - } - - for _, prepCase := range cases { - isQuery := strings.Contains(prepCase.PrepStmt, "select") - if !isQuery { - tk.MustExec(prepCase.PrepStmt) - continue - } - - tk.MustExec(fmt.Sprintf(`prepare stmt from '%v'`, prepCase.PrepStmt)) - for _, execCase := range prepCase.ExecCases { - // set all parameters - usingStmt := "" - if len(execCase.Parameters) > 0 { - setStmt := "set " - usingStmt = "using " - for i, parameter := range execCase.Parameters { - if i > 0 { - setStmt += ", " - usingStmt += ", " - } - setStmt += fmt.Sprintf("@x%v=%v", i, parameter) - usingStmt += fmt.Sprintf("@x%v", i) - } - tk.MustExec(setStmt) - } - - // execute this statement and check whether it uses a cached plan - results := tk.MustQuery("execute stmt " + usingStmt).Sort().Rows() - - // check whether the result is correct - tmp := strings.Split(prepCase.PrepStmt, "?") - require.Equal(t, len(execCase.Parameters)+1, len(tmp)) - query := "" - for i := range tmp { - query += tmp[i] - if i < len(execCase.Parameters) { - query += execCase.Parameters[i] - } - } - tk.MustQuery(query).Sort().Check(results) - } - } -} - -func TestIssue28782(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("prepare stmt from 'SELECT IF(?, 1, 0);';") - tk.MustExec("set @a=1, @b=null, @c=0") - - tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt using @b;").Check(testkit.Rows("0")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt using @c;").Check(testkit.Rows("0")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) -} - -func TestIssue29101(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - - tk.MustExec(`use test`) - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec(`CREATE TABLE customer ( - c_id int(11) NOT NULL, - c_d_id int(11) NOT NULL, - c_w_id int(11) NOT NULL, - c_first varchar(16) DEFAULT NULL, - c_last varchar(16) DEFAULT NULL, - c_credit char(2) DEFAULT NULL, - c_discount decimal(4,4) DEFAULT NULL, - PRIMARY KEY (c_w_id,c_d_id,c_id), - KEY idx_customer (c_w_id,c_d_id,c_last,c_first) - )`) - tk.MustExec(`CREATE TABLE warehouse ( - w_id int(11) NOT NULL, - w_tax decimal(4,4) DEFAULT NULL, - PRIMARY KEY (w_id) - )`) - tk.MustExec(`prepare s1 from 'SELECT /*+ TIDB_INLJ(customer,warehouse) */ c_discount, c_last, c_credit, w_tax FROM customer, warehouse WHERE w_id = ? AND c_w_id = w_id AND c_d_id = ? 
AND c_id = ?'`) - tk.MustExec(`set @a=936,@b=7,@c=158`) - tk.MustQuery(`execute s1 using @a,@b,@c`).Check(testkit.Rows()) - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Check(testkit.Rows( // can use IndexJoin - `Projection_6 1.00 root test.customer.c_discount, test.customer.c_last, test.customer.c_credit, test.warehouse.w_tax`, - `└─IndexJoin_14 1.00 root inner join, inner:TableReader_10, outer key:test.customer.c_w_id, inner key:test.warehouse.w_id, equal cond:eq(test.customer.c_w_id, test.warehouse.w_id)`, - ` ├─Point_Get_33(Build) 1.00 root table:customer, index:PRIMARY(c_w_id, c_d_id, c_id) `, - ` └─TableReader_10(Probe) 0.00 root data:Selection_9`, - ` └─Selection_9 0.00 cop[tikv] eq(test.warehouse.w_id, 936)`, - ` └─TableRangeScan_8 1.00 cop[tikv] table:warehouse range: decided by [test.customer.c_w_id], keep order:false, stats:pseudo`)) - tk.MustQuery(`execute s1 using @a,@b,@c`).Check(testkit.Rows()) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) // can use the plan-cache - - tk.MustExec(`CREATE TABLE order_line ( - ol_o_id int(11) NOT NULL, - ol_d_id int(11) NOT NULL, - ol_w_id int(11) NOT NULL, - ol_number int(11) NOT NULL, - ol_i_id int(11) NOT NULL, - PRIMARY KEY (ol_w_id,ol_d_id,ol_o_id,ol_number))`) - tk.MustExec(`CREATE TABLE stock ( - s_i_id int(11) NOT NULL, - s_w_id int(11) NOT NULL, - s_quantity int(11) DEFAULT NULL, - PRIMARY KEY (s_w_id,s_i_id))`) - tk.MustExec(`prepare s1 from 'SELECT /*+ TIDB_INLJ(order_line,stock) */ COUNT(DISTINCT (s_i_id)) stock_count FROM order_line, stock WHERE ol_w_id = ? AND ol_d_id = ? AND ol_o_id < ? AND ol_o_id >= ? - 20 AND s_w_id = ? 
AND s_i_id = ol_i_id AND s_quantity < ?'`) - tk.MustExec(`set @a=391,@b=1,@c=3058,@d=18`) - tk.MustExec(`execute s1 using @a,@b,@c,@c,@a,@d`) - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Check(testkit.Rows( // can use index-join - `StreamAgg_9 1.00 root funcs:count(distinct test.stock.s_i_id)->Column#11`, - `└─IndexJoin_14 0.03 root inner join, inner:IndexLookUp_13, outer key:test.order_line.ol_i_id, inner key:test.stock.s_i_id, equal cond:eq(test.order_line.ol_i_id, test.stock.s_i_id)`, - ` ├─Selection_30(Build) 0.03 root eq(test.order_line.ol_d_id, 1), eq(test.order_line.ol_w_id, 391), ge(test.order_line.ol_o_id, 3038), lt(test.order_line.ol_o_id, 3058)`, - ` │ └─IndexLookUp_29 0.03 root `, - ` │ ├─IndexRangeScan_27(Build) 0.03 cop[tikv] table:order_line, index:PRIMARY(ol_w_id, ol_d_id, ol_o_id, ol_number) range:[391 1 3038,391 1 3058), keep order:false, stats:pseudo`, - ` │ └─TableRowIDScan_28(Probe) 0.03 cop[tikv] table:order_line keep order:false, stats:pseudo`, - ` └─IndexLookUp_13(Probe) 1.00 root `, - ` ├─IndexRangeScan_10(Build) 1.00 cop[tikv] table:stock, index:PRIMARY(s_w_id, s_i_id) range: decided by [eq(test.stock.s_i_id, test.order_line.ol_i_id) eq(test.stock.s_w_id, 391)], keep order:false, stats:pseudo`, - ` └─Selection_12(Probe) 1.00 cop[tikv] lt(test.stock.s_quantity, 18)`, - ` └─TableRowIDScan_11 1.00 cop[tikv] table:stock keep order:false, stats:pseudo`)) - tk.MustExec(`execute s1 using @a,@b,@c,@c,@a,@d`) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) // can use the plan-cache -} - -func TestIssue28087And28162(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - - // issue 28087 - tk.MustExec(`use test`) - tk.MustExec(`drop table if exists IDT_26207`) - tk.MustExec(`CREATE TABLE IDT_26207 (col1 bit(1))`) - tk.MustExec(`insert into IDT_26207 values(0x0), (0x1)`) - tk.MustExec(`prepare stmt from 'select t1.col1 from IDT_26207 as t1 left join IDT_26207 as t2 on t1.col1 = t2.col1 where t1.col1 in (?, ?, ?)'`) - tk.MustExec(`set @a=0x01, @b=0x01, @c=0x01`) - tk.MustQuery(`execute stmt using @a,@b,@c`).Check(testkit.Rows("\x01")) - tk.MustExec(`set @a=0x00, @b=0x00, @c=0x01`) - tk.MustQuery(`execute stmt using @a,@b,@c`).Check(testkit.Rows("\x00", "\x01")) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("0")) - - // issue 28162 - tk.MustExec(`drop table if exists IDT_MC21780`) - tk.MustExec(`CREATE TABLE IDT_MC21780 ( - COL1 timestamp NULL DEFAULT NULL, - COL2 timestamp NULL DEFAULT NULL, - COL3 timestamp NULL DEFAULT NULL, - KEY U_M_COL (COL1,COL2) - )`) - tk.MustExec(`insert into IDT_MC21780 values("1970-12-18 10:53:28", "1970-12-18 10:53:28", "1970-12-18 10:53:28")`) - tk.MustExec(`prepare stmt from 'select/*+ hash_join(t1) */ * from IDT_MC21780 t1 join IDT_MC21780 t2 on t1.col1 = t2.col1 where t1. col1 < ? and t2. 
col1 in (?, ?, ?);'`) - tk.MustExec(`set @a="2038-01-19 03:14:07", @b="2038-01-19 03:14:07", @c="2038-01-19 03:14:07", @d="2038-01-19 03:14:07"`) - tk.MustQuery(`execute stmt using @a,@b,@c,@d`).Check(testkit.Rows()) - tk.MustExec(`set @a="1976-09-09 20:21:11", @b="2021-07-14 09:28:16", @c="1982-01-09 03:36:39", @d="1970-12-18 10:53:28"`) - tk.MustQuery(`execute stmt using @a,@b,@c,@d`).Check(testkit.Rows("1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28")) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) -} - -func TestParameterPushDown(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - require.NoError(t, err) - tk.MustExec(`use test`) - tk.MustExec(`drop table if exists t`) - tk.MustExec(`create table t (a int, b int, c int, key(a))`) - tk.MustExec(`insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 6, 6)`) - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec(`set @x1=1,@x5=5,@x10=10,@x20=20`) - - var input []struct { - SQL string - } - var output []struct { - Result []string - Plan []string - FromCache string - } - prepareMergeSuiteData.GetTestCases(t, &input, &output) - - for i, tt := range input { - if strings.HasPrefix(tt.SQL, "execute") { - res := tk.MustQuery(tt.SQL).Sort() - fromCache := tk.MustQuery("select @@last_plan_from_cache") - tk.MustQuery(tt.SQL) - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - plan := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - - testdata.OnRecord(func() { - output[i].Result = testdata.ConvertRowsToStrings(res.Rows()) - output[i].Plan = testdata.ConvertRowsToStrings(plan.Rows()) - output[i].FromCache = fromCache.Rows()[0][0].(string) - }) - - res.Check(testkit.Rows(output[i].Result...)) - plan.Check(testkit.Rows(output[i].Plan...)) - require.Equal(t, fromCache.Rows()[0][0].(string), output[i].FromCache) - } else { - tk.MustExec(tt.SQL) - testdata.OnRecord(func() { - output[i].Result = nil - }) - } - } -} - -func TestPreparePlanCache4Function(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - - // Testing for non-deterministic functions - tk.MustExec("prepare stmt from 'select rand()';") - res := tk.MustQuery("execute stmt;") - require.Equal(t, 1, len(res.Rows())) - - res1 := tk.MustQuery("execute stmt;") - require.Equal(t, 1, len(res1.Rows())) - require.NotEqual(t, res.Rows()[0][0], res1.Rows()[0][0]) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) - - // Testing for control functions - tk.MustExec("prepare stmt from 'SELECT IFNULL(?,0);';") - tk.MustExec("set @a = 1, @b = null;") - tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) - 
tk.MustQuery("execute stmt using @b;").Check(testkit.Rows("0")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int);") - tk.MustExec("prepare stmt from 'select a, case when a = ? then 0 when a <=> ? then 1 else 2 end b from t order by a;';") - tk.MustExec("insert into t values(0), (1), (2), (null);") - tk.MustExec("set @a = 0, @b = 1, @c = 2, @d = null;") - tk.MustQuery("execute stmt using @a, @b;").Check(testkit.Rows(" 2", "0 0", "1 1", "2 2")) - tk.MustQuery("execute stmt using @c, @d;").Check(testkit.Rows(" 1", "0 2", "1 2", "2 0")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) -} - -func TestPreparePlanCache4DifferentSystemVars(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - - // Testing for 'sql_select_limit' - tk.MustExec("set @@sql_select_limit = 1") - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int);") - tk.MustExec("insert into t values(0), (1), (null);") - tk.MustExec("prepare stmt from 'select a from t order by a;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows("")) - - tk.MustExec("set @@sql_select_limit = 2") - tk.MustQuery("execute stmt;").Check(testkit.Rows("", "0")) - // The 'sql_select_limit' will be stored in the cache key. So if the `sql_select_limit` - // have been changed, the plan cache can not be reused. 
- tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - - tk.MustExec("set @@sql_select_limit = 18446744073709551615") - tk.MustQuery("execute stmt;").Check(testkit.Rows("", "0", "1")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - - // test for 'tidb_enable_index_merge' - tk.MustExec("set @@tidb_enable_index_merge = 1;") - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int, b int, index idx_a(a), index idx_b(b));") - tk.MustExec("prepare stmt from 'select * from t use index(idx_a, idx_b) where a > 1 or b > 1;';") - tk.MustExec("execute stmt;") - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res := tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - require.Equal(t, 4, len(res.Rows())) - require.Contains(t, res.Rows()[0][0], "IndexMerge") - - tk.MustExec("set @@tidb_enable_index_merge = 0;") - tk.MustExec("execute stmt;") - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - require.Equal(t, 4, len(res.Rows())) - require.Contains(t, res.Rows()[0][0], "IndexMerge") - tk.MustExec("execute stmt;") - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - // test for 'tidb_enable_parallel_apply' - tk.MustExec("set @@tidb_enable_collect_execution_info=1;") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int, b int)") - tk.MustExec("insert into t values (0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (null, null)") - - tk.MustExec("set tidb_enable_parallel_apply=true") - tk.MustExec("prepare stmt from 'select t1.b from t t1 where t1.b > (select max(b) from t t2 where t1.a > t2.a);';") - tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - require.Contains(t, res.Rows()[1][0], "Apply") - require.Contains(t, res.Rows()[1][5], "Concurrency") - - tk.MustExec("set tidb_enable_parallel_apply=false") - tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - require.Contains(t, res.Rows()[1][0], "Apply") - executionInfo := fmt.Sprintf("%v", res.Rows()[1][4]) - // Do not use the parallel apply. - require.False(t, strings.Contains(executionInfo, "Concurrency")) - tk.MustExec("execute stmt;") - // The subquery plan can not be cached. 
- tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - - // test for apply cache - tk.MustExec("set @@tidb_enable_collect_execution_info=1;") - tk.MustExec("set tidb_mem_quota_apply_cache=33554432") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int, b int)") - tk.MustExec("insert into t values (0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (null, null)") - - tk.MustExec("prepare stmt from 'select t1.b from t t1 where t1.b > (select max(b) from t t2 where t1.a > t2.a);';") - tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - require.Contains(t, res.Rows()[1][0], "Apply") - require.Contains(t, res.Rows()[1][5], "cache:ON") - - tk.MustExec("set tidb_mem_quota_apply_cache=0") - tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - require.Contains(t, res.Rows()[1][0], "Apply") - executionInfo = fmt.Sprintf("%v", res.Rows()[1][5]) - // Do not use the apply cache. - require.True(t, strings.Contains(executionInfo, "cache:OFF")) - tk.MustExec("execute stmt;") - // The subquery plan can not be cached. - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) -} - -func TestTemporaryTable4PlanCache(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("drop table if exists tmp2") - tk.MustExec("create temporary table tmp2 (a int, b int, key(a), key(b));") - tk.MustExec("prepare stmt from 'select * from tmp2;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - - tk.MustExec("drop table if exists tmp_t;") - tk.MustExec("create global temporary table tmp_t (id int primary key, a int, b int, index(a)) on commit delete rows") - tk.MustExec("prepare stmt from 'select * from tmp_t;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - -} - -func TestPrepareStmtAfterIsolationReadChange(t *testing.T) { - if israce.RaceEnabled { - t.Skip("race test for this case takes too long time") - } - store, clean := testkit.CreateMockStore(t) - defer clean() - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(false) // requires plan cache disabled - tk := testkit.NewTestKit(t, store) - tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost", CurrentUser: true, AuthUsername: "root", 
AuthHostname: "%"}, nil, []byte("012345678901234567890")) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int)") - // create virtual tiflash replica. - dom := domain.GetDomain(tk.Session()) - is := dom.InfoSchema() - db, exists := is.SchemaByName(model.NewCIStr("test")) - require.True(t, exists) - for _, tblInfo := range db.Tables { - if tblInfo.Name.L == "t" { - tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{ - Count: 1, - Available: true, - } - } - } - - tk.MustExec("set @@session.tidb_isolation_read_engines='tikv'") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("prepare stmt from \"select * from t\"") - tk.MustQuery("execute stmt") - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - rows := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() - require.Equal(t, "cop[tikv]", rows[len(rows)-1][2]) - - tk.MustExec("set @@session.tidb_isolation_read_engines='tiflash'") - tk.MustExec("execute stmt") - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() - require.Equal(t, rows[len(rows)-1][2], "cop[tiflash]") - - require.Equal(t, 1, len(tk.Session().GetSessionVars().PreparedStmts)) - require.Equal(t, "select * from `t`", tk.Session().GetSessionVars().PreparedStmts[1].(*plannercore.CachedPrepareStmt).NormalizedSQL) - require.Equal(t, "", tk.Session().GetSessionVars().PreparedStmts[1].(*plannercore.CachedPrepareStmt).NormalizedPlan) -} diff --git a/executor/prepared_test.go b/executor/prepared_test.go index 5908af0437848..e24178b9817b2 100644 --- a/executor/prepared_test.go +++ b/executor/prepared_test.go @@ -17,15 +17,23 @@ package executor_test import ( "crypto/tls" "fmt" + "strconv" + "strings" "sync/atomic" "testing" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/parser/auth" + "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/session" - txninfo "github.com/pingcap/tidb/session/txninfo" + "github.com/pingcap/tidb/session/txninfo" + "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/testkit/testdata" "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/israce" "github.com/stretchr/testify/require" ) @@ -218,3 +226,1149 @@ func TestIssue29850(t *testing.T) { ` └─TableRangeScan_5 1.00 cop[tikv] table:t range:[1,1], keep order:false, stats:pseudo`)) tk.MustQuery(`execute stmt using @a1, @a2`).Check(testkit.Rows("1", "2")) } + +func TestIssue28064(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + tk.MustExec("use test") + tk.MustExec("drop table if exists t28064") + tk.MustExec("CREATE TABLE `t28064` (" + + "`a` decimal(10,0) DEFAULT NULL," + + "`b` decimal(10,0) DEFAULT NULL," + + "`c` decimal(10,0) DEFAULT NULL," + + "`d` decimal(10,0) DEFAULT NULL," + + "KEY `iabc` (`a`,`b`,`c`));") + tk.MustExec("set @a='123', @b='234', @c='345';") + 
tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec("prepare stmt1 from 'select * from t28064 use index (iabc) where a = ? and b = ? and c = ?';") + + tk.MustExec("execute stmt1 using @a, @b, @c;") + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + rows := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + rows.Check(testkit.Rows("Selection_8 0.00 root eq(test.t28064.a, 123), eq(test.t28064.b, 234), eq(test.t28064.c, 345)", + "└─IndexLookUp_7 0.00 root ", + " ├─IndexRangeScan_5(Build) 0.00 cop[tikv] table:t28064, index:iabc(a, b, c) range:[123 234 345,123 234 345], keep order:false, stats:pseudo", + " └─TableRowIDScan_6(Probe) 0.00 cop[tikv] table:t28064 keep order:false, stats:pseudo")) + + tk.MustExec("execute stmt1 using @a, @b, @c;") + rows = tk.MustQuery("select @@last_plan_from_cache") + rows.Check(testkit.Rows("1")) + + tk.MustExec("execute stmt1 using @a, @b, @c;") + rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + rows.Check(testkit.Rows("Selection_8 0.00 root eq(test.t28064.a, 123), eq(test.t28064.b, 234), eq(test.t28064.c, 345)", + "└─IndexLookUp_7 0.00 root ", + " ├─IndexRangeScan_5(Build) 0.00 cop[tikv] table:t28064, index:iabc(a, b, c) range:[123 234 345,123 234 345], keep order:false, stats:pseudo", + " └─TableRowIDScan_6(Probe) 0.00 cop[tikv] table:t28064 keep order:false, stats:pseudo")) +} + +func TestPreparePlanCache4Blacklist(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + + tk.MustExec("use test") + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + + // test the blacklist of optimization rules + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a int);") + tk.MustExec("prepare stmt from 'select min(a) from t;';") + tk.MustExec("execute stmt;") + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + require.Contains(t, res.Rows()[1][0], "TopN") + + res = tk.MustQuery("explain format = 'brief' select min(a) from t") + require.Contains(t, res.Rows()[1][0], "TopN") + + tk.MustExec("INSERT INTO mysql.opt_rule_blacklist VALUES('max_min_eliminate');") + tk.MustExec("ADMIN reload opt_rule_blacklist;") + + tk.MustExec("execute stmt;") + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) + tk.MustExec("execute stmt;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + // Plans that have been cached will not be affected by the blacklist. 
+ require.Contains(t, res.Rows()[1][0], "TopN") + + res = tk.MustQuery("explain format = 'brief' select min(a) from t") + require.Contains(t, res.Rows()[0][0], "StreamAgg") + + // test the blacklist of Expression Pushdown + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a int);") + tk.MustExec("prepare stmt from 'SELECT * FROM t WHERE a < 2 and a > 2;';") + tk.MustExec("execute stmt;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + require.Equal(t, 3, len(res.Rows())) + require.Contains(t, res.Rows()[1][0], "Selection") + require.Equal(t, "gt(test.t.a, 2), lt(test.t.a, 2)", res.Rows()[1][4]) + + res = tk.MustQuery("explain format = 'brief' SELECT * FROM t WHERE a < 2 and a > 2;") + require.Equal(t, 3, len(res.Rows())) + require.Equal(t, "gt(test.t.a, 2), lt(test.t.a, 2)", res.Rows()[1][4]) + + tk.MustExec("INSERT INTO mysql.expr_pushdown_blacklist VALUES('<','tikv','');") + tk.MustExec("ADMIN reload expr_pushdown_blacklist;") + + tk.MustExec("execute stmt;") + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) + tk.MustExec("execute stmt;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + // The expressions can still be pushed down to tikv. + require.Equal(t, 3, len(res.Rows())) + require.Contains(t, res.Rows()[1][0], "Selection") + require.Equal(t, "gt(test.t.a, 2), lt(test.t.a, 2)", res.Rows()[1][4]) + + res = tk.MustQuery("explain format = 'brief' SELECT * FROM t WHERE a < 2 and a > 2;") + require.Equal(t, 4, len(res.Rows())) + require.Contains(t, res.Rows()[0][0], "Selection") + require.Equal(t, "lt(test.t.a, 2)", res.Rows()[0][4]) + require.Contains(t, res.Rows()[2][0], "Selection") + require.Equal(t, "gt(test.t.a, 2)", res.Rows()[2][4]) + + tk.MustExec("DELETE FROM mysql.expr_pushdown_blacklist;") + tk.MustExec("ADMIN reload expr_pushdown_blacklist;") +} + +func TestPlanCacheClusterIndex(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + tk.MustExec("use test") + tk.MustExec("drop table if exists t1") + tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec("create table t1(a varchar(20), b varchar(20), c varchar(20), primary key(a, b))") + tk.MustExec("insert into t1 values('1','1','111'),('2','2','222'),('3','3','333')") + + // For table scan + tk.MustExec(`prepare stmt1 from "select * from t1 where t1.a = ? 
and t1.b > ?"`) + tk.MustExec("set @v1 = '1'") + tk.MustExec("set @v2 = '0'") + tk.MustQuery("execute stmt1 using @v1,@v2").Check(testkit.Rows("1 1 111")) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + tk.MustExec("set @v1 = '2'") + tk.MustExec("set @v2 = '1'") + tk.MustQuery("execute stmt1 using @v1,@v2").Check(testkit.Rows("2 2 222")) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + tk.MustExec("set @v1 = '3'") + tk.MustExec("set @v2 = '2'") + tk.MustQuery("execute stmt1 using @v1,@v2").Check(testkit.Rows("3 3 333")) + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + rows := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() + require.Equal(t, 0, strings.Index(rows[len(rows)-1][4].(string), `range:("3" "2","3" +inf]`)) + // For point get + tk.MustExec(`prepare stmt2 from "select * from t1 where t1.a = ? and t1.b = ?"`) + tk.MustExec("set @v1 = '1'") + tk.MustExec("set @v2 = '1'") + tk.MustQuery("execute stmt2 using @v1,@v2").Check(testkit.Rows("1 1 111")) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + tk.MustExec("set @v1 = '2'") + tk.MustExec("set @v2 = '2'") + tk.MustQuery("execute stmt2 using @v1,@v2").Check(testkit.Rows("2 2 222")) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + tk.MustExec("set @v1 = '3'") + tk.MustExec("set @v2 = '3'") + tk.MustQuery("execute stmt2 using @v1,@v2").Check(testkit.Rows("3 3 333")) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() + require.Equal(t, 0, strings.Index(rows[len(rows)-1][0].(string), `Point_Get`)) + // For CBO point get and batch point get + // case 1: + tk.MustExec(`drop table if exists ta, tb`) + tk.MustExec(`create table ta (a varchar(8) primary key, b int)`) + tk.MustExec(`insert ta values ('a', 1), ('b', 2)`) + tk.MustExec(`create table tb (a varchar(8) primary key, b int)`) + tk.MustExec(`insert tb values ('a', 1), ('b', 2)`) + tk.MustExec(`prepare stmt1 from "select * from ta, tb where ta.a = tb.a and ta.a = ?"`) + tk.MustExec(`set @v1 = 'a', @v2 = 'b'`) + tk.MustQuery(`execute stmt1 using @v1`).Check(testkit.Rows("a 1 a 1")) + tk.MustQuery(`execute stmt1 using @v2`).Check(testkit.Rows("b 2 b 2")) + + // case 2: + tk.MustExec(`drop table if exists ta, tb`) + tk.MustExec(`create table ta (a varchar(10) primary key, b int not null)`) + tk.MustExec(`insert ta values ('a', 1), ('b', 2)`) + tk.MustExec(`create table tb (b int primary key, c int)`) + tk.MustExec(`insert tb values (1, 1), (2, 2)`) + tk.MustExec(`prepare stmt1 from "select * from ta, tb where ta.b = tb.b and ta.a = ?"`) + tk.MustExec(`set @v1 = 'a', @v2 = 'b'`) + tk.MustQuery(`execute stmt1 using @v1`).Check(testkit.Rows("a 1 1 1")) + tk.MustQuery(`execute stmt1 using @v2`).Check(testkit.Rows("b 2 2 2")) + tk.MustQuery(`execute stmt1 using @v2`).Check(testkit.Rows("b 2 2 2")) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() + require.True(t, strings.Contains(rows[3][0].(string), `TableRangeScan`)) + + // case 3: + tk.MustExec(`drop table if exists ta, tb`) + tk.MustExec(`create table ta (a 
varchar(10), b varchar(10), c int, primary key (a, b))`) + tk.MustExec(`insert ta values ('a', 'a', 1), ('b', 'b', 2), ('c', 'c', 3)`) + tk.MustExec(`create table tb (b int primary key, c int)`) + tk.MustExec(`insert tb values (1, 1), (2, 2), (3,3)`) + tk.MustExec(`prepare stmt1 from "select * from ta, tb where ta.c = tb.b and ta.a = ? and ta.b = ?"`) + tk.MustExec(`set @v1 = 'a', @v2 = 'b', @v3 = 'c'`) + tk.MustQuery(`execute stmt1 using @v1, @v1`).Check(testkit.Rows("a a 1 1 1")) + tk.MustQuery(`execute stmt1 using @v2, @v2`).Check(testkit.Rows("b b 2 2 2")) + tk.MustExec(`prepare stmt2 from "select * from ta, tb where ta.c = tb.b and (ta.a, ta.b) in ((?, ?), (?, ?))"`) + tk.MustQuery(`execute stmt2 using @v1, @v1, @v2, @v2`).Check(testkit.Rows("a a 1 1 1", "b b 2 2 2")) + tk.MustQuery(`execute stmt2 using @v2, @v2, @v3, @v3`).Check(testkit.Rows("b b 2 2 2", "c c 3 3 3")) + + // For issue 19002 + tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn + tk.MustExec(`drop table if exists t1`) + tk.MustExec(`create table t1(a int, b int, c int, primary key(a, b))`) + tk.MustExec(`insert into t1 values(1,1,111),(2,2,222),(3,3,333)`) + // Point Get: + tk.MustExec(`prepare stmt1 from "select * from t1 where t1.a = ? and t1.b = ?"`) + tk.MustExec(`set @v1=1, @v2=1`) + tk.MustQuery(`execute stmt1 using @v1,@v2`).Check(testkit.Rows("1 1 111")) + tk.MustExec(`set @v1=2, @v2=2`) + tk.MustQuery(`execute stmt1 using @v1,@v2`).Check(testkit.Rows("2 2 222")) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) + // Batch Point Get: + tk.MustExec(`prepare stmt2 from "select * from t1 where (t1.a,t1.b) in ((?,?),(?,?))"`) + tk.MustExec(`set @v1=1, @v2=1, @v3=2, @v4=2`) + tk.MustQuery(`execute stmt2 using @v1,@v2,@v3,@v4`).Check(testkit.Rows("1 1 111", "2 2 222")) + tk.MustExec(`set @v1=2, @v2=2, @v3=3, @v4=3`) + tk.MustQuery(`execute stmt2 using @v1,@v2,@v3,@v4`).Check(testkit.Rows("2 2 222", "3 3 333")) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) +} + +func TestPlanCacheWithDifferentVariableTypes(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + require.NoError(t, err) + + tk.MustExec("use test") + tk.MustExec("drop table if exists t1, t2") + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec("create table t1(a varchar(20), b int, c float, key(b, a))") + tk.MustExec("insert into t1 values('1',1,1.1),('2',2,222),('3',3,333)") + tk.MustExec("create table t2(a varchar(20), b int, c float, key(b, a))") + tk.MustExec("insert into t2 values('3',3,3.3),('2',2,222),('3',3,333)") + + var input []struct { + PrepareStmt string + Executes []struct { + Vars []struct { + Name string + Value string + } + ExecuteSQL string + } + } + var output []struct { + PrepareStmt string + Executes []struct { + SQL string + Vars []struct { + Name string + Value string + } + Plan []string + LastPlanUseCache string + Result []string + } + } + prepareMergeSuiteData.GetTestCases(t, &input, &output) + for i, tt := range input { + tk.MustExec(tt.PrepareStmt) + testdata.OnRecord(func() { + output[i].PrepareStmt = tt.PrepareStmt + output[i].Executes = make([]struct { + SQL string + Vars []struct { + Name string + Value string + } + 
Plan []string + LastPlanUseCache string + Result []string + }, len(tt.Executes)) + }) + require.Equal(t, tt.PrepareStmt, output[i].PrepareStmt) + for j, exec := range tt.Executes { + for _, v := range exec.Vars { + tk.MustExec(fmt.Sprintf(`set @%s = %s`, v.Name, v.Value)) + } + res := tk.MustQuery(exec.ExecuteSQL) + lastPlanUseCache := tk.MustQuery("select @@last_plan_from_cache").Rows()[0][0] + tk.MustQuery(exec.ExecuteSQL) + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + plan := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + testdata.OnRecord(func() { + output[i].Executes[j].SQL = exec.ExecuteSQL + output[i].Executes[j].Plan = testdata.ConvertRowsToStrings(plan.Rows()) + output[i].Executes[j].Vars = exec.Vars + output[i].Executes[j].LastPlanUseCache = lastPlanUseCache.(string) + output[i].Executes[j].Result = testdata.ConvertRowsToStrings(res.Rows()) + }) + + require.Equal(t, exec.ExecuteSQL, output[i].Executes[j].SQL) + plan.Check(testkit.Rows(output[i].Executes[j].Plan...)) + require.Equal(t, exec.Vars, output[i].Executes[j].Vars) + require.Equal(t, lastPlanUseCache.(string), output[i].Executes[j].LastPlanUseCache) + res.Check(testkit.Rows(output[i].Executes[j].Result...)) + } + } +} + +func TestPlanCacheOperators(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + type ExecCase struct { + Parameters []string + UseCache bool + } + type PrepCase struct { + PrepStmt string + ExecCases []ExecCase + } + + cases := []PrepCase{ + {"use test", nil}, + + // cases for TableReader on PK + {"create table t (a int, b int, primary key(a))", nil}, + {"insert into t values (1,1), (2,2), (3,3), (4,4), (5,5), (6,null)", nil}, + {"select a from t where a=?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"2"}, true}, + {[]string{"3"}, true}, + }}, + {"select a from t where a in (?,?,?)", []ExecCase{ + {[]string{"1", "1", "1"}, false}, + {[]string{"2", "3", "4"}, true}, + {[]string{"3", "5", "7"}, true}, + }}, + {"select a from t where a>? and a? and a? and a? and a? and a?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, true}, + {[]string{"5"}, true}, + }}, + {"select /*+ HASH_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t2.b>?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, true}, + {[]string{"5"}, true}, + }}, + {"select /*+ HASH_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t1.b>? and t2.b?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, true}, + {[]string{"5"}, true}, + }}, + {"select /*+ MERGE_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t2.b>?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, true}, + {[]string{"5"}, true}, + }}, + {"select /*+ MERGE_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t1.b>? and t2.b?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, true}, + {[]string{"5"}, true}, + }}, + {"select /*+ INL_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t2.b>?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, true}, + {[]string{"5"}, true}, + }}, + {"select /*+ INL_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t1.b>? and t2.b? 
and t1.a > (select min(t2.a) from t t2 where t2.b < t1.b)", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, false}, // plans with sub-queries cannot be cached, but the result must be correct + {[]string{"5"}, false}, + }}, + {"select * from t t1 where t1.a > (select min(t2.a) from t t2 where t2.b < t1.b+?)", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, false}, + {[]string{"5"}, false}, + }}, + {"select * from t t1 where t1.b>? and t1.a > (select min(t2.a) from t t2 where t2.b < t1.b+?)", []ExecCase{ + {[]string{"1", "1"}, false}, + {[]string{"3", "2"}, false}, + {[]string{"5", "3"}, false}, + }}, + {"drop table t", nil}, + + // cases for Window + {"create table t (name varchar(50), y int, sale decimal(14,2))", nil}, + {"insert into t values ('Bob',2016,2.4), ('Bob',2017,3.2), ('Bob',2018,2.1), ('Alice',2016,1.4), ('Alice',2017,2), ('Alice',2018,3.3), ('John',2016,4), ('John',2017,2.1), ('John',2018,5)", nil}, + {"select *, sum(sale) over (partition by y order by sale) total from t where sale>? order by y", []ExecCase{ + {[]string{"0.1"}, false}, + {[]string{"0.5"}, true}, + {[]string{"1.5"}, true}, + {[]string{"3.5"}, true}, + }}, + {"select *, sum(sale) over (partition by y order by sale+? rows 2 preceding) total from t order by y", []ExecCase{ + {[]string{"0.1"}, false}, + {[]string{"0.5"}, true}, + {[]string{"1.5"}, true}, + {[]string{"3.5"}, true}, + }}, + {"select *, rank() over (partition by y order by sale+? rows 2 preceding) total from t order by y", []ExecCase{ + {[]string{"0.1"}, false}, + {[]string{"0.5"}, true}, + {[]string{"1.5"}, true}, + {[]string{"3.5"}, true}, + }}, + {"select *, first_value(sale) over (partition by y order by sale+? rows 2 preceding) total from t order by y", []ExecCase{ + {[]string{"0.1"}, false}, + {[]string{"0.5"}, true}, + {[]string{"1.5"}, true}, + {[]string{"3.5"}, true}, + }}, + {"select *, first_value(sale) over (partition by y order by sale rows ? preceding) total from t order by y", []ExecCase{ + {[]string{"1"}, false}, // window plans with parameters in frame cannot be cached + {[]string{"2"}, false}, + {[]string{"3"}, false}, + {[]string{"4"}, false}, + }}, + {"drop table t", nil}, + + // cases for Limit + {"create table t (a int)", nil}, + {"insert into t values (1), (1), (2), (2), (3), (4), (5), (6), (7), (8), (9), (0), (0)", nil}, + {"select * from t limit ?", []ExecCase{ + {[]string{"20"}, false}, + {[]string{"30"}, false}, + }}, + {"select * from t limit 40, ?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"2"}, false}, + }}, + {"select * from t limit ?, 10", []ExecCase{ + {[]string{"20"}, false}, + {[]string{"30"}, false}, + }}, + {"select * from t limit ?, ?", []ExecCase{ + {[]string{"20", "20"}, false}, + {[]string{"20", "40"}, false}, + }}, + {"select * from t where a? order by mod(a, 3)", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"2"}, true}, + {[]string{"3"}, true}, + }}, + + // cases for topN + {"select * from t order by b limit ?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"2"}, false}, + }}, + {"select * from t order by b limit 10, ?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"2"}, false}, + }}, + {"select * from t order by ? limit 10", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"2"}, false}, + }}, + {"select * from t order by ? 
limit ?", []ExecCase{ + {[]string{"1", "10"}, false}, + {[]string{"2", "20"}, false}, + }}, + } + + for _, prepCase := range cases { + isQuery := strings.Contains(prepCase.PrepStmt, "select") + if !isQuery { + tk.MustExec(prepCase.PrepStmt) + continue + } + + tk.MustExec(fmt.Sprintf(`prepare stmt from '%v'`, prepCase.PrepStmt)) + for _, execCase := range prepCase.ExecCases { + // set all parameters + usingStmt := "" + if len(execCase.Parameters) > 0 { + setStmt := "set " + usingStmt = "using " + for i, parameter := range execCase.Parameters { + if i > 0 { + setStmt += ", " + usingStmt += ", " + } + setStmt += fmt.Sprintf("@x%v=%v", i, parameter) + usingStmt += fmt.Sprintf("@x%v", i) + } + tk.MustExec(setStmt) + } + + // execute this statement and check whether it uses a cached plan + results := tk.MustQuery("execute stmt " + usingStmt).Sort().Rows() + + // check whether the result is correct + tmp := strings.Split(prepCase.PrepStmt, "?") + require.Equal(t, len(execCase.Parameters)+1, len(tmp)) + query := "" + for i := range tmp { + query += tmp[i] + if i < len(execCase.Parameters) { + query += execCase.Parameters[i] + } + } + tk.MustQuery(query).Sort().Check(results) + } + } +} + +func TestIssue28782(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + tk.MustExec("use test") + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec("prepare stmt from 'SELECT IF(?, 1, 0);';") + tk.MustExec("set @a=1, @b=null, @c=0") + + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) + tk.MustQuery("execute stmt using @b;").Check(testkit.Rows("0")) + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) + tk.MustQuery("execute stmt using @c;").Check(testkit.Rows("0")) + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) +} + +func TestIssue29101(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + + tk.MustExec(`use test`) + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec(`CREATE TABLE customer ( + c_id int(11) NOT NULL, + c_d_id int(11) NOT NULL, + c_w_id int(11) NOT NULL, + c_first varchar(16) DEFAULT NULL, + c_last varchar(16) DEFAULT NULL, + c_credit char(2) DEFAULT NULL, + c_discount decimal(4,4) DEFAULT NULL, + PRIMARY KEY (c_w_id,c_d_id,c_id), + KEY idx_customer (c_w_id,c_d_id,c_last,c_first) + )`) + tk.MustExec(`CREATE TABLE warehouse ( + w_id int(11) NOT NULL, + w_tax decimal(4,4) DEFAULT NULL, + PRIMARY KEY (w_id) + )`) + tk.MustExec(`prepare s1 from 'SELECT /*+ TIDB_INLJ(customer,warehouse) */ c_discount, c_last, c_credit, w_tax FROM customer, warehouse WHERE w_id = ? AND c_w_id = w_id AND c_d_id = ? 
AND c_id = ?'`) + tk.MustExec(`set @a=936,@b=7,@c=158`) + tk.MustQuery(`execute s1 using @a,@b,@c`).Check(testkit.Rows()) + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Check(testkit.Rows( // can use IndexJoin + `Projection_6 1.00 root test.customer.c_discount, test.customer.c_last, test.customer.c_credit, test.warehouse.w_tax`, + `└─IndexJoin_14 1.00 root inner join, inner:TableReader_10, outer key:test.customer.c_w_id, inner key:test.warehouse.w_id, equal cond:eq(test.customer.c_w_id, test.warehouse.w_id)`, + ` ├─Point_Get_33(Build) 1.00 root table:customer, index:PRIMARY(c_w_id, c_d_id, c_id) `, + ` └─TableReader_10(Probe) 0.00 root data:Selection_9`, + ` └─Selection_9 0.00 cop[tikv] eq(test.warehouse.w_id, 936)`, + ` └─TableRangeScan_8 1.00 cop[tikv] table:warehouse range: decided by [test.customer.c_w_id], keep order:false, stats:pseudo`)) + tk.MustQuery(`execute s1 using @a,@b,@c`).Check(testkit.Rows()) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) // can use the plan-cache + + tk.MustExec(`CREATE TABLE order_line ( + ol_o_id int(11) NOT NULL, + ol_d_id int(11) NOT NULL, + ol_w_id int(11) NOT NULL, + ol_number int(11) NOT NULL, + ol_i_id int(11) NOT NULL, + PRIMARY KEY (ol_w_id,ol_d_id,ol_o_id,ol_number))`) + tk.MustExec(`CREATE TABLE stock ( + s_i_id int(11) NOT NULL, + s_w_id int(11) NOT NULL, + s_quantity int(11) DEFAULT NULL, + PRIMARY KEY (s_w_id,s_i_id))`) + tk.MustExec(`prepare s1 from 'SELECT /*+ TIDB_INLJ(order_line,stock) */ COUNT(DISTINCT (s_i_id)) stock_count FROM order_line, stock WHERE ol_w_id = ? AND ol_d_id = ? AND ol_o_id < ? AND ol_o_id >= ? - 20 AND s_w_id = ? 
AND s_i_id = ol_i_id AND s_quantity < ?'`) + tk.MustExec(`set @a=391,@b=1,@c=3058,@d=18`) + tk.MustExec(`execute s1 using @a,@b,@c,@c,@a,@d`) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Check(testkit.Rows( // can use index-join + `StreamAgg_9 1.00 root funcs:count(distinct test.stock.s_i_id)->Column#11`, + `└─IndexJoin_14 0.03 root inner join, inner:IndexLookUp_13, outer key:test.order_line.ol_i_id, inner key:test.stock.s_i_id, equal cond:eq(test.order_line.ol_i_id, test.stock.s_i_id)`, + ` ├─Selection_30(Build) 0.03 root eq(test.order_line.ol_d_id, 1), eq(test.order_line.ol_w_id, 391), ge(test.order_line.ol_o_id, 3038), lt(test.order_line.ol_o_id, 3058)`, + ` │ └─IndexLookUp_29 0.03 root `, + ` │ ├─IndexRangeScan_27(Build) 0.03 cop[tikv] table:order_line, index:PRIMARY(ol_w_id, ol_d_id, ol_o_id, ol_number) range:[391 1 3038,391 1 3058), keep order:false, stats:pseudo`, + ` │ └─TableRowIDScan_28(Probe) 0.03 cop[tikv] table:order_line keep order:false, stats:pseudo`, + ` └─IndexLookUp_13(Probe) 1.00 root `, + ` ├─IndexRangeScan_10(Build) 1.00 cop[tikv] table:stock, index:PRIMARY(s_w_id, s_i_id) range: decided by [eq(test.stock.s_i_id, test.order_line.ol_i_id) eq(test.stock.s_w_id, 391)], keep order:false, stats:pseudo`, + ` └─Selection_12(Probe) 1.00 cop[tikv] lt(test.stock.s_quantity, 18)`, + ` └─TableRowIDScan_11 1.00 cop[tikv] table:stock keep order:false, stats:pseudo`)) + tk.MustExec(`execute s1 using @a,@b,@c,@c,@a,@d`) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) // can use the plan-cache +} + +func TestIssue28087And28162(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + + // issue 28087 + tk.MustExec(`use test`) + tk.MustExec(`drop table if exists IDT_26207`) + tk.MustExec(`CREATE TABLE IDT_26207 (col1 bit(1))`) + tk.MustExec(`insert into IDT_26207 values(0x0), (0x1)`) + tk.MustExec(`prepare stmt from 'select t1.col1 from IDT_26207 as t1 left join IDT_26207 as t2 on t1.col1 = t2.col1 where t1.col1 in (?, ?, ?)'`) + tk.MustExec(`set @a=0x01, @b=0x01, @c=0x01`) + tk.MustQuery(`execute stmt using @a,@b,@c`).Check(testkit.Rows("\x01")) + tk.MustExec(`set @a=0x00, @b=0x00, @c=0x01`) + tk.MustQuery(`execute stmt using @a,@b,@c`).Check(testkit.Rows("\x00", "\x01")) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("0")) + + // issue 28162 + tk.MustExec(`drop table if exists IDT_MC21780`) + tk.MustExec(`CREATE TABLE IDT_MC21780 ( + COL1 timestamp NULL DEFAULT NULL, + COL2 timestamp NULL DEFAULT NULL, + COL3 timestamp NULL DEFAULT NULL, + KEY U_M_COL (COL1,COL2) + )`) + tk.MustExec(`insert into IDT_MC21780 values("1970-12-18 10:53:28", "1970-12-18 10:53:28", "1970-12-18 10:53:28")`) + tk.MustExec(`prepare stmt from 'select/*+ hash_join(t1) */ * from IDT_MC21780 t1 join IDT_MC21780 t2 on t1.col1 = t2.col1 where t1. col1 < ? and t2. 
col1 in (?, ?, ?);'`) + tk.MustExec(`set @a="2038-01-19 03:14:07", @b="2038-01-19 03:14:07", @c="2038-01-19 03:14:07", @d="2038-01-19 03:14:07"`) + tk.MustQuery(`execute stmt using @a,@b,@c,@d`).Check(testkit.Rows()) + tk.MustExec(`set @a="1976-09-09 20:21:11", @b="2021-07-14 09:28:16", @c="1982-01-09 03:36:39", @d="1970-12-18 10:53:28"`) + tk.MustQuery(`execute stmt using @a,@b,@c,@d`).Check(testkit.Rows("1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28")) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) +} + +func TestParameterPushDown(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + require.NoError(t, err) + tk.MustExec(`use test`) + tk.MustExec(`drop table if exists t`) + tk.MustExec(`create table t (a int, b int, c int, key(a))`) + tk.MustExec(`insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 6, 6)`) + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec(`set @x1=1,@x5=5,@x10=10,@x20=20`) + + var input []struct { + SQL string + } + var output []struct { + Result []string + Plan []string + FromCache string + } + prepareMergeSuiteData.GetTestCases(t, &input, &output) + + for i, tt := range input { + if strings.HasPrefix(tt.SQL, "execute") { + res := tk.MustQuery(tt.SQL).Sort() + fromCache := tk.MustQuery("select @@last_plan_from_cache") + tk.MustQuery(tt.SQL) + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + plan := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + + testdata.OnRecord(func() { + output[i].Result = testdata.ConvertRowsToStrings(res.Rows()) + output[i].Plan = testdata.ConvertRowsToStrings(plan.Rows()) + output[i].FromCache = fromCache.Rows()[0][0].(string) + }) + + res.Check(testkit.Rows(output[i].Result...)) + plan.Check(testkit.Rows(output[i].Plan...)) + require.Equal(t, fromCache.Rows()[0][0].(string), output[i].FromCache) + } else { + tk.MustExec(tt.SQL) + testdata.OnRecord(func() { + output[i].Result = nil + }) + } + } +} + +func TestPreparePlanCache4Function(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + + tk.MustExec("use test") + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + + // Testing for non-deterministic functions + tk.MustExec("prepare stmt from 'select rand()';") + res := tk.MustQuery("execute stmt;") + require.Equal(t, 1, len(res.Rows())) + + res1 := tk.MustQuery("execute stmt;") + require.Equal(t, 1, len(res1.Rows())) + require.NotEqual(t, res.Rows()[0][0], res1.Rows()[0][0]) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + // Testing for control functions + tk.MustExec("prepare stmt from 'SELECT IFNULL(?,0);';") + tk.MustExec("set @a = 1, @b = null;") + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) + 
tk.MustQuery("execute stmt using @b;").Check(testkit.Rows("0")) + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a int);") + tk.MustExec("prepare stmt from 'select a, case when a = ? then 0 when a <=> ? then 1 else 2 end b from t order by a;';") + tk.MustExec("insert into t values(0), (1), (2), (null);") + tk.MustExec("set @a = 0, @b = 1, @c = 2, @d = null;") + tk.MustQuery("execute stmt using @a, @b;").Check(testkit.Rows(" 2", "0 0", "1 1", "2 2")) + tk.MustQuery("execute stmt using @c, @d;").Check(testkit.Rows(" 1", "0 2", "1 2", "2 0")) + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) +} + +func TestPreparePlanCache4DifferentSystemVars(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + + tk.MustExec("use test") + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + + // Testing for 'sql_select_limit' + tk.MustExec("set @@sql_select_limit = 1") + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a int);") + tk.MustExec("insert into t values(0), (1), (null);") + tk.MustExec("prepare stmt from 'select a from t order by a;';") + tk.MustQuery("execute stmt;").Check(testkit.Rows("")) + + tk.MustExec("set @@sql_select_limit = 2") + tk.MustQuery("execute stmt;").Check(testkit.Rows("", "0")) + // The 'sql_select_limit' will be stored in the cache key. So if the `sql_select_limit` + // have been changed, the plan cache can not be reused. 
+ tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) + + tk.MustExec("set @@sql_select_limit = 18446744073709551615") + tk.MustQuery("execute stmt;").Check(testkit.Rows("", "0", "1")) + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) + + // test for 'tidb_enable_index_merge' + tk.MustExec("set @@tidb_enable_index_merge = 1;") + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a int, b int, index idx_a(a), index idx_b(b));") + tk.MustExec("prepare stmt from 'select * from t use index(idx_a, idx_b) where a > 1 or b > 1;';") + tk.MustExec("execute stmt;") + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res := tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.Equal(t, 4, len(res.Rows())) + require.Contains(t, res.Rows()[0][0], "IndexMerge") + + tk.MustExec("set @@tidb_enable_index_merge = 0;") + tk.MustExec("execute stmt;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.Equal(t, 4, len(res.Rows())) + require.Contains(t, res.Rows()[0][0], "IndexMerge") + tk.MustExec("execute stmt;") + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) + + // test for 'tidb_enable_parallel_apply' + tk.MustExec("set @@tidb_enable_collect_execution_info=1;") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int, b int)") + tk.MustExec("insert into t values (0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (null, null)") + + tk.MustExec("set tidb_enable_parallel_apply=true") + tk.MustExec("prepare stmt from 'select t1.b from t t1 where t1.b > (select max(b) from t t2 where t1.a > t2.a);';") + tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.Contains(t, res.Rows()[1][0], "Apply") + require.Contains(t, res.Rows()[1][5], "Concurrency") + + tk.MustExec("set tidb_enable_parallel_apply=false") + tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.Contains(t, res.Rows()[1][0], "Apply") + executionInfo := fmt.Sprintf("%v", res.Rows()[1][4]) + // Do not use the parallel apply. + require.False(t, strings.Contains(executionInfo, "Concurrency")) + tk.MustExec("execute stmt;") + // The subquery plan can not be cached. 
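+ // As a result, last_plan_from_cache stays 0 for this statement, as checked below.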
+ tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) + + // test for apply cache + tk.MustExec("set @@tidb_enable_collect_execution_info=1;") + tk.MustExec("set tidb_mem_quota_apply_cache=33554432") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int, b int)") + tk.MustExec("insert into t values (0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (null, null)") + + tk.MustExec("prepare stmt from 'select t1.b from t t1 where t1.b > (select max(b) from t t2 where t1.a > t2.a);';") + tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.Contains(t, res.Rows()[1][0], "Apply") + require.Contains(t, res.Rows()[1][5], "cache:ON") + + tk.MustExec("set tidb_mem_quota_apply_cache=0") + tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.Contains(t, res.Rows()[1][0], "Apply") + executionInfo = fmt.Sprintf("%v", res.Rows()[1][5]) + // Do not use the apply cache. + require.True(t, strings.Contains(executionInfo, "cache:OFF")) + tk.MustExec("execute stmt;") + // The subquery plan can not be cached. + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) +} + +func TestTemporaryTable4PlanCache(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + tk.MustExec("use test") + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec("drop table if exists tmp2") + tk.MustExec("create temporary table tmp2 (a int, b int, key(a), key(b));") + tk.MustExec("prepare stmt from 'select * from tmp2;';") + tk.MustQuery("execute stmt;").Check(testkit.Rows()) + tk.MustQuery("execute stmt;").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) + + tk.MustExec("drop table if exists tmp_t;") + tk.MustExec("create global temporary table tmp_t (id int primary key, a int, b int, index(a)) on commit delete rows") + tk.MustExec("prepare stmt from 'select * from tmp_t;';") + tk.MustQuery("execute stmt;").Check(testkit.Rows()) + tk.MustQuery("execute stmt;").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) + +} + +func TestPrepareStmtAfterIsolationReadChange(t *testing.T) { + if israce.RaceEnabled { + t.Skip("race test for this case takes too long time") + } + store, clean := testkit.CreateMockStore(t) + defer clean() + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(false) // requires plan cache disabled + tk := testkit.NewTestKit(t, store) + tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost", CurrentUser: true, AuthUsername: "root", 
AuthHostname: "%"}, nil, []byte("012345678901234567890")) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int)") + // create virtual tiflash replica. + dom := domain.GetDomain(tk.Session()) + is := dom.InfoSchema() + db, exists := is.SchemaByName(model.NewCIStr("test")) + require.True(t, exists) + for _, tblInfo := range db.Tables { + if tblInfo.Name.L == "t" { + tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{ + Count: 1, + Available: true, + } + } + } + + tk.MustExec("set @@session.tidb_isolation_read_engines='tikv'") + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec("prepare stmt from \"select * from t\"") + tk.MustQuery("execute stmt") + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + rows := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() + require.Equal(t, "cop[tikv]", rows[len(rows)-1][2]) + + tk.MustExec("set @@session.tidb_isolation_read_engines='tiflash'") + tk.MustExec("execute stmt") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() + require.Equal(t, rows[len(rows)-1][2], "cop[tiflash]") + + require.Equal(t, 1, len(tk.Session().GetSessionVars().PreparedStmts)) + require.Equal(t, "select * from `t`", tk.Session().GetSessionVars().PreparedStmts[1].(*plannercore.CachedPrepareStmt).NormalizedSQL) + require.Equal(t, "", tk.Session().GetSessionVars().PreparedStmts[1].(*plannercore.CachedPrepareStmt).NormalizedPlan) +} diff --git a/executor/seqtest/prepared_serial_test.go b/executor/seqtest/prepared_test.go similarity index 100% rename from executor/seqtest/prepared_serial_test.go rename to executor/seqtest/prepared_test.go diff --git a/executor/seqtest/seq_executor_serial_test.go b/executor/seqtest/seq_executor_test.go similarity index 100% rename from executor/seqtest/seq_executor_serial_test.go rename to executor/seqtest/seq_executor_test.go diff --git a/executor/show_stats_serial_test.go b/executor/show_stats_serial_test.go deleted file mode 100644 index c2f7e7ca828ee..0000000000000 --- a/executor/show_stats_serial_test.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package executor_test - -import ( - "testing" - - "github.com/pingcap/tidb/statistics" - "github.com/pingcap/tidb/testkit" - "github.com/stretchr/testify/require" -) - -func TestShowAnalyzeStatus(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - statistics.ClearHistoryJobs() - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int, b int, primary key(a), index idx(b))") - tk.MustExec(`insert into t values (1, 1), (2, 2)`) - - tk.MustExec("set @@tidb_analyze_version=2") - tk.MustExec("analyze table t") - result := tk.MustQuery("show analyze status").Sort() - require.Len(t, result.Rows(), 1) - require.Equal(t, "test", result.Rows()[0][0]) - require.Equal(t, "t", result.Rows()[0][1]) - require.Equal(t, "", result.Rows()[0][2]) - require.Equal(t, "analyze table", result.Rows()[0][3]) - require.Equal(t, "2", result.Rows()[0][4]) - require.NotNil(t, result.Rows()[0][5]) - require.NotNil(t, result.Rows()[0][6]) - require.Equal(t, "finished", result.Rows()[0][7]) - - statistics.ClearHistoryJobs() - - tk.MustExec("set @@tidb_analyze_version=1") - tk.MustExec("analyze table t") - result = tk.MustQuery("show analyze status").Sort() - require.Len(t, result.Rows(), 2) - require.Equal(t, "test", result.Rows()[0][0]) - require.Equal(t, "t", result.Rows()[0][1]) - require.Equal(t, "", result.Rows()[0][2]) - require.Equal(t, "analyze columns", result.Rows()[0][3]) - require.Equal(t, "2", result.Rows()[0][4]) - require.NotNil(t, result.Rows()[0][5]) - require.NotNil(t, result.Rows()[0][6]) - require.Equal(t, "finished", result.Rows()[0][7]) - - require.Len(t, result.Rows(), 2) - require.Equal(t, "test", result.Rows()[1][0]) - require.Equal(t, "t", result.Rows()[1][1]) - require.Equal(t, "", result.Rows()[1][2]) - require.Equal(t, "analyze index idx", result.Rows()[1][3]) - require.Equal(t, "2", result.Rows()[1][4]) - require.NotNil(t, result.Rows()[1][5]) - require.NotNil(t, result.Rows()[1][6]) - require.Equal(t, "finished", result.Rows()[1][7]) -} diff --git a/executor/show_stats_test.go b/executor/show_stats_test.go index 1f2ffafd8c54d..f48fe9988649d 100644 --- a/executor/show_stats_test.go +++ b/executor/show_stats_test.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/testkit" "github.com/stretchr/testify/require" ) @@ -354,3 +355,53 @@ func TestShowHistogramsInFlight(t *testing.T) { require.Equal(t, len(rows), 1) require.Equal(t, rows[0][0], "0") } + +func TestShowAnalyzeStatus(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + statistics.ClearHistoryJobs() + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int, b int, primary key(a), index idx(b))") + tk.MustExec(`insert into t values (1, 1), (2, 2)`) + + tk.MustExec("set @@tidb_analyze_version=2") + tk.MustExec("analyze table t") + result := tk.MustQuery("show analyze status").Sort() + require.Len(t, result.Rows(), 1) + require.Equal(t, "test", result.Rows()[0][0]) + require.Equal(t, "t", result.Rows()[0][1]) + require.Equal(t, "", result.Rows()[0][2]) + require.Equal(t, "analyze table", result.Rows()[0][3]) + require.Equal(t, "2", result.Rows()[0][4]) + require.NotNil(t, result.Rows()[0][5]) + require.NotNil(t, result.Rows()[0][6]) + require.Equal(t, "finished", 
result.Rows()[0][7]) + + statistics.ClearHistoryJobs() + + tk.MustExec("set @@tidb_analyze_version=1") + tk.MustExec("analyze table t") + result = tk.MustQuery("show analyze status").Sort() + require.Len(t, result.Rows(), 2) + require.Equal(t, "test", result.Rows()[0][0]) + require.Equal(t, "t", result.Rows()[0][1]) + require.Equal(t, "", result.Rows()[0][2]) + require.Equal(t, "analyze columns", result.Rows()[0][3]) + require.Equal(t, "2", result.Rows()[0][4]) + require.NotNil(t, result.Rows()[0][5]) + require.NotNil(t, result.Rows()[0][6]) + require.Equal(t, "finished", result.Rows()[0][7]) + + require.Len(t, result.Rows(), 2) + require.Equal(t, "test", result.Rows()[1][0]) + require.Equal(t, "t", result.Rows()[1][1]) + require.Equal(t, "", result.Rows()[1][2]) + require.Equal(t, "analyze index idx", result.Rows()[1][3]) + require.Equal(t, "2", result.Rows()[1][4]) + require.NotNil(t, result.Rows()[1][5]) + require.NotNil(t, result.Rows()[1][6]) + require.Equal(t, "finished", result.Rows()[1][7]) +} diff --git a/executor/slow_query_test.go b/executor/slow_query_test.go index 4fdd9281c5142..9828263402ac1 100644 --- a/executor/slow_query_test.go +++ b/executor/slow_query_test.go @@ -452,7 +452,7 @@ select 7;` sctx.GetSessionVars().TimeZone = loc sctx.GetSessionVars().SlowQueryFile = fileName3 for i, cas := range cases { - extractor := &plannercore.SlowQueryExtractor{Enable: (len(cas.startTime) > 0 && len(cas.endTime) > 0)} + extractor := &plannercore.SlowQueryExtractor{Enable: len(cas.startTime) > 0 && len(cas.endTime) > 0} if extractor.Enable { startTime, err := ParseTime(cas.startTime) c.Assert(err, IsNil) @@ -622,7 +622,7 @@ select 9;` sctx.GetSessionVars().TimeZone = loc sctx.GetSessionVars().SlowQueryFile = fileName3 for i, cas := range cases { - extractor := &plannercore.SlowQueryExtractor{Enable: (len(cas.startTime) > 0 && len(cas.endTime) > 0), Desc: true} + extractor := &plannercore.SlowQueryExtractor{Enable: len(cas.startTime) > 0 && len(cas.endTime) > 0, Desc: true} if extractor.Enable { startTime, err := ParseTime(cas.startTime) c.Assert(err, IsNil) diff --git a/executor/temporary_table_serial_test.go b/executor/temporary_table_test.go similarity index 100% rename from executor/temporary_table_serial_test.go rename to executor/temporary_table_test.go diff --git a/executor/write_serial_test.go b/executor/write_serial_test.go deleted file mode 100644 index 440ecbeb177e2..0000000000000 --- a/executor/write_serial_test.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package executor_test - -import ( - "testing" - - "github.com/pingcap/tidb/config" - "github.com/pingcap/tidb/parser/mysql" - "github.com/pingcap/tidb/planner/core" - "github.com/pingcap/tidb/sessionctx/variable" - "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/types" - "github.com/pingcap/tidb/util/collate" - "github.com/stretchr/testify/require" -) - -func TestUpdate(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - fillData(tk, "update_test") - - updateStr := `UPDATE update_test SET name = "abc" where id > 0;` - tk.MustExec(updateStr) - tk.CheckExecResult(2, 0) - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 2 Changed: 2 Warnings: 0") - - // select data - tk.MustExec("begin") - r := tk.MustQuery(`SELECT * from update_test limit 2;`) - r.Check(testkit.Rows("1 abc", "2 abc")) - tk.MustExec("commit") - - tk.MustExec(`UPDATE update_test SET name = "foo"`) - tk.CheckExecResult(2, 0) - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 2 Changed: 2 Warnings: 0") - - // table option is auto-increment - tk.MustExec("begin") - tk.MustExec("drop table if exists update_test;") - tk.MustExec("commit") - tk.MustExec("begin") - tk.MustExec("create table update_test(id int not null auto_increment, name varchar(255), primary key(id))") - tk.MustExec("insert into update_test(name) values ('aa')") - tk.MustExec("update update_test set id = 8 where name = 'aa'") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") - tk.MustExec("insert into update_test(name) values ('bb')") - tk.MustExec("commit") - tk.MustExec("begin") - r = tk.MustQuery("select * from update_test;") - r.Check(testkit.Rows("8 aa", "9 bb")) - tk.MustExec("commit") - - tk.MustExec("begin") - tk.MustExec("drop table if exists update_test;") - tk.MustExec("commit") - tk.MustExec("begin") - tk.MustExec("create table update_test(id int not null auto_increment, name varchar(255), index(id))") - tk.MustExec("insert into update_test(name) values ('aa')") - _, err := tk.Exec("update update_test set id = null where name = 'aa'") - require.EqualError(t, err, "[table:1048]Column 'id' cannot be null") - - tk.MustExec("drop table update_test") - tk.MustExec("create table update_test(id int)") - tk.MustExec("begin") - tk.MustExec("insert into update_test(id) values (1)") - tk.MustExec("update update_test set id = 2 where id = 1 limit 1") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") - r = tk.MustQuery("select * from update_test;") - r.Check(testkit.Rows("2")) - tk.MustExec("commit") - - // Test that in a transaction, when a constraint failed in an update statement, the record is not inserted. 
- tk.MustExec("create table update_unique (id int primary key, name int unique)") - tk.MustExec("insert update_unique values (1, 1), (2, 2);") - tk.MustExec("begin") - _, err = tk.Exec("update update_unique set name = 1 where id = 2") - require.Error(t, err) - tk.MustExec("commit") - tk.MustQuery("select * from update_unique").Check(testkit.Rows("1 1", "2 2")) - - // test update ignore for pimary key - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a bigint, primary key (a));") - tk.MustExec("insert into t values (1)") - tk.MustExec("insert into t values (2)") - _, err = tk.Exec("update ignore t set a = 1 where a = 2;") - require.NoError(t, err) - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 1") - r = tk.MustQuery("SHOW WARNINGS;") - r.Check(testkit.Rows("Warning 1062 Duplicate entry '1' for key 'PRIMARY'")) - tk.MustQuery("select * from t").Check(testkit.Rows("1", "2")) - - // test update ignore for truncate as warning - _, err = tk.Exec("update ignore t set a = 1 where a = (select '2a')") - require.NoError(t, err) - r = tk.MustQuery("SHOW WARNINGS;") - r.Check(testkit.Rows("Warning 1292 Truncated incorrect DOUBLE value: '2a'", "Warning 1292 Truncated incorrect DOUBLE value: '2a'", "Warning 1062 Duplicate entry '1' for key 'PRIMARY'")) - - tk.MustExec("update ignore t set a = 42 where a = 2;") - tk.MustQuery("select * from t").Check(testkit.Rows("1", "42")) - - // test update ignore for unique key - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a bigint, unique key I_uniq (a));") - tk.MustExec("insert into t values (1)") - tk.MustExec("insert into t values (2)") - _, err = tk.Exec("update ignore t set a = 1 where a = 2;") - require.NoError(t, err) - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 1") - r = tk.MustQuery("SHOW WARNINGS;") - r.Check(testkit.Rows("Warning 1062 Duplicate entry '1' for key 'I_uniq'")) - tk.MustQuery("select * from t").Check(testkit.Rows("1", "2")) - - // test issue21965 - tk.MustExec("drop table if exists t;") - tk.MustExec("set @@session.tidb_enable_list_partition = ON") - tk.MustExec("create table t (a int) partition by list (a) (partition p0 values in (0,1));") - tk.MustExec("insert ignore into t values (1);") - tk.MustExec("update ignore t set a=2 where a=1;") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 0") - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t (a int key) partition by list (a) (partition p0 values in (0,1));") - tk.MustExec("insert ignore into t values (1);") - tk.MustExec("update ignore t set a=2 where a=1;") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 0") - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(id integer auto_increment, t1 datetime, t2 datetime, primary key (id))") - tk.MustExec("insert into t(t1, t2) values('2000-10-01 01:01:01', '2017-01-01 10:10:10')") - tk.MustQuery("select * from t").Check(testkit.Rows("1 2000-10-01 01:01:01 2017-01-01 10:10:10")) - tk.MustExec("update t set t1 = '2017-10-01 10:10:11', t2 = date_add(t1, INTERVAL 10 MINUTE) where id = 1") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") - tk.MustQuery("select * from t").Check(testkit.Rows("1 2017-10-01 10:10:11 2000-10-01 01:11:01")) - - // for issue #5132 - tk.MustExec("CREATE TABLE `tt1` (" + - "`a` int(11) NOT NULL," + - "`b` varchar(32) DEFAULT NULL," + - "`c` varchar(32) DEFAULT 
NULL," + - "PRIMARY KEY (`a`)," + - "UNIQUE KEY `b_idx` (`b`)" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;") - tk.MustExec("insert into tt1 values(1, 'a', 'a');") - tk.MustExec("insert into tt1 values(2, 'd', 'b');") - r = tk.MustQuery("select * from tt1;") - r.Check(testkit.Rows("1 a a", "2 d b")) - tk.MustExec("update tt1 set a=5 where c='b';") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") - r = tk.MustQuery("select * from tt1;") - r.Check(testkit.Rows("1 a a", "5 d b")) - - // Automatic Updating for TIMESTAMP - tk.MustExec("CREATE TABLE `tsup` (" + - "`a` int," + - "`ts` TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP," + - "KEY `idx` (`ts`)" + - ");") - tk.MustExec("set @orig_sql_mode=@@sql_mode; set @@sql_mode='';") - tk.MustExec("insert into tsup values(1, '0000-00-00 00:00:00');") - tk.MustExec("update tsup set a=5;") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") - r1 := tk.MustQuery("select ts from tsup use index (idx);") - r2 := tk.MustQuery("select ts from tsup;") - r1.Check(r2.Rows()) - tk.MustExec("update tsup set ts='2019-01-01';") - tk.MustQuery("select ts from tsup;").Check(testkit.Rows("2019-01-01 00:00:00")) - tk.MustExec("set @@sql_mode=@orig_sql_mode;") - - // issue 5532 - tk.MustExec("create table decimals (a decimal(20, 0) not null)") - tk.MustExec("insert into decimals values (201)") - // A warning rather than data truncated error. - tk.MustExec("update decimals set a = a + 1.23;") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 1") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect DECIMAL value: '202.23'")) - r = tk.MustQuery("select * from decimals") - r.Check(testkit.Rows("202")) - - tk.MustExec("drop table t") - tk.MustExec("CREATE TABLE `t` ( `c1` year DEFAULT NULL, `c2` year DEFAULT NULL, `c3` date DEFAULT NULL, `c4` datetime DEFAULT NULL, KEY `idx` (`c1`,`c2`))") - _, err = tk.Exec("UPDATE t SET c2=16777215 WHERE c1>= -8388608 AND c1 < -9 ORDER BY c1 LIMIT 2") - require.NoError(t, err) - - tk.MustGetErrCode("update (select * from t) t set c1 = 1111111", mysql.ErrNonUpdatableTable) - - // test update ignore for bad null error - tk.MustExec("drop table if exists t;") - tk.MustExec(`create table t (i int not null default 10)`) - tk.MustExec("insert into t values (1)") - tk.MustExec("update ignore t set i = null;") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 1") - r = tk.MustQuery("SHOW WARNINGS;") - r.Check(testkit.Rows("Warning 1048 Column 'i' cannot be null")) - tk.MustQuery("select * from t").Check(testkit.Rows("0")) - - // issue 7237, update subquery table should be forbidden - tk.MustExec("drop table t") - tk.MustExec("create table t (k int, v int)") - _, err = tk.Exec("update t, (select * from t) as b set b.k = t.k") - require.EqualError(t, err, "[planner:1288]The target table b of the UPDATE is not updatable") - tk.MustExec("update t, (select * from t) as b set t.k = b.k") - - // issue 8045 - tk.MustExec("drop table if exists t1") - tk.MustExec(`CREATE TABLE t1 (c1 float)`) - tk.MustExec("INSERT INTO t1 SET c1 = 1") - tk.MustExec("UPDATE t1 SET c1 = 1.2 WHERE c1=1;") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") - - // issue 8119 - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t (c1 float(1,1));") - tk.MustExec("insert into t values (0.0);") - _, err = tk.Exec("update 
t set c1 = 2.0;") - require.True(t, types.ErrWarnDataOutOfRange.Equal(err)) - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a datetime not null, b datetime)") - tk.MustExec("insert into t value('1999-12-12', '1999-12-13')") - tk.MustExec("set @orig_sql_mode=@@sql_mode; set @@sql_mode='';") - tk.MustQuery("select * from t").Check(testkit.Rows("1999-12-12 00:00:00 1999-12-13 00:00:00")) - tk.MustExec("update t set a = ''") - tk.MustQuery("select * from t").Check(testkit.Rows("0000-00-00 00:00:00 1999-12-13 00:00:00")) - tk.MustExec("update t set b = ''") - tk.MustQuery("select * from t").Check(testkit.Rows("0000-00-00 00:00:00 0000-00-00 00:00:00")) - tk.MustExec("set @@sql_mode=@orig_sql_mode;") - - tk.MustExec("create view v as select * from t") - _, err = tk.Exec("update v set a = '2000-11-11'") - require.EqualError(t, err, core.ErrViewInvalid.GenWithStackByArgs("test", "v").Error()) - tk.MustExec("drop view v") - - tk.MustExec("create sequence seq") - tk.MustGetErrCode("update seq set minvalue=1", mysql.ErrBadField) - tk.MustExec("drop sequence seq") - - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1(a int, b int, c int, d int, e int, index idx(a))") - tk.MustExec("create table t2(a int, b int, c int)") - tk.MustExec("update t1 join t2 on t1.a=t2.a set t1.a=1 where t2.b=1 and t2.c=2") - - // Assign `DEFAULT` in `UPDATE` statement - tk.MustExec("drop table if exists t1, t2;") - tk.MustExec("create table t1 (a int default 1, b int default 2);") - tk.MustExec("insert into t1 values (10, 10), (20, 20);") - tk.MustExec("update t1 set a=default where b=10;") - tk.MustQuery("select * from t1;").Check(testkit.Rows("1 10", "20 20")) - tk.MustExec("update t1 set a=30, b=default where a=20;") - tk.MustQuery("select * from t1;").Check(testkit.Rows("1 10", "30 2")) - tk.MustExec("update t1 set a=default, b=default where a=30;") - tk.MustQuery("select * from t1;").Check(testkit.Rows("1 10", "1 2")) - tk.MustExec("insert into t1 values (40, 40)") - tk.MustExec("update t1 set a=default, b=default") - tk.MustQuery("select * from t1;").Check(testkit.Rows("1 2", "1 2", "1 2")) - tk.MustExec("update t1 set a=default(b), b=default(a)") - tk.MustQuery("select * from t1;").Check(testkit.Rows("2 1", "2 1", "2 1")) - // With generated columns - tk.MustExec("create table t2 (a int default 1, b int generated always as (-a) virtual, c int generated always as (-a) stored);") - tk.MustExec("insert into t2 values (10, default, default), (20, default, default)") - tk.MustExec("update t2 set b=default;") - tk.MustQuery("select * from t2;").Check(testkit.Rows("10 -10 -10", "20 -20 -20")) - tk.MustExec("update t2 set a=30, b=default where a=10;") - tk.MustQuery("select * from t2;").Check(testkit.Rows("30 -30 -30", "20 -20 -20")) - tk.MustExec("update t2 set c=default, a=40 where c=-20;") - tk.MustQuery("select * from t2;").Check(testkit.Rows("30 -30 -30", "40 -40 -40")) - tk.MustExec("update t2 set a=default, b=default, c=default where b=-30;") - tk.MustQuery("select * from t2;").Check(testkit.Rows("1 -1 -1", "40 -40 -40")) - tk.MustExec("update t2 set a=default(a), b=default, c=default;") - tk.MustQuery("select * from t2;").Check(testkit.Rows("1 -1 -1", "1 -1 -1")) - tk.MustGetErrCode("update t2 set b=default(a);", mysql.ErrBadGeneratedColumn) - tk.MustGetErrCode("update t2 set a=default(b), b=default(b);", mysql.ErrBadGeneratedColumn) - tk.MustGetErrCode("update t2 set a=default(a), c=default(c);", mysql.ErrBadGeneratedColumn) - tk.MustGetErrCode("update t2 set 
a=default(a), c=default(a);", mysql.ErrBadGeneratedColumn) - tk.MustExec("drop table t1, t2") -} - -func TestListColumnsPartitionWithGlobalIndex(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@session.tidb_enable_list_partition = ON") - // Test generated column with global index - restoreConfig := config.RestoreFunc() - defer restoreConfig() - config.UpdateGlobal(func(conf *config.Config) { - conf.EnableGlobalIndex = true - }) - tableDefs := []string{ - // Test for virtual generated column with global index - `create table t (a varchar(10), b varchar(1) GENERATED ALWAYS AS (substr(a,1,1)) VIRTUAL) partition by list columns(b) (partition p0 values in ('a','c'), partition p1 values in ('b','d'));`, - // Test for stored generated column with global index - `create table t (a varchar(10), b varchar(1) GENERATED ALWAYS AS (substr(a,1,1)) STORED) partition by list columns(b) (partition p0 values in ('a','c'), partition p1 values in ('b','d'));`, - } - for _, tbl := range tableDefs { - tk.MustExec("drop table if exists t") - tk.MustExec(tbl) - tk.MustExec("alter table t add unique index (a)") - tk.MustExec("insert into t (a) values ('aaa'),('abc'),('acd')") - tk.MustQuery("select a from t partition (p0) order by a").Check(testkit.Rows("aaa", "abc", "acd")) - tk.MustQuery("select * from t where a = 'abc' order by a").Check(testkit.Rows("abc a")) - tk.MustExec("update t set a='bbb' where a = 'aaa'") - tk.MustExec("admin check table t") - tk.MustQuery("select a from t order by a").Check(testkit.Rows("abc", "acd", "bbb")) - // TODO: fix below test. - //tk.MustQuery("select a from t partition (p0) order by a").Check(testkit.Rows("abc", "acd")) - //tk.MustQuery("select a from t partition (p1) order by a").Check(testkit.Rows("bbb")) - tk.MustQuery("select * from t where a = 'bbb' order by a").Check(testkit.Rows("bbb b")) - // Test insert meet duplicate error. - _, err := tk.Exec("insert into t (a) values ('abc')") - require.Error(t, err) - // Test insert on duplicate update - tk.MustExec("insert into t (a) values ('abc') on duplicate key update a='bbc'") - tk.MustQuery("select a from t order by a").Check(testkit.Rows("acd", "bbb", "bbc")) - tk.MustQuery("select * from t where a = 'bbc'").Check(testkit.Rows("bbc b")) - // TODO: fix below test. 
- //tk.MustQuery("select a from t partition (p0) order by a").Check(testkit.Rows("acd")) - //tk.MustQuery("select a from t partition (p1) order by a").Check(testkit.Rows("bbb", "bbc")) - } -} - -func TestIssue20724(t *testing.T) { - collate.SetNewCollationEnabledForTest(true) - defer collate.SetNewCollationEnabledForTest(false) - - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1(a varchar(10) collate utf8mb4_general_ci)") - tk.MustExec("insert into t1 values ('a')") - tk.MustExec("update t1 set a = 'A'") - tk.MustQuery("select * from t1").Check(testkit.Rows("A")) - tk.MustExec("drop table t1") -} - -func TestIssue20840(t *testing.T) { - collate.SetNewCollationEnabledForTest(true) - defer collate.SetNewCollationEnabledForTest(false) - - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1") - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly - tk.MustExec("create table t1 (i varchar(20) unique key) collate=utf8mb4_general_ci") - tk.MustExec("insert into t1 values ('a')") - tk.MustExec("replace into t1 values ('A')") - tk.MustQuery("select * from t1").Check(testkit.Rows("A")) - tk.MustExec("drop table t1") -} - -func TestIssueInsertPrefixIndexForNonUTF8Collation(t *testing.T) { - collate.SetNewCollationEnabledForTest(true) - defer collate.SetNewCollationEnabledForTest(false) - - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2, t3") - tk.MustExec("create table t1 ( c_int int, c_str varchar(40) character set ascii collate ascii_bin, primary key(c_int, c_str(8)) clustered , unique key(c_str))") - tk.MustExec("create table t2 ( c_int int, c_str varchar(40) character set latin1 collate latin1_bin, primary key(c_int, c_str(8)) clustered , unique key(c_str))") - tk.MustExec("insert into t1 values (3, 'fervent brattain')") - tk.MustExec("insert into t2 values (3, 'fervent brattain')") - tk.MustExec("admin check table t1") - tk.MustExec("admin check table t2") - - tk.MustExec("create table t3 (x varchar(40) CHARACTER SET ascii COLLATE ascii_bin, UNIQUE KEY uk(x(4)))") - tk.MustExec("insert into t3 select 'abc '") - tk.MustGetErrCode("insert into t3 select 'abc d'", 1062) -} diff --git a/executor/write_test.go b/executor/write_test.go index 04609968d486a..11e402f446631 100644 --- a/executor/write_test.go +++ b/executor/write_test.go @@ -21,6 +21,7 @@ import ( "strconv" "testing" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" @@ -30,12 +31,14 @@ import ( "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/collate" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/testutil" "github.com/stretchr/testify/require" @@ -3878,3 +3881,364 @@ func testEqualDatumsAsBinary(t *testing.T, a []interface{}, b []interface{}, sam require.NoError(t, err) 
require.Equal(t, same, res, "a: %v, b: %v", a, b) } + +func TestUpdate(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + fillData(tk, "update_test") + + updateStr := `UPDATE update_test SET name = "abc" where id > 0;` + tk.MustExec(updateStr) + tk.CheckExecResult(2, 0) + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 2 Changed: 2 Warnings: 0") + + // select data + tk.MustExec("begin") + r := tk.MustQuery(`SELECT * from update_test limit 2;`) + r.Check(testkit.Rows("1 abc", "2 abc")) + tk.MustExec("commit") + + tk.MustExec(`UPDATE update_test SET name = "foo"`) + tk.CheckExecResult(2, 0) + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 2 Changed: 2 Warnings: 0") + + // table option is auto-increment + tk.MustExec("begin") + tk.MustExec("drop table if exists update_test;") + tk.MustExec("commit") + tk.MustExec("begin") + tk.MustExec("create table update_test(id int not null auto_increment, name varchar(255), primary key(id))") + tk.MustExec("insert into update_test(name) values ('aa')") + tk.MustExec("update update_test set id = 8 where name = 'aa'") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") + tk.MustExec("insert into update_test(name) values ('bb')") + tk.MustExec("commit") + tk.MustExec("begin") + r = tk.MustQuery("select * from update_test;") + r.Check(testkit.Rows("8 aa", "9 bb")) + tk.MustExec("commit") + + tk.MustExec("begin") + tk.MustExec("drop table if exists update_test;") + tk.MustExec("commit") + tk.MustExec("begin") + tk.MustExec("create table update_test(id int not null auto_increment, name varchar(255), index(id))") + tk.MustExec("insert into update_test(name) values ('aa')") + _, err := tk.Exec("update update_test set id = null where name = 'aa'") + require.EqualError(t, err, "[table:1048]Column 'id' cannot be null") + + tk.MustExec("drop table update_test") + tk.MustExec("create table update_test(id int)") + tk.MustExec("begin") + tk.MustExec("insert into update_test(id) values (1)") + tk.MustExec("update update_test set id = 2 where id = 1 limit 1") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") + r = tk.MustQuery("select * from update_test;") + r.Check(testkit.Rows("2")) + tk.MustExec("commit") + + // Test that in a transaction, when a constraint failed in an update statement, the record is not inserted. 
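+ // The failed update must leave both original rows untouched, as the query below verifies.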
+ tk.MustExec("create table update_unique (id int primary key, name int unique)") + tk.MustExec("insert update_unique values (1, 1), (2, 2);") + tk.MustExec("begin") + _, err = tk.Exec("update update_unique set name = 1 where id = 2") + require.Error(t, err) + tk.MustExec("commit") + tk.MustQuery("select * from update_unique").Check(testkit.Rows("1 1", "2 2")) + + // test update ignore for pimary key + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a bigint, primary key (a));") + tk.MustExec("insert into t values (1)") + tk.MustExec("insert into t values (2)") + _, err = tk.Exec("update ignore t set a = 1 where a = 2;") + require.NoError(t, err) + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 1") + r = tk.MustQuery("SHOW WARNINGS;") + r.Check(testkit.Rows("Warning 1062 Duplicate entry '1' for key 'PRIMARY'")) + tk.MustQuery("select * from t").Check(testkit.Rows("1", "2")) + + // test update ignore for truncate as warning + _, err = tk.Exec("update ignore t set a = 1 where a = (select '2a')") + require.NoError(t, err) + r = tk.MustQuery("SHOW WARNINGS;") + r.Check(testkit.Rows("Warning 1292 Truncated incorrect DOUBLE value: '2a'", "Warning 1292 Truncated incorrect DOUBLE value: '2a'", "Warning 1062 Duplicate entry '1' for key 'PRIMARY'")) + + tk.MustExec("update ignore t set a = 42 where a = 2;") + tk.MustQuery("select * from t").Check(testkit.Rows("1", "42")) + + // test update ignore for unique key + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a bigint, unique key I_uniq (a));") + tk.MustExec("insert into t values (1)") + tk.MustExec("insert into t values (2)") + _, err = tk.Exec("update ignore t set a = 1 where a = 2;") + require.NoError(t, err) + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 1") + r = tk.MustQuery("SHOW WARNINGS;") + r.Check(testkit.Rows("Warning 1062 Duplicate entry '1' for key 'I_uniq'")) + tk.MustQuery("select * from t").Check(testkit.Rows("1", "2")) + + // test issue21965 + tk.MustExec("drop table if exists t;") + tk.MustExec("set @@session.tidb_enable_list_partition = ON") + tk.MustExec("create table t (a int) partition by list (a) (partition p0 values in (0,1));") + tk.MustExec("insert ignore into t values (1);") + tk.MustExec("update ignore t set a=2 where a=1;") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 0") + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int key) partition by list (a) (partition p0 values in (0,1));") + tk.MustExec("insert ignore into t values (1);") + tk.MustExec("update ignore t set a=2 where a=1;") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 0") + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(id integer auto_increment, t1 datetime, t2 datetime, primary key (id))") + tk.MustExec("insert into t(t1, t2) values('2000-10-01 01:01:01', '2017-01-01 10:10:10')") + tk.MustQuery("select * from t").Check(testkit.Rows("1 2000-10-01 01:01:01 2017-01-01 10:10:10")) + tk.MustExec("update t set t1 = '2017-10-01 10:10:11', t2 = date_add(t1, INTERVAL 10 MINUTE) where id = 1") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") + tk.MustQuery("select * from t").Check(testkit.Rows("1 2017-10-01 10:10:11 2000-10-01 01:11:01")) + + // for issue #5132 + tk.MustExec("CREATE TABLE `tt1` (" + + "`a` int(11) NOT NULL," + + "`b` varchar(32) DEFAULT NULL," + + "`c` varchar(32) DEFAULT 
NULL," + + "PRIMARY KEY (`a`)," + + "UNIQUE KEY `b_idx` (`b`)" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;") + tk.MustExec("insert into tt1 values(1, 'a', 'a');") + tk.MustExec("insert into tt1 values(2, 'd', 'b');") + r = tk.MustQuery("select * from tt1;") + r.Check(testkit.Rows("1 a a", "2 d b")) + tk.MustExec("update tt1 set a=5 where c='b';") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") + r = tk.MustQuery("select * from tt1;") + r.Check(testkit.Rows("1 a a", "5 d b")) + + // Automatic Updating for TIMESTAMP + tk.MustExec("CREATE TABLE `tsup` (" + + "`a` int," + + "`ts` TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP," + + "KEY `idx` (`ts`)" + + ");") + tk.MustExec("set @orig_sql_mode=@@sql_mode; set @@sql_mode='';") + tk.MustExec("insert into tsup values(1, '0000-00-00 00:00:00');") + tk.MustExec("update tsup set a=5;") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") + r1 := tk.MustQuery("select ts from tsup use index (idx);") + r2 := tk.MustQuery("select ts from tsup;") + r1.Check(r2.Rows()) + tk.MustExec("update tsup set ts='2019-01-01';") + tk.MustQuery("select ts from tsup;").Check(testkit.Rows("2019-01-01 00:00:00")) + tk.MustExec("set @@sql_mode=@orig_sql_mode;") + + // issue 5532 + tk.MustExec("create table decimals (a decimal(20, 0) not null)") + tk.MustExec("insert into decimals values (201)") + // A warning rather than data truncated error. + tk.MustExec("update decimals set a = a + 1.23;") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 1") + tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect DECIMAL value: '202.23'")) + r = tk.MustQuery("select * from decimals") + r.Check(testkit.Rows("202")) + + tk.MustExec("drop table t") + tk.MustExec("CREATE TABLE `t` ( `c1` year DEFAULT NULL, `c2` year DEFAULT NULL, `c3` date DEFAULT NULL, `c4` datetime DEFAULT NULL, KEY `idx` (`c1`,`c2`))") + _, err = tk.Exec("UPDATE t SET c2=16777215 WHERE c1>= -8388608 AND c1 < -9 ORDER BY c1 LIMIT 2") + require.NoError(t, err) + + tk.MustGetErrCode("update (select * from t) t set c1 = 1111111", mysql.ErrNonUpdatableTable) + + // test update ignore for bad null error + tk.MustExec("drop table if exists t;") + tk.MustExec(`create table t (i int not null default 10)`) + tk.MustExec("insert into t values (1)") + tk.MustExec("update ignore t set i = null;") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 1") + r = tk.MustQuery("SHOW WARNINGS;") + r.Check(testkit.Rows("Warning 1048 Column 'i' cannot be null")) + tk.MustQuery("select * from t").Check(testkit.Rows("0")) + + // issue 7237, update subquery table should be forbidden + tk.MustExec("drop table t") + tk.MustExec("create table t (k int, v int)") + _, err = tk.Exec("update t, (select * from t) as b set b.k = t.k") + require.EqualError(t, err, "[planner:1288]The target table b of the UPDATE is not updatable") + tk.MustExec("update t, (select * from t) as b set t.k = b.k") + + // issue 8045 + tk.MustExec("drop table if exists t1") + tk.MustExec(`CREATE TABLE t1 (c1 float)`) + tk.MustExec("INSERT INTO t1 SET c1 = 1") + tk.MustExec("UPDATE t1 SET c1 = 1.2 WHERE c1=1;") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") + + // issue 8119 + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (c1 float(1,1));") + tk.MustExec("insert into t values (0.0);") + _, err = tk.Exec("update 
t set c1 = 2.0;") + require.True(t, types.ErrWarnDataOutOfRange.Equal(err)) + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a datetime not null, b datetime)") + tk.MustExec("insert into t value('1999-12-12', '1999-12-13')") + tk.MustExec("set @orig_sql_mode=@@sql_mode; set @@sql_mode='';") + tk.MustQuery("select * from t").Check(testkit.Rows("1999-12-12 00:00:00 1999-12-13 00:00:00")) + tk.MustExec("update t set a = ''") + tk.MustQuery("select * from t").Check(testkit.Rows("0000-00-00 00:00:00 1999-12-13 00:00:00")) + tk.MustExec("update t set b = ''") + tk.MustQuery("select * from t").Check(testkit.Rows("0000-00-00 00:00:00 0000-00-00 00:00:00")) + tk.MustExec("set @@sql_mode=@orig_sql_mode;") + + tk.MustExec("create view v as select * from t") + _, err = tk.Exec("update v set a = '2000-11-11'") + require.EqualError(t, err, core.ErrViewInvalid.GenWithStackByArgs("test", "v").Error()) + tk.MustExec("drop view v") + + tk.MustExec("create sequence seq") + tk.MustGetErrCode("update seq set minvalue=1", mysql.ErrBadField) + tk.MustExec("drop sequence seq") + + tk.MustExec("drop table if exists t1, t2") + tk.MustExec("create table t1(a int, b int, c int, d int, e int, index idx(a))") + tk.MustExec("create table t2(a int, b int, c int)") + tk.MustExec("update t1 join t2 on t1.a=t2.a set t1.a=1 where t2.b=1 and t2.c=2") + + // Assign `DEFAULT` in `UPDATE` statement + tk.MustExec("drop table if exists t1, t2;") + tk.MustExec("create table t1 (a int default 1, b int default 2);") + tk.MustExec("insert into t1 values (10, 10), (20, 20);") + tk.MustExec("update t1 set a=default where b=10;") + tk.MustQuery("select * from t1;").Check(testkit.Rows("1 10", "20 20")) + tk.MustExec("update t1 set a=30, b=default where a=20;") + tk.MustQuery("select * from t1;").Check(testkit.Rows("1 10", "30 2")) + tk.MustExec("update t1 set a=default, b=default where a=30;") + tk.MustQuery("select * from t1;").Check(testkit.Rows("1 10", "1 2")) + tk.MustExec("insert into t1 values (40, 40)") + tk.MustExec("update t1 set a=default, b=default") + tk.MustQuery("select * from t1;").Check(testkit.Rows("1 2", "1 2", "1 2")) + tk.MustExec("update t1 set a=default(b), b=default(a)") + tk.MustQuery("select * from t1;").Check(testkit.Rows("2 1", "2 1", "2 1")) + // With generated columns + tk.MustExec("create table t2 (a int default 1, b int generated always as (-a) virtual, c int generated always as (-a) stored);") + tk.MustExec("insert into t2 values (10, default, default), (20, default, default)") + tk.MustExec("update t2 set b=default;") + tk.MustQuery("select * from t2;").Check(testkit.Rows("10 -10 -10", "20 -20 -20")) + tk.MustExec("update t2 set a=30, b=default where a=10;") + tk.MustQuery("select * from t2;").Check(testkit.Rows("30 -30 -30", "20 -20 -20")) + tk.MustExec("update t2 set c=default, a=40 where c=-20;") + tk.MustQuery("select * from t2;").Check(testkit.Rows("30 -30 -30", "40 -40 -40")) + tk.MustExec("update t2 set a=default, b=default, c=default where b=-30;") + tk.MustQuery("select * from t2;").Check(testkit.Rows("1 -1 -1", "40 -40 -40")) + tk.MustExec("update t2 set a=default(a), b=default, c=default;") + tk.MustQuery("select * from t2;").Check(testkit.Rows("1 -1 -1", "1 -1 -1")) + tk.MustGetErrCode("update t2 set b=default(a);", mysql.ErrBadGeneratedColumn) + tk.MustGetErrCode("update t2 set a=default(b), b=default(b);", mysql.ErrBadGeneratedColumn) + tk.MustGetErrCode("update t2 set a=default(a), c=default(c);", mysql.ErrBadGeneratedColumn) + tk.MustGetErrCode("update t2 set 
a=default(a), c=default(a);", mysql.ErrBadGeneratedColumn) + tk.MustExec("drop table t1, t2") +} + +func TestListColumnsPartitionWithGlobalIndex(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@session.tidb_enable_list_partition = ON") + // Test generated column with global index + restoreConfig := config.RestoreFunc() + defer restoreConfig() + config.UpdateGlobal(func(conf *config.Config) { + conf.EnableGlobalIndex = true + }) + tableDefs := []string{ + // Test for virtual generated column with global index + `create table t (a varchar(10), b varchar(1) GENERATED ALWAYS AS (substr(a,1,1)) VIRTUAL) partition by list columns(b) (partition p0 values in ('a','c'), partition p1 values in ('b','d'));`, + // Test for stored generated column with global index + `create table t (a varchar(10), b varchar(1) GENERATED ALWAYS AS (substr(a,1,1)) STORED) partition by list columns(b) (partition p0 values in ('a','c'), partition p1 values in ('b','d'));`, + } + for _, tbl := range tableDefs { + tk.MustExec("drop table if exists t") + tk.MustExec(tbl) + tk.MustExec("alter table t add unique index (a)") + tk.MustExec("insert into t (a) values ('aaa'),('abc'),('acd')") + tk.MustQuery("select a from t partition (p0) order by a").Check(testkit.Rows("aaa", "abc", "acd")) + tk.MustQuery("select * from t where a = 'abc' order by a").Check(testkit.Rows("abc a")) + tk.MustExec("update t set a='bbb' where a = 'aaa'") + tk.MustExec("admin check table t") + tk.MustQuery("select a from t order by a").Check(testkit.Rows("abc", "acd", "bbb")) + // TODO: fix below test. + //tk.MustQuery("select a from t partition (p0) order by a").Check(testkit.Rows("abc", "acd")) + //tk.MustQuery("select a from t partition (p1) order by a").Check(testkit.Rows("bbb")) + tk.MustQuery("select * from t where a = 'bbb' order by a").Check(testkit.Rows("bbb b")) + // Test insert meet duplicate error. + _, err := tk.Exec("insert into t (a) values ('abc')") + require.Error(t, err) + // Test insert on duplicate update + tk.MustExec("insert into t (a) values ('abc') on duplicate key update a='bbc'") + tk.MustQuery("select a from t order by a").Check(testkit.Rows("acd", "bbb", "bbc")) + tk.MustQuery("select * from t where a = 'bbc'").Check(testkit.Rows("bbc b")) + // TODO: fix below test. 
+ //tk.MustQuery("select a from t partition (p0) order by a").Check(testkit.Rows("acd")) + //tk.MustQuery("select a from t partition (p1) order by a").Check(testkit.Rows("bbb", "bbc")) + } +} + +func TestIssue20724(t *testing.T) { + collate.SetNewCollationEnabledForTest(true) + defer collate.SetNewCollationEnabledForTest(false) + + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1(a varchar(10) collate utf8mb4_general_ci)") + tk.MustExec("insert into t1 values ('a')") + tk.MustExec("update t1 set a = 'A'") + tk.MustQuery("select * from t1").Check(testkit.Rows("A")) + tk.MustExec("drop table t1") +} + +func TestIssue20840(t *testing.T) { + collate.SetNewCollationEnabledForTest(true) + defer collate.SetNewCollationEnabledForTest(false) + + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1") + tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly + tk.MustExec("create table t1 (i varchar(20) unique key) collate=utf8mb4_general_ci") + tk.MustExec("insert into t1 values ('a')") + tk.MustExec("replace into t1 values ('A')") + tk.MustQuery("select * from t1").Check(testkit.Rows("A")) + tk.MustExec("drop table t1") +} + +func TestIssueInsertPrefixIndexForNonUTF8Collation(t *testing.T) { + collate.SetNewCollationEnabledForTest(true) + defer collate.SetNewCollationEnabledForTest(false) + + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1, t2, t3") + tk.MustExec("create table t1 ( c_int int, c_str varchar(40) character set ascii collate ascii_bin, primary key(c_int, c_str(8)) clustered , unique key(c_str))") + tk.MustExec("create table t2 ( c_int int, c_str varchar(40) character set latin1 collate latin1_bin, primary key(c_int, c_str(8)) clustered , unique key(c_str))") + tk.MustExec("insert into t1 values (3, 'fervent brattain')") + tk.MustExec("insert into t2 values (3, 'fervent brattain')") + tk.MustExec("admin check table t1") + tk.MustExec("admin check table t2") + + tk.MustExec("create table t3 (x varchar(40) CHARACTER SET ascii COLLATE ascii_bin, UNIQUE KEY uk(x(4)))") + tk.MustExec("insert into t3 select 'abc '") + tk.MustGetErrCode("insert into t3 select 'abc d'", 1062) +} diff --git a/metrics/grafana/tidb.json b/metrics/grafana/tidb.json index 518112d04abcc..f6606d18ef4c6 100644 --- a/metrics/grafana/tidb.json +++ b/metrics/grafana/tidb.json @@ -6657,7 +6657,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(tidb_distsql_copr_cache_count{tidb_cluster=\"$tidb_cluster\"}[1m])) by (type)", + "expr": "sum(rate(tidb_distsql_copr_cache_sum{tidb_cluster=\"$tidb_cluster\"}[1m])) by (type)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{type}}", diff --git a/planner/core/cache.go b/planner/core/cache.go index ea6d0c32e3b39..a386c4a5a3649 100644 --- a/planner/core/cache.go +++ b/planner/core/cache.go @@ -74,6 +74,7 @@ type pstmtPlanCacheKey struct { timezoneOffset int isolationReadEngines map[kv.StoreType]struct{} selectLimit uint64 + bindSQL string hash []byte } @@ -104,6 +105,7 @@ func (key *pstmtPlanCacheKey) Hash() []byte { key.hash = append(key.hash, kv.TiFlash.Name()...) 
} key.hash = codec.EncodeInt(key.hash, int64(key.selectLimit)) + key.hash = append(key.hash, hack.Slice(key.bindSQL)...) } return key.hash } @@ -125,7 +127,7 @@ func SetPstmtIDSchemaVersion(key kvcache.Key, pstmtID uint32, schemaVersion int6 } // NewPSTMTPlanCacheKey creates a new pstmtPlanCacheKey object. -func NewPSTMTPlanCacheKey(sessionVars *variable.SessionVars, pstmtID uint32, schemaVersion int64) kvcache.Key { +func NewPSTMTPlanCacheKey(sessionVars *variable.SessionVars, pstmtID uint32, schemaVersion int64, bindSQL string) kvcache.Key { timezoneOffset := 0 if sessionVars.TimeZone != nil { _, timezoneOffset = time.Now().In(sessionVars.TimeZone).Zone() @@ -139,6 +141,7 @@ func NewPSTMTPlanCacheKey(sessionVars *variable.SessionVars, pstmtID uint32, sch timezoneOffset: timezoneOffset, isolationReadEngines: make(map[kv.StoreType]struct{}), selectLimit: sessionVars.SelectLimit, + bindSQL: bindSQL, } for k, v := range sessionVars.IsolationReadEngines { key.isolationReadEngines[k] = v diff --git a/planner/core/cache_test.go b/planner/core/cache_test.go index ff0ab53fa558b..074d1e4cf2828 100644 --- a/planner/core/cache_test.go +++ b/planner/core/cache_test.go @@ -28,6 +28,6 @@ func TestCacheKey(t *testing.T) { ctx.GetSessionVars().SQLMode = mysql.ModeNone ctx.GetSessionVars().TimeZone = time.UTC ctx.GetSessionVars().ConnectionID = 0 - key := NewPSTMTPlanCacheKey(ctx.GetSessionVars(), 1, 1) + key := NewPSTMTPlanCacheKey(ctx.GetSessionVars(), 1, 1, "") require.Equal(t, []byte{0x74, 0x65, 0x73, 0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x74, 0x69, 0x64, 0x62, 0x74, 0x69, 0x6b, 0x76, 0x74, 0x69, 0x66, 0x6c, 0x61, 0x73, 0x68, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, key.Hash()) } diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index 9af659ef6c972..d3e56600b2c25 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -401,8 +401,10 @@ func (e *Execute) getPhysicalPlan(ctx context.Context, sctx sessionctx.Context, } stmtCtx.UseCache = prepared.UseCache + var bindSQL string if prepared.UseCache { - cacheKey = NewPSTMTPlanCacheKey(sctx.GetSessionVars(), e.ExecID, prepared.SchemaVersion) + bindSQL = GetBindSQL4PlanCache(sctx, prepared.Stmt) + cacheKey = NewPSTMTPlanCacheKey(sctx.GetSessionVars(), e.ExecID, prepared.SchemaVersion, bindSQL) } tps := make([]*types.FieldType, len(e.UsingVars)) for i, param := range e.UsingVars { @@ -468,6 +470,14 @@ func (e *Execute) getPhysicalPlan(ctx context.Context, sctx sessionctx.Context, if err != nil { return err } + if len(bindSQL) > 0 { + // When the `len(bindSQL) > 0`, it means we use the binding. + // So we need to record this. 
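+ // TiDBFoundInBinding corresponds to the last_plan_from_binding session status, so callers can observe that a binding was applied.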
+ err = sessVars.SetSystemVar(variable.TiDBFoundInBinding, variable.BoolToOnOff(true)) + if err != nil { + return err + } + } if metrics.ResettablePlanCacheCounterFortTest { metrics.PlanCacheCounter.WithLabelValues("prepare").Inc() } else { @@ -500,8 +510,11 @@ REBUILD: // rebuild key to exclude kv.TiFlash when stmt is not read only if _, isolationReadContainTiFlash := sessVars.IsolationReadEngines[kv.TiFlash]; isolationReadContainTiFlash && !IsReadOnly(stmt, sessVars) { delete(sessVars.IsolationReadEngines, kv.TiFlash) - cacheKey = NewPSTMTPlanCacheKey(sctx.GetSessionVars(), e.ExecID, prepared.SchemaVersion) + cacheKey = NewPSTMTPlanCacheKey(sessVars, e.ExecID, prepared.SchemaVersion, sessVars.StmtCtx.BindSQL) sessVars.IsolationReadEngines[kv.TiFlash] = struct{}{} + } else { + // We need to reconstruct the plan cache key based on the bindSQL. + cacheKey = NewPSTMTPlanCacheKey(sessVars, e.ExecID, prepared.SchemaVersion, sessVars.StmtCtx.BindSQL) } cached := NewPSTMTPlanCacheValue(p, names, stmtCtx.TblInfo2UnionScan, tps) preparedStmt.NormalizedPlan, preparedStmt.PlanDigest = NormalizePlan(p) diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index c3b1239d4ffac..84149462965e1 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -4052,11 +4052,6 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as } else { columns = tbl.Cols() } - var statisticTable *statistics.Table - if _, ok := tbl.(table.PartitionedTable); !ok || b.ctx.GetSessionVars().UseDynamicPartitionPrune() { - statisticTable = getStatsTable(b.ctx, tbl.Meta(), tbl.Meta().ID) - } - // extract the IndexMergeHint var indexMergeHints []indexHintInfo if hints := b.TableHints(); hints != nil { @@ -4101,7 +4096,7 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as TableAsName: asName, table: tbl, tableInfo: tableInfo, - statisticTable: statisticTable, + physicalTableID: tableInfo.ID, astIndexHints: tn.IndexHints, IndexHints: b.TableHints().indexHintList, indexMergeHints: indexMergeHints, diff --git a/planner/core/optimizer.go b/planner/core/optimizer.go index 5103ea4fdf38d..89b156e632cea 100644 --- a/planner/core/optimizer.go +++ b/planner/core/optimizer.go @@ -44,6 +44,9 @@ import ( // OptimizeAstNode optimizes the query to a physical plan directly. var OptimizeAstNode func(ctx context.Context, sctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema) (Plan, types.NameSlice, error) +// GetBindSQL4PlanCache get the bindSQL for the ast.StmtNode +var GetBindSQL4PlanCache func(sctx sessionctx.Context, stmtNode ast.StmtNode) (bindSQL string) + // AllowCartesianProduct means whether tidb allows cartesian join without equal conditions. var AllowCartesianProduct = atomic.NewBool(true) diff --git a/planner/core/rule_partition_processor.go b/planner/core/rule_partition_processor.go index bb57b0fac33da..1264a47ac97bc 100644 --- a/planner/core/rule_partition_processor.go +++ b/planner/core/rule_partition_processor.go @@ -1415,9 +1415,6 @@ func (s *partitionProcessor) makeUnionAllChildren(ds *DataSource, pi *model.Part // id as FromID. So we set the id of the newDataSource with the original one to // avoid traversing the whole plan tree to update the references. 
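Editor's note: together with the deletion in buildDataSource above, data sources no longer load their statistics while the logical plan is being built; they only record a physicalTableID, and the statistics table is resolved lazily when stats are first requested (see the planner/core/stats.go hunk below). A hypothetical helper showing that lazy pattern:

// lazyStats resolves the statistics table on first use instead of at build time.
// (Sketch only; the real lookup lives in DataSource.initStats.)
func (ds *DataSource) lazyStats() *statistics.Table {
	if ds.statisticTable == nil {
		ds.statisticTable = getStatsTable(ds.ctx, ds.tableInfo, ds.physicalTableID)
	}
	return ds.statisticTable
}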
newDataSource.id = ds.id - if !ds.ctx.GetSessionVars().UseDynamicPartitionPrune() { - newDataSource.statisticTable = getStatsTable(ds.SCtx(), ds.table.Meta(), pi.Definitions[i].ID) - } err := s.resolveOptimizeHint(&newDataSource, pi.Definitions[i].Name) partitionNameSet.Insert(pi.Definitions[i].Name.L) if err != nil { diff --git a/planner/core/stats.go b/planner/core/stats.go index 2e7fd14a67b8d..d3f23427b2f40 100644 --- a/planner/core/stats.go +++ b/planner/core/stats.go @@ -226,7 +226,7 @@ func (ds *DataSource) initStats(colGroups [][]*expression.Column) { return } if ds.statisticTable == nil { - ds.statisticTable = getStatsTable(ds.ctx, ds.tableInfo, ds.table.Meta().ID) + ds.statisticTable = getStatsTable(ds.ctx, ds.tableInfo, ds.physicalTableID) } tableStats := &property.StatsInfo{ RowCount: float64(ds.statisticTable.Count), diff --git a/planner/optimize.go b/planner/optimize.go index 363c3e6f5374a..b16fc09a238f0 100644 --- a/planner/optimize.go +++ b/planner/optimize.go @@ -99,6 +99,28 @@ func GetExecuteForUpdateReadIS(node ast.Node, sctx sessionctx.Context) infoschem return nil } +// GetBindSQL4PlanCache used to get the bindSQL for plan cache to build the plan cache key. +func GetBindSQL4PlanCache(sctx sessionctx.Context, stmtNode ast.StmtNode) (bindSQL string) { + bindRecord, _, match := matchSQLBinding(sctx, stmtNode) + if match { + bindSQL = bindRecord.Bindings[0].BindSQL + } + return bindSQL +} + +func matchSQLBinding(sctx sessionctx.Context, stmtNode ast.StmtNode) (bindRecord *bindinfo.BindRecord, scope string, matched bool) { + useBinding := sctx.GetSessionVars().UsePlanBaselines + if !useBinding || stmtNode == nil { + return nil, "", false + } + var err error + bindRecord, scope, err = getBindRecord(sctx, stmtNode) + if err != nil || bindRecord == nil || len(bindRecord.Bindings) == 0 { + return nil, "", false + } + return bindRecord, scope, true +} + // Optimize does optimization and creates a Plan. // The node must be prepared first. func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema) (plannercore.Plan, types.NameSlice, error) { @@ -149,16 +171,9 @@ func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in if !ok { useBinding = false } - var ( - bindRecord *bindinfo.BindRecord - scope string - err error - ) - if useBinding { - bindRecord, scope, err = getBindRecord(sctx, stmtNode) - if err != nil || bindRecord == nil || len(bindRecord.Bindings) == 0 { - useBinding = false - } + bindRecord, scope, match := matchSQLBinding(sctx, stmtNode) + if !match { + useBinding = false } if ok { // add the extra Limit after matching the bind record @@ -166,14 +181,15 @@ func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in node = stmtNode } - var names types.NameSlice - var bestPlan, bestPlanFromBind plannercore.Plan + var ( + names types.NameSlice + bestPlan, bestPlanFromBind plannercore.Plan + chosenBinding bindinfo.Binding + err error + ) if useBinding { minCost := math.MaxFloat64 - var ( - bindStmtHints stmtctx.StmtHints - chosenBinding bindinfo.Binding - ) + var bindStmtHints stmtctx.StmtHints originHints := hint.CollectHint(stmtNode) // bindRecord must be not nil when coming here, try to find the best binding. 
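Editor's sketch: matchSQLBinding centralises the "is there a usable binding for this statement?" check, and GetBindSQL4PlanCache is the thin wrapper that other packages reach through the plannercore hook. A hypothetical caller (assuming the kvcache package is imported) only needs the binding text to make the prepared-plan cache key binding-aware:

// cacheKeyWithBinding builds a cache key that includes the currently matched
// binding, or the empty string when no binding matches the statement.
func cacheKeyWithBinding(sctx sessionctx.Context, stmtNode ast.StmtNode, execID uint32, schemaVer int64) kvcache.Key {
	bindSQL := GetBindSQL4PlanCache(sctx, stmtNode)
	return plannercore.NewPSTMTPlanCacheKey(sctx.GetSessionVars(), execID, schemaVer, bindSQL)
}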
for _, binding := range bindRecord.Bindings { @@ -206,7 +222,7 @@ func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in for _, warn := range warns { sessVars.StmtCtx.AppendWarning(warn) } - if err := setFoundInBinding(sctx, true); err != nil { + if err := setFoundInBinding(sctx, true, chosenBinding.BindSQL); err != nil { logutil.BgLogger().Warn("set tidb_found_in_binding failed", zap.Error(err)) } if sessVars.StmtCtx.InVerboseExplain { @@ -694,13 +710,15 @@ func handleStmtHints(hints []*ast.TableOptimizerHint) (stmtHints stmtctx.StmtHin return } -func setFoundInBinding(sctx sessionctx.Context, opt bool) error { +func setFoundInBinding(sctx sessionctx.Context, opt bool, bindSQL string) error { vars := sctx.GetSessionVars() + vars.StmtCtx.BindSQL = bindSQL err := vars.SetSystemVar(variable.TiDBFoundInBinding, variable.BoolToOnOff(opt)) return err } func init() { plannercore.OptimizeAstNode = Optimize + plannercore.GetBindSQL4PlanCache = GetBindSQL4PlanCache plannercore.IsReadOnly = IsReadOnly } diff --git a/server/driver_tidb.go b/server/driver_tidb.go index 6dae49084eeee..9a13eea632962 100644 --- a/server/driver_tidb.go +++ b/server/driver_tidb.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/tidb/parser/charset" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/planner" "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx/stmtctx" @@ -164,8 +165,10 @@ func (ts *TiDBStatement) Close() error { if !ok { return errors.Errorf("invalid CachedPrepareStmt type") } + preparedAst := preparedObj.PreparedAst + bindSQL := planner.GetBindSQL4PlanCache(ts.ctx, preparedAst.Stmt) ts.ctx.PreparedPlanCache().Delete(core.NewPSTMTPlanCacheKey( - ts.ctx.GetSessionVars(), ts.id, preparedObj.PreparedAst.SchemaVersion)) + ts.ctx.GetSessionVars(), ts.id, preparedObj.PreparedAst.SchemaVersion, bindSQL)) } ts.ctx.GetSessionVars().RemovePreparedStmt(ts.id) } diff --git a/server/http_handler_test.go b/server/http_handler_test.go index ac57db6425891..48205c192c11e 100644 --- a/server/http_handler_test.go +++ b/server/http_handler_test.go @@ -448,7 +448,6 @@ func (ts *basicHTTPHandlerTestSuite) startServer(t *testing.T) { cfg.Port = 0 cfg.Status.StatusPort = 0 cfg.Status.ReportStatus = true - cfg.Socket = fmt.Sprintf("/tmp/%s.sock", t.Name()) server, err := NewServer(cfg, ts.tidbdrv) require.NoError(t, err) diff --git a/session/bootstrap.go b/session/bootstrap.go index 815e921c4a6fb..2244e3eaa5484 100644 --- a/session/bootstrap.go +++ b/session/bootstrap.go @@ -538,11 +538,14 @@ const ( version78 = 78 // version79 adds the mysql.table_cache_meta table version79 = 79 + // version80 fixes the issue https://github.com/pingcap/tidb/issues/25422. + // If TiDB is upgraded from 4.x to a newer version, keep tidb_analyze_version at 1. + version80 = 80 ) // currentBootstrapVersion is defined as a variable, so we can modify its value for testing.
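Editor's note: version80 is a data backfill rather than a schema change. For a cluster upgrading from a release that predates tidb_analyze_version, it pins the global value to 1 so the cluster keeps its pre-existing ANALYZE behaviour; a freshly bootstrapped cluster still gets the current default. The observable effect after such an upgrade (testkit-style sketch; a session handle tk is assumed):

tk.MustQuery("select @@global.tidb_analyze_version").Check(testkit.Rows("1"))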
// please make sure this is the largest version -var currentBootstrapVersion int64 = version79 +var currentBootstrapVersion int64 = version80 var ( bootstrapVersion = []func(Session, int64){ @@ -625,6 +628,7 @@ var ( upgradeToVer77, upgradeToVer78, upgradeToVer79, + upgradeToVer80, } ) @@ -1630,6 +1634,27 @@ func upgradeToVer79(s Session, ver int64) { doReentrantDDL(s, CreateTableCacheMetaTable) } +func upgradeToVer80(s Session, ver int64) { + if ver >= version80 { + return + } + // Check if tidb_analyze_version exists in mysql.GLOBAL_VARIABLES. + // If not, insert "tidb_analyze_version | 1" since this was the behavior before the variable was introduced. + ctx := context.Background() + rs, err := s.ExecuteInternal(ctx, "SELECT VARIABLE_VALUE FROM %n.%n WHERE VARIABLE_NAME=%?;", + mysql.SystemDB, mysql.GlobalVariablesTable, variable.TiDBAnalyzeVersion) + terror.MustNil(err) + req := rs.NewChunk(nil) + err = rs.Next(ctx, req) + terror.MustNil(err) + if req.NumRows() != 0 { + return + } + + mustExecute(s, "INSERT HIGH_PRIORITY IGNORE INTO %n.%n VALUES (%?, %?);", + mysql.SystemDB, mysql.GlobalVariablesTable, variable.TiDBAnalyzeVersion, 1) +} + func writeOOMAction(s Session) { comment := "oom-action is `log` by default in v3.0.x, `cancel` by default in v4.0.11+" mustExecute(s, `INSERT HIGH_PRIORITY INTO %n.%n VALUES (%?, %?, %?) ON DUPLICATE KEY UPDATE VARIABLE_VALUE= %?`, diff --git a/session/bootstrap_serial_test.go b/session/bootstrap_serial_test.go index 235cf9556ace2..1de04a5ca91f3 100644 --- a/session/bootstrap_serial_test.go +++ b/session/bootstrap_serial_test.go @@ -879,3 +879,52 @@ func TestReferencesPrivilegeOnColumn(t *testing.T) { mustExec(t, se, "create table t1 (a int)") mustExec(t, se, "GRANT select (a), update (a),insert(a), references(a) on t1 to issue28531") } + +func TestAnalyzeVersionUpgradeFrom300To500(t *testing.T) { + ctx := context.Background() + store, _ := createStoreAndBootstrap(t) + defer func() { require.NoError(t, store.Close()) }() + + // Upgrade from 3.0.0 to 5.1 or above. + ver300 := 33 + seV3 := createSessionAndSetID(t, store) + txn, err := store.Begin() + require.NoError(t, err) + m := meta.NewMeta(txn) + err = m.FinishBootstrap(int64(ver300)) + require.NoError(t, err) + err = txn.Commit(context.Background()) + require.NoError(t, err) + mustExec(t, seV3, fmt.Sprintf("update mysql.tidb set variable_value=%d where variable_name='tidb_server_version'", ver300)) + mustExec(t, seV3, fmt.Sprintf("delete from mysql.GLOBAL_VARIABLES where variable_name='%s'", variable.TiDBAnalyzeVersion)) + mustExec(t, seV3, "commit") + unsetStoreBootstrapped(store.UUID()) + ver, err := getBootstrapVersion(seV3) + require.NoError(t, err) + require.Equal(t, int64(ver300), ver) + + // We are now on 3.0.0; check that tidb_analyze_version does not exist. + res := mustExec(t, seV3, fmt.Sprintf("select * from mysql.GLOBAL_VARIABLES where variable_name='%s'", variable.TiDBAnalyzeVersion)) + chk := res.NewChunk(nil) + err = res.Next(ctx, chk) + require.NoError(t, err) + require.Equal(t, 0, chk.NumRows()) + + domCurVer, err := BootstrapSession(store) + require.NoError(t, err) + defer domCurVer.Close() + seCurVer := createSessionAndSetID(t, store) + ver, err = getBootstrapVersion(seCurVer) + require.NoError(t, err) + require.Equal(t, currentBootstrapVersion, ver) + + // We are now on a version no lower than 5.x; tidb_analyze_version should be 1.
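// Editor's note: the sequence above (meta.FinishBootstrap with version 33,
// rewriting tidb_server_version, deleting the global variable, then
// unsetStoreBootstrapped and BootstrapSession) simulates a cluster that was
// originally bootstrapped on 3.0.0 so that the full upgrade chain, including
// upgradeToVer80, runs against it; the checks below verify that the backfilled
// tidb_analyze_version value is visible from a new session.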
+ res = mustExec(t, seCurVer, "select @@tidb_analyze_version") + chk = res.NewChunk(nil) + err = res.Next(ctx, chk) + require.NoError(t, err) + require.Equal(t, 1, chk.NumRows()) + row := chk.GetRow(0) + require.Equal(t, 1, row.Len()) + require.Equal(t, "1", row.GetString(0)) +} diff --git a/session/session.go b/session/session.go index bfc5288a7ff4f..465de576b37c7 100644 --- a/session/session.go +++ b/session/session.go @@ -309,7 +309,8 @@ func (s *session) cleanRetryInfo() { preparedObj, ok := preparedPointer.(*plannercore.CachedPrepareStmt) if ok { preparedAst = preparedObj.PreparedAst - cacheKey = plannercore.NewPSTMTPlanCacheKey(s.sessionVars, firstStmtID, preparedAst.SchemaVersion) + bindSQL := planner.GetBindSQL4PlanCache(s, preparedAst.Stmt) + cacheKey = plannercore.NewPSTMTPlanCacheKey(s.sessionVars, firstStmtID, preparedAst.SchemaVersion, bindSQL) } } } diff --git a/sessionctx/stmtctx/stmtctx.go b/sessionctx/stmtctx/stmtctx.go index 3125ae419641e..e41eb4766b47b 100644 --- a/sessionctx/stmtctx/stmtctx.go +++ b/sessionctx/stmtctx/stmtctx.go @@ -153,6 +153,9 @@ type StatementContext struct { normalized string digest *parser.Digest } + // BindSQL used to construct the key for plan cache. It records the binding used by the stmt. + // If the binding is not used by the stmt, the value is empty + BindSQL string // planNormalized use for cache the normalized plan, avoid duplicate builds. planNormalized string planDigest *parser.Digest diff --git a/statistics/handle/handle.go b/statistics/handle/handle.go index 16bf3bbcb7b7c..0889d00e431e5 100644 --- a/statistics/handle/handle.go +++ b/statistics/handle/handle.go @@ -1481,6 +1481,15 @@ func (h *Handle) InsertExtendedStats(statsName string, colIDs []int64, tp int, t return errors.Errorf("extended statistics '%s' with same type on same columns already exists", statsName) } } + txn, err := h.mu.ctx.Txn(true) + if err != nil { + return errors.Trace(err) + } + version := txn.StartTS() + // Bump version in `mysql.stats_meta` to trigger stats cache refresh. + if _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_meta SET version = %? WHERE table_id = %?", version, tableID); err != nil { + return err + } // Remove the existing 'deleted' records. if _, err = exec.ExecuteInternal(ctx, "DELETE FROM mysql.stats_extended WHERE name = %? and table_id = %?", statsName, tableID); err != nil { return err @@ -1491,17 +1500,10 @@ func (h *Handle) InsertExtendedStats(statsName string, colIDs []int64, tp int, t // the record from the table, tidb-b should delete the cached item synchronously. While for tidb-c, it has to wait for // next `Update()` to remove the cached item then. h.removeExtendedStatsItem(tableID, statsName) - txn, err := h.mu.ctx.Txn(true) - if err != nil { - return errors.Trace(err) - } - version := txn.StartTS() const sql = "INSERT INTO mysql.stats_extended(name, type, table_id, column_ids, version, status) VALUES (%?, %?, %?, %?, %?, %?)" if _, err = exec.ExecuteInternal(ctx, sql, statsName, tp, tableID, strColIDs, version, StatsStatusInited); err != nil { return err } - // Bump version in `mysql.stats_meta` to trigger stats cache refresh. - _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_meta SET version = %? WHERE table_id = %?", version, tableID) return } @@ -1541,10 +1543,10 @@ func (h *Handle) MarkExtendedStatsDeleted(statsName string, tableID int64, ifExi return errors.Trace(err) } version := txn.StartTS() - if _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_extended SET version = %?, status = %? WHERE name = %? 
and table_id = %?", version, StatsStatusDeleted, statsName, tableID); err != nil { + if _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_meta SET version = %? WHERE table_id = %?", version, tableID); err != nil { return err } - if _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_meta SET version = %? WHERE table_id = %?", version, tableID); err != nil { + if _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_extended SET version = %?, status = %? WHERE name = %? and table_id = %?", version, StatsStatusDeleted, statsName, tableID); err != nil { return err } return nil diff --git a/testkit/testkit.go b/testkit/testkit.go index 50af5e0178a1b..c99791efe369a 100644 --- a/testkit/testkit.go +++ b/testkit/testkit.go @@ -128,6 +128,16 @@ func (tk *TestKit) HasPlan(sql string, plan string, args ...interface{}) bool { return false } +// HasPlan4ExplainFor checks if the result execution plan contains specific plan. +func (tk *TestKit) HasPlan4ExplainFor(result *Result, plan string) bool { + for i := range result.rows { + if strings.Contains(result.rows[i][0], plan) { + return true + } + } + return false +} + // Exec executes a sql statement using the prepared stmt API func (tk *TestKit) Exec(sql string, args ...interface{}) (sqlexec.RecordSet, error) { ctx := context.Background() @@ -228,6 +238,20 @@ func (tk *TestKit) MustUseIndex(sql string, index string, args ...interface{}) b return false } +// MustUseIndex4ExplainFor checks if the result execution plan contains specific index(es). +func (tk *TestKit) MustUseIndex4ExplainFor(result *Result, index string) bool { + for i := range result.rows { + // It depends on whether we enable to collect the execution info. + if strings.Contains(result.rows[i][3], "index:"+index) { + return true + } + if strings.Contains(result.rows[i][4], "index:"+index) { + return true + } + } + return false +} + // CheckExecResult checks the affected rows and the insert id after executing MustExec. func (tk *TestKit) CheckExecResult(affectedRows, insertID int64) { tk.require.Equal(int64(tk.Session().AffectedRows()), affectedRows)