From 87dbcc25a0245d4809e786560472a55b1cae3eb1 Mon Sep 17 00:00:00 2001
From: Jack Yu
Date: Wed, 15 Dec 2021 19:22:35 +0800
Subject: [PATCH 01/15] metrics: fix copr-cache metrics (#30712)

---
 metrics/grafana/tidb.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/metrics/grafana/tidb.json b/metrics/grafana/tidb.json
index 518112d04abcc..f6606d18ef4c6 100644
--- a/metrics/grafana/tidb.json
+++ b/metrics/grafana/tidb.json
@@ -6657,7 +6657,7 @@
         "steppedLine": false,
         "targets": [
           {
-            "expr": "sum(rate(tidb_distsql_copr_cache_count{tidb_cluster=\"$tidb_cluster\"}[1m])) by (type)",
+            "expr": "sum(rate(tidb_distsql_copr_cache_sum{tidb_cluster=\"$tidb_cluster\"}[1m])) by (type)",
             "format": "time_series",
             "intervalFactor": 2,
             "legendFormat": "{{type}}",

From 2e79433b71a37fd6c2665694ddeace1467d226e1 Mon Sep 17 00:00:00 2001
From: tangenta
Date: Wed, 15 Dec 2021 19:36:35 +0800
Subject: [PATCH 02/15] test: merge executor's serial tests to other tests
 (#30711)

---
 .golangci.yml                                 |    2 +-
 executor/admin_serial_test.go                 |  167 ---
 executor/admin_test.go                        |  138 ++
 executor/aggregate_serial_test.go             |  126 --
 executor/aggregate_test.go                    |  102 ++
 executor/analyze_serial_test.go               |  302 -----
 executor/analyze_test.go                      |  271 ++++
 executor/batch_point_get_serial_test.go       |   50 -
 executor/batch_point_get_test.go              |   28 +
 ...ation_serial_test.go => collation_test.go} |    0
 executor/cte_serial_test.go                   |   85 --
 executor/cte_test.go                          |   62 +
 executor/executor_pkg_serial_test.go          |  194 ---
 executor/executor_pkg_test.go                 |  168 +++
 executor/explainfor_test.go                   |    2 +-
 ...able_serial_test.go => hash_table_test.go} |    0
 executor/infoschema_reader_test.go            |    2 +-
 .../{oom_serial_test.go => oom_test.go}       |    0
 executor/partition_table_test.go              |    2 +-
 executor/prepared_serial_test.go              | 1179 -----------------
 executor/prepared_test.go                     | 1156 +++++++++++++++-
 ...epared_serial_test.go => prepared_test.go} |    0
 ...or_serial_test.go => seq_executor_test.go} |    0
 executor/show_stats_serial_test.go            |   73 -
 executor/show_stats_test.go                   |   51 +
 executor/slow_query_test.go                   |    4 +-
 ...serial_test.go => temporary_table_test.go} |    0
 executor/write_serial_test.go                 |  389 ------
 executor/write_test.go                        |  364 +++++
 29 files changed, 2345 insertions(+), 2572 deletions(-)
 delete mode 100644 executor/admin_serial_test.go
 delete mode 100644 executor/aggregate_serial_test.go
 delete mode 100644 executor/analyze_serial_test.go
 delete mode 100644 executor/batch_point_get_serial_test.go
 rename executor/{collation_serial_test.go => collation_test.go} (100%)
 delete mode 100644 executor/cte_serial_test.go
 delete mode 100644 executor/executor_pkg_serial_test.go
 rename executor/{hash_table_serial_test.go => hash_table_test.go} (100%)
 rename executor/oomtest/{oom_serial_test.go => oom_test.go} (100%)
 delete mode 100644 executor/prepared_serial_test.go
 rename executor/seqtest/{prepared_serial_test.go => prepared_test.go} (100%)
 rename executor/seqtest/{seq_executor_serial_test.go => seq_executor_test.go} (100%)
 delete mode 100644 executor/show_stats_serial_test.go
 rename executor/{temporary_table_serial_test.go => temporary_table_test.go} (100%)
 delete mode 100644 executor/write_serial_test.go

diff --git a/.golangci.yml b/.golangci.yml
index d262ed0e0457b..60670adf3c311 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,5 +1,5 @@
 run:
-  timeout: 7m
+  timeout: 10m
 linters:
   disable-all: true
   enable:

diff --git a/executor/admin_serial_test.go b/executor/admin_serial_test.go
deleted file mode 100644
index c9a60228ac7f8..0000000000000
--- a/executor/admin_serial_test.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2021 PingCAP, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package executor_test
-
-import (
-	"context"
-	"testing"
-
-	"github.com/pingcap/tidb/executor"
-	"github.com/pingcap/tidb/kv"
-	"github.com/pingcap/tidb/parser/model"
-	"github.com/pingcap/tidb/table/tables"
-	"github.com/pingcap/tidb/testkit"
-	"github.com/pingcap/tidb/types"
-	"github.com/pingcap/tidb/util/mock"
-	"github.com/stretchr/testify/require"
-)
-
-func TestAdminCheckTableFailed(t *testing.T) {
-	store, domain, clean := testkit.CreateMockStoreAndDomain(t)
-	defer clean()
-
-	tk := testkit.NewTestKit(t, store)
-	tk.MustExec("use test")
-	tk.MustExec("drop table if exists admin_test")
-	tk.MustExec("create table admin_test (c1 int, c2 int, c3 varchar(255) default '1', primary key(c1), key(c3), unique key(c2), key(c2, c3))")
-	tk.MustExec("insert admin_test (c1, c2, c3) values (-10, -20, 'y'), (-1, -10, 'z'), (1, 11, 'a'), (2, 12, 'b'), (5, 15, 'c'), (10, 20, 'd'), (20, 30, 'e')")
-
-	// Make some corrupted index. Build the index information.
-	ctx := mock.NewContext()
-	ctx.Store = store
-	is := domain.InfoSchema()
-	dbName := model.NewCIStr("test")
-	tblName := model.NewCIStr("admin_test")
-	tbl, err := is.TableByName(dbName, tblName)
-	require.NoError(t, err)
-	tblInfo := tbl.Meta()
-	idxInfo := tblInfo.Indices[1]
-	indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo)
-	sc := ctx.GetSessionVars().StmtCtx
-	tk.Session().GetSessionVars().IndexLookupSize = 3
-	tk.Session().GetSessionVars().MaxChunkSize = 3
-
-	// Reduce one row of index.
-	// Table count > index count.
-	// Index c2 is missing 11.
-	txn, err := store.Begin()
-	require.NoError(t, err)
-	err = indexOpr.Delete(sc, txn, types.MakeDatums(-10), kv.IntHandle(-1))
-	require.NoError(t, err)
-	err = txn.Commit(context.Background())
-	require.NoError(t, err)
-	err = tk.ExecToErr("admin check table admin_test")
-	require.Error(t, err)
-	require.EqualError(t, err, "[executor:8003]admin_test err:[admin:8223]index:<nil> != record:&admin.RecordData{Handle:-1, Values:[]types.Datum{types.Datum{k:0x1, decimal:0x0, length:0x0, i:-10, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}}}")
-	require.True(t, executor.ErrAdminCheckTable.Equal(err))
-	tk.MustExec("set @@tidb_redact_log=1;")
-	err = tk.ExecToErr("admin check table admin_test")
-	require.Error(t, err)
-	require.EqualError(t, err, "[executor:8003]admin_test err:[admin:8223]index:\"?\" != record:\"?\"")
-	tk.MustExec("set @@tidb_redact_log=0;")
-	r := tk.MustQuery("admin recover index admin_test c2")
-	r.Check(testkit.Rows("1 7"))
-	tk.MustExec("admin check table admin_test")
-
-	// Add one row of index.
-	// Table count < index count.
-	// Index c2 has one more values than table data: 0, and the handle 0 hasn't correlative record.
-	txn, err = store.Begin()
-	require.NoError(t, err)
-	_, err = indexOpr.Create(ctx, txn, types.MakeDatums(0), kv.IntHandle(0), nil)
-	require.NoError(t, err)
-	err = txn.Commit(context.Background())
-	require.NoError(t, err)
-	err = tk.ExecToErr("admin check table admin_test")
-	require.Error(t, err)
-	require.EqualError(t, err, "[executor:8133]handle 0, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:0, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:<nil>")
-	tk.MustExec("set @@tidb_redact_log=1;")
-	err = tk.ExecToErr("admin check table admin_test")
-	require.Error(t, err)
-	require.EqualError(t, err, "[executor:8133]handle \"?\", index:\"?\" != record:\"?\"")
-	tk.MustExec("set @@tidb_redact_log=0;")
-
-	// Add one row of index.
-	// Table count < index count.
-	// Index c2 has two more values than table data: 10, 13, and these handles have correlative record.
-	txn, err = store.Begin()
-	require.NoError(t, err)
-	err = indexOpr.Delete(sc, txn, types.MakeDatums(0), kv.IntHandle(0))
-	require.NoError(t, err)
-	// Make sure the index value "19" is smaller "21". Then we scan to "19" before "21".
-	_, err = indexOpr.Create(ctx, txn, types.MakeDatums(19), kv.IntHandle(10), nil)
-	require.NoError(t, err)
-	_, err = indexOpr.Create(ctx, txn, types.MakeDatums(13), kv.IntHandle(2), nil)
-	require.NoError(t, err)
-	err = txn.Commit(context.Background())
-	require.NoError(t, err)
-	err = tk.ExecToErr("admin check table admin_test")
-	require.Error(t, err)
-	require.EqualError(t, err, "[executor:8134]col c2, handle 2, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:13, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:12, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:<nil>")
-	tk.MustExec("set @@tidb_redact_log=1;")
-	err = tk.ExecToErr("admin check table admin_test")
-	require.Error(t, err)
-	require.EqualError(t, err, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"")
-	tk.MustExec("set @@tidb_redact_log=0;")
-
-	// Table count = index count.
-	// Two indices have the same handle.
-	txn, err = store.Begin()
-	require.NoError(t, err)
-	err = indexOpr.Delete(sc, txn, types.MakeDatums(13), kv.IntHandle(2))
-	require.NoError(t, err)
-	err = indexOpr.Delete(sc, txn, types.MakeDatums(12), kv.IntHandle(2))
-	require.NoError(t, err)
-	err = txn.Commit(context.Background())
-	require.NoError(t, err)
-	err = tk.ExecToErr("admin check table admin_test")
-	require.Error(t, err)
-	require.EqualError(t, err, "[executor:8134]col c2, handle 10, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:19, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:20, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:<nil>")
-	tk.MustExec("set @@tidb_redact_log=1;")
-	err = tk.ExecToErr("admin check table admin_test")
-	require.Error(t, err)
-	require.EqualError(t, err, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"")
-	tk.MustExec("set @@tidb_redact_log=0;")
-
-	// Table count = index count.
-	// Index c2 has one line of data is 19, the corresponding table data is 20.
-	txn, err = store.Begin()
-	require.NoError(t, err)
-	_, err = indexOpr.Create(ctx, txn, types.MakeDatums(12), kv.IntHandle(2), nil)
-	require.NoError(t, err)
-	err = indexOpr.Delete(sc, txn, types.MakeDatums(20), kv.IntHandle(10))
-	require.NoError(t, err)
-	err = txn.Commit(context.Background())
-	require.NoError(t, err)
-	err = tk.ExecToErr("admin check table admin_test")
-	require.Error(t, err)
-	require.EqualError(t, err, "[executor:8134]col c2, handle 10, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:19, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:20, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:<nil>")
-	tk.MustExec("set @@tidb_redact_log=1;")
-	err = tk.ExecToErr("admin check table admin_test")
-	require.Error(t, err)
-	require.EqualError(t, err, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"")
-	tk.MustExec("set @@tidb_redact_log=0;")
-
-	// Recover records.
-	txn, err = store.Begin()
-	require.NoError(t, err)
-	err = indexOpr.Delete(sc, txn, types.MakeDatums(19), kv.IntHandle(10))
-	require.NoError(t, err)
-	_, err = indexOpr.Create(ctx, txn, types.MakeDatums(20), kv.IntHandle(10), nil)
-	require.NoError(t, err)
-	err = txn.Commit(context.Background())
-	require.NoError(t, err)
-	tk.MustExec("admin check table admin_test")
-}
diff --git a/executor/admin_test.go b/executor/admin_test.go
index 2257060782b41..72aa436babe9b 100644
--- a/executor/admin_test.go
+++ b/executor/admin_test.go
@@ -1120,3 +1120,141 @@ func TestAdminCheckWithSnapshot(t *testing.T) {
 	tk.MustExec("admin check index admin_t_s a;")
 	tk.MustExec("drop table if exists admin_t_s")
 }
+
+func TestAdminCheckTableFailed(t *testing.T) {
+	store, domain, clean := testkit.CreateMockStoreAndDomain(t)
+	defer clean()
+
+	tk := testkit.NewTestKit(t, store)
+	tk.MustExec("use test")
+	tk.MustExec("drop table if exists admin_test")
+	tk.MustExec("create table admin_test (c1 int, c2 int, c3 varchar(255) default '1', primary key(c1), key(c3), unique key(c2), key(c2, c3))")
+	tk.MustExec("insert admin_test (c1, c2, c3) values (-10, -20, 'y'), (-1, -10, 'z'), (1, 11, 'a'), (2, 12, 'b'), (5, 15, 'c'), (10, 20, 'd'), (20, 30, 'e')")
+
+	// Make some corrupted index. Build the index information.
+	ctx := mock.NewContext()
+	ctx.Store = store
+	is := domain.InfoSchema()
+	dbName := model.NewCIStr("test")
+	tblName := model.NewCIStr("admin_test")
+	tbl, err := is.TableByName(dbName, tblName)
+	require.NoError(t, err)
+	tblInfo := tbl.Meta()
+	idxInfo := tblInfo.Indices[1]
+	indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo)
+	sc := ctx.GetSessionVars().StmtCtx
+	tk.Session().GetSessionVars().IndexLookupSize = 3
+	tk.Session().GetSessionVars().MaxChunkSize = 3
+
+	// Reduce one row of index.
+	// Table count > index count.
+	// Index c2 is missing 11.
+	txn, err := store.Begin()
+	require.NoError(t, err)
+	err = indexOpr.Delete(sc, txn, types.MakeDatums(-10), kv.IntHandle(-1))
+	require.NoError(t, err)
+	err = txn.Commit(context.Background())
+	require.NoError(t, err)
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8003]admin_test err:[admin:8223]index:<nil> != record:&admin.RecordData{Handle:-1, Values:[]types.Datum{types.Datum{k:0x1, decimal:0x0, length:0x0, i:-10, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}}}")
+	require.True(t, executor.ErrAdminCheckTable.Equal(err))
+	tk.MustExec("set @@tidb_redact_log=1;")
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8003]admin_test err:[admin:8223]index:\"?\" != record:\"?\"")
+	tk.MustExec("set @@tidb_redact_log=0;")
+	r := tk.MustQuery("admin recover index admin_test c2")
+	r.Check(testkit.Rows("1 7"))
+	tk.MustExec("admin check table admin_test")
+
+	// Add one row of index.
+	// Table count < index count.
+	// Index c2 has one more value than the table data: 0, and handle 0 has no corresponding record.
+	txn, err = store.Begin()
+	require.NoError(t, err)
+	_, err = indexOpr.Create(ctx, txn, types.MakeDatums(0), kv.IntHandle(0), nil)
+	require.NoError(t, err)
+	err = txn.Commit(context.Background())
+	require.NoError(t, err)
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8133]handle 0, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:0, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:<nil>")
+	tk.MustExec("set @@tidb_redact_log=1;")
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8133]handle \"?\", index:\"?\" != record:\"?\"")
+	tk.MustExec("set @@tidb_redact_log=0;")
+
+	// Add one row of index.
+	// Table count < index count.
+	// Index c2 has two more values than the table data: 10 and 13, and these handles have corresponding records.
+	txn, err = store.Begin()
+	require.NoError(t, err)
+	err = indexOpr.Delete(sc, txn, types.MakeDatums(0), kv.IntHandle(0))
+	require.NoError(t, err)
+	// Make sure the index value "19" is smaller than "21". Then we scan to "19" before "21".
+	_, err = indexOpr.Create(ctx, txn, types.MakeDatums(19), kv.IntHandle(10), nil)
+	require.NoError(t, err)
+	_, err = indexOpr.Create(ctx, txn, types.MakeDatums(13), kv.IntHandle(2), nil)
+	require.NoError(t, err)
+	err = txn.Commit(context.Background())
+	require.NoError(t, err)
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8134]col c2, handle 2, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:13, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:12, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:<nil>")
+	tk.MustExec("set @@tidb_redact_log=1;")
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"")
+	tk.MustExec("set @@tidb_redact_log=0;")
+
+	// Table count = index count.
+	// Two indices have the same handle.
+	txn, err = store.Begin()
+	require.NoError(t, err)
+	err = indexOpr.Delete(sc, txn, types.MakeDatums(13), kv.IntHandle(2))
+	require.NoError(t, err)
+	err = indexOpr.Delete(sc, txn, types.MakeDatums(12), kv.IntHandle(2))
+	require.NoError(t, err)
+	err = txn.Commit(context.Background())
+	require.NoError(t, err)
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8134]col c2, handle 10, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:19, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:20, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:<nil>")
+	tk.MustExec("set @@tidb_redact_log=1;")
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"")
+	tk.MustExec("set @@tidb_redact_log=0;")
+
+	// Table count = index count.
+	// Index c2 has one index entry whose value is 19, while the corresponding table value is 20.
+	txn, err = store.Begin()
+	require.NoError(t, err)
+	_, err = indexOpr.Create(ctx, txn, types.MakeDatums(12), kv.IntHandle(2), nil)
+	require.NoError(t, err)
+	err = indexOpr.Delete(sc, txn, types.MakeDatums(20), kv.IntHandle(10))
+	require.NoError(t, err)
+	err = txn.Commit(context.Background())
+	require.NoError(t, err)
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8134]col c2, handle 10, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:19, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:20, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:<nil>")
+	tk.MustExec("set @@tidb_redact_log=1;")
+	err = tk.ExecToErr("admin check table admin_test")
+	require.Error(t, err)
+	require.EqualError(t, err, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"")
+	tk.MustExec("set @@tidb_redact_log=0;")
+
+	// Recover records.
+	txn, err = store.Begin()
+	require.NoError(t, err)
+	err = indexOpr.Delete(sc, txn, types.MakeDatums(19), kv.IntHandle(10))
+	require.NoError(t, err)
+	_, err = indexOpr.Create(ctx, txn, types.MakeDatums(20), kv.IntHandle(10), nil)
+	require.NoError(t, err)
+	err = txn.Commit(context.Background())
+	require.NoError(t, err)
+	tk.MustExec("admin check table admin_test")
+}
diff --git a/executor/aggregate_serial_test.go b/executor/aggregate_serial_test.go
deleted file mode 100644
index 114b444a91979..0000000000000
--- a/executor/aggregate_serial_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2021 PingCAP, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package executor_test - -import ( - "context" - "fmt" - "strings" - "testing" - - "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/session" - "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/util/sqlexec" - "github.com/stretchr/testify/require" -) - -func TestAggInDisk(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set tidb_hashagg_final_concurrency = 1;") - tk.MustExec("set tidb_hashagg_partial_concurrency = 1;") - tk.MustExec("set tidb_mem_quota_query = 4194304") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t(a int)") - sql := "insert into t values (0)" - for i := 1; i <= 200; i++ { - sql += fmt.Sprintf(",(%v)", i) - } - sql += ";" - tk.MustExec(sql) - rows := tk.MustQuery("desc analyze select /*+ HASH_AGG() */ avg(t1.a) from t t1 join t t2 group by t1.a, t2.a;").Rows() - for _, row := range rows { - length := len(row) - line := fmt.Sprintf("%v", row) - disk := fmt.Sprintf("%v", row[length-1]) - if strings.Contains(line, "HashAgg") { - require.False(t, strings.Contains(disk, "0 Bytes")) - require.True(t, strings.Contains(disk, "MB") || - strings.Contains(disk, "KB") || - strings.Contains(disk, "Bytes")) - } - } - - // Add code cover - // Test spill chunk. Add a line to avoid tmp spill chunk is always full. - tk.MustExec("insert into t values(0)") - tk.MustQuery("select sum(tt.b) from ( select /*+ HASH_AGG() */ avg(t1.a) as b from t t1 join t t2 group by t1.a, t2.a) as tt").Check( - testkit.Rows("4040100.0000")) - // Test no groupby and no data. - tk.MustExec("drop table t;") - tk.MustExec("create table t(c int, c1 int);") - tk.MustQuery("select /*+ HASH_AGG() */ count(c) from t;").Check(testkit.Rows("0")) - tk.MustQuery("select /*+ HASH_AGG() */ count(c) from t group by c1;").Check(testkit.Rows()) -} - -func TestRandomPanicAggConsume(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@tidb_max_chunk_size=32") - tk.MustExec("set @@tidb_init_chunk_size=1") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int)") - for i := 0; i <= 1000; i++ { - tk.MustExec(fmt.Sprintf("insert into t values(%v),(%v),(%v)", i, i, i)) - } - - fpName := "github.com/pingcap/tidb/executor/ConsumeRandomPanic" - require.NoError(t, failpoint.Enable(fpName, "5%panic(\"ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]\")")) - defer func() { - require.NoError(t, failpoint.Disable(fpName)) - }() - - // Test 10 times panic for each AggExec. - var res sqlexec.RecordSet - for i := 1; i <= 10; i++ { - var err error - for err == nil { - // Test paralleled hash agg. - res, err = tk.Exec("select /*+ HASH_AGG() */ count(a) from t group by a") - if err == nil { - _, err = session.GetRows4Test(context.Background(), tk.Session(), res) - require.NoError(t, res.Close()) - } - } - require.EqualError(t, err, "failpoint panic: ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]") - - err = nil - for err == nil { - // Test unparalleled hash agg. - res, err = tk.Exec("select /*+ HASH_AGG() */ count(distinct a) from t") - if err == nil { - _, err = session.GetRows4Test(context.Background(), tk.Session(), res) - require.NoError(t, res.Close()) - } - } - require.EqualError(t, err, "failpoint panic: ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]") - - err = nil - for err == nil { - // Test stream agg. 
- res, err = tk.Exec("select /*+ STREAM_AGG() */ count(a) from t") - if err == nil { - _, err = session.GetRows4Test(context.Background(), tk.Session(), res) - require.NoError(t, res.Close()) - } - } - require.EqualError(t, err, "failpoint panic: ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]") - } -} diff --git a/executor/aggregate_test.go b/executor/aggregate_test.go index 25b9bd96840e9..a99d7e71a69f4 100644 --- a/executor/aggregate_test.go +++ b/executor/aggregate_test.go @@ -15,6 +15,7 @@ package executor_test import ( + "context" "fmt" "math" "math/rand" @@ -26,12 +27,15 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/errors" + "github.com/pingcap/failpoint" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/parser/terror" plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/testkit/testdata" + "github.com/pingcap/tidb/util/sqlexec" "github.com/stretchr/testify/require" ) @@ -1494,3 +1498,101 @@ func TestIssue23314(t *testing.T) { res := tk.MustQuery("select col1 from t1 group by col1") res.Check(testkit.Rows("16:40:20.01")) } + +func TestAggInDisk(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set tidb_hashagg_final_concurrency = 1;") + tk.MustExec("set tidb_hashagg_partial_concurrency = 1;") + tk.MustExec("set tidb_mem_quota_query = 4194304") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t(a int)") + sql := "insert into t values (0)" + for i := 1; i <= 200; i++ { + sql += fmt.Sprintf(",(%v)", i) + } + sql += ";" + tk.MustExec(sql) + rows := tk.MustQuery("desc analyze select /*+ HASH_AGG() */ avg(t1.a) from t t1 join t t2 group by t1.a, t2.a;").Rows() + for _, row := range rows { + length := len(row) + line := fmt.Sprintf("%v", row) + disk := fmt.Sprintf("%v", row[length-1]) + if strings.Contains(line, "HashAgg") { + require.False(t, strings.Contains(disk, "0 Bytes")) + require.True(t, strings.Contains(disk, "MB") || + strings.Contains(disk, "KB") || + strings.Contains(disk, "Bytes")) + } + } + + // Add code cover + // Test spill chunk. Add a line to avoid tmp spill chunk is always full. + tk.MustExec("insert into t values(0)") + tk.MustQuery("select sum(tt.b) from ( select /*+ HASH_AGG() */ avg(t1.a) as b from t t1 join t t2 group by t1.a, t2.a) as tt").Check( + testkit.Rows("4040100.0000")) + // Test no groupby and no data. 
+ tk.MustExec("drop table t;") + tk.MustExec("create table t(c int, c1 int);") + tk.MustQuery("select /*+ HASH_AGG() */ count(c) from t;").Check(testkit.Rows("0")) + tk.MustQuery("select /*+ HASH_AGG() */ count(c) from t group by c1;").Check(testkit.Rows()) +} + +func TestRandomPanicAggConsume(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@tidb_max_chunk_size=32") + tk.MustExec("set @@tidb_init_chunk_size=1") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int)") + for i := 0; i <= 1000; i++ { + tk.MustExec(fmt.Sprintf("insert into t values(%v),(%v),(%v)", i, i, i)) + } + + fpName := "github.com/pingcap/tidb/executor/ConsumeRandomPanic" + require.NoError(t, failpoint.Enable(fpName, "5%panic(\"ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]\")")) + defer func() { + require.NoError(t, failpoint.Disable(fpName)) + }() + + // Test 10 times panic for each AggExec. + var res sqlexec.RecordSet + for i := 1; i <= 10; i++ { + var err error + for err == nil { + // Test paralleled hash agg. + res, err = tk.Exec("select /*+ HASH_AGG() */ count(a) from t group by a") + if err == nil { + _, err = session.GetRows4Test(context.Background(), tk.Session(), res) + require.NoError(t, res.Close()) + } + } + require.EqualError(t, err, "failpoint panic: ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]") + + err = nil + for err == nil { + // Test unparalleled hash agg. + res, err = tk.Exec("select /*+ HASH_AGG() */ count(distinct a) from t") + if err == nil { + _, err = session.GetRows4Test(context.Background(), tk.Session(), res) + require.NoError(t, res.Close()) + } + } + require.EqualError(t, err, "failpoint panic: ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]") + + err = nil + for err == nil { + // Test stream agg. + res, err = tk.Exec("select /*+ STREAM_AGG() */ count(a) from t") + if err == nil { + _, err = session.GetRows4Test(context.Background(), tk.Session(), res) + require.NoError(t, res.Close()) + } + } + require.EqualError(t, err, "failpoint panic: ERROR 1105 (HY000): Out Of Memory Quota![conn_id=1]") + } +} diff --git a/executor/analyze_serial_test.go b/executor/analyze_serial_test.go deleted file mode 100644 index 3cb412c55b65a..0000000000000 --- a/executor/analyze_serial_test.go +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package executor_test - -import ( - "testing" - - "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/domain" - "github.com/pingcap/tidb/parser/model" - "github.com/pingcap/tidb/session" - "github.com/pingcap/tidb/statistics" - "github.com/pingcap/tidb/statistics/handle" - "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/types" - "github.com/pingcap/tidb/util/codec" - "github.com/pingcap/tidb/util/collate" - "github.com/stretchr/testify/require" -) - -func TestFastAnalyze4GlobalStats(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec(`create database if not exists test_fast_gstats`) - tk.MustExec("use test_fast_gstats") - tk.MustExec("set @@session.tidb_enable_fast_analyze=1") - tk.MustExec("set @@session.tidb_build_stats_concurrency=1") - // test fast analyze in dynamic mode - tk.MustExec("set @@session.tidb_analyze_version = 2;") - tk.MustExec("set @@session.tidb_partition_prune_mode = 'dynamic';") - tk.MustExec("drop table if exists test_fast_gstats;") - tk.MustExec("create table test_fast_gstats(a int, b int) PARTITION BY HASH(a) PARTITIONS 2;") - tk.MustExec("insert into test_fast_gstats values(1,1),(3,3),(4,4),(2,2),(5,5);") - err := tk.ExecToErr("analyze table test_fast_gstats;") - require.EqualError(t, err, "Fast analyze hasn't reached General Availability and only support analyze version 1 currently.") -} - -func TestAnalyzeIndex(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1 (id int, v int, primary key(id), index k(v))") - tk.MustExec("insert into t1(id, v) values(1, 2), (2, 2), (3, 2), (4, 2), (5, 1), (6, 3), (7, 4)") - tk.MustExec("set @@tidb_analyze_version=1") - tk.MustExec("analyze table t1 index k") - require.Greater(t, len(tk.MustQuery("show stats_buckets where table_name = 't1' and column_name = 'k' and is_index = 1").Rows()), 0) - tk.MustExec("set @@tidb_analyze_version=default") - tk.MustExec("analyze table t1") - require.Greater(t, len(tk.MustQuery("show stats_topn where table_name = 't1' and column_name = 'k' and is_index = 1").Rows()), 0) - - func() { - defer tk.MustExec("set @@session.tidb_enable_fast_analyze=0") - tk.MustExec("drop stats t1") - tk.MustExec("set @@session.tidb_enable_fast_analyze=1") - tk.MustExec("set @@tidb_analyze_version=1") - tk.MustExec("analyze table t1 index k") - require.Greater(t, len(tk.MustQuery("show stats_buckets where table_name = 't1' and column_name = 'k' and is_index = 1").Rows()), 1) - }() -} - -func TestAnalyzeIncremental(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - dom, err := session.BootstrapSession(store) - require.NoError(t, err) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@tidb_analyze_version = 1") - tk.Session().GetSessionVars().EnableStreaming = false - testAnalyzeIncremental(tk, t, dom) -} - -func TestAnalyzeIncrementalStreaming(t *testing.T) { - t.Skip("unistore hasn't support streaming yet.") - store, clean := testkit.CreateMockStore(t) - dom, err := session.BootstrapSession(store) - require.NoError(t, err) - - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.Session().GetSessionVars().EnableStreaming = true - testAnalyzeIncremental(tk, t, dom) -} - -func testAnalyzeIncremental(tk *testkit.TestKit, t *testing.T, dom *domain.Domain) { - 
tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int, b int, primary key(a), index idx(b))") - tk.MustExec("analyze incremental table t index") - tk.MustQuery("show stats_buckets").Check(testkit.Rows()) - tk.MustExec("insert into t values (1,1)") - tk.MustExec("analyze incremental table t index") - tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t idx 1 0 1 1 1 1 0")) - tk.MustExec("insert into t values (2,2)") - tk.MustExec("analyze incremental table t index") - tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 2 1 2 2 0", "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0")) - tk.MustExec("analyze incremental table t index") - // Result should not change. - tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 2 1 2 2 0", "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0")) - - // Test analyze incremental with feedback. - tk.MustExec("insert into t values (3,3)") - oriProbability := statistics.FeedbackProbability.Load() - oriMinLogCount := handle.MinLogScanCount.Load() - defer func() { - statistics.FeedbackProbability.Store(oriProbability) - handle.MinLogScanCount.Store(oriMinLogCount) - }() - statistics.FeedbackProbability.Store(1) - handle.MinLogScanCount.Store(0) - is := dom.InfoSchema() - table, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) - require.NoError(t, err) - tblInfo := table.Meta() - tk.MustQuery("select * from t use index(idx) where b = 3") - tk.MustQuery("select * from t where a > 1") - h := dom.StatsHandle() - require.NoError(t, h.DumpStatsDeltaToKV(handle.DumpAll)) - require.NoError(t, h.DumpStatsFeedbackToKV()) - require.NoError(t, h.HandleUpdateStats(is)) - require.NoError(t, h.Update(is)) - require.NoError(t, h.LoadNeededHistograms()) - tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 3 0 2 2147483647 0", "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0")) - tblStats := h.GetTableStats(tblInfo) - val, err := codec.EncodeKey(tk.Session().GetSessionVars().StmtCtx, nil, types.NewIntDatum(3)) - require.NoError(t, err) - require.Equal(t, uint64(1), tblStats.Indices[tblInfo.Indices[0].ID].QueryBytes(val)) - require.False(t, statistics.IsAnalyzed(tblStats.Indices[tblInfo.Indices[0].ID].Flag)) - require.False(t, statistics.IsAnalyzed(tblStats.Columns[tblInfo.Columns[0].ID].Flag)) - - tk.MustExec("analyze incremental table t index") - require.NoError(t, h.LoadNeededHistograms()) - tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 2 1 2 2 0", "test t a 0 2 3 1 3 3 0", - "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0", "test t idx 1 2 3 1 3 3 0")) - tblStats = h.GetTableStats(tblInfo) - require.Equal(t, uint64(1), tblStats.Indices[tblInfo.Indices[0].ID].QueryBytes(val)) - - // test analyzeIndexIncremental for global-level stats; - tk.MustExec("set @@session.tidb_analyze_version = 1;") - tk.MustQuery("select @@tidb_analyze_version").Check(testkit.Rows("1")) - tk.MustExec("set @@tidb_partition_prune_mode = 'static';") - tk.MustExec("drop table if exists t;") - tk.MustExec(`create table t (a int, b int, primary key(a), index idx(b)) partition by range (a) ( - partition p0 values less than (10), - partition p1 values less than (20), - partition p2 values less than (30) - );`) - tk.MustExec("analyze incremental table t index") - require.NoError(t, h.LoadNeededHistograms()) - 
tk.MustQuery("show stats_buckets").Check(testkit.Rows()) - tk.MustExec("insert into t values (1,1)") - tk.MustExec("analyze incremental table t index") - tk.MustQuery("show warnings").Check(testkit.Rows()) // no warning - require.NoError(t, h.LoadNeededHistograms()) - tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t p0 a 0 0 1 1 1 1 0", "test t p0 idx 1 0 1 1 1 1 0")) - tk.MustExec("insert into t values (2,2)") - tk.MustExec("analyze incremental table t index") - require.NoError(t, h.LoadNeededHistograms()) - tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t p0 a 0 0 1 1 1 1 0", "test t p0 a 0 1 2 1 2 2 0", "test t p0 idx 1 0 1 1 1 1 0", "test t p0 idx 1 1 2 1 2 2 0")) - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic';") - tk.MustExec("insert into t values (11,11)") - err = tk.ExecToErr("analyze incremental table t index") - require.Equal(t, "[stats]: global statistics for partitioned tables unavailable in ANALYZE INCREMENTAL", err.Error()) -} - -func TestIssue27429(t *testing.T) { - collate.SetNewCollationEnabledForTest(true) - defer collate.SetNewCollationEnabledForTest(false) - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table test.t(id int, value varchar(20) charset utf8mb4 collate utf8mb4_general_ci, value1 varchar(20) charset utf8mb4 collate utf8mb4_bin)") - tk.MustExec("insert into test.t values (1, 'abc', 'abc '),(4, 'Abc', 'abc'),(3,'def', 'def ');") - - tk.MustQuery("select upper(group_concat(distinct value order by 1)) from test.t;").Check(testkit.Rows("ABC,DEF")) - tk.MustQuery("select upper(group_concat(distinct value)) from test.t;").Check(testkit.Rows("ABC,DEF")) -} - -func TestIssue20874(t *testing.T) { - collate.SetNewCollationEnabledForTest(true) - defer collate.SetNewCollationEnabledForTest(false) - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("delete from mysql.stats_histograms") - tk.MustExec("create table t (a char(10) collate utf8mb4_unicode_ci not null, b char(20) collate utf8mb4_general_ci not null, key idxa(a), key idxb(b))") - tk.MustExec("insert into t values ('#', 'C'), ('$', 'c'), ('a', 'a')") - tk.MustExec("set @@tidb_analyze_version=1") - tk.MustExec("analyze table t") - tk.MustQuery("show stats_buckets where db_name = 'test' and table_name = 't'").Sort().Check(testkit.Rows( - "test t a 0 0 1 1 \x02\xd2 \x02\xd2 0", - "test t a 0 1 2 1 \x0e\x0f \x0e\x0f 0", - "test t a 0 2 3 1 \x0e3 \x0e3 0", - "test t b 0 0 1 1 \x00A \x00A 0", - "test t b 0 1 3 2 \x00C \x00C 0", - "test t idxa 1 0 1 1 \x02\xd2 \x02\xd2 0", - "test t idxa 1 1 2 1 \x0e\x0f \x0e\x0f 0", - "test t idxa 1 2 3 1 \x0e3 \x0e3 0", - "test t idxb 1 0 1 1 \x00A \x00A 0", - "test t idxb 1 1 3 2 \x00C \x00C 0", - )) - tk.MustQuery("select is_index, hist_id, distinct_count, null_count, tot_col_size, stats_ver, correlation from mysql.stats_histograms").Sort().Check(testkit.Rows( - "0 1 3 0 9 1 1", - "0 2 2 0 9 1 -0.5", - "1 1 3 0 0 1 0", - "1 2 2 0 0 1 0", - )) - tk.MustExec("set @@tidb_analyze_version=2") - tk.MustExec("analyze table t") - tk.MustQuery("show stats_topn where db_name = 'test' and table_name = 't'").Sort().Check(testkit.Rows( - "test t a 0 \x02\xd2 1", - "test t a 0 \x0e\x0f 1", - "test t a 0 \x0e3 1", - "test t b 0 \x00A 1", - "test t b 0 \x00C 2", - "test t idxa 1 \x02\xd2 1", - "test 
t idxa 1 \x0e\x0f 1", - "test t idxa 1 \x0e3 1", - "test t idxb 1 \x00A 1", - "test t idxb 1 \x00C 2", - )) - tk.MustQuery("select is_index, hist_id, distinct_count, null_count, tot_col_size, stats_ver, correlation from mysql.stats_histograms").Sort().Check(testkit.Rows( - "0 1 3 0 6 2 1", - "0 2 2 0 6 2 -0.5", - "1 1 3 0 6 2 0", - "1 2 2 0 6 2 0", - )) -} - -func TestAnalyzeClusteredIndexPrimary(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t0") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t0(a varchar(20), primary key(a) clustered)") - tk.MustExec("create table t1(a varchar(20), primary key(a))") - tk.MustExec("insert into t0 values('1111')") - tk.MustExec("insert into t1 values('1111')") - tk.MustExec("set @@session.tidb_analyze_version = 1") - tk.MustExec("analyze table t0 index primary") - tk.MustExec("analyze table t1 index primary") - tk.MustQuery("show stats_buckets").Check(testkit.Rows( - "test t0 PRIMARY 1 0 1 1 1111 1111 0", - "test t1 PRIMARY 1 0 1 1 1111 1111 0")) - tk.MustExec("set @@session.tidb_analyze_version = 2") - tk.MustExec("analyze table t0") - tk.MustExec("analyze table t1") - tk.MustQuery("show stats_topn").Sort().Check(testkit.Rows(""+ - "test t0 PRIMARY 1 1111 1", - "test t0 a 0 1111 1", - "test t1 PRIMARY 1 1111 1", - "test t1 a 0 1111 1")) -} - -func TestAnalyzeSamplingWorkPanic(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@session.tidb_analyze_version = 2") - tk.MustExec("create table t(a int)") - tk.MustExec("insert into t values(1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), (12)") - tk.MustExec("split table t between (-9223372036854775808) and (9223372036854775807) regions 12") - - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingBuildWorkerPanic", "return(1)")) - err := tk.ExecToErr("analyze table t") - require.NotNil(t, err) - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingBuildWorkerPanic")) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingMergeWorkerPanic", "return(1)")) - err = tk.ExecToErr("analyze table t") - require.NotNil(t, err) - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingMergeWorkerPanic")) -} diff --git a/executor/analyze_test.go b/executor/analyze_test.go index 29d2aca24ee68..3ee10fe9a3626 100644 --- a/executor/analyze_test.go +++ b/executor/analyze_test.go @@ -962,3 +962,274 @@ func TestAdjustSampleRateNote(t *testing.T) { tk.MustExec("analyze table t") tk.MustQuery("show warnings").Check(testkit.Rows("Note 1105 Analyze use auto adjusted sample rate 1.000000 for table test.t.")) } + +func TestFastAnalyze4GlobalStats(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec(`create database if not exists test_fast_gstats`) + tk.MustExec("use test_fast_gstats") + tk.MustExec("set @@session.tidb_enable_fast_analyze=1") + tk.MustExec("set @@session.tidb_build_stats_concurrency=1") + // test fast analyze in dynamic mode + tk.MustExec("set @@session.tidb_analyze_version = 2;") + tk.MustExec("set @@session.tidb_partition_prune_mode = 'dynamic';") + tk.MustExec("drop table if exists test_fast_gstats;") + tk.MustExec("create table 
test_fast_gstats(a int, b int) PARTITION BY HASH(a) PARTITIONS 2;") + tk.MustExec("insert into test_fast_gstats values(1,1),(3,3),(4,4),(2,2),(5,5);") + err := tk.ExecToErr("analyze table test_fast_gstats;") + require.EqualError(t, err, "Fast analyze hasn't reached General Availability and only support analyze version 1 currently.") +} + +func TestAnalyzeIndex(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1 (id int, v int, primary key(id), index k(v))") + tk.MustExec("insert into t1(id, v) values(1, 2), (2, 2), (3, 2), (4, 2), (5, 1), (6, 3), (7, 4)") + tk.MustExec("set @@tidb_analyze_version=1") + tk.MustExec("analyze table t1 index k") + require.Greater(t, len(tk.MustQuery("show stats_buckets where table_name = 't1' and column_name = 'k' and is_index = 1").Rows()), 0) + tk.MustExec("set @@tidb_analyze_version=default") + tk.MustExec("analyze table t1") + require.Greater(t, len(tk.MustQuery("show stats_topn where table_name = 't1' and column_name = 'k' and is_index = 1").Rows()), 0) + + func() { + defer tk.MustExec("set @@session.tidb_enable_fast_analyze=0") + tk.MustExec("drop stats t1") + tk.MustExec("set @@session.tidb_enable_fast_analyze=1") + tk.MustExec("set @@tidb_analyze_version=1") + tk.MustExec("analyze table t1 index k") + require.Greater(t, len(tk.MustQuery("show stats_buckets where table_name = 't1' and column_name = 'k' and is_index = 1").Rows()), 1) + }() +} + +func TestAnalyzeIncremental(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + dom, err := session.BootstrapSession(store) + require.NoError(t, err) + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@tidb_analyze_version = 1") + tk.Session().GetSessionVars().EnableStreaming = false + testAnalyzeIncremental(tk, t, dom) +} + +func TestAnalyzeIncrementalStreaming(t *testing.T) { + t.Skip("unistore hasn't support streaming yet.") + store, clean := testkit.CreateMockStore(t) + dom, err := session.BootstrapSession(store) + require.NoError(t, err) + + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.Session().GetSessionVars().EnableStreaming = true + testAnalyzeIncremental(tk, t, dom) +} + +func testAnalyzeIncremental(tk *testkit.TestKit, t *testing.T, dom *domain.Domain) { + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, b int, primary key(a), index idx(b))") + tk.MustExec("analyze incremental table t index") + tk.MustQuery("show stats_buckets").Check(testkit.Rows()) + tk.MustExec("insert into t values (1,1)") + tk.MustExec("analyze incremental table t index") + tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t idx 1 0 1 1 1 1 0")) + tk.MustExec("insert into t values (2,2)") + tk.MustExec("analyze incremental table t index") + tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 2 1 2 2 0", "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0")) + tk.MustExec("analyze incremental table t index") + // Result should not change. + tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 2 1 2 2 0", "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0")) + + // Test analyze incremental with feedback. 
+ tk.MustExec("insert into t values (3,3)") + oriProbability := statistics.FeedbackProbability.Load() + oriMinLogCount := handle.MinLogScanCount.Load() + defer func() { + statistics.FeedbackProbability.Store(oriProbability) + handle.MinLogScanCount.Store(oriMinLogCount) + }() + statistics.FeedbackProbability.Store(1) + handle.MinLogScanCount.Store(0) + is := dom.InfoSchema() + table, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + require.NoError(t, err) + tblInfo := table.Meta() + tk.MustQuery("select * from t use index(idx) where b = 3") + tk.MustQuery("select * from t where a > 1") + h := dom.StatsHandle() + require.NoError(t, h.DumpStatsDeltaToKV(handle.DumpAll)) + require.NoError(t, h.DumpStatsFeedbackToKV()) + require.NoError(t, h.HandleUpdateStats(is)) + require.NoError(t, h.Update(is)) + require.NoError(t, h.LoadNeededHistograms()) + tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 3 0 2 2147483647 0", "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0")) + tblStats := h.GetTableStats(tblInfo) + val, err := codec.EncodeKey(tk.Session().GetSessionVars().StmtCtx, nil, types.NewIntDatum(3)) + require.NoError(t, err) + require.Equal(t, uint64(1), tblStats.Indices[tblInfo.Indices[0].ID].QueryBytes(val)) + require.False(t, statistics.IsAnalyzed(tblStats.Indices[tblInfo.Indices[0].ID].Flag)) + require.False(t, statistics.IsAnalyzed(tblStats.Columns[tblInfo.Columns[0].ID].Flag)) + + tk.MustExec("analyze incremental table t index") + require.NoError(t, h.LoadNeededHistograms()) + tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t a 0 0 1 1 1 1 0", "test t a 0 1 2 1 2 2 0", "test t a 0 2 3 1 3 3 0", + "test t idx 1 0 1 1 1 1 0", "test t idx 1 1 2 1 2 2 0", "test t idx 1 2 3 1 3 3 0")) + tblStats = h.GetTableStats(tblInfo) + require.Equal(t, uint64(1), tblStats.Indices[tblInfo.Indices[0].ID].QueryBytes(val)) + + // test analyzeIndexIncremental for global-level stats; + tk.MustExec("set @@session.tidb_analyze_version = 1;") + tk.MustQuery("select @@tidb_analyze_version").Check(testkit.Rows("1")) + tk.MustExec("set @@tidb_partition_prune_mode = 'static';") + tk.MustExec("drop table if exists t;") + tk.MustExec(`create table t (a int, b int, primary key(a), index idx(b)) partition by range (a) ( + partition p0 values less than (10), + partition p1 values less than (20), + partition p2 values less than (30) + );`) + tk.MustExec("analyze incremental table t index") + require.NoError(t, h.LoadNeededHistograms()) + tk.MustQuery("show stats_buckets").Check(testkit.Rows()) + tk.MustExec("insert into t values (1,1)") + tk.MustExec("analyze incremental table t index") + tk.MustQuery("show warnings").Check(testkit.Rows()) // no warning + require.NoError(t, h.LoadNeededHistograms()) + tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t p0 a 0 0 1 1 1 1 0", "test t p0 idx 1 0 1 1 1 1 0")) + tk.MustExec("insert into t values (2,2)") + tk.MustExec("analyze incremental table t index") + require.NoError(t, h.LoadNeededHistograms()) + tk.MustQuery("show stats_buckets").Check(testkit.Rows("test t p0 a 0 0 1 1 1 1 0", "test t p0 a 0 1 2 1 2 2 0", "test t p0 idx 1 0 1 1 1 1 0", "test t p0 idx 1 1 2 1 2 2 0")) + tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic';") + tk.MustExec("insert into t values (11,11)") + err = tk.ExecToErr("analyze incremental table t index") + require.Equal(t, "[stats]: global statistics for partitioned tables unavailable in ANALYZE INCREMENTAL", err.Error()) +} + +func TestIssue27429(t 
*testing.T) { + collate.SetNewCollationEnabledForTest(true) + defer collate.SetNewCollationEnabledForTest(false) + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table test.t(id int, value varchar(20) charset utf8mb4 collate utf8mb4_general_ci, value1 varchar(20) charset utf8mb4 collate utf8mb4_bin)") + tk.MustExec("insert into test.t values (1, 'abc', 'abc '),(4, 'Abc', 'abc'),(3,'def', 'def ');") + + tk.MustQuery("select upper(group_concat(distinct value order by 1)) from test.t;").Check(testkit.Rows("ABC,DEF")) + tk.MustQuery("select upper(group_concat(distinct value)) from test.t;").Check(testkit.Rows("ABC,DEF")) +} + +func TestIssue20874(t *testing.T) { + collate.SetNewCollationEnabledForTest(true) + defer collate.SetNewCollationEnabledForTest(false) + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("delete from mysql.stats_histograms") + tk.MustExec("create table t (a char(10) collate utf8mb4_unicode_ci not null, b char(20) collate utf8mb4_general_ci not null, key idxa(a), key idxb(b))") + tk.MustExec("insert into t values ('#', 'C'), ('$', 'c'), ('a', 'a')") + tk.MustExec("set @@tidb_analyze_version=1") + tk.MustExec("analyze table t") + tk.MustQuery("show stats_buckets where db_name = 'test' and table_name = 't'").Sort().Check(testkit.Rows( + "test t a 0 0 1 1 \x02\xd2 \x02\xd2 0", + "test t a 0 1 2 1 \x0e\x0f \x0e\x0f 0", + "test t a 0 2 3 1 \x0e3 \x0e3 0", + "test t b 0 0 1 1 \x00A \x00A 0", + "test t b 0 1 3 2 \x00C \x00C 0", + "test t idxa 1 0 1 1 \x02\xd2 \x02\xd2 0", + "test t idxa 1 1 2 1 \x0e\x0f \x0e\x0f 0", + "test t idxa 1 2 3 1 \x0e3 \x0e3 0", + "test t idxb 1 0 1 1 \x00A \x00A 0", + "test t idxb 1 1 3 2 \x00C \x00C 0", + )) + tk.MustQuery("select is_index, hist_id, distinct_count, null_count, tot_col_size, stats_ver, correlation from mysql.stats_histograms").Sort().Check(testkit.Rows( + "0 1 3 0 9 1 1", + "0 2 2 0 9 1 -0.5", + "1 1 3 0 0 1 0", + "1 2 2 0 0 1 0", + )) + tk.MustExec("set @@tidb_analyze_version=2") + tk.MustExec("analyze table t") + tk.MustQuery("show stats_topn where db_name = 'test' and table_name = 't'").Sort().Check(testkit.Rows( + "test t a 0 \x02\xd2 1", + "test t a 0 \x0e\x0f 1", + "test t a 0 \x0e3 1", + "test t b 0 \x00A 1", + "test t b 0 \x00C 2", + "test t idxa 1 \x02\xd2 1", + "test t idxa 1 \x0e\x0f 1", + "test t idxa 1 \x0e3 1", + "test t idxb 1 \x00A 1", + "test t idxb 1 \x00C 2", + )) + tk.MustQuery("select is_index, hist_id, distinct_count, null_count, tot_col_size, stats_ver, correlation from mysql.stats_histograms").Sort().Check(testkit.Rows( + "0 1 3 0 6 2 1", + "0 2 2 0 6 2 -0.5", + "1 1 3 0 6 2 0", + "1 2 2 0 6 2 0", + )) +} + +func TestAnalyzeClusteredIndexPrimary(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t0") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t0(a varchar(20), primary key(a) clustered)") + tk.MustExec("create table t1(a varchar(20), primary key(a))") + tk.MustExec("insert into t0 values('1111')") + tk.MustExec("insert into t1 values('1111')") + tk.MustExec("set @@session.tidb_analyze_version = 1") + tk.MustExec("analyze table t0 index primary") + tk.MustExec("analyze table t1 index primary") + 
tk.MustQuery("show stats_buckets").Check(testkit.Rows( + "test t0 PRIMARY 1 0 1 1 1111 1111 0", + "test t1 PRIMARY 1 0 1 1 1111 1111 0")) + tk.MustExec("set @@session.tidb_analyze_version = 2") + tk.MustExec("analyze table t0") + tk.MustExec("analyze table t1") + tk.MustQuery("show stats_topn").Sort().Check(testkit.Rows(""+ + "test t0 PRIMARY 1 1111 1", + "test t0 a 0 1111 1", + "test t1 PRIMARY 1 1111 1", + "test t1 a 0 1111 1")) +} + +func TestAnalyzeSamplingWorkPanic(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@session.tidb_analyze_version = 2") + tk.MustExec("create table t(a int)") + tk.MustExec("insert into t values(1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), (12)") + tk.MustExec("split table t between (-9223372036854775808) and (9223372036854775807) regions 12") + + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingBuildWorkerPanic", "return(1)")) + err := tk.ExecToErr("analyze table t") + require.NotNil(t, err) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingBuildWorkerPanic")) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingMergeWorkerPanic", "return(1)")) + err = tk.ExecToErr("analyze table t") + require.NotNil(t, err) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/mockAnalyzeSamplingMergeWorkerPanic")) +} diff --git a/executor/batch_point_get_serial_test.go b/executor/batch_point_get_serial_test.go deleted file mode 100644 index 413af6863091e..0000000000000 --- a/executor/batch_point_get_serial_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package executor_test - -import ( - "testing" - - "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/testkit" - "github.com/stretchr/testify/require" -) - -func TestPointGetForTemporaryTable(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create global temporary table t1 (id int primary key, val int) on commit delete rows") - tk.MustExec("begin") - tk.MustExec("insert into t1 values (1,1)") - tk.MustQuery("explain format = 'brief' select * from t1 where id in (1, 2, 3)"). - Check(testkit.Rows("Batch_Point_Get 3.00 root table:t1 handle:[1 2 3], keep order:false, desc:false")) - - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/rpcServerBusy", "return(true)")) - defer func() { - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/rpcServerBusy")) - }() - - // Batch point get. - tk.MustQuery("select * from t1 where id in (1, 2, 3)").Check(testkit.Rows("1 1")) - tk.MustQuery("select * from t1 where id in (2, 3)").Check(testkit.Rows()) - - // Point get. 
- tk.MustQuery("select * from t1 where id = 1").Check(testkit.Rows("1 1")) - tk.MustQuery("select * from t1 where id = 2").Check(testkit.Rows()) -} diff --git a/executor/batch_point_get_test.go b/executor/batch_point_get_test.go index 27c035e52a433..44809a9211f90 100644 --- a/executor/batch_point_get_test.go +++ b/executor/batch_point_get_test.go @@ -21,6 +21,7 @@ import ( "testing" "time" + "github.com/pingcap/failpoint" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx/variable" @@ -352,3 +353,30 @@ func TestCacheSnapShot(t *testing.T) { require.Equal(t, batchGet[string(keys[0])], []byte("1111")) require.Equal(t, batchGet[string(keys[1])], []byte("2222")) } + +func TestPointGetForTemporaryTable(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create global temporary table t1 (id int primary key, val int) on commit delete rows") + tk.MustExec("begin") + tk.MustExec("insert into t1 values (1,1)") + tk.MustQuery("explain format = 'brief' select * from t1 where id in (1, 2, 3)"). + Check(testkit.Rows("Batch_Point_Get 3.00 root table:t1 handle:[1 2 3], keep order:false, desc:false")) + + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/rpcServerBusy", "return(true)")) + defer func() { + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/rpcServerBusy")) + }() + + // Batch point get. + tk.MustQuery("select * from t1 where id in (1, 2, 3)").Check(testkit.Rows("1 1")) + tk.MustQuery("select * from t1 where id in (2, 3)").Check(testkit.Rows()) + + // Point get. + tk.MustQuery("select * from t1 where id = 1").Check(testkit.Rows("1 1")) + tk.MustQuery("select * from t1 where id = 2").Check(testkit.Rows()) +} diff --git a/executor/collation_serial_test.go b/executor/collation_test.go similarity index 100% rename from executor/collation_serial_test.go rename to executor/collation_test.go diff --git a/executor/cte_serial_test.go b/executor/cte_serial_test.go deleted file mode 100644 index b8b04551b0c6e..0000000000000 --- a/executor/cte_serial_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
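Both the temporary-table point-get test above and the CTE spill test that follows lean on the same failpoint choreography: enable an injection point, exercise the statement, then disable the point again so a later test cannot inherit it. A minimal sketch of that pattern, assuming only the github.com/pingcap/failpoint and testify packages these files already import; the helper name enableFailpoint is illustrative, not TiDB code:

package executor_test

import (
	"testing"

	"github.com/pingcap/failpoint"
	"github.com/stretchr/testify/require"
)

// enableFailpoint switches a failpoint on for one test and registers the
// matching Disable as a test cleanup, which also works when the Enable
// happens inside a helper rather than in the test body itself.
func enableFailpoint(t *testing.T, name, expr string) {
	require.NoError(t, failpoint.Enable(name, expr))
	t.Cleanup(func() {
		require.NoError(t, failpoint.Disable(name))
	})
}

With such a helper, each Enable/defer-Disable pair in these tests collapses to a single call, e.g. enableFailpoint(t, "github.com/pingcap/tidb/store/mockstore/unistore/rpcServerBusy", "return(true)").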
- -package executor_test - -import ( - "fmt" - "math/rand" - "sort" - "testing" - - "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/config" - "github.com/pingcap/tidb/testkit" - "github.com/stretchr/testify/require" -) - -func TestSpillToDisk(t *testing.T) { - defer config.RestoreFunc()() - config.UpdateGlobal(func(conf *config.Config) { - conf.OOMUseTmpStorage = true - }) - - store, close := testkit.CreateMockStore(t) - defer close() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test;") - - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/testCTEStorageSpill", "return(true)")) - defer func() { - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/testCTEStorageSpill")) - tk.MustExec("set tidb_mem_quota_query = 1073741824;") - }() - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill", "return(true)")) - defer func() { - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill")) - }() - - // Use duplicated rows to test UNION DISTINCT. - tk.MustExec("set tidb_mem_quota_query = 1073741824;") - insertStr := "insert into t1 values(0)" - rowNum := 1000 - vals := make([]int, rowNum) - vals[0] = 0 - for i := 1; i < rowNum; i++ { - v := rand.Intn(100) - vals[i] = v - insertStr += fmt.Sprintf(", (%d)", v) - } - tk.MustExec("drop table if exists t1;") - tk.MustExec("create table t1(c1 int);") - tk.MustExec(insertStr) - tk.MustExec("set tidb_mem_quota_query = 40000;") - tk.MustExec("set cte_max_recursion_depth = 500000;") - sql := fmt.Sprintf("with recursive cte1 as ( "+ - "select c1 from t1 "+ - "union "+ - "select c1 + 1 c1 from cte1 where c1 < %d) "+ - "select c1 from cte1 order by c1;", rowNum) - rows := tk.MustQuery(sql) - - memTracker := tk.Session().GetSessionVars().StmtCtx.MemTracker - diskTracker := tk.Session().GetSessionVars().StmtCtx.DiskTracker - require.Greater(t, memTracker.MaxConsumed(), int64(0)) - require.Greater(t, diskTracker.MaxConsumed(), int64(0)) - - sort.Ints(vals) - resRows := make([]string, 0, rowNum) - for i := vals[0]; i <= rowNum; i++ { - resRows = append(resRows, fmt.Sprintf("%d", i)) - } - rows.Check(testkit.Rows(resRows...)) -} diff --git a/executor/cte_test.go b/executor/cte_test.go index bf4ec37b99095..bf6d33ede4a42 100644 --- a/executor/cte_test.go +++ b/executor/cte_test.go @@ -16,8 +16,12 @@ package executor_test import ( "fmt" + "math/rand" + "sort" "testing" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/testkit" "github.com/stretchr/testify/require" ) @@ -346,3 +350,61 @@ func TestCTEWithLimit(t *testing.T) { rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 4) select * from cte1;") rows.Check(testkit.Rows("3", "4", "3", "4")) } + +func TestSpillToDisk(t *testing.T) { + defer config.RestoreFunc()() + config.UpdateGlobal(func(conf *config.Config) { + conf.OOMUseTmpStorage = true + }) + + store, close := testkit.CreateMockStore(t) + defer close() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test;") + + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/testCTEStorageSpill", "return(true)")) + defer func() { + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/testCTEStorageSpill")) + tk.MustExec("set tidb_mem_quota_query = 1073741824;") + }() + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill", 
"return(true)")) + defer func() { + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill")) + }() + + // Use duplicated rows to test UNION DISTINCT. + tk.MustExec("set tidb_mem_quota_query = 1073741824;") + insertStr := "insert into t1 values(0)" + rowNum := 1000 + vals := make([]int, rowNum) + vals[0] = 0 + for i := 1; i < rowNum; i++ { + v := rand.Intn(100) + vals[i] = v + insertStr += fmt.Sprintf(", (%d)", v) + } + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t1(c1 int);") + tk.MustExec(insertStr) + tk.MustExec("set tidb_mem_quota_query = 40000;") + tk.MustExec("set cte_max_recursion_depth = 500000;") + sql := fmt.Sprintf("with recursive cte1 as ( "+ + "select c1 from t1 "+ + "union "+ + "select c1 + 1 c1 from cte1 where c1 < %d) "+ + "select c1 from cte1 order by c1;", rowNum) + rows := tk.MustQuery(sql) + + memTracker := tk.Session().GetSessionVars().StmtCtx.MemTracker + diskTracker := tk.Session().GetSessionVars().StmtCtx.DiskTracker + require.Greater(t, memTracker.MaxConsumed(), int64(0)) + require.Greater(t, diskTracker.MaxConsumed(), int64(0)) + + sort.Ints(vals) + resRows := make([]string, 0, rowNum) + for i := vals[0]; i <= rowNum; i++ { + resRows = append(resRows, fmt.Sprintf("%d", i)) + } + rows.Check(testkit.Rows(resRows...)) +} diff --git a/executor/executor_pkg_serial_test.go b/executor/executor_pkg_serial_test.go deleted file mode 100644 index cc2b9923d791c..0000000000000 --- a/executor/executor_pkg_serial_test.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package executor - -import ( - "context" - "testing" - - "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/config" - "github.com/pingcap/tidb/expression" - "github.com/pingcap/tidb/parser/ast" - plannerutil "github.com/pingcap/tidb/planner/util" - "github.com/pingcap/tidb/sessionctx/variable" - "github.com/pingcap/tidb/util/memory" - "github.com/pingcap/tidb/util/mock" - "github.com/stretchr/testify/require" -) - -func TestLoadDataWithDifferentEscapeChar(t *testing.T) { - tests := []struct { - input string - escapeChar byte - expected []string - }{ - { - `"{""itemRangeType"":0,""itemContainType"":0,""shopRangeType"":1,""shopJson"":""[{\""id\"":\""A1234\"",\""shopName\"":\""AAAAAA\""}]""}"`, - byte(0), // escaped by '' - []string{`{"itemRangeType":0,"itemContainType":0,"shopRangeType":1,"shopJson":"[{\"id\":\"A1234\",\"shopName\":\"AAAAAA\"}]"}`}, - }, - } - - for _, test := range tests { - ldInfo := LoadDataInfo{ - FieldsInfo: &ast.FieldsClause{ - Enclosed: '"', - Terminated: ",", - Escaped: test.escapeChar, - }, - } - got, err := ldInfo.getFieldsFromLine([]byte(test.input)) - require.NoErrorf(t, err, "failed: %s", test.input) - assertEqualStrings(t, got, test.expected) - } -} - -func TestSortSpillDisk(t *testing.T) { - defer config.RestoreFunc()() - config.UpdateGlobal(func(conf *config.Config) { - conf.OOMUseTmpStorage = true - conf.MemQuotaQuery = 1 - }) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill", "return(true)")) - defer func() { - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill")) - }() - ctx := mock.NewContext() - ctx.GetSessionVars().InitChunkSize = variable.DefMaxChunkSize - ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize - ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, -1) - cas := &sortCase{rows: 2048, orderByIdx: []int{0, 1}, ndvs: []int{0, 0}, ctx: ctx} - opt := mockDataSourceParameters{ - schema: expression.NewSchema(cas.columns()...), - rows: cas.rows, - ctx: cas.ctx, - ndvs: cas.ndvs, - } - dataSource := buildMockDataSource(opt) - exec := &SortExec{ - baseExecutor: newBaseExecutor(cas.ctx, dataSource.schema, 0, dataSource), - ByItems: make([]*plannerutil.ByItems, 0, len(cas.orderByIdx)), - schema: dataSource.schema, - } - for _, idx := range cas.orderByIdx { - exec.ByItems = append(exec.ByItems, &plannerutil.ByItems{Expr: cas.columns()[idx]}) - } - tmpCtx := context.Background() - chk := newFirstChunk(exec) - dataSource.prepareChunks() - err := exec.Open(tmpCtx) - require.NoError(t, err) - for { - err = exec.Next(tmpCtx, chk) - require.NoError(t, err) - if chk.NumRows() == 0 { - break - } - } - // Test only 1 partition and all data in memory. - require.Len(t, exec.partitionList, 1) - require.Equal(t, false, exec.partitionList[0].AlreadySpilledSafeForTest()) - require.Equal(t, 2048, exec.partitionList[0].NumRow()) - err = exec.Close() - require.NoError(t, err) - - ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, 1) - dataSource.prepareChunks() - err = exec.Open(tmpCtx) - require.NoError(t, err) - for { - err = exec.Next(tmpCtx, chk) - require.NoError(t, err) - if chk.NumRows() == 0 { - break - } - } - // Test 2 partitions and all data in disk. - // Now spilling is in parallel. - // Maybe the second add() will called before spilling, depends on - // Golang goroutine scheduling. So the result has two possibilities. 
- if len(exec.partitionList) == 2 { - require.Len(t, exec.partitionList, 2) - require.Equal(t, true, exec.partitionList[0].AlreadySpilledSafeForTest()) - require.Equal(t, true, exec.partitionList[1].AlreadySpilledSafeForTest()) - require.Equal(t, 1024, exec.partitionList[0].NumRow()) - require.Equal(t, 1024, exec.partitionList[1].NumRow()) - } else { - require.Len(t, exec.partitionList, 1) - require.Equal(t, true, exec.partitionList[0].AlreadySpilledSafeForTest()) - require.Equal(t, 2048, exec.partitionList[0].NumRow()) - } - - err = exec.Close() - require.NoError(t, err) - - ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, 24000) - dataSource.prepareChunks() - err = exec.Open(tmpCtx) - require.NoError(t, err) - for { - err = exec.Next(tmpCtx, chk) - require.NoError(t, err) - if chk.NumRows() == 0 { - break - } - } - // Test only 1 partition but spill disk. - require.Len(t, exec.partitionList, 1) - require.Equal(t, true, exec.partitionList[0].AlreadySpilledSafeForTest()) - require.Equal(t, 2048, exec.partitionList[0].NumRow()) - err = exec.Close() - require.NoError(t, err) - - // Test partition nums. - ctx = mock.NewContext() - ctx.GetSessionVars().InitChunkSize = variable.DefMaxChunkSize - ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize - ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, 16864*50) - ctx.GetSessionVars().StmtCtx.MemTracker.Consume(16864 * 45) - cas = &sortCase{rows: 20480, orderByIdx: []int{0, 1}, ndvs: []int{0, 0}, ctx: ctx} - opt = mockDataSourceParameters{ - schema: expression.NewSchema(cas.columns()...), - rows: cas.rows, - ctx: cas.ctx, - ndvs: cas.ndvs, - } - dataSource = buildMockDataSource(opt) - exec = &SortExec{ - baseExecutor: newBaseExecutor(cas.ctx, dataSource.schema, 0, dataSource), - ByItems: make([]*plannerutil.ByItems, 0, len(cas.orderByIdx)), - schema: dataSource.schema, - } - for _, idx := range cas.orderByIdx { - exec.ByItems = append(exec.ByItems, &plannerutil.ByItems{Expr: cas.columns()[idx]}) - } - tmpCtx = context.Background() - chk = newFirstChunk(exec) - dataSource.prepareChunks() - err = exec.Open(tmpCtx) - require.NoError(t, err) - for { - err = exec.Next(tmpCtx, chk) - require.NoError(t, err) - if chk.NumRows() == 0 { - break - } - } - // Don't spill too many partitions. 
- require.True(t, len(exec.partitionList) <= 4) - err = exec.Close() - require.NoError(t, err) -} diff --git a/executor/executor_pkg_test.go b/executor/executor_pkg_test.go index 5cd983fccd5f0..1b449f5fed6a6 100644 --- a/executor/executor_pkg_test.go +++ b/executor/executor_pkg_test.go @@ -23,18 +23,22 @@ import ( "time" "unsafe" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/executor/aggfuncs" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/parser/mysql" + plannerutil "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/session/txninfo" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/tableutil" @@ -397,3 +401,167 @@ func SubTestFilterTemporaryTableKeys(t *testing.T) { res := filterTemporaryTableKeys(vars, []kv.Key{tablecodec.EncodeTablePrefix(tableID), tablecodec.EncodeTablePrefix(42)}) require.Len(t, res, 1) } + +func TestLoadDataWithDifferentEscapeChar(t *testing.T) { + tests := []struct { + input string + escapeChar byte + expected []string + }{ + { + `"{""itemRangeType"":0,""itemContainType"":0,""shopRangeType"":1,""shopJson"":""[{\""id\"":\""A1234\"",\""shopName\"":\""AAAAAA\""}]""}"`, + byte(0), // escaped by '' + []string{`{"itemRangeType":0,"itemContainType":0,"shopRangeType":1,"shopJson":"[{\"id\":\"A1234\",\"shopName\":\"AAAAAA\"}]"}`}, + }, + } + + for _, test := range tests { + ldInfo := LoadDataInfo{ + FieldsInfo: &ast.FieldsClause{ + Enclosed: '"', + Terminated: ",", + Escaped: test.escapeChar, + }, + } + got, err := ldInfo.getFieldsFromLine([]byte(test.input)) + require.NoErrorf(t, err, "failed: %s", test.input) + assertEqualStrings(t, got, test.expected) + } +} + +func TestSortSpillDisk(t *testing.T) { + defer config.RestoreFunc()() + config.UpdateGlobal(func(conf *config.Config) { + conf.OOMUseTmpStorage = true + conf.MemQuotaQuery = 1 + }) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill", "return(true)")) + defer func() { + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/testSortedRowContainerSpill")) + }() + ctx := mock.NewContext() + ctx.GetSessionVars().InitChunkSize = variable.DefMaxChunkSize + ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize + ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, -1) + cas := &sortCase{rows: 2048, orderByIdx: []int{0, 1}, ndvs: []int{0, 0}, ctx: ctx} + opt := mockDataSourceParameters{ + schema: expression.NewSchema(cas.columns()...), + rows: cas.rows, + ctx: cas.ctx, + ndvs: cas.ndvs, + } + dataSource := buildMockDataSource(opt) + exec := &SortExec{ + baseExecutor: newBaseExecutor(cas.ctx, dataSource.schema, 0, dataSource), + ByItems: make([]*plannerutil.ByItems, 0, len(cas.orderByIdx)), + schema: dataSource.schema, + } + for _, idx := range cas.orderByIdx { + exec.ByItems = append(exec.ByItems, &plannerutil.ByItems{Expr: cas.columns()[idx]}) + } + tmpCtx := context.Background() + chk := newFirstChunk(exec) + dataSource.prepareChunks() + err := exec.Open(tmpCtx) + require.NoError(t, err) + for { + err = exec.Next(tmpCtx, chk) + require.NoError(t, err) + if chk.NumRows() == 0 { + break + } + } 
+	// Test only 1 partition and all data in memory.
+	require.Len(t, exec.partitionList, 1)
+	require.Equal(t, false, exec.partitionList[0].AlreadySpilledSafeForTest())
+	require.Equal(t, 2048, exec.partitionList[0].NumRow())
+	err = exec.Close()
+	require.NoError(t, err)
+
+	ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, 1)
+	dataSource.prepareChunks()
+	err = exec.Open(tmpCtx)
+	require.NoError(t, err)
+	for {
+		err = exec.Next(tmpCtx, chk)
+		require.NoError(t, err)
+		if chk.NumRows() == 0 {
+			break
+		}
+	}
+	// Test 2 partitions and all data in disk.
+	// Now spilling is in parallel.
+	// Maybe the second add() will be called before spilling, depending on
+	// Go goroutine scheduling, so the result has two possibilities.
+	if len(exec.partitionList) == 2 {
+		require.Len(t, exec.partitionList, 2)
+		require.Equal(t, true, exec.partitionList[0].AlreadySpilledSafeForTest())
+		require.Equal(t, true, exec.partitionList[1].AlreadySpilledSafeForTest())
+		require.Equal(t, 1024, exec.partitionList[0].NumRow())
+		require.Equal(t, 1024, exec.partitionList[1].NumRow())
+	} else {
+		require.Len(t, exec.partitionList, 1)
+		require.Equal(t, true, exec.partitionList[0].AlreadySpilledSafeForTest())
+		require.Equal(t, 2048, exec.partitionList[0].NumRow())
+	}
+
+	err = exec.Close()
+	require.NoError(t, err)
+
+	ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, 24000)
+	dataSource.prepareChunks()
+	err = exec.Open(tmpCtx)
+	require.NoError(t, err)
+	for {
+		err = exec.Next(tmpCtx, chk)
+		require.NoError(t, err)
+		if chk.NumRows() == 0 {
+			break
+		}
+	}
+	// Test only 1 partition but spill disk.
+	require.Len(t, exec.partitionList, 1)
+	require.Equal(t, true, exec.partitionList[0].AlreadySpilledSafeForTest())
+	require.Equal(t, 2048, exec.partitionList[0].NumRow())
+	err = exec.Close()
+	require.NoError(t, err)
+
+	// Test partition nums.
+	ctx = mock.NewContext()
+	ctx.GetSessionVars().InitChunkSize = variable.DefMaxChunkSize
+	ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize
+	ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(-1, 16864*50)
+	ctx.GetSessionVars().StmtCtx.MemTracker.Consume(16864 * 45)
+	cas = &sortCase{rows: 20480, orderByIdx: []int{0, 1}, ndvs: []int{0, 0}, ctx: ctx}
+	opt = mockDataSourceParameters{
+		schema: expression.NewSchema(cas.columns()...),
+		rows:   cas.rows,
+		ctx:    cas.ctx,
+		ndvs:   cas.ndvs,
+	}
+	dataSource = buildMockDataSource(opt)
+	exec = &SortExec{
+		baseExecutor: newBaseExecutor(cas.ctx, dataSource.schema, 0, dataSource),
+		ByItems:      make([]*plannerutil.ByItems, 0, len(cas.orderByIdx)),
+		schema:       dataSource.schema,
+	}
+	for _, idx := range cas.orderByIdx {
+		exec.ByItems = append(exec.ByItems, &plannerutil.ByItems{Expr: cas.columns()[idx]})
+	}
+	tmpCtx = context.Background()
+	chk = newFirstChunk(exec)
+	dataSource.prepareChunks()
+	err = exec.Open(tmpCtx)
+	require.NoError(t, err)
+	for {
+		err = exec.Next(tmpCtx, chk)
+		require.NoError(t, err)
+		if chk.NumRows() == 0 {
+			break
+		}
+	}
+	// Don't spill too many partitions.
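The two-branch check a few lines up enumerates both partition layouts because spilling runs in parallel and the outcome depends on goroutine scheduling. A scheduling-independent way to state the same expectation is to assert the invariants rather than the exact layouts; a sketch reusing exec, t, and the partition methods from TestSortSpillDisk above (2048 is that phase's input row count), with no new API assumed:

	// However the scheduler interleaves the workers, every partition must
	// have spilled, and together the partitions must still hold all rows.
	totalRows := 0
	for _, part := range exec.partitionList {
		require.True(t, part.AlreadySpilledSafeForTest())
		totalRows += part.NumRow()
	}
	require.Equal(t, 2048, totalRows)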
+ require.True(t, len(exec.partitionList) <= 4) + err = exec.Close() + require.NoError(t, err) +} diff --git a/executor/explainfor_test.go b/executor/explainfor_test.go index e3dbfd42d3a52..078ed7e6bb548 100644 --- a/executor/explainfor_test.go +++ b/executor/explainfor_test.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb/parser/auth" "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/session" - txninfo "github.com/pingcap/tidb/session/txninfo" + "github.com/pingcap/tidb/session/txninfo" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/israce" diff --git a/executor/hash_table_serial_test.go b/executor/hash_table_test.go similarity index 100% rename from executor/hash_table_serial_test.go rename to executor/hash_table_test.go diff --git a/executor/infoschema_reader_test.go b/executor/infoschema_reader_test.go index 0744372305cb6..53fd409f0fd2a 100644 --- a/executor/infoschema_reader_test.go +++ b/executor/infoschema_reader_test.go @@ -37,7 +37,7 @@ import ( "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/server" "github.com/pingcap/tidb/session" - txninfo "github.com/pingcap/tidb/session/txninfo" + "github.com/pingcap/tidb/session/txninfo" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/statistics/handle" diff --git a/executor/oomtest/oom_serial_test.go b/executor/oomtest/oom_test.go similarity index 100% rename from executor/oomtest/oom_serial_test.go rename to executor/oomtest/oom_test.go diff --git a/executor/partition_table_test.go b/executor/partition_table_test.go index 25c52e8c4954f..89e4311aaee09 100644 --- a/executor/partition_table_test.go +++ b/executor/partition_table_test.go @@ -2912,7 +2912,7 @@ type testOutput struct { func (s *testSuiteWithData) verifyPartitionResult(tk *testkit.TestKit, input []string, output []testOutput) { for i, tt := range input { - var isSelect bool = false + var isSelect = false if strings.HasPrefix(strings.ToLower(tt), "select ") { isSelect = true } diff --git a/executor/prepared_serial_test.go b/executor/prepared_serial_test.go deleted file mode 100644 index 8315e3dadc718..0000000000000 --- a/executor/prepared_serial_test.go +++ /dev/null @@ -1,1179 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
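Each test in the prepared-statement file deleted below opens with the same dance: remember whether the prepared-plan cache is enabled, force it on, and restore the original value on exit. A minimal sketch of that pattern as a wrapper, assuming only the plannercore functions the tests themselves call; withPlanCache is an illustrative name, not TiDB API:

// withPlanCache runs fn with the prepared-plan cache forced on and then
// restores the previous global setting, mirroring the save/set/defer
// sequence repeated at the top of every test below.
func withPlanCache(fn func()) {
	orgEnable := plannercore.PreparedPlanCacheEnabled()
	defer plannercore.SetPreparedPlanCache(orgEnable)
	plannercore.SetPreparedPlanCache(true)
	fn()
}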
- -package executor_test - -import ( - "fmt" - "strconv" - "strings" - "testing" - - "github.com/pingcap/tidb/domain" - "github.com/pingcap/tidb/parser/auth" - "github.com/pingcap/tidb/parser/model" - plannercore "github.com/pingcap/tidb/planner/core" - "github.com/pingcap/tidb/sessionctx/variable" - "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/testkit/testdata" - "github.com/pingcap/tidb/util" - "github.com/pingcap/tidb/util/israce" - "github.com/stretchr/testify/require" -) - -func TestIssue28064(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - tk.MustExec("use test") - tk.MustExec("drop table if exists t28064") - tk.MustExec("CREATE TABLE `t28064` (" + - "`a` decimal(10,0) DEFAULT NULL," + - "`b` decimal(10,0) DEFAULT NULL," + - "`c` decimal(10,0) DEFAULT NULL," + - "`d` decimal(10,0) DEFAULT NULL," + - "KEY `iabc` (`a`,`b`,`c`));") - tk.MustExec("set @a='123', @b='234', @c='345';") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("prepare stmt1 from 'select * from t28064 use index (iabc) where a = ? and b = ? and c = ?';") - - tk.MustExec("execute stmt1 using @a, @b, @c;") - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - rows := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - rows.Check(testkit.Rows("Selection_8 0.00 root eq(test.t28064.a, 123), eq(test.t28064.b, 234), eq(test.t28064.c, 345)", - "└─IndexLookUp_7 0.00 root ", - " ├─IndexRangeScan_5(Build) 0.00 cop[tikv] table:t28064, index:iabc(a, b, c) range:[123 234 345,123 234 345], keep order:false, stats:pseudo", - " └─TableRowIDScan_6(Probe) 0.00 cop[tikv] table:t28064 keep order:false, stats:pseudo")) - - tk.MustExec("execute stmt1 using @a, @b, @c;") - rows = tk.MustQuery("select @@last_plan_from_cache") - rows.Check(testkit.Rows("1")) - - tk.MustExec("execute stmt1 using @a, @b, @c;") - rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - rows.Check(testkit.Rows("Selection_8 0.00 root eq(test.t28064.a, 123), eq(test.t28064.b, 234), eq(test.t28064.c, 345)", - "└─IndexLookUp_7 0.00 root ", - " ├─IndexRangeScan_5(Build) 0.00 cop[tikv] table:t28064, index:iabc(a, b, c) range:[123 234 345,123 234 345], keep order:false, stats:pseudo", - " └─TableRowIDScan_6(Probe) 0.00 cop[tikv] table:t28064 keep order:false, stats:pseudo")) -} - -func TestPreparePlanCache4Blacklist(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - - // test the blacklist of optimization rules - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int);") - tk.MustExec("prepare stmt from 'select min(a) from t;';") - tk.MustExec("execute stmt;") - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - 
tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - require.Contains(t, res.Rows()[1][0], "TopN") - - res = tk.MustQuery("explain format = 'brief' select min(a) from t") - require.Contains(t, res.Rows()[1][0], "TopN") - - tk.MustExec("INSERT INTO mysql.opt_rule_blacklist VALUES('max_min_eliminate');") - tk.MustExec("ADMIN reload opt_rule_blacklist;") - - tk.MustExec("execute stmt;") - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustExec("execute stmt;") - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - // Plans that have been cached will not be affected by the blacklist. - require.Contains(t, res.Rows()[1][0], "TopN") - - res = tk.MustQuery("explain format = 'brief' select min(a) from t") - require.Contains(t, res.Rows()[0][0], "StreamAgg") - - // test the blacklist of Expression Pushdown - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int);") - tk.MustExec("prepare stmt from 'SELECT * FROM t WHERE a < 2 and a > 2;';") - tk.MustExec("execute stmt;") - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - require.Equal(t, 3, len(res.Rows())) - require.Contains(t, res.Rows()[1][0], "Selection") - require.Equal(t, "gt(test.t.a, 2), lt(test.t.a, 2)", res.Rows()[1][4]) - - res = tk.MustQuery("explain format = 'brief' SELECT * FROM t WHERE a < 2 and a > 2;") - require.Equal(t, 3, len(res.Rows())) - require.Equal(t, "gt(test.t.a, 2), lt(test.t.a, 2)", res.Rows()[1][4]) - - tk.MustExec("INSERT INTO mysql.expr_pushdown_blacklist VALUES('<','tikv','');") - tk.MustExec("ADMIN reload expr_pushdown_blacklist;") - - tk.MustExec("execute stmt;") - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustExec("execute stmt;") - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - // The expressions can still be pushed down to tikv. 
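These tests repeat a three-step idiom whenever they need the runtime plan of the last executed statement: fetch the session's process info, install a stub session manager holding just that entry, and run EXPLAIN FOR CONNECTION against the connection ID. A sketch of the idiom as a helper, reusing only names that already appear in these files (mockSessionManager1, util.ProcessInfo, testkit.TestKit); explainLastPlan is an illustrative name:

// explainLastPlan returns the runtime plan of the statement most recently
// executed on tk's session. EXPLAIN FOR CONNECTION resolves the connection
// through the session manager, so a stub manager exposing this single
// process entry is installed first.
func explainLastPlan(tk *testkit.TestKit) [][]interface{} {
	proc := tk.Session().ShowProcess()
	tk.Session().SetSessionManager(&mockSessionManager1{PS: []*util.ProcessInfo{proc}})
	return tk.MustQuery(fmt.Sprintf("explain for connection %d", proc.ID)).Rows()
}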
- require.Equal(t, 3, len(res.Rows())) - require.Contains(t, res.Rows()[1][0], "Selection") - require.Equal(t, "gt(test.t.a, 2), lt(test.t.a, 2)", res.Rows()[1][4]) - - res = tk.MustQuery("explain format = 'brief' SELECT * FROM t WHERE a < 2 and a > 2;") - require.Equal(t, 4, len(res.Rows())) - require.Contains(t, res.Rows()[0][0], "Selection") - require.Equal(t, "lt(test.t.a, 2)", res.Rows()[0][4]) - require.Contains(t, res.Rows()[2][0], "Selection") - require.Equal(t, "gt(test.t.a, 2)", res.Rows()[2][4]) - - tk.MustExec("DELETE FROM mysql.expr_pushdown_blacklist;") - tk.MustExec("ADMIN reload expr_pushdown_blacklist;") -} - -func TestPlanCacheClusterIndex(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - tk.MustExec("use test") - tk.MustExec("drop table if exists t1") - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("create table t1(a varchar(20), b varchar(20), c varchar(20), primary key(a, b))") - tk.MustExec("insert into t1 values('1','1','111'),('2','2','222'),('3','3','333')") - - // For table scan - tk.MustExec(`prepare stmt1 from "select * from t1 where t1.a = ? and t1.b > ?"`) - tk.MustExec("set @v1 = '1'") - tk.MustExec("set @v2 = '0'") - tk.MustQuery("execute stmt1 using @v1,@v2").Check(testkit.Rows("1 1 111")) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) - tk.MustExec("set @v1 = '2'") - tk.MustExec("set @v2 = '1'") - tk.MustQuery("execute stmt1 using @v1,@v2").Check(testkit.Rows("2 2 222")) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) - tk.MustExec("set @v1 = '3'") - tk.MustExec("set @v2 = '2'") - tk.MustQuery("execute stmt1 using @v1,@v2").Check(testkit.Rows("3 3 333")) - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - rows := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() - require.Equal(t, 0, strings.Index(rows[len(rows)-1][4].(string), `range:("3" "2","3" +inf]`)) - // For point get - tk.MustExec(`prepare stmt2 from "select * from t1 where t1.a = ? 
and t1.b = ?"`) - tk.MustExec("set @v1 = '1'") - tk.MustExec("set @v2 = '1'") - tk.MustQuery("execute stmt2 using @v1,@v2").Check(testkit.Rows("1 1 111")) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) - tk.MustExec("set @v1 = '2'") - tk.MustExec("set @v2 = '2'") - tk.MustQuery("execute stmt2 using @v1,@v2").Check(testkit.Rows("2 2 222")) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) - tk.MustExec("set @v1 = '3'") - tk.MustExec("set @v2 = '3'") - tk.MustQuery("execute stmt2 using @v1,@v2").Check(testkit.Rows("3 3 333")) - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() - require.Equal(t, 0, strings.Index(rows[len(rows)-1][0].(string), `Point_Get`)) - // For CBO point get and batch point get - // case 1: - tk.MustExec(`drop table if exists ta, tb`) - tk.MustExec(`create table ta (a varchar(8) primary key, b int)`) - tk.MustExec(`insert ta values ('a', 1), ('b', 2)`) - tk.MustExec(`create table tb (a varchar(8) primary key, b int)`) - tk.MustExec(`insert tb values ('a', 1), ('b', 2)`) - tk.MustExec(`prepare stmt1 from "select * from ta, tb where ta.a = tb.a and ta.a = ?"`) - tk.MustExec(`set @v1 = 'a', @v2 = 'b'`) - tk.MustQuery(`execute stmt1 using @v1`).Check(testkit.Rows("a 1 a 1")) - tk.MustQuery(`execute stmt1 using @v2`).Check(testkit.Rows("b 2 b 2")) - - // case 2: - tk.MustExec(`drop table if exists ta, tb`) - tk.MustExec(`create table ta (a varchar(10) primary key, b int not null)`) - tk.MustExec(`insert ta values ('a', 1), ('b', 2)`) - tk.MustExec(`create table tb (b int primary key, c int)`) - tk.MustExec(`insert tb values (1, 1), (2, 2)`) - tk.MustExec(`prepare stmt1 from "select * from ta, tb where ta.b = tb.b and ta.a = ?"`) - tk.MustExec(`set @v1 = 'a', @v2 = 'b'`) - tk.MustQuery(`execute stmt1 using @v1`).Check(testkit.Rows("a 1 1 1")) - tk.MustQuery(`execute stmt1 using @v2`).Check(testkit.Rows("b 2 2 2")) - tk.MustQuery(`execute stmt1 using @v2`).Check(testkit.Rows("b 2 2 2")) - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() - require.True(t, strings.Contains(rows[3][0].(string), `TableRangeScan`)) - - // case 3: - tk.MustExec(`drop table if exists ta, tb`) - tk.MustExec(`create table ta (a varchar(10), b varchar(10), c int, primary key (a, b))`) - tk.MustExec(`insert ta values ('a', 'a', 1), ('b', 'b', 2), ('c', 'c', 3)`) - tk.MustExec(`create table tb (b int primary key, c int)`) - tk.MustExec(`insert tb values (1, 1), (2, 2), (3,3)`) - tk.MustExec(`prepare stmt1 from "select * from ta, tb where ta.c = tb.b and ta.a = ? 
and ta.b = ?"`) - tk.MustExec(`set @v1 = 'a', @v2 = 'b', @v3 = 'c'`) - tk.MustQuery(`execute stmt1 using @v1, @v1`).Check(testkit.Rows("a a 1 1 1")) - tk.MustQuery(`execute stmt1 using @v2, @v2`).Check(testkit.Rows("b b 2 2 2")) - tk.MustExec(`prepare stmt2 from "select * from ta, tb where ta.c = tb.b and (ta.a, ta.b) in ((?, ?), (?, ?))"`) - tk.MustQuery(`execute stmt2 using @v1, @v1, @v2, @v2`).Check(testkit.Rows("a a 1 1 1", "b b 2 2 2")) - tk.MustQuery(`execute stmt2 using @v2, @v2, @v3, @v3`).Check(testkit.Rows("b b 2 2 2", "c c 3 3 3")) - - // For issue 19002 - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - tk.MustExec(`drop table if exists t1`) - tk.MustExec(`create table t1(a int, b int, c int, primary key(a, b))`) - tk.MustExec(`insert into t1 values(1,1,111),(2,2,222),(3,3,333)`) - // Point Get: - tk.MustExec(`prepare stmt1 from "select * from t1 where t1.a = ? and t1.b = ?"`) - tk.MustExec(`set @v1=1, @v2=1`) - tk.MustQuery(`execute stmt1 using @v1,@v2`).Check(testkit.Rows("1 1 111")) - tk.MustExec(`set @v1=2, @v2=2`) - tk.MustQuery(`execute stmt1 using @v1,@v2`).Check(testkit.Rows("2 2 222")) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) - // Batch Point Get: - tk.MustExec(`prepare stmt2 from "select * from t1 where (t1.a,t1.b) in ((?,?),(?,?))"`) - tk.MustExec(`set @v1=1, @v2=1, @v3=2, @v4=2`) - tk.MustQuery(`execute stmt2 using @v1,@v2,@v3,@v4`).Check(testkit.Rows("1 1 111", "2 2 222")) - tk.MustExec(`set @v1=2, @v2=2, @v3=3, @v4=3`) - tk.MustQuery(`execute stmt2 using @v1,@v2,@v3,@v4`).Check(testkit.Rows("2 2 222", "3 3 333")) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) -} - -func TestPlanCacheWithDifferentVariableTypes(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - require.NoError(t, err) - - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("create table t1(a varchar(20), b int, c float, key(b, a))") - tk.MustExec("insert into t1 values('1',1,1.1),('2',2,222),('3',3,333)") - tk.MustExec("create table t2(a varchar(20), b int, c float, key(b, a))") - tk.MustExec("insert into t2 values('3',3,3.3),('2',2,222),('3',3,333)") - - var input []struct { - PrepareStmt string - Executes []struct { - Vars []struct { - Name string - Value string - } - ExecuteSQL string - } - } - var output []struct { - PrepareStmt string - Executes []struct { - SQL string - Vars []struct { - Name string - Value string - } - Plan []string - LastPlanUseCache string - Result []string - } - } - prepareMergeSuiteData.GetTestCases(t, &input, &output) - for i, tt := range input { - tk.MustExec(tt.PrepareStmt) - testdata.OnRecord(func() { - output[i].PrepareStmt = tt.PrepareStmt - output[i].Executes = make([]struct { - SQL string - Vars []struct { - Name string - Value string - } - Plan []string - LastPlanUseCache string - Result []string - }, len(tt.Executes)) - }) - require.Equal(t, tt.PrepareStmt, output[i].PrepareStmt) - for j, exec := range tt.Executes { - for _, v := range exec.Vars { - tk.MustExec(fmt.Sprintf(`set @%s = %s`, v.Name, v.Value)) - } - res := tk.MustQuery(exec.ExecuteSQL) - lastPlanUseCache := 
tk.MustQuery("select @@last_plan_from_cache").Rows()[0][0] - tk.MustQuery(exec.ExecuteSQL) - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - plan := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - testdata.OnRecord(func() { - output[i].Executes[j].SQL = exec.ExecuteSQL - output[i].Executes[j].Plan = testdata.ConvertRowsToStrings(plan.Rows()) - output[i].Executes[j].Vars = exec.Vars - output[i].Executes[j].LastPlanUseCache = lastPlanUseCache.(string) - output[i].Executes[j].Result = testdata.ConvertRowsToStrings(res.Rows()) - }) - - require.Equal(t, exec.ExecuteSQL, output[i].Executes[j].SQL) - plan.Check(testkit.Rows(output[i].Executes[j].Plan...)) - require.Equal(t, exec.Vars, output[i].Executes[j].Vars) - require.Equal(t, lastPlanUseCache.(string), output[i].Executes[j].LastPlanUseCache) - res.Check(testkit.Rows(output[i].Executes[j].Result...)) - } - } -} - -func TestPlanCacheOperators(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - type ExecCase struct { - Parameters []string - UseCache bool - } - type PrepCase struct { - PrepStmt string - ExecCases []ExecCase - } - - cases := []PrepCase{ - {"use test", nil}, - - // cases for TableReader on PK - {"create table t (a int, b int, primary key(a))", nil}, - {"insert into t values (1,1), (2,2), (3,3), (4,4), (5,5), (6,null)", nil}, - {"select a from t where a=?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"2"}, true}, - {[]string{"3"}, true}, - }}, - {"select a from t where a in (?,?,?)", []ExecCase{ - {[]string{"1", "1", "1"}, false}, - {[]string{"2", "3", "4"}, true}, - {[]string{"3", "5", "7"}, true}, - }}, - {"select a from t where a>? and a? and a? and a? and a? and a?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, true}, - {[]string{"5"}, true}, - }}, - {"select /*+ HASH_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t2.b>?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, true}, - {[]string{"5"}, true}, - }}, - {"select /*+ HASH_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t1.b>? and t2.b?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, true}, - {[]string{"5"}, true}, - }}, - {"select /*+ MERGE_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t2.b>?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, true}, - {[]string{"5"}, true}, - }}, - {"select /*+ MERGE_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t1.b>? and t2.b?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, true}, - {[]string{"5"}, true}, - }}, - {"select /*+ INL_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t2.b>?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, true}, - {[]string{"5"}, true}, - }}, - {"select /*+ INL_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t1.b>? and t2.b? 
and t1.a > (select min(t2.a) from t t2 where t2.b < t1.b)", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, false}, // plans with sub-queries cannot be cached, but the result must be correct - {[]string{"5"}, false}, - }}, - {"select * from t t1 where t1.a > (select min(t2.a) from t t2 where t2.b < t1.b+?)", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"3"}, false}, - {[]string{"5"}, false}, - }}, - {"select * from t t1 where t1.b>? and t1.a > (select min(t2.a) from t t2 where t2.b < t1.b+?)", []ExecCase{ - {[]string{"1", "1"}, false}, - {[]string{"3", "2"}, false}, - {[]string{"5", "3"}, false}, - }}, - {"drop table t", nil}, - - // cases for Window - {"create table t (name varchar(50), y int, sale decimal(14,2))", nil}, - {"insert into t values ('Bob',2016,2.4), ('Bob',2017,3.2), ('Bob',2018,2.1), ('Alice',2016,1.4), ('Alice',2017,2), ('Alice',2018,3.3), ('John',2016,4), ('John',2017,2.1), ('John',2018,5)", nil}, - {"select *, sum(sale) over (partition by y order by sale) total from t where sale>? order by y", []ExecCase{ - {[]string{"0.1"}, false}, - {[]string{"0.5"}, true}, - {[]string{"1.5"}, true}, - {[]string{"3.5"}, true}, - }}, - {"select *, sum(sale) over (partition by y order by sale+? rows 2 preceding) total from t order by y", []ExecCase{ - {[]string{"0.1"}, false}, - {[]string{"0.5"}, true}, - {[]string{"1.5"}, true}, - {[]string{"3.5"}, true}, - }}, - {"select *, rank() over (partition by y order by sale+? rows 2 preceding) total from t order by y", []ExecCase{ - {[]string{"0.1"}, false}, - {[]string{"0.5"}, true}, - {[]string{"1.5"}, true}, - {[]string{"3.5"}, true}, - }}, - {"select *, first_value(sale) over (partition by y order by sale+? rows 2 preceding) total from t order by y", []ExecCase{ - {[]string{"0.1"}, false}, - {[]string{"0.5"}, true}, - {[]string{"1.5"}, true}, - {[]string{"3.5"}, true}, - }}, - {"select *, first_value(sale) over (partition by y order by sale rows ? preceding) total from t order by y", []ExecCase{ - {[]string{"1"}, false}, // window plans with parameters in frame cannot be cached - {[]string{"2"}, false}, - {[]string{"3"}, false}, - {[]string{"4"}, false}, - }}, - {"drop table t", nil}, - - // cases for Limit - {"create table t (a int)", nil}, - {"insert into t values (1), (1), (2), (2), (3), (4), (5), (6), (7), (8), (9), (0), (0)", nil}, - {"select * from t limit ?", []ExecCase{ - {[]string{"20"}, false}, - {[]string{"30"}, false}, - }}, - {"select * from t limit 40, ?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"2"}, false}, - }}, - {"select * from t limit ?, 10", []ExecCase{ - {[]string{"20"}, false}, - {[]string{"30"}, false}, - }}, - {"select * from t limit ?, ?", []ExecCase{ - {[]string{"20", "20"}, false}, - {[]string{"20", "40"}, false}, - }}, - {"select * from t where a? order by mod(a, 3)", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"2"}, true}, - {[]string{"3"}, true}, - }}, - - // cases for topN - {"select * from t order by b limit ?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"2"}, false}, - }}, - {"select * from t order by b limit 10, ?", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"2"}, false}, - }}, - {"select * from t order by ? limit 10", []ExecCase{ - {[]string{"1"}, false}, - {[]string{"2"}, false}, - }}, - {"select * from t order by ? 
limit ?", []ExecCase{ - {[]string{"1", "10"}, false}, - {[]string{"2", "20"}, false}, - }}, - } - - for _, prepCase := range cases { - isQuery := strings.Contains(prepCase.PrepStmt, "select") - if !isQuery { - tk.MustExec(prepCase.PrepStmt) - continue - } - - tk.MustExec(fmt.Sprintf(`prepare stmt from '%v'`, prepCase.PrepStmt)) - for _, execCase := range prepCase.ExecCases { - // set all parameters - usingStmt := "" - if len(execCase.Parameters) > 0 { - setStmt := "set " - usingStmt = "using " - for i, parameter := range execCase.Parameters { - if i > 0 { - setStmt += ", " - usingStmt += ", " - } - setStmt += fmt.Sprintf("@x%v=%v", i, parameter) - usingStmt += fmt.Sprintf("@x%v", i) - } - tk.MustExec(setStmt) - } - - // execute this statement and check whether it uses a cached plan - results := tk.MustQuery("execute stmt " + usingStmt).Sort().Rows() - - // check whether the result is correct - tmp := strings.Split(prepCase.PrepStmt, "?") - require.Equal(t, len(execCase.Parameters)+1, len(tmp)) - query := "" - for i := range tmp { - query += tmp[i] - if i < len(execCase.Parameters) { - query += execCase.Parameters[i] - } - } - tk.MustQuery(query).Sort().Check(results) - } - } -} - -func TestIssue28782(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("prepare stmt from 'SELECT IF(?, 1, 0);';") - tk.MustExec("set @a=1, @b=null, @c=0") - - tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt using @b;").Check(testkit.Rows("0")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt using @c;").Check(testkit.Rows("0")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) -} - -func TestIssue29101(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - - tk.MustExec(`use test`) - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec(`CREATE TABLE customer ( - c_id int(11) NOT NULL, - c_d_id int(11) NOT NULL, - c_w_id int(11) NOT NULL, - c_first varchar(16) DEFAULT NULL, - c_last varchar(16) DEFAULT NULL, - c_credit char(2) DEFAULT NULL, - c_discount decimal(4,4) DEFAULT NULL, - PRIMARY KEY (c_w_id,c_d_id,c_id), - KEY idx_customer (c_w_id,c_d_id,c_last,c_first) - )`) - tk.MustExec(`CREATE TABLE warehouse ( - w_id int(11) NOT NULL, - w_tax decimal(4,4) DEFAULT NULL, - PRIMARY KEY (w_id) - )`) - tk.MustExec(`prepare s1 from 'SELECT /*+ TIDB_INLJ(customer,warehouse) */ c_discount, c_last, c_credit, w_tax FROM customer, warehouse WHERE w_id = ? AND c_w_id = w_id AND c_d_id = ? 
AND c_id = ?'`) - tk.MustExec(`set @a=936,@b=7,@c=158`) - tk.MustQuery(`execute s1 using @a,@b,@c`).Check(testkit.Rows()) - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Check(testkit.Rows( // can use IndexJoin - `Projection_6 1.00 root test.customer.c_discount, test.customer.c_last, test.customer.c_credit, test.warehouse.w_tax`, - `└─IndexJoin_14 1.00 root inner join, inner:TableReader_10, outer key:test.customer.c_w_id, inner key:test.warehouse.w_id, equal cond:eq(test.customer.c_w_id, test.warehouse.w_id)`, - ` ├─Point_Get_33(Build) 1.00 root table:customer, index:PRIMARY(c_w_id, c_d_id, c_id) `, - ` └─TableReader_10(Probe) 0.00 root data:Selection_9`, - ` └─Selection_9 0.00 cop[tikv] eq(test.warehouse.w_id, 936)`, - ` └─TableRangeScan_8 1.00 cop[tikv] table:warehouse range: decided by [test.customer.c_w_id], keep order:false, stats:pseudo`)) - tk.MustQuery(`execute s1 using @a,@b,@c`).Check(testkit.Rows()) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) // can use the plan-cache - - tk.MustExec(`CREATE TABLE order_line ( - ol_o_id int(11) NOT NULL, - ol_d_id int(11) NOT NULL, - ol_w_id int(11) NOT NULL, - ol_number int(11) NOT NULL, - ol_i_id int(11) NOT NULL, - PRIMARY KEY (ol_w_id,ol_d_id,ol_o_id,ol_number))`) - tk.MustExec(`CREATE TABLE stock ( - s_i_id int(11) NOT NULL, - s_w_id int(11) NOT NULL, - s_quantity int(11) DEFAULT NULL, - PRIMARY KEY (s_w_id,s_i_id))`) - tk.MustExec(`prepare s1 from 'SELECT /*+ TIDB_INLJ(order_line,stock) */ COUNT(DISTINCT (s_i_id)) stock_count FROM order_line, stock WHERE ol_w_id = ? AND ol_d_id = ? AND ol_o_id < ? AND ol_o_id >= ? - 20 AND s_w_id = ? 
AND s_i_id = ol_i_id AND s_quantity < ?'`) - tk.MustExec(`set @a=391,@b=1,@c=3058,@d=18`) - tk.MustExec(`execute s1 using @a,@b,@c,@c,@a,@d`) - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Check(testkit.Rows( // can use index-join - `StreamAgg_9 1.00 root funcs:count(distinct test.stock.s_i_id)->Column#11`, - `└─IndexJoin_14 0.03 root inner join, inner:IndexLookUp_13, outer key:test.order_line.ol_i_id, inner key:test.stock.s_i_id, equal cond:eq(test.order_line.ol_i_id, test.stock.s_i_id)`, - ` ├─Selection_30(Build) 0.03 root eq(test.order_line.ol_d_id, 1), eq(test.order_line.ol_w_id, 391), ge(test.order_line.ol_o_id, 3038), lt(test.order_line.ol_o_id, 3058)`, - ` │ └─IndexLookUp_29 0.03 root `, - ` │ ├─IndexRangeScan_27(Build) 0.03 cop[tikv] table:order_line, index:PRIMARY(ol_w_id, ol_d_id, ol_o_id, ol_number) range:[391 1 3038,391 1 3058), keep order:false, stats:pseudo`, - ` │ └─TableRowIDScan_28(Probe) 0.03 cop[tikv] table:order_line keep order:false, stats:pseudo`, - ` └─IndexLookUp_13(Probe) 1.00 root `, - ` ├─IndexRangeScan_10(Build) 1.00 cop[tikv] table:stock, index:PRIMARY(s_w_id, s_i_id) range: decided by [eq(test.stock.s_i_id, test.order_line.ol_i_id) eq(test.stock.s_w_id, 391)], keep order:false, stats:pseudo`, - ` └─Selection_12(Probe) 1.00 cop[tikv] lt(test.stock.s_quantity, 18)`, - ` └─TableRowIDScan_11 1.00 cop[tikv] table:stock keep order:false, stats:pseudo`)) - tk.MustExec(`execute s1 using @a,@b,@c,@c,@a,@d`) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) // can use the plan-cache -} - -func TestIssue28087And28162(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - - // issue 28087 - tk.MustExec(`use test`) - tk.MustExec(`drop table if exists IDT_26207`) - tk.MustExec(`CREATE TABLE IDT_26207 (col1 bit(1))`) - tk.MustExec(`insert into IDT_26207 values(0x0), (0x1)`) - tk.MustExec(`prepare stmt from 'select t1.col1 from IDT_26207 as t1 left join IDT_26207 as t2 on t1.col1 = t2.col1 where t1.col1 in (?, ?, ?)'`) - tk.MustExec(`set @a=0x01, @b=0x01, @c=0x01`) - tk.MustQuery(`execute stmt using @a,@b,@c`).Check(testkit.Rows("\x01")) - tk.MustExec(`set @a=0x00, @b=0x00, @c=0x01`) - tk.MustQuery(`execute stmt using @a,@b,@c`).Check(testkit.Rows("\x00", "\x01")) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("0")) - - // issue 28162 - tk.MustExec(`drop table if exists IDT_MC21780`) - tk.MustExec(`CREATE TABLE IDT_MC21780 ( - COL1 timestamp NULL DEFAULT NULL, - COL2 timestamp NULL DEFAULT NULL, - COL3 timestamp NULL DEFAULT NULL, - KEY U_M_COL (COL1,COL2) - )`) - tk.MustExec(`insert into IDT_MC21780 values("1970-12-18 10:53:28", "1970-12-18 10:53:28", "1970-12-18 10:53:28")`) - tk.MustExec(`prepare stmt from 'select/*+ hash_join(t1) */ * from IDT_MC21780 t1 join IDT_MC21780 t2 on t1.col1 = t2.col1 where t1. col1 < ? and t2. 
col1 in (?, ?, ?);'`) - tk.MustExec(`set @a="2038-01-19 03:14:07", @b="2038-01-19 03:14:07", @c="2038-01-19 03:14:07", @d="2038-01-19 03:14:07"`) - tk.MustQuery(`execute stmt using @a,@b,@c,@d`).Check(testkit.Rows()) - tk.MustExec(`set @a="1976-09-09 20:21:11", @b="2021-07-14 09:28:16", @c="1982-01-09 03:36:39", @d="1970-12-18 10:53:28"`) - tk.MustQuery(`execute stmt using @a,@b,@c,@d`).Check(testkit.Rows("1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28")) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) -} - -func TestParameterPushDown(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - require.NoError(t, err) - tk.MustExec(`use test`) - tk.MustExec(`drop table if exists t`) - tk.MustExec(`create table t (a int, b int, c int, key(a))`) - tk.MustExec(`insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 6, 6)`) - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec(`set @x1=1,@x5=5,@x10=10,@x20=20`) - - var input []struct { - SQL string - } - var output []struct { - Result []string - Plan []string - FromCache string - } - prepareMergeSuiteData.GetTestCases(t, &input, &output) - - for i, tt := range input { - if strings.HasPrefix(tt.SQL, "execute") { - res := tk.MustQuery(tt.SQL).Sort() - fromCache := tk.MustQuery("select @@last_plan_from_cache") - tk.MustQuery(tt.SQL) - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - plan := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) - - testdata.OnRecord(func() { - output[i].Result = testdata.ConvertRowsToStrings(res.Rows()) - output[i].Plan = testdata.ConvertRowsToStrings(plan.Rows()) - output[i].FromCache = fromCache.Rows()[0][0].(string) - }) - - res.Check(testkit.Rows(output[i].Result...)) - plan.Check(testkit.Rows(output[i].Plan...)) - require.Equal(t, fromCache.Rows()[0][0].(string), output[i].FromCache) - } else { - tk.MustExec(tt.SQL) - testdata.OnRecord(func() { - output[i].Result = nil - }) - } - } -} - -func TestPreparePlanCache4Function(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - - // Testing for non-deterministic functions - tk.MustExec("prepare stmt from 'select rand()';") - res := tk.MustQuery("execute stmt;") - require.Equal(t, 1, len(res.Rows())) - - res1 := tk.MustQuery("execute stmt;") - require.Equal(t, 1, len(res1.Rows())) - require.NotEqual(t, res.Rows()[0][0], res1.Rows()[0][0]) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) - - // Testing for control functions - tk.MustExec("prepare stmt from 'SELECT IFNULL(?,0);';") - tk.MustExec("set @a = 1, @b = null;") - tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) - 
tk.MustQuery("execute stmt using @b;").Check(testkit.Rows("0")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int);") - tk.MustExec("prepare stmt from 'select a, case when a = ? then 0 when a <=> ? then 1 else 2 end b from t order by a;';") - tk.MustExec("insert into t values(0), (1), (2), (null);") - tk.MustExec("set @a = 0, @b = 1, @c = 2, @d = null;") - tk.MustQuery("execute stmt using @a, @b;").Check(testkit.Rows(" 2", "0 0", "1 1", "2 2")) - tk.MustQuery("execute stmt using @c, @d;").Check(testkit.Rows(" 1", "0 2", "1 2", "2 0")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) -} - -func TestPreparePlanCache4DifferentSystemVars(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - - // Testing for 'sql_select_limit' - tk.MustExec("set @@sql_select_limit = 1") - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int);") - tk.MustExec("insert into t values(0), (1), (null);") - tk.MustExec("prepare stmt from 'select a from t order by a;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows("")) - - tk.MustExec("set @@sql_select_limit = 2") - tk.MustQuery("execute stmt;").Check(testkit.Rows("", "0")) - // The 'sql_select_limit' will be stored in the cache key. So if the `sql_select_limit` - // have been changed, the plan cache can not be reused. 
- tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - - tk.MustExec("set @@sql_select_limit = 18446744073709551615") - tk.MustQuery("execute stmt;").Check(testkit.Rows("", "0", "1")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - - // test for 'tidb_enable_index_merge' - tk.MustExec("set @@tidb_enable_index_merge = 1;") - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int, b int, index idx_a(a), index idx_b(b));") - tk.MustExec("prepare stmt from 'select * from t use index(idx_a, idx_b) where a > 1 or b > 1;';") - tk.MustExec("execute stmt;") - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res := tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - require.Equal(t, 4, len(res.Rows())) - require.Contains(t, res.Rows()[0][0], "IndexMerge") - - tk.MustExec("set @@tidb_enable_index_merge = 0;") - tk.MustExec("execute stmt;") - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - require.Equal(t, 4, len(res.Rows())) - require.Contains(t, res.Rows()[0][0], "IndexMerge") - tk.MustExec("execute stmt;") - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - // test for 'tidb_enable_parallel_apply' - tk.MustExec("set @@tidb_enable_collect_execution_info=1;") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int, b int)") - tk.MustExec("insert into t values (0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (null, null)") - - tk.MustExec("set tidb_enable_parallel_apply=true") - tk.MustExec("prepare stmt from 'select t1.b from t t1 where t1.b > (select max(b) from t t2 where t1.a > t2.a);';") - tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - require.Contains(t, res.Rows()[1][0], "Apply") - require.Contains(t, res.Rows()[1][5], "Concurrency") - - tk.MustExec("set tidb_enable_parallel_apply=false") - tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - require.Contains(t, res.Rows()[1][0], "Apply") - executionInfo := fmt.Sprintf("%v", res.Rows()[1][4]) - // Do not use the parallel apply. - require.False(t, strings.Contains(executionInfo, "Concurrency")) - tk.MustExec("execute stmt;") - // The subquery plan can not be cached. 
- tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - - // test for apply cache - tk.MustExec("set @@tidb_enable_collect_execution_info=1;") - tk.MustExec("set tidb_mem_quota_apply_cache=33554432") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int, b int)") - tk.MustExec("insert into t values (0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (null, null)") - - tk.MustExec("prepare stmt from 'select t1.b from t t1 where t1.b > (select max(b) from t t2 where t1.a > t2.a);';") - tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - require.Contains(t, res.Rows()[1][0], "Apply") - require.Contains(t, res.Rows()[1][5], "cache:ON") - - tk.MustExec("set tidb_mem_quota_apply_cache=0") - tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - require.Contains(t, res.Rows()[1][0], "Apply") - executionInfo = fmt.Sprintf("%v", res.Rows()[1][5]) - // Do not use the apply cache. - require.True(t, strings.Contains(executionInfo, "cache:OFF")) - tk.MustExec("execute stmt;") - // The subquery plan can not be cached. - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) -} - -func TestTemporaryTable4PlanCache(t *testing.T) { - store, dom, err := newStoreWithBootstrap() - require.NoError(t, err) - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(true) - tk := testkit.NewTestKit(t, store) - defer func() { - dom.Close() - require.NoError(t, store.Close()) - }() - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("drop table if exists tmp2") - tk.MustExec("create temporary table tmp2 (a int, b int, key(a), key(b));") - tk.MustExec("prepare stmt from 'select * from tmp2;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - - tk.MustExec("drop table if exists tmp_t;") - tk.MustExec("create global temporary table tmp_t (id int primary key, a int, b int, index(a)) on commit delete rows") - tk.MustExec("prepare stmt from 'select * from tmp_t;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - -} - -func TestPrepareStmtAfterIsolationReadChange(t *testing.T) { - if israce.RaceEnabled { - t.Skip("race test for this case takes too long time") - } - store, clean := testkit.CreateMockStore(t) - defer clean() - orgEnable := plannercore.PreparedPlanCacheEnabled() - defer func() { - plannercore.SetPreparedPlanCache(orgEnable) - }() - plannercore.SetPreparedPlanCache(false) // requires plan cache disabled - tk := testkit.NewTestKit(t, store) - tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost", CurrentUser: true, AuthUsername: "root", 
AuthHostname: "%"}, nil, []byte("012345678901234567890")) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int)") - // create virtual tiflash replica. - dom := domain.GetDomain(tk.Session()) - is := dom.InfoSchema() - db, exists := is.SchemaByName(model.NewCIStr("test")) - require.True(t, exists) - for _, tblInfo := range db.Tables { - if tblInfo.Name.L == "t" { - tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{ - Count: 1, - Available: true, - } - } - } - - tk.MustExec("set @@session.tidb_isolation_read_engines='tikv'") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("prepare stmt from \"select * from t\"") - tk.MustQuery("execute stmt") - tkProcess := tk.Session().ShowProcess() - ps := []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - rows := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() - require.Equal(t, "cop[tikv]", rows[len(rows)-1][2]) - - tk.MustExec("set @@session.tidb_isolation_read_engines='tiflash'") - tk.MustExec("execute stmt") - tkProcess = tk.Session().ShowProcess() - ps = []*util.ProcessInfo{tkProcess} - tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) - rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() - require.Equal(t, rows[len(rows)-1][2], "cop[tiflash]") - - require.Equal(t, 1, len(tk.Session().GetSessionVars().PreparedStmts)) - require.Equal(t, "select * from `t`", tk.Session().GetSessionVars().PreparedStmts[1].(*plannercore.CachedPrepareStmt).NormalizedSQL) - require.Equal(t, "", tk.Session().GetSessionVars().PreparedStmts[1].(*plannercore.CachedPrepareStmt).NormalizedPlan) -} diff --git a/executor/prepared_test.go b/executor/prepared_test.go index 5908af0437848..e24178b9817b2 100644 --- a/executor/prepared_test.go +++ b/executor/prepared_test.go @@ -17,15 +17,23 @@ package executor_test import ( "crypto/tls" "fmt" + "strconv" + "strings" "sync/atomic" "testing" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/parser/auth" + "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/session" - txninfo "github.com/pingcap/tidb/session/txninfo" + "github.com/pingcap/tidb/session/txninfo" + "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/testkit/testdata" "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/israce" "github.com/stretchr/testify/require" ) @@ -218,3 +226,1149 @@ func TestIssue29850(t *testing.T) { ` └─TableRangeScan_5 1.00 cop[tikv] table:t range:[1,1], keep order:false, stats:pseudo`)) tk.MustQuery(`execute stmt using @a1, @a2`).Check(testkit.Rows("1", "2")) } + +func TestIssue28064(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + tk.MustExec("use test") + tk.MustExec("drop table if exists t28064") + tk.MustExec("CREATE TABLE `t28064` (" + + "`a` decimal(10,0) DEFAULT NULL," + + "`b` decimal(10,0) DEFAULT NULL," + + "`c` decimal(10,0) DEFAULT NULL," + + "`d` decimal(10,0) DEFAULT NULL," + + "KEY `iabc` (`a`,`b`,`c`));") + tk.MustExec("set @a='123', @b='234', @c='345';") + 
tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec("prepare stmt1 from 'select * from t28064 use index (iabc) where a = ? and b = ? and c = ?';") + + tk.MustExec("execute stmt1 using @a, @b, @c;") + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + rows := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + rows.Check(testkit.Rows("Selection_8 0.00 root eq(test.t28064.a, 123), eq(test.t28064.b, 234), eq(test.t28064.c, 345)", + "└─IndexLookUp_7 0.00 root ", + " ├─IndexRangeScan_5(Build) 0.00 cop[tikv] table:t28064, index:iabc(a, b, c) range:[123 234 345,123 234 345], keep order:false, stats:pseudo", + " └─TableRowIDScan_6(Probe) 0.00 cop[tikv] table:t28064 keep order:false, stats:pseudo")) + + tk.MustExec("execute stmt1 using @a, @b, @c;") + rows = tk.MustQuery("select @@last_plan_from_cache") + rows.Check(testkit.Rows("1")) + + tk.MustExec("execute stmt1 using @a, @b, @c;") + rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + rows.Check(testkit.Rows("Selection_8 0.00 root eq(test.t28064.a, 123), eq(test.t28064.b, 234), eq(test.t28064.c, 345)", + "└─IndexLookUp_7 0.00 root ", + " ├─IndexRangeScan_5(Build) 0.00 cop[tikv] table:t28064, index:iabc(a, b, c) range:[123 234 345,123 234 345], keep order:false, stats:pseudo", + " └─TableRowIDScan_6(Probe) 0.00 cop[tikv] table:t28064 keep order:false, stats:pseudo")) +} + +func TestPreparePlanCache4Blacklist(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + + tk.MustExec("use test") + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + + // test the blacklist of optimization rules + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a int);") + tk.MustExec("prepare stmt from 'select min(a) from t;';") + tk.MustExec("execute stmt;") + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + require.Contains(t, res.Rows()[1][0], "TopN") + + res = tk.MustQuery("explain format = 'brief' select min(a) from t") + require.Contains(t, res.Rows()[1][0], "TopN") + + tk.MustExec("INSERT INTO mysql.opt_rule_blacklist VALUES('max_min_eliminate');") + tk.MustExec("ADMIN reload opt_rule_blacklist;") + + tk.MustExec("execute stmt;") + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) + tk.MustExec("execute stmt;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + // Plans that have been cached will not be affected by the blacklist. 
+ require.Contains(t, res.Rows()[1][0], "TopN") + + res = tk.MustQuery("explain format = 'brief' select min(a) from t") + require.Contains(t, res.Rows()[0][0], "StreamAgg") + + // test the blacklist of Expression Pushdown + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a int);") + tk.MustExec("prepare stmt from 'SELECT * FROM t WHERE a < 2 and a > 2;';") + tk.MustExec("execute stmt;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + require.Equal(t, 3, len(res.Rows())) + require.Contains(t, res.Rows()[1][0], "Selection") + require.Equal(t, "gt(test.t.a, 2), lt(test.t.a, 2)", res.Rows()[1][4]) + + res = tk.MustQuery("explain format = 'brief' SELECT * FROM t WHERE a < 2 and a > 2;") + require.Equal(t, 3, len(res.Rows())) + require.Equal(t, "gt(test.t.a, 2), lt(test.t.a, 2)", res.Rows()[1][4]) + + tk.MustExec("INSERT INTO mysql.expr_pushdown_blacklist VALUES('<','tikv','');") + tk.MustExec("ADMIN reload expr_pushdown_blacklist;") + + tk.MustExec("execute stmt;") + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) + tk.MustExec("execute stmt;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + // The expressions can still be pushed down to tikv. + require.Equal(t, 3, len(res.Rows())) + require.Contains(t, res.Rows()[1][0], "Selection") + require.Equal(t, "gt(test.t.a, 2), lt(test.t.a, 2)", res.Rows()[1][4]) + + res = tk.MustQuery("explain format = 'brief' SELECT * FROM t WHERE a < 2 and a > 2;") + require.Equal(t, 4, len(res.Rows())) + require.Contains(t, res.Rows()[0][0], "Selection") + require.Equal(t, "lt(test.t.a, 2)", res.Rows()[0][4]) + require.Contains(t, res.Rows()[2][0], "Selection") + require.Equal(t, "gt(test.t.a, 2)", res.Rows()[2][4]) + + tk.MustExec("DELETE FROM mysql.expr_pushdown_blacklist;") + tk.MustExec("ADMIN reload expr_pushdown_blacklist;") +} + +func TestPlanCacheClusterIndex(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + tk.MustExec("use test") + tk.MustExec("drop table if exists t1") + tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec("create table t1(a varchar(20), b varchar(20), c varchar(20), primary key(a, b))") + tk.MustExec("insert into t1 values('1','1','111'),('2','2','222'),('3','3','333')") + + // For table scan + tk.MustExec(`prepare stmt1 from "select * from t1 where t1.a = ? 
and t1.b > ?"`) + tk.MustExec("set @v1 = '1'") + tk.MustExec("set @v2 = '0'") + tk.MustQuery("execute stmt1 using @v1,@v2").Check(testkit.Rows("1 1 111")) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + tk.MustExec("set @v1 = '2'") + tk.MustExec("set @v2 = '1'") + tk.MustQuery("execute stmt1 using @v1,@v2").Check(testkit.Rows("2 2 222")) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + tk.MustExec("set @v1 = '3'") + tk.MustExec("set @v2 = '2'") + tk.MustQuery("execute stmt1 using @v1,@v2").Check(testkit.Rows("3 3 333")) + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + rows := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() + require.Equal(t, 0, strings.Index(rows[len(rows)-1][4].(string), `range:("3" "2","3" +inf]`)) + // For point get + tk.MustExec(`prepare stmt2 from "select * from t1 where t1.a = ? and t1.b = ?"`) + tk.MustExec("set @v1 = '1'") + tk.MustExec("set @v2 = '1'") + tk.MustQuery("execute stmt2 using @v1,@v2").Check(testkit.Rows("1 1 111")) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + tk.MustExec("set @v1 = '2'") + tk.MustExec("set @v2 = '2'") + tk.MustQuery("execute stmt2 using @v1,@v2").Check(testkit.Rows("2 2 222")) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + tk.MustExec("set @v1 = '3'") + tk.MustExec("set @v2 = '3'") + tk.MustQuery("execute stmt2 using @v1,@v2").Check(testkit.Rows("3 3 333")) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() + require.Equal(t, 0, strings.Index(rows[len(rows)-1][0].(string), `Point_Get`)) + // For CBO point get and batch point get + // case 1: + tk.MustExec(`drop table if exists ta, tb`) + tk.MustExec(`create table ta (a varchar(8) primary key, b int)`) + tk.MustExec(`insert ta values ('a', 1), ('b', 2)`) + tk.MustExec(`create table tb (a varchar(8) primary key, b int)`) + tk.MustExec(`insert tb values ('a', 1), ('b', 2)`) + tk.MustExec(`prepare stmt1 from "select * from ta, tb where ta.a = tb.a and ta.a = ?"`) + tk.MustExec(`set @v1 = 'a', @v2 = 'b'`) + tk.MustQuery(`execute stmt1 using @v1`).Check(testkit.Rows("a 1 a 1")) + tk.MustQuery(`execute stmt1 using @v2`).Check(testkit.Rows("b 2 b 2")) + + // case 2: + tk.MustExec(`drop table if exists ta, tb`) + tk.MustExec(`create table ta (a varchar(10) primary key, b int not null)`) + tk.MustExec(`insert ta values ('a', 1), ('b', 2)`) + tk.MustExec(`create table tb (b int primary key, c int)`) + tk.MustExec(`insert tb values (1, 1), (2, 2)`) + tk.MustExec(`prepare stmt1 from "select * from ta, tb where ta.b = tb.b and ta.a = ?"`) + tk.MustExec(`set @v1 = 'a', @v2 = 'b'`) + tk.MustQuery(`execute stmt1 using @v1`).Check(testkit.Rows("a 1 1 1")) + tk.MustQuery(`execute stmt1 using @v2`).Check(testkit.Rows("b 2 2 2")) + tk.MustQuery(`execute stmt1 using @v2`).Check(testkit.Rows("b 2 2 2")) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() + require.True(t, strings.Contains(rows[3][0].(string), `TableRangeScan`)) + + // case 3: + tk.MustExec(`drop table if exists ta, tb`) + tk.MustExec(`create table ta (a 
varchar(10), b varchar(10), c int, primary key (a, b))`) + tk.MustExec(`insert ta values ('a', 'a', 1), ('b', 'b', 2), ('c', 'c', 3)`) + tk.MustExec(`create table tb (b int primary key, c int)`) + tk.MustExec(`insert tb values (1, 1), (2, 2), (3,3)`) + tk.MustExec(`prepare stmt1 from "select * from ta, tb where ta.c = tb.b and ta.a = ? and ta.b = ?"`) + tk.MustExec(`set @v1 = 'a', @v2 = 'b', @v3 = 'c'`) + tk.MustQuery(`execute stmt1 using @v1, @v1`).Check(testkit.Rows("a a 1 1 1")) + tk.MustQuery(`execute stmt1 using @v2, @v2`).Check(testkit.Rows("b b 2 2 2")) + tk.MustExec(`prepare stmt2 from "select * from ta, tb where ta.c = tb.b and (ta.a, ta.b) in ((?, ?), (?, ?))"`) + tk.MustQuery(`execute stmt2 using @v1, @v1, @v2, @v2`).Check(testkit.Rows("a a 1 1 1", "b b 2 2 2")) + tk.MustQuery(`execute stmt2 using @v2, @v2, @v3, @v3`).Check(testkit.Rows("b b 2 2 2", "c c 3 3 3")) + + // For issue 19002 + tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn + tk.MustExec(`drop table if exists t1`) + tk.MustExec(`create table t1(a int, b int, c int, primary key(a, b))`) + tk.MustExec(`insert into t1 values(1,1,111),(2,2,222),(3,3,333)`) + // Point Get: + tk.MustExec(`prepare stmt1 from "select * from t1 where t1.a = ? and t1.b = ?"`) + tk.MustExec(`set @v1=1, @v2=1`) + tk.MustQuery(`execute stmt1 using @v1,@v2`).Check(testkit.Rows("1 1 111")) + tk.MustExec(`set @v1=2, @v2=2`) + tk.MustQuery(`execute stmt1 using @v1,@v2`).Check(testkit.Rows("2 2 222")) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) + // Batch Point Get: + tk.MustExec(`prepare stmt2 from "select * from t1 where (t1.a,t1.b) in ((?,?),(?,?))"`) + tk.MustExec(`set @v1=1, @v2=1, @v3=2, @v4=2`) + tk.MustQuery(`execute stmt2 using @v1,@v2,@v3,@v4`).Check(testkit.Rows("1 1 111", "2 2 222")) + tk.MustExec(`set @v1=2, @v2=2, @v3=3, @v4=3`) + tk.MustQuery(`execute stmt2 using @v1,@v2,@v3,@v4`).Check(testkit.Rows("2 2 222", "3 3 333")) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) +} + +func TestPlanCacheWithDifferentVariableTypes(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + require.NoError(t, err) + + tk.MustExec("use test") + tk.MustExec("drop table if exists t1, t2") + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec("create table t1(a varchar(20), b int, c float, key(b, a))") + tk.MustExec("insert into t1 values('1',1,1.1),('2',2,222),('3',3,333)") + tk.MustExec("create table t2(a varchar(20), b int, c float, key(b, a))") + tk.MustExec("insert into t2 values('3',3,3.3),('2',2,222),('3',3,333)") + + var input []struct { + PrepareStmt string + Executes []struct { + Vars []struct { + Name string + Value string + } + ExecuteSQL string + } + } + var output []struct { + PrepareStmt string + Executes []struct { + SQL string + Vars []struct { + Name string + Value string + } + Plan []string + LastPlanUseCache string + Result []string + } + } + prepareMergeSuiteData.GetTestCases(t, &input, &output) + for i, tt := range input { + tk.MustExec(tt.PrepareStmt) + testdata.OnRecord(func() { + output[i].PrepareStmt = tt.PrepareStmt + output[i].Executes = make([]struct { + SQL string + Vars []struct { + Name string + Value string + } + 
Plan []string + LastPlanUseCache string + Result []string + }, len(tt.Executes)) + }) + require.Equal(t, tt.PrepareStmt, output[i].PrepareStmt) + for j, exec := range tt.Executes { + for _, v := range exec.Vars { + tk.MustExec(fmt.Sprintf(`set @%s = %s`, v.Name, v.Value)) + } + res := tk.MustQuery(exec.ExecuteSQL) + lastPlanUseCache := tk.MustQuery("select @@last_plan_from_cache").Rows()[0][0] + tk.MustQuery(exec.ExecuteSQL) + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + plan := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + testdata.OnRecord(func() { + output[i].Executes[j].SQL = exec.ExecuteSQL + output[i].Executes[j].Plan = testdata.ConvertRowsToStrings(plan.Rows()) + output[i].Executes[j].Vars = exec.Vars + output[i].Executes[j].LastPlanUseCache = lastPlanUseCache.(string) + output[i].Executes[j].Result = testdata.ConvertRowsToStrings(res.Rows()) + }) + + require.Equal(t, exec.ExecuteSQL, output[i].Executes[j].SQL) + plan.Check(testkit.Rows(output[i].Executes[j].Plan...)) + require.Equal(t, exec.Vars, output[i].Executes[j].Vars) + require.Equal(t, lastPlanUseCache.(string), output[i].Executes[j].LastPlanUseCache) + res.Check(testkit.Rows(output[i].Executes[j].Result...)) + } + } +} +
+func TestPlanCacheOperators(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + type ExecCase struct { + Parameters []string + UseCache bool + } + type PrepCase struct { + PrepStmt string + ExecCases []ExecCase + } + + cases := []PrepCase{ + {"use test", nil}, + + // cases for TableReader on PK + {"create table t (a int, b int, primary key(a))", nil}, + {"insert into t values (1,1), (2,2), (3,3), (4,4), (5,5), (6,null)", nil}, + {"select a from t where a=?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"2"}, true}, + {[]string{"3"}, true}, + }}, + {"select a from t where a in (?,?,?)", []ExecCase{ + {[]string{"1", "1", "1"}, false}, + {[]string{"2", "3", "4"}, true}, + {[]string{"3", "5", "7"}, true}, + }}, + {"select a from t where a>?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, true}, + {[]string{"5"}, true}, + }}, +
+ {"select /*+ HASH_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t2.b>?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, true}, + {[]string{"5"}, true}, + }}, + {"select /*+ HASH_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t1.b>?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, true}, + {[]string{"5"}, true}, + }}, + {"select /*+ MERGE_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t2.b>?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, true}, + {[]string{"5"}, true}, + }}, + {"select /*+ MERGE_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t1.b>?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, true}, + {[]string{"5"}, true}, + }}, + {"select /*+ INL_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t2.b>?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, true}, + {[]string{"5"}, true}, + }}, + {"select /*+ INL_JOIN(t1, t2) */ * from t t1, t t2 where t1.a=t2.a and t1.b>?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, true}, + {[]string{"5"}, true}, + }}, +
+ // cases for sub-queries + {"select * from t t1 where t1.b>? and t1.a > (select min(t2.a) from t t2 where t2.b < t1.b)", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, false}, // plans with sub-queries cannot be cached, but the result must be correct + {[]string{"5"}, false}, + }}, + {"select * from t t1 where t1.a > (select min(t2.a) from t t2 where t2.b < t1.b+?)", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"3"}, false}, + {[]string{"5"}, false}, + }}, + {"select * from t t1 where t1.b>? and t1.a > (select min(t2.a) from t t2 where t2.b < t1.b+?)", []ExecCase{ + {[]string{"1", "1"}, false}, + {[]string{"3", "2"}, false}, + {[]string{"5", "3"}, false}, + }}, + {"drop table t", nil}, +
+ // cases for Window + {"create table t (name varchar(50), y int, sale decimal(14,2))", nil}, + {"insert into t values ('Bob',2016,2.4), ('Bob',2017,3.2), ('Bob',2018,2.1), ('Alice',2016,1.4), ('Alice',2017,2), ('Alice',2018,3.3), ('John',2016,4), ('John',2017,2.1), ('John',2018,5)", nil}, + {"select *, sum(sale) over (partition by y order by sale) total from t where sale>? order by y", []ExecCase{ + {[]string{"0.1"}, false}, + {[]string{"0.5"}, true}, + {[]string{"1.5"}, true}, + {[]string{"3.5"}, true}, + }}, + {"select *, sum(sale) over (partition by y order by sale+? rows 2 preceding) total from t order by y", []ExecCase{ + {[]string{"0.1"}, false}, + {[]string{"0.5"}, true}, + {[]string{"1.5"}, true}, + {[]string{"3.5"}, true}, + }}, + {"select *, rank() over (partition by y order by sale+? rows 2 preceding) total from t order by y", []ExecCase{ + {[]string{"0.1"}, false}, + {[]string{"0.5"}, true}, + {[]string{"1.5"}, true}, + {[]string{"3.5"}, true}, + }}, + {"select *, first_value(sale) over (partition by y order by sale+? rows 2 preceding) total from t order by y", []ExecCase{ + {[]string{"0.1"}, false}, + {[]string{"0.5"}, true}, + {[]string{"1.5"}, true}, + {[]string{"3.5"}, true}, + }}, + {"select *, first_value(sale) over (partition by y order by sale rows ? preceding) total from t order by y", []ExecCase{ + {[]string{"1"}, false}, // window plans with parameters in frame cannot be cached + {[]string{"2"}, false}, + {[]string{"3"}, false}, + {[]string{"4"}, false}, + }}, + {"drop table t", nil}, +
+ // cases for Limit + {"create table t (a int)", nil}, + {"insert into t values (1), (1), (2), (2), (3), (4), (5), (6), (7), (8), (9), (0), (0)", nil}, + {"select * from t limit ?", []ExecCase{ + {[]string{"20"}, false}, + {[]string{"30"}, false}, + }}, + {"select * from t limit 40, ?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"2"}, false}, + }}, + {"select * from t limit ?, 10", []ExecCase{ + {[]string{"20"}, false}, + {[]string{"30"}, false}, + }}, + {"select * from t limit ?, ?", []ExecCase{ + {[]string{"20", "20"}, false}, + {[]string{"20", "40"}, false}, + }}, + {"select * from t where a>? order by mod(a, 3)", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"2"}, true}, + {[]string{"3"}, true}, + }}, + + // cases for topN + {"select * from t order by b limit ?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"2"}, false}, + }}, + {"select * from t order by b limit 10, ?", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"2"}, false}, + }}, + {"select * from t order by ? limit 10", []ExecCase{ + {[]string{"1"}, false}, + {[]string{"2"}, false}, + }}, + {"select * from t order by ? 
limit ?", []ExecCase{ + {[]string{"1", "10"}, false}, + {[]string{"2", "20"}, false}, + }}, + } + + for _, prepCase := range cases { + isQuery := strings.Contains(prepCase.PrepStmt, "select") + if !isQuery { + tk.MustExec(prepCase.PrepStmt) + continue + } + + tk.MustExec(fmt.Sprintf(`prepare stmt from '%v'`, prepCase.PrepStmt)) + for _, execCase := range prepCase.ExecCases { + // set all parameters + usingStmt := "" + if len(execCase.Parameters) > 0 { + setStmt := "set " + usingStmt = "using " + for i, parameter := range execCase.Parameters { + if i > 0 { + setStmt += ", " + usingStmt += ", " + } + setStmt += fmt.Sprintf("@x%v=%v", i, parameter) + usingStmt += fmt.Sprintf("@x%v", i) + } + tk.MustExec(setStmt) + } + + // execute this statement and check whether it uses a cached plan + results := tk.MustQuery("execute stmt " + usingStmt).Sort().Rows() + + // check whether the result is correct + tmp := strings.Split(prepCase.PrepStmt, "?") + require.Equal(t, len(execCase.Parameters)+1, len(tmp)) + query := "" + for i := range tmp { + query += tmp[i] + if i < len(execCase.Parameters) { + query += execCase.Parameters[i] + } + } + tk.MustQuery(query).Sort().Check(results) + } + } +} + +func TestIssue28782(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + tk.MustExec("use test") + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec("prepare stmt from 'SELECT IF(?, 1, 0);';") + tk.MustExec("set @a=1, @b=null, @c=0") + + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) + tk.MustQuery("execute stmt using @b;").Check(testkit.Rows("0")) + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) + tk.MustQuery("execute stmt using @c;").Check(testkit.Rows("0")) + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) +} + +func TestIssue29101(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + + tk.MustExec(`use test`) + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec(`CREATE TABLE customer ( + c_id int(11) NOT NULL, + c_d_id int(11) NOT NULL, + c_w_id int(11) NOT NULL, + c_first varchar(16) DEFAULT NULL, + c_last varchar(16) DEFAULT NULL, + c_credit char(2) DEFAULT NULL, + c_discount decimal(4,4) DEFAULT NULL, + PRIMARY KEY (c_w_id,c_d_id,c_id), + KEY idx_customer (c_w_id,c_d_id,c_last,c_first) + )`) + tk.MustExec(`CREATE TABLE warehouse ( + w_id int(11) NOT NULL, + w_tax decimal(4,4) DEFAULT NULL, + PRIMARY KEY (w_id) + )`) + tk.MustExec(`prepare s1 from 'SELECT /*+ TIDB_INLJ(customer,warehouse) */ c_discount, c_last, c_credit, w_tax FROM customer, warehouse WHERE w_id = ? AND c_w_id = w_id AND c_d_id = ? 
AND c_id = ?'`) + tk.MustExec(`set @a=936,@b=7,@c=158`) + tk.MustQuery(`execute s1 using @a,@b,@c`).Check(testkit.Rows()) + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Check(testkit.Rows( // can use IndexJoin + `Projection_6 1.00 root test.customer.c_discount, test.customer.c_last, test.customer.c_credit, test.warehouse.w_tax`, + `└─IndexJoin_14 1.00 root inner join, inner:TableReader_10, outer key:test.customer.c_w_id, inner key:test.warehouse.w_id, equal cond:eq(test.customer.c_w_id, test.warehouse.w_id)`, + ` ├─Point_Get_33(Build) 1.00 root table:customer, index:PRIMARY(c_w_id, c_d_id, c_id) `, + ` └─TableReader_10(Probe) 0.00 root data:Selection_9`, + ` └─Selection_9 0.00 cop[tikv] eq(test.warehouse.w_id, 936)`, + ` └─TableRangeScan_8 1.00 cop[tikv] table:warehouse range: decided by [test.customer.c_w_id], keep order:false, stats:pseudo`)) + tk.MustQuery(`execute s1 using @a,@b,@c`).Check(testkit.Rows()) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) // can use the plan-cache + + tk.MustExec(`CREATE TABLE order_line ( + ol_o_id int(11) NOT NULL, + ol_d_id int(11) NOT NULL, + ol_w_id int(11) NOT NULL, + ol_number int(11) NOT NULL, + ol_i_id int(11) NOT NULL, + PRIMARY KEY (ol_w_id,ol_d_id,ol_o_id,ol_number))`) + tk.MustExec(`CREATE TABLE stock ( + s_i_id int(11) NOT NULL, + s_w_id int(11) NOT NULL, + s_quantity int(11) DEFAULT NULL, + PRIMARY KEY (s_w_id,s_i_id))`) + tk.MustExec(`prepare s1 from 'SELECT /*+ TIDB_INLJ(order_line,stock) */ COUNT(DISTINCT (s_i_id)) stock_count FROM order_line, stock WHERE ol_w_id = ? AND ol_d_id = ? AND ol_o_id < ? AND ol_o_id >= ? - 20 AND s_w_id = ? 
AND s_i_id = ol_i_id AND s_quantity < ?'`) + tk.MustExec(`set @a=391,@b=1,@c=3058,@d=18`) + tk.MustExec(`execute s1 using @a,@b,@c,@c,@a,@d`) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Check(testkit.Rows( // can use index-join + `StreamAgg_9 1.00 root funcs:count(distinct test.stock.s_i_id)->Column#11`, + `└─IndexJoin_14 0.03 root inner join, inner:IndexLookUp_13, outer key:test.order_line.ol_i_id, inner key:test.stock.s_i_id, equal cond:eq(test.order_line.ol_i_id, test.stock.s_i_id)`, + ` ├─Selection_30(Build) 0.03 root eq(test.order_line.ol_d_id, 1), eq(test.order_line.ol_w_id, 391), ge(test.order_line.ol_o_id, 3038), lt(test.order_line.ol_o_id, 3058)`, + ` │ └─IndexLookUp_29 0.03 root `, + ` │ ├─IndexRangeScan_27(Build) 0.03 cop[tikv] table:order_line, index:PRIMARY(ol_w_id, ol_d_id, ol_o_id, ol_number) range:[391 1 3038,391 1 3058), keep order:false, stats:pseudo`, + ` │ └─TableRowIDScan_28(Probe) 0.03 cop[tikv] table:order_line keep order:false, stats:pseudo`, + ` └─IndexLookUp_13(Probe) 1.00 root `, + ` ├─IndexRangeScan_10(Build) 1.00 cop[tikv] table:stock, index:PRIMARY(s_w_id, s_i_id) range: decided by [eq(test.stock.s_i_id, test.order_line.ol_i_id) eq(test.stock.s_w_id, 391)], keep order:false, stats:pseudo`, + ` └─Selection_12(Probe) 1.00 cop[tikv] lt(test.stock.s_quantity, 18)`, + ` └─TableRowIDScan_11 1.00 cop[tikv] table:stock keep order:false, stats:pseudo`)) + tk.MustExec(`execute s1 using @a,@b,@c,@c,@a,@d`) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) // can use the plan-cache +} + +func TestIssue28087And28162(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + + // issue 28087 + tk.MustExec(`use test`) + tk.MustExec(`drop table if exists IDT_26207`) + tk.MustExec(`CREATE TABLE IDT_26207 (col1 bit(1))`) + tk.MustExec(`insert into IDT_26207 values(0x0), (0x1)`) + tk.MustExec(`prepare stmt from 'select t1.col1 from IDT_26207 as t1 left join IDT_26207 as t2 on t1.col1 = t2.col1 where t1.col1 in (?, ?, ?)'`) + tk.MustExec(`set @a=0x01, @b=0x01, @c=0x01`) + tk.MustQuery(`execute stmt using @a,@b,@c`).Check(testkit.Rows("\x01")) + tk.MustExec(`set @a=0x00, @b=0x00, @c=0x01`) + tk.MustQuery(`execute stmt using @a,@b,@c`).Check(testkit.Rows("\x00", "\x01")) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("0")) + + // issue 28162 + tk.MustExec(`drop table if exists IDT_MC21780`) + tk.MustExec(`CREATE TABLE IDT_MC21780 ( + COL1 timestamp NULL DEFAULT NULL, + COL2 timestamp NULL DEFAULT NULL, + COL3 timestamp NULL DEFAULT NULL, + KEY U_M_COL (COL1,COL2) + )`) + tk.MustExec(`insert into IDT_MC21780 values("1970-12-18 10:53:28", "1970-12-18 10:53:28", "1970-12-18 10:53:28")`) + tk.MustExec(`prepare stmt from 'select/*+ hash_join(t1) */ * from IDT_MC21780 t1 join IDT_MC21780 t2 on t1.col1 = t2.col1 where t1. col1 < ? and t2. 
col1 in (?, ?, ?);'`) + tk.MustExec(`set @a="2038-01-19 03:14:07", @b="2038-01-19 03:14:07", @c="2038-01-19 03:14:07", @d="2038-01-19 03:14:07"`) + tk.MustQuery(`execute stmt using @a,@b,@c,@d`).Check(testkit.Rows()) + tk.MustExec(`set @a="1976-09-09 20:21:11", @b="2021-07-14 09:28:16", @c="1982-01-09 03:36:39", @d="1970-12-18 10:53:28"`) + tk.MustQuery(`execute stmt using @a,@b,@c,@d`).Check(testkit.Rows("1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28")) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) +} + +func TestParameterPushDown(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + require.NoError(t, err) + tk.MustExec(`use test`) + tk.MustExec(`drop table if exists t`) + tk.MustExec(`create table t (a int, b int, c int, key(a))`) + tk.MustExec(`insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 6, 6)`) + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec(`set @x1=1,@x5=5,@x10=10,@x20=20`) + + var input []struct { + SQL string + } + var output []struct { + Result []string + Plan []string + FromCache string + } + prepareMergeSuiteData.GetTestCases(t, &input, &output) + + for i, tt := range input { + if strings.HasPrefix(tt.SQL, "execute") { + res := tk.MustQuery(tt.SQL).Sort() + fromCache := tk.MustQuery("select @@last_plan_from_cache") + tk.MustQuery(tt.SQL) + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + plan := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)) + + testdata.OnRecord(func() { + output[i].Result = testdata.ConvertRowsToStrings(res.Rows()) + output[i].Plan = testdata.ConvertRowsToStrings(plan.Rows()) + output[i].FromCache = fromCache.Rows()[0][0].(string) + }) + + res.Check(testkit.Rows(output[i].Result...)) + plan.Check(testkit.Rows(output[i].Plan...)) + require.Equal(t, fromCache.Rows()[0][0].(string), output[i].FromCache) + } else { + tk.MustExec(tt.SQL) + testdata.OnRecord(func() { + output[i].Result = nil + }) + } + } +} + +func TestPreparePlanCache4Function(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + + tk.MustExec("use test") + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + + // Testing for non-deterministic functions + tk.MustExec("prepare stmt from 'select rand()';") + res := tk.MustQuery("execute stmt;") + require.Equal(t, 1, len(res.Rows())) + + res1 := tk.MustQuery("execute stmt;") + require.Equal(t, 1, len(res1.Rows())) + require.NotEqual(t, res.Rows()[0][0], res1.Rows()[0][0]) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + // Testing for control functions + tk.MustExec("prepare stmt from 'SELECT IFNULL(?,0);';") + tk.MustExec("set @a = 1, @b = null;") + tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) + 
tk.MustQuery("execute stmt using @b;").Check(testkit.Rows("0")) + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a int);") + tk.MustExec("prepare stmt from 'select a, case when a = ? then 0 when a <=> ? then 1 else 2 end b from t order by a;';") + tk.MustExec("insert into t values(0), (1), (2), (null);") + tk.MustExec("set @a = 0, @b = 1, @c = 2, @d = null;") + tk.MustQuery("execute stmt using @a, @b;").Check(testkit.Rows(" 2", "0 0", "1 1", "2 2")) + tk.MustQuery("execute stmt using @c, @d;").Check(testkit.Rows(" 1", "0 2", "1 2", "2 0")) + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) +} + +func TestPreparePlanCache4DifferentSystemVars(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + + tk.MustExec("use test") + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + + // Testing for 'sql_select_limit' + tk.MustExec("set @@sql_select_limit = 1") + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a int);") + tk.MustExec("insert into t values(0), (1), (null);") + tk.MustExec("prepare stmt from 'select a from t order by a;';") + tk.MustQuery("execute stmt;").Check(testkit.Rows("")) + + tk.MustExec("set @@sql_select_limit = 2") + tk.MustQuery("execute stmt;").Check(testkit.Rows("", "0")) + // The 'sql_select_limit' will be stored in the cache key. So if the `sql_select_limit` + // have been changed, the plan cache can not be reused. 
+ tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) + + tk.MustExec("set @@sql_select_limit = 18446744073709551615") + tk.MustQuery("execute stmt;").Check(testkit.Rows("", "0", "1")) + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) + + // test for 'tidb_enable_index_merge' + tk.MustExec("set @@tidb_enable_index_merge = 1;") + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a int, b int, index idx_a(a), index idx_b(b));") + tk.MustExec("prepare stmt from 'select * from t use index(idx_a, idx_b) where a > 1 or b > 1;';") + tk.MustExec("execute stmt;") + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res := tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.Equal(t, 4, len(res.Rows())) + require.Contains(t, res.Rows()[0][0], "IndexMerge") + + tk.MustExec("set @@tidb_enable_index_merge = 0;") + tk.MustExec("execute stmt;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.Equal(t, 4, len(res.Rows())) + require.Contains(t, res.Rows()[0][0], "IndexMerge") + tk.MustExec("execute stmt;") + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) + + // test for 'tidb_enable_parallel_apply' + tk.MustExec("set @@tidb_enable_collect_execution_info=1;") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int, b int)") + tk.MustExec("insert into t values (0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (null, null)") + + tk.MustExec("set tidb_enable_parallel_apply=true") + tk.MustExec("prepare stmt from 'select t1.b from t t1 where t1.b > (select max(b) from t t2 where t1.a > t2.a);';") + tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.Contains(t, res.Rows()[1][0], "Apply") + require.Contains(t, res.Rows()[1][5], "Concurrency") + + tk.MustExec("set tidb_enable_parallel_apply=false") + tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.Contains(t, res.Rows()[1][0], "Apply") + executionInfo := fmt.Sprintf("%v", res.Rows()[1][4]) + // Do not use the parallel apply. + require.False(t, strings.Contains(executionInfo, "Concurrency")) + tk.MustExec("execute stmt;") + // The subquery plan can not be cached. 
+ tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) + + // test for apply cache + tk.MustExec("set @@tidb_enable_collect_execution_info=1;") + tk.MustExec("set tidb_mem_quota_apply_cache=33554432") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int, b int)") + tk.MustExec("insert into t values (0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (null, null)") + + tk.MustExec("prepare stmt from 'select t1.b from t t1 where t1.b > (select max(b) from t t2 where t1.a > t2.a);';") + tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.Contains(t, res.Rows()[1][0], "Apply") + require.Contains(t, res.Rows()[1][5], "cache:ON") + + tk.MustExec("set tidb_mem_quota_apply_cache=0") + tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.Contains(t, res.Rows()[1][0], "Apply") + executionInfo = fmt.Sprintf("%v", res.Rows()[1][5]) + // Do not use the apply cache. + require.True(t, strings.Contains(executionInfo, "cache:OFF")) + tk.MustExec("execute stmt;") + // The subquery plan can not be cached. + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) +} + +func TestTemporaryTable4PlanCache(t *testing.T) { + store, dom, err := newStoreWithBootstrap() + require.NoError(t, err) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + tk := testkit.NewTestKit(t, store) + defer func() { + dom.Close() + require.NoError(t, store.Close()) + }() + tk.MustExec("use test") + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec("drop table if exists tmp2") + tk.MustExec("create temporary table tmp2 (a int, b int, key(a), key(b));") + tk.MustExec("prepare stmt from 'select * from tmp2;';") + tk.MustQuery("execute stmt;").Check(testkit.Rows()) + tk.MustQuery("execute stmt;").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) + + tk.MustExec("drop table if exists tmp_t;") + tk.MustExec("create global temporary table tmp_t (id int primary key, a int, b int, index(a)) on commit delete rows") + tk.MustExec("prepare stmt from 'select * from tmp_t;';") + tk.MustQuery("execute stmt;").Check(testkit.Rows()) + tk.MustQuery("execute stmt;").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) + +} + +func TestPrepareStmtAfterIsolationReadChange(t *testing.T) { + if israce.RaceEnabled { + t.Skip("race test for this case takes too long time") + } + store, clean := testkit.CreateMockStore(t) + defer clean() + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(false) // requires plan cache disabled + tk := testkit.NewTestKit(t, store) + tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost", CurrentUser: true, AuthUsername: "root", 
AuthHostname: "%"}, nil, []byte("012345678901234567890")) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int)") + // create virtual tiflash replica. + dom := domain.GetDomain(tk.Session()) + is := dom.InfoSchema() + db, exists := is.SchemaByName(model.NewCIStr("test")) + require.True(t, exists) + for _, tblInfo := range db.Tables { + if tblInfo.Name.L == "t" { + tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{ + Count: 1, + Available: true, + } + } + } + + tk.MustExec("set @@session.tidb_isolation_read_engines='tikv'") + tk.MustExec("set @@tidb_enable_collect_execution_info=0;") + tk.MustExec("prepare stmt from \"select * from t\"") + tk.MustQuery("execute stmt") + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + rows := tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() + require.Equal(t, "cop[tikv]", rows[len(rows)-1][2]) + + tk.MustExec("set @@session.tidb_isolation_read_engines='tiflash'") + tk.MustExec("execute stmt") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + rows = tk.MustQuery(fmt.Sprintf("explain for connection %d", tkProcess.ID)).Rows() + require.Equal(t, rows[len(rows)-1][2], "cop[tiflash]") + + require.Equal(t, 1, len(tk.Session().GetSessionVars().PreparedStmts)) + require.Equal(t, "select * from `t`", tk.Session().GetSessionVars().PreparedStmts[1].(*plannercore.CachedPrepareStmt).NormalizedSQL) + require.Equal(t, "", tk.Session().GetSessionVars().PreparedStmts[1].(*plannercore.CachedPrepareStmt).NormalizedPlan) +} diff --git a/executor/seqtest/prepared_serial_test.go b/executor/seqtest/prepared_test.go similarity index 100% rename from executor/seqtest/prepared_serial_test.go rename to executor/seqtest/prepared_test.go diff --git a/executor/seqtest/seq_executor_serial_test.go b/executor/seqtest/seq_executor_test.go similarity index 100% rename from executor/seqtest/seq_executor_serial_test.go rename to executor/seqtest/seq_executor_test.go diff --git a/executor/show_stats_serial_test.go b/executor/show_stats_serial_test.go deleted file mode 100644 index c2f7e7ca828ee..0000000000000 --- a/executor/show_stats_serial_test.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package executor_test - -import ( - "testing" - - "github.com/pingcap/tidb/statistics" - "github.com/pingcap/tidb/testkit" - "github.com/stretchr/testify/require" -) - -func TestShowAnalyzeStatus(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - - tk := testkit.NewTestKit(t, store) - statistics.ClearHistoryJobs() - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int, b int, primary key(a), index idx(b))") - tk.MustExec(`insert into t values (1, 1), (2, 2)`) - - tk.MustExec("set @@tidb_analyze_version=2") - tk.MustExec("analyze table t") - result := tk.MustQuery("show analyze status").Sort() - require.Len(t, result.Rows(), 1) - require.Equal(t, "test", result.Rows()[0][0]) - require.Equal(t, "t", result.Rows()[0][1]) - require.Equal(t, "", result.Rows()[0][2]) - require.Equal(t, "analyze table", result.Rows()[0][3]) - require.Equal(t, "2", result.Rows()[0][4]) - require.NotNil(t, result.Rows()[0][5]) - require.NotNil(t, result.Rows()[0][6]) - require.Equal(t, "finished", result.Rows()[0][7]) - - statistics.ClearHistoryJobs() - - tk.MustExec("set @@tidb_analyze_version=1") - tk.MustExec("analyze table t") - result = tk.MustQuery("show analyze status").Sort() - require.Len(t, result.Rows(), 2) - require.Equal(t, "test", result.Rows()[0][0]) - require.Equal(t, "t", result.Rows()[0][1]) - require.Equal(t, "", result.Rows()[0][2]) - require.Equal(t, "analyze columns", result.Rows()[0][3]) - require.Equal(t, "2", result.Rows()[0][4]) - require.NotNil(t, result.Rows()[0][5]) - require.NotNil(t, result.Rows()[0][6]) - require.Equal(t, "finished", result.Rows()[0][7]) - - require.Len(t, result.Rows(), 2) - require.Equal(t, "test", result.Rows()[1][0]) - require.Equal(t, "t", result.Rows()[1][1]) - require.Equal(t, "", result.Rows()[1][2]) - require.Equal(t, "analyze index idx", result.Rows()[1][3]) - require.Equal(t, "2", result.Rows()[1][4]) - require.NotNil(t, result.Rows()[1][5]) - require.NotNil(t, result.Rows()[1][6]) - require.Equal(t, "finished", result.Rows()[1][7]) -} diff --git a/executor/show_stats_test.go b/executor/show_stats_test.go index 1f2ffafd8c54d..f48fe9988649d 100644 --- a/executor/show_stats_test.go +++ b/executor/show_stats_test.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/testkit" "github.com/stretchr/testify/require" ) @@ -354,3 +355,53 @@ func TestShowHistogramsInFlight(t *testing.T) { require.Equal(t, len(rows), 1) require.Equal(t, rows[0][0], "0") } + +func TestShowAnalyzeStatus(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + statistics.ClearHistoryJobs() + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int, b int, primary key(a), index idx(b))") + tk.MustExec(`insert into t values (1, 1), (2, 2)`) + + tk.MustExec("set @@tidb_analyze_version=2") + tk.MustExec("analyze table t") + result := tk.MustQuery("show analyze status").Sort() + require.Len(t, result.Rows(), 1) + require.Equal(t, "test", result.Rows()[0][0]) + require.Equal(t, "t", result.Rows()[0][1]) + require.Equal(t, "", result.Rows()[0][2]) + require.Equal(t, "analyze table", result.Rows()[0][3]) + require.Equal(t, "2", result.Rows()[0][4]) + require.NotNil(t, result.Rows()[0][5]) + require.NotNil(t, result.Rows()[0][6]) + require.Equal(t, "finished", 
result.Rows()[0][7]) + + statistics.ClearHistoryJobs() + + tk.MustExec("set @@tidb_analyze_version=1") + tk.MustExec("analyze table t") + result = tk.MustQuery("show analyze status").Sort() + require.Len(t, result.Rows(), 2) + require.Equal(t, "test", result.Rows()[0][0]) + require.Equal(t, "t", result.Rows()[0][1]) + require.Equal(t, "", result.Rows()[0][2]) + require.Equal(t, "analyze columns", result.Rows()[0][3]) + require.Equal(t, "2", result.Rows()[0][4]) + require.NotNil(t, result.Rows()[0][5]) + require.NotNil(t, result.Rows()[0][6]) + require.Equal(t, "finished", result.Rows()[0][7]) + + require.Len(t, result.Rows(), 2) + require.Equal(t, "test", result.Rows()[1][0]) + require.Equal(t, "t", result.Rows()[1][1]) + require.Equal(t, "", result.Rows()[1][2]) + require.Equal(t, "analyze index idx", result.Rows()[1][3]) + require.Equal(t, "2", result.Rows()[1][4]) + require.NotNil(t, result.Rows()[1][5]) + require.NotNil(t, result.Rows()[1][6]) + require.Equal(t, "finished", result.Rows()[1][7]) +} diff --git a/executor/slow_query_test.go b/executor/slow_query_test.go index 4fdd9281c5142..9828263402ac1 100644 --- a/executor/slow_query_test.go +++ b/executor/slow_query_test.go @@ -452,7 +452,7 @@ select 7;` sctx.GetSessionVars().TimeZone = loc sctx.GetSessionVars().SlowQueryFile = fileName3 for i, cas := range cases { - extractor := &plannercore.SlowQueryExtractor{Enable: (len(cas.startTime) > 0 && len(cas.endTime) > 0)} + extractor := &plannercore.SlowQueryExtractor{Enable: len(cas.startTime) > 0 && len(cas.endTime) > 0} if extractor.Enable { startTime, err := ParseTime(cas.startTime) c.Assert(err, IsNil) @@ -622,7 +622,7 @@ select 9;` sctx.GetSessionVars().TimeZone = loc sctx.GetSessionVars().SlowQueryFile = fileName3 for i, cas := range cases { - extractor := &plannercore.SlowQueryExtractor{Enable: (len(cas.startTime) > 0 && len(cas.endTime) > 0), Desc: true} + extractor := &plannercore.SlowQueryExtractor{Enable: len(cas.startTime) > 0 && len(cas.endTime) > 0, Desc: true} if extractor.Enable { startTime, err := ParseTime(cas.startTime) c.Assert(err, IsNil) diff --git a/executor/temporary_table_serial_test.go b/executor/temporary_table_test.go similarity index 100% rename from executor/temporary_table_serial_test.go rename to executor/temporary_table_test.go diff --git a/executor/write_serial_test.go b/executor/write_serial_test.go deleted file mode 100644 index 440ecbeb177e2..0000000000000 --- a/executor/write_serial_test.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package executor_test - -import ( - "testing" - - "github.com/pingcap/tidb/config" - "github.com/pingcap/tidb/parser/mysql" - "github.com/pingcap/tidb/planner/core" - "github.com/pingcap/tidb/sessionctx/variable" - "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/types" - "github.com/pingcap/tidb/util/collate" - "github.com/stretchr/testify/require" -) - -func TestUpdate(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - fillData(tk, "update_test") - - updateStr := `UPDATE update_test SET name = "abc" where id > 0;` - tk.MustExec(updateStr) - tk.CheckExecResult(2, 0) - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 2 Changed: 2 Warnings: 0") - - // select data - tk.MustExec("begin") - r := tk.MustQuery(`SELECT * from update_test limit 2;`) - r.Check(testkit.Rows("1 abc", "2 abc")) - tk.MustExec("commit") - - tk.MustExec(`UPDATE update_test SET name = "foo"`) - tk.CheckExecResult(2, 0) - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 2 Changed: 2 Warnings: 0") - - // table option is auto-increment - tk.MustExec("begin") - tk.MustExec("drop table if exists update_test;") - tk.MustExec("commit") - tk.MustExec("begin") - tk.MustExec("create table update_test(id int not null auto_increment, name varchar(255), primary key(id))") - tk.MustExec("insert into update_test(name) values ('aa')") - tk.MustExec("update update_test set id = 8 where name = 'aa'") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") - tk.MustExec("insert into update_test(name) values ('bb')") - tk.MustExec("commit") - tk.MustExec("begin") - r = tk.MustQuery("select * from update_test;") - r.Check(testkit.Rows("8 aa", "9 bb")) - tk.MustExec("commit") - - tk.MustExec("begin") - tk.MustExec("drop table if exists update_test;") - tk.MustExec("commit") - tk.MustExec("begin") - tk.MustExec("create table update_test(id int not null auto_increment, name varchar(255), index(id))") - tk.MustExec("insert into update_test(name) values ('aa')") - _, err := tk.Exec("update update_test set id = null where name = 'aa'") - require.EqualError(t, err, "[table:1048]Column 'id' cannot be null") - - tk.MustExec("drop table update_test") - tk.MustExec("create table update_test(id int)") - tk.MustExec("begin") - tk.MustExec("insert into update_test(id) values (1)") - tk.MustExec("update update_test set id = 2 where id = 1 limit 1") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") - r = tk.MustQuery("select * from update_test;") - r.Check(testkit.Rows("2")) - tk.MustExec("commit") - - // Test that in a transaction, when a constraint failed in an update statement, the record is not inserted. 
- tk.MustExec("create table update_unique (id int primary key, name int unique)") - tk.MustExec("insert update_unique values (1, 1), (2, 2);") - tk.MustExec("begin") - _, err = tk.Exec("update update_unique set name = 1 where id = 2") - require.Error(t, err) - tk.MustExec("commit") - tk.MustQuery("select * from update_unique").Check(testkit.Rows("1 1", "2 2")) - - // test update ignore for pimary key - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a bigint, primary key (a));") - tk.MustExec("insert into t values (1)") - tk.MustExec("insert into t values (2)") - _, err = tk.Exec("update ignore t set a = 1 where a = 2;") - require.NoError(t, err) - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 1") - r = tk.MustQuery("SHOW WARNINGS;") - r.Check(testkit.Rows("Warning 1062 Duplicate entry '1' for key 'PRIMARY'")) - tk.MustQuery("select * from t").Check(testkit.Rows("1", "2")) - - // test update ignore for truncate as warning - _, err = tk.Exec("update ignore t set a = 1 where a = (select '2a')") - require.NoError(t, err) - r = tk.MustQuery("SHOW WARNINGS;") - r.Check(testkit.Rows("Warning 1292 Truncated incorrect DOUBLE value: '2a'", "Warning 1292 Truncated incorrect DOUBLE value: '2a'", "Warning 1062 Duplicate entry '1' for key 'PRIMARY'")) - - tk.MustExec("update ignore t set a = 42 where a = 2;") - tk.MustQuery("select * from t").Check(testkit.Rows("1", "42")) - - // test update ignore for unique key - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a bigint, unique key I_uniq (a));") - tk.MustExec("insert into t values (1)") - tk.MustExec("insert into t values (2)") - _, err = tk.Exec("update ignore t set a = 1 where a = 2;") - require.NoError(t, err) - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 1") - r = tk.MustQuery("SHOW WARNINGS;") - r.Check(testkit.Rows("Warning 1062 Duplicate entry '1' for key 'I_uniq'")) - tk.MustQuery("select * from t").Check(testkit.Rows("1", "2")) - - // test issue21965 - tk.MustExec("drop table if exists t;") - tk.MustExec("set @@session.tidb_enable_list_partition = ON") - tk.MustExec("create table t (a int) partition by list (a) (partition p0 values in (0,1));") - tk.MustExec("insert ignore into t values (1);") - tk.MustExec("update ignore t set a=2 where a=1;") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 0") - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t (a int key) partition by list (a) (partition p0 values in (0,1));") - tk.MustExec("insert ignore into t values (1);") - tk.MustExec("update ignore t set a=2 where a=1;") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 0") - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(id integer auto_increment, t1 datetime, t2 datetime, primary key (id))") - tk.MustExec("insert into t(t1, t2) values('2000-10-01 01:01:01', '2017-01-01 10:10:10')") - tk.MustQuery("select * from t").Check(testkit.Rows("1 2000-10-01 01:01:01 2017-01-01 10:10:10")) - tk.MustExec("update t set t1 = '2017-10-01 10:10:11', t2 = date_add(t1, INTERVAL 10 MINUTE) where id = 1") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") - tk.MustQuery("select * from t").Check(testkit.Rows("1 2017-10-01 10:10:11 2000-10-01 01:11:01")) - - // for issue #5132 - tk.MustExec("CREATE TABLE `tt1` (" + - "`a` int(11) NOT NULL," + - "`b` varchar(32) DEFAULT NULL," + - "`c` varchar(32) DEFAULT 
NULL," + - "PRIMARY KEY (`a`)," + - "UNIQUE KEY `b_idx` (`b`)" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;") - tk.MustExec("insert into tt1 values(1, 'a', 'a');") - tk.MustExec("insert into tt1 values(2, 'd', 'b');") - r = tk.MustQuery("select * from tt1;") - r.Check(testkit.Rows("1 a a", "2 d b")) - tk.MustExec("update tt1 set a=5 where c='b';") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") - r = tk.MustQuery("select * from tt1;") - r.Check(testkit.Rows("1 a a", "5 d b")) - - // Automatic Updating for TIMESTAMP - tk.MustExec("CREATE TABLE `tsup` (" + - "`a` int," + - "`ts` TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP," + - "KEY `idx` (`ts`)" + - ");") - tk.MustExec("set @orig_sql_mode=@@sql_mode; set @@sql_mode='';") - tk.MustExec("insert into tsup values(1, '0000-00-00 00:00:00');") - tk.MustExec("update tsup set a=5;") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") - r1 := tk.MustQuery("select ts from tsup use index (idx);") - r2 := tk.MustQuery("select ts from tsup;") - r1.Check(r2.Rows()) - tk.MustExec("update tsup set ts='2019-01-01';") - tk.MustQuery("select ts from tsup;").Check(testkit.Rows("2019-01-01 00:00:00")) - tk.MustExec("set @@sql_mode=@orig_sql_mode;") - - // issue 5532 - tk.MustExec("create table decimals (a decimal(20, 0) not null)") - tk.MustExec("insert into decimals values (201)") - // A warning rather than data truncated error. - tk.MustExec("update decimals set a = a + 1.23;") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 1") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect DECIMAL value: '202.23'")) - r = tk.MustQuery("select * from decimals") - r.Check(testkit.Rows("202")) - - tk.MustExec("drop table t") - tk.MustExec("CREATE TABLE `t` ( `c1` year DEFAULT NULL, `c2` year DEFAULT NULL, `c3` date DEFAULT NULL, `c4` datetime DEFAULT NULL, KEY `idx` (`c1`,`c2`))") - _, err = tk.Exec("UPDATE t SET c2=16777215 WHERE c1>= -8388608 AND c1 < -9 ORDER BY c1 LIMIT 2") - require.NoError(t, err) - - tk.MustGetErrCode("update (select * from t) t set c1 = 1111111", mysql.ErrNonUpdatableTable) - - // test update ignore for bad null error - tk.MustExec("drop table if exists t;") - tk.MustExec(`create table t (i int not null default 10)`) - tk.MustExec("insert into t values (1)") - tk.MustExec("update ignore t set i = null;") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 1") - r = tk.MustQuery("SHOW WARNINGS;") - r.Check(testkit.Rows("Warning 1048 Column 'i' cannot be null")) - tk.MustQuery("select * from t").Check(testkit.Rows("0")) - - // issue 7237, update subquery table should be forbidden - tk.MustExec("drop table t") - tk.MustExec("create table t (k int, v int)") - _, err = tk.Exec("update t, (select * from t) as b set b.k = t.k") - require.EqualError(t, err, "[planner:1288]The target table b of the UPDATE is not updatable") - tk.MustExec("update t, (select * from t) as b set t.k = b.k") - - // issue 8045 - tk.MustExec("drop table if exists t1") - tk.MustExec(`CREATE TABLE t1 (c1 float)`) - tk.MustExec("INSERT INTO t1 SET c1 = 1") - tk.MustExec("UPDATE t1 SET c1 = 1.2 WHERE c1=1;") - require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") - - // issue 8119 - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t (c1 float(1,1));") - tk.MustExec("insert into t values (0.0);") - _, err = tk.Exec("update 
t set c1 = 2.0;") - require.True(t, types.ErrWarnDataOutOfRange.Equal(err)) - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a datetime not null, b datetime)") - tk.MustExec("insert into t value('1999-12-12', '1999-12-13')") - tk.MustExec("set @orig_sql_mode=@@sql_mode; set @@sql_mode='';") - tk.MustQuery("select * from t").Check(testkit.Rows("1999-12-12 00:00:00 1999-12-13 00:00:00")) - tk.MustExec("update t set a = ''") - tk.MustQuery("select * from t").Check(testkit.Rows("0000-00-00 00:00:00 1999-12-13 00:00:00")) - tk.MustExec("update t set b = ''") - tk.MustQuery("select * from t").Check(testkit.Rows("0000-00-00 00:00:00 0000-00-00 00:00:00")) - tk.MustExec("set @@sql_mode=@orig_sql_mode;") - - tk.MustExec("create view v as select * from t") - _, err = tk.Exec("update v set a = '2000-11-11'") - require.EqualError(t, err, core.ErrViewInvalid.GenWithStackByArgs("test", "v").Error()) - tk.MustExec("drop view v") - - tk.MustExec("create sequence seq") - tk.MustGetErrCode("update seq set minvalue=1", mysql.ErrBadField) - tk.MustExec("drop sequence seq") - - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1(a int, b int, c int, d int, e int, index idx(a))") - tk.MustExec("create table t2(a int, b int, c int)") - tk.MustExec("update t1 join t2 on t1.a=t2.a set t1.a=1 where t2.b=1 and t2.c=2") - - // Assign `DEFAULT` in `UPDATE` statement - tk.MustExec("drop table if exists t1, t2;") - tk.MustExec("create table t1 (a int default 1, b int default 2);") - tk.MustExec("insert into t1 values (10, 10), (20, 20);") - tk.MustExec("update t1 set a=default where b=10;") - tk.MustQuery("select * from t1;").Check(testkit.Rows("1 10", "20 20")) - tk.MustExec("update t1 set a=30, b=default where a=20;") - tk.MustQuery("select * from t1;").Check(testkit.Rows("1 10", "30 2")) - tk.MustExec("update t1 set a=default, b=default where a=30;") - tk.MustQuery("select * from t1;").Check(testkit.Rows("1 10", "1 2")) - tk.MustExec("insert into t1 values (40, 40)") - tk.MustExec("update t1 set a=default, b=default") - tk.MustQuery("select * from t1;").Check(testkit.Rows("1 2", "1 2", "1 2")) - tk.MustExec("update t1 set a=default(b), b=default(a)") - tk.MustQuery("select * from t1;").Check(testkit.Rows("2 1", "2 1", "2 1")) - // With generated columns - tk.MustExec("create table t2 (a int default 1, b int generated always as (-a) virtual, c int generated always as (-a) stored);") - tk.MustExec("insert into t2 values (10, default, default), (20, default, default)") - tk.MustExec("update t2 set b=default;") - tk.MustQuery("select * from t2;").Check(testkit.Rows("10 -10 -10", "20 -20 -20")) - tk.MustExec("update t2 set a=30, b=default where a=10;") - tk.MustQuery("select * from t2;").Check(testkit.Rows("30 -30 -30", "20 -20 -20")) - tk.MustExec("update t2 set c=default, a=40 where c=-20;") - tk.MustQuery("select * from t2;").Check(testkit.Rows("30 -30 -30", "40 -40 -40")) - tk.MustExec("update t2 set a=default, b=default, c=default where b=-30;") - tk.MustQuery("select * from t2;").Check(testkit.Rows("1 -1 -1", "40 -40 -40")) - tk.MustExec("update t2 set a=default(a), b=default, c=default;") - tk.MustQuery("select * from t2;").Check(testkit.Rows("1 -1 -1", "1 -1 -1")) - tk.MustGetErrCode("update t2 set b=default(a);", mysql.ErrBadGeneratedColumn) - tk.MustGetErrCode("update t2 set a=default(b), b=default(b);", mysql.ErrBadGeneratedColumn) - tk.MustGetErrCode("update t2 set a=default(a), c=default(c);", mysql.ErrBadGeneratedColumn) - tk.MustGetErrCode("update t2 set 
a=default(a), c=default(a);", mysql.ErrBadGeneratedColumn) - tk.MustExec("drop table t1, t2") -} - -func TestListColumnsPartitionWithGlobalIndex(t *testing.T) { - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@session.tidb_enable_list_partition = ON") - // Test generated column with global index - restoreConfig := config.RestoreFunc() - defer restoreConfig() - config.UpdateGlobal(func(conf *config.Config) { - conf.EnableGlobalIndex = true - }) - tableDefs := []string{ - // Test for virtual generated column with global index - `create table t (a varchar(10), b varchar(1) GENERATED ALWAYS AS (substr(a,1,1)) VIRTUAL) partition by list columns(b) (partition p0 values in ('a','c'), partition p1 values in ('b','d'));`, - // Test for stored generated column with global index - `create table t (a varchar(10), b varchar(1) GENERATED ALWAYS AS (substr(a,1,1)) STORED) partition by list columns(b) (partition p0 values in ('a','c'), partition p1 values in ('b','d'));`, - } - for _, tbl := range tableDefs { - tk.MustExec("drop table if exists t") - tk.MustExec(tbl) - tk.MustExec("alter table t add unique index (a)") - tk.MustExec("insert into t (a) values ('aaa'),('abc'),('acd')") - tk.MustQuery("select a from t partition (p0) order by a").Check(testkit.Rows("aaa", "abc", "acd")) - tk.MustQuery("select * from t where a = 'abc' order by a").Check(testkit.Rows("abc a")) - tk.MustExec("update t set a='bbb' where a = 'aaa'") - tk.MustExec("admin check table t") - tk.MustQuery("select a from t order by a").Check(testkit.Rows("abc", "acd", "bbb")) - // TODO: fix below test. - //tk.MustQuery("select a from t partition (p0) order by a").Check(testkit.Rows("abc", "acd")) - //tk.MustQuery("select a from t partition (p1) order by a").Check(testkit.Rows("bbb")) - tk.MustQuery("select * from t where a = 'bbb' order by a").Check(testkit.Rows("bbb b")) - // Test insert meet duplicate error. - _, err := tk.Exec("insert into t (a) values ('abc')") - require.Error(t, err) - // Test insert on duplicate update - tk.MustExec("insert into t (a) values ('abc') on duplicate key update a='bbc'") - tk.MustQuery("select a from t order by a").Check(testkit.Rows("acd", "bbb", "bbc")) - tk.MustQuery("select * from t where a = 'bbc'").Check(testkit.Rows("bbc b")) - // TODO: fix below test. 
- //tk.MustQuery("select a from t partition (p0) order by a").Check(testkit.Rows("acd")) - //tk.MustQuery("select a from t partition (p1) order by a").Check(testkit.Rows("bbb", "bbc")) - } -} - -func TestIssue20724(t *testing.T) { - collate.SetNewCollationEnabledForTest(true) - defer collate.SetNewCollationEnabledForTest(false) - - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1(a varchar(10) collate utf8mb4_general_ci)") - tk.MustExec("insert into t1 values ('a')") - tk.MustExec("update t1 set a = 'A'") - tk.MustQuery("select * from t1").Check(testkit.Rows("A")) - tk.MustExec("drop table t1") -} - -func TestIssue20840(t *testing.T) { - collate.SetNewCollationEnabledForTest(true) - defer collate.SetNewCollationEnabledForTest(false) - - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1") - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly - tk.MustExec("create table t1 (i varchar(20) unique key) collate=utf8mb4_general_ci") - tk.MustExec("insert into t1 values ('a')") - tk.MustExec("replace into t1 values ('A')") - tk.MustQuery("select * from t1").Check(testkit.Rows("A")) - tk.MustExec("drop table t1") -} - -func TestIssueInsertPrefixIndexForNonUTF8Collation(t *testing.T) { - collate.SetNewCollationEnabledForTest(true) - defer collate.SetNewCollationEnabledForTest(false) - - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2, t3") - tk.MustExec("create table t1 ( c_int int, c_str varchar(40) character set ascii collate ascii_bin, primary key(c_int, c_str(8)) clustered , unique key(c_str))") - tk.MustExec("create table t2 ( c_int int, c_str varchar(40) character set latin1 collate latin1_bin, primary key(c_int, c_str(8)) clustered , unique key(c_str))") - tk.MustExec("insert into t1 values (3, 'fervent brattain')") - tk.MustExec("insert into t2 values (3, 'fervent brattain')") - tk.MustExec("admin check table t1") - tk.MustExec("admin check table t2") - - tk.MustExec("create table t3 (x varchar(40) CHARACTER SET ascii COLLATE ascii_bin, UNIQUE KEY uk(x(4)))") - tk.MustExec("insert into t3 select 'abc '") - tk.MustGetErrCode("insert into t3 select 'abc d'", 1062) -} diff --git a/executor/write_test.go b/executor/write_test.go index 04609968d486a..11e402f446631 100644 --- a/executor/write_test.go +++ b/executor/write_test.go @@ -21,6 +21,7 @@ import ( "strconv" "testing" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" @@ -30,12 +31,14 @@ import ( "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/collate" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/testutil" "github.com/stretchr/testify/require" @@ -3878,3 +3881,364 @@ func testEqualDatumsAsBinary(t *testing.T, a []interface{}, b []interface{}, sam require.NoError(t, err) 
require.Equal(t, same, res, "a: %v, b: %v", a, b) } + +func TestUpdate(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + fillData(tk, "update_test") + + updateStr := `UPDATE update_test SET name = "abc" where id > 0;` + tk.MustExec(updateStr) + tk.CheckExecResult(2, 0) + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 2 Changed: 2 Warnings: 0") + + // select data + tk.MustExec("begin") + r := tk.MustQuery(`SELECT * from update_test limit 2;`) + r.Check(testkit.Rows("1 abc", "2 abc")) + tk.MustExec("commit") + + tk.MustExec(`UPDATE update_test SET name = "foo"`) + tk.CheckExecResult(2, 0) + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 2 Changed: 2 Warnings: 0") + + // table option is auto-increment + tk.MustExec("begin") + tk.MustExec("drop table if exists update_test;") + tk.MustExec("commit") + tk.MustExec("begin") + tk.MustExec("create table update_test(id int not null auto_increment, name varchar(255), primary key(id))") + tk.MustExec("insert into update_test(name) values ('aa')") + tk.MustExec("update update_test set id = 8 where name = 'aa'") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") + tk.MustExec("insert into update_test(name) values ('bb')") + tk.MustExec("commit") + tk.MustExec("begin") + r = tk.MustQuery("select * from update_test;") + r.Check(testkit.Rows("8 aa", "9 bb")) + tk.MustExec("commit") + + tk.MustExec("begin") + tk.MustExec("drop table if exists update_test;") + tk.MustExec("commit") + tk.MustExec("begin") + tk.MustExec("create table update_test(id int not null auto_increment, name varchar(255), index(id))") + tk.MustExec("insert into update_test(name) values ('aa')") + _, err := tk.Exec("update update_test set id = null where name = 'aa'") + require.EqualError(t, err, "[table:1048]Column 'id' cannot be null") + + tk.MustExec("drop table update_test") + tk.MustExec("create table update_test(id int)") + tk.MustExec("begin") + tk.MustExec("insert into update_test(id) values (1)") + tk.MustExec("update update_test set id = 2 where id = 1 limit 1") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") + r = tk.MustQuery("select * from update_test;") + r.Check(testkit.Rows("2")) + tk.MustExec("commit") + + // Test that in a transaction, when a constraint failed in an update statement, the record is not inserted. 
+ tk.MustExec("create table update_unique (id int primary key, name int unique)") + tk.MustExec("insert update_unique values (1, 1), (2, 2);") + tk.MustExec("begin") + _, err = tk.Exec("update update_unique set name = 1 where id = 2") + require.Error(t, err) + tk.MustExec("commit") + tk.MustQuery("select * from update_unique").Check(testkit.Rows("1 1", "2 2")) + + // test update ignore for pimary key + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a bigint, primary key (a));") + tk.MustExec("insert into t values (1)") + tk.MustExec("insert into t values (2)") + _, err = tk.Exec("update ignore t set a = 1 where a = 2;") + require.NoError(t, err) + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 1") + r = tk.MustQuery("SHOW WARNINGS;") + r.Check(testkit.Rows("Warning 1062 Duplicate entry '1' for key 'PRIMARY'")) + tk.MustQuery("select * from t").Check(testkit.Rows("1", "2")) + + // test update ignore for truncate as warning + _, err = tk.Exec("update ignore t set a = 1 where a = (select '2a')") + require.NoError(t, err) + r = tk.MustQuery("SHOW WARNINGS;") + r.Check(testkit.Rows("Warning 1292 Truncated incorrect DOUBLE value: '2a'", "Warning 1292 Truncated incorrect DOUBLE value: '2a'", "Warning 1062 Duplicate entry '1' for key 'PRIMARY'")) + + tk.MustExec("update ignore t set a = 42 where a = 2;") + tk.MustQuery("select * from t").Check(testkit.Rows("1", "42")) + + // test update ignore for unique key + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t(a bigint, unique key I_uniq (a));") + tk.MustExec("insert into t values (1)") + tk.MustExec("insert into t values (2)") + _, err = tk.Exec("update ignore t set a = 1 where a = 2;") + require.NoError(t, err) + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 1") + r = tk.MustQuery("SHOW WARNINGS;") + r.Check(testkit.Rows("Warning 1062 Duplicate entry '1' for key 'I_uniq'")) + tk.MustQuery("select * from t").Check(testkit.Rows("1", "2")) + + // test issue21965 + tk.MustExec("drop table if exists t;") + tk.MustExec("set @@session.tidb_enable_list_partition = ON") + tk.MustExec("create table t (a int) partition by list (a) (partition p0 values in (0,1));") + tk.MustExec("insert ignore into t values (1);") + tk.MustExec("update ignore t set a=2 where a=1;") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 0") + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int key) partition by list (a) (partition p0 values in (0,1));") + tk.MustExec("insert ignore into t values (1);") + tk.MustExec("update ignore t set a=2 where a=1;") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 0 Warnings: 0") + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(id integer auto_increment, t1 datetime, t2 datetime, primary key (id))") + tk.MustExec("insert into t(t1, t2) values('2000-10-01 01:01:01', '2017-01-01 10:10:10')") + tk.MustQuery("select * from t").Check(testkit.Rows("1 2000-10-01 01:01:01 2017-01-01 10:10:10")) + tk.MustExec("update t set t1 = '2017-10-01 10:10:11', t2 = date_add(t1, INTERVAL 10 MINUTE) where id = 1") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") + tk.MustQuery("select * from t").Check(testkit.Rows("1 2017-10-01 10:10:11 2000-10-01 01:11:01")) + + // for issue #5132 + tk.MustExec("CREATE TABLE `tt1` (" + + "`a` int(11) NOT NULL," + + "`b` varchar(32) DEFAULT NULL," + + "`c` varchar(32) DEFAULT 
NULL," + + "PRIMARY KEY (`a`)," + + "UNIQUE KEY `b_idx` (`b`)" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;") + tk.MustExec("insert into tt1 values(1, 'a', 'a');") + tk.MustExec("insert into tt1 values(2, 'd', 'b');") + r = tk.MustQuery("select * from tt1;") + r.Check(testkit.Rows("1 a a", "2 d b")) + tk.MustExec("update tt1 set a=5 where c='b';") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") + r = tk.MustQuery("select * from tt1;") + r.Check(testkit.Rows("1 a a", "5 d b")) + + // Automatic Updating for TIMESTAMP + tk.MustExec("CREATE TABLE `tsup` (" + + "`a` int," + + "`ts` TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP," + + "KEY `idx` (`ts`)" + + ");") + tk.MustExec("set @orig_sql_mode=@@sql_mode; set @@sql_mode='';") + tk.MustExec("insert into tsup values(1, '0000-00-00 00:00:00');") + tk.MustExec("update tsup set a=5;") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") + r1 := tk.MustQuery("select ts from tsup use index (idx);") + r2 := tk.MustQuery("select ts from tsup;") + r1.Check(r2.Rows()) + tk.MustExec("update tsup set ts='2019-01-01';") + tk.MustQuery("select ts from tsup;").Check(testkit.Rows("2019-01-01 00:00:00")) + tk.MustExec("set @@sql_mode=@orig_sql_mode;") + + // issue 5532 + tk.MustExec("create table decimals (a decimal(20, 0) not null)") + tk.MustExec("insert into decimals values (201)") + // A warning rather than data truncated error. + tk.MustExec("update decimals set a = a + 1.23;") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 1") + tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect DECIMAL value: '202.23'")) + r = tk.MustQuery("select * from decimals") + r.Check(testkit.Rows("202")) + + tk.MustExec("drop table t") + tk.MustExec("CREATE TABLE `t` ( `c1` year DEFAULT NULL, `c2` year DEFAULT NULL, `c3` date DEFAULT NULL, `c4` datetime DEFAULT NULL, KEY `idx` (`c1`,`c2`))") + _, err = tk.Exec("UPDATE t SET c2=16777215 WHERE c1>= -8388608 AND c1 < -9 ORDER BY c1 LIMIT 2") + require.NoError(t, err) + + tk.MustGetErrCode("update (select * from t) t set c1 = 1111111", mysql.ErrNonUpdatableTable) + + // test update ignore for bad null error + tk.MustExec("drop table if exists t;") + tk.MustExec(`create table t (i int not null default 10)`) + tk.MustExec("insert into t values (1)") + tk.MustExec("update ignore t set i = null;") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 1") + r = tk.MustQuery("SHOW WARNINGS;") + r.Check(testkit.Rows("Warning 1048 Column 'i' cannot be null")) + tk.MustQuery("select * from t").Check(testkit.Rows("0")) + + // issue 7237, update subquery table should be forbidden + tk.MustExec("drop table t") + tk.MustExec("create table t (k int, v int)") + _, err = tk.Exec("update t, (select * from t) as b set b.k = t.k") + require.EqualError(t, err, "[planner:1288]The target table b of the UPDATE is not updatable") + tk.MustExec("update t, (select * from t) as b set t.k = b.k") + + // issue 8045 + tk.MustExec("drop table if exists t1") + tk.MustExec(`CREATE TABLE t1 (c1 float)`) + tk.MustExec("INSERT INTO t1 SET c1 = 1") + tk.MustExec("UPDATE t1 SET c1 = 1.2 WHERE c1=1;") + require.Equal(t, tk.Session().LastMessage(), "Rows matched: 1 Changed: 1 Warnings: 0") + + // issue 8119 + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (c1 float(1,1));") + tk.MustExec("insert into t values (0.0);") + _, err = tk.Exec("update 
t set c1 = 2.0;") + require.True(t, types.ErrWarnDataOutOfRange.Equal(err)) + + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a datetime not null, b datetime)") + tk.MustExec("insert into t value('1999-12-12', '1999-12-13')") + tk.MustExec("set @orig_sql_mode=@@sql_mode; set @@sql_mode='';") + tk.MustQuery("select * from t").Check(testkit.Rows("1999-12-12 00:00:00 1999-12-13 00:00:00")) + tk.MustExec("update t set a = ''") + tk.MustQuery("select * from t").Check(testkit.Rows("0000-00-00 00:00:00 1999-12-13 00:00:00")) + tk.MustExec("update t set b = ''") + tk.MustQuery("select * from t").Check(testkit.Rows("0000-00-00 00:00:00 0000-00-00 00:00:00")) + tk.MustExec("set @@sql_mode=@orig_sql_mode;") + + tk.MustExec("create view v as select * from t") + _, err = tk.Exec("update v set a = '2000-11-11'") + require.EqualError(t, err, core.ErrViewInvalid.GenWithStackByArgs("test", "v").Error()) + tk.MustExec("drop view v") + + tk.MustExec("create sequence seq") + tk.MustGetErrCode("update seq set minvalue=1", mysql.ErrBadField) + tk.MustExec("drop sequence seq") + + tk.MustExec("drop table if exists t1, t2") + tk.MustExec("create table t1(a int, b int, c int, d int, e int, index idx(a))") + tk.MustExec("create table t2(a int, b int, c int)") + tk.MustExec("update t1 join t2 on t1.a=t2.a set t1.a=1 where t2.b=1 and t2.c=2") + + // Assign `DEFAULT` in `UPDATE` statement + tk.MustExec("drop table if exists t1, t2;") + tk.MustExec("create table t1 (a int default 1, b int default 2);") + tk.MustExec("insert into t1 values (10, 10), (20, 20);") + tk.MustExec("update t1 set a=default where b=10;") + tk.MustQuery("select * from t1;").Check(testkit.Rows("1 10", "20 20")) + tk.MustExec("update t1 set a=30, b=default where a=20;") + tk.MustQuery("select * from t1;").Check(testkit.Rows("1 10", "30 2")) + tk.MustExec("update t1 set a=default, b=default where a=30;") + tk.MustQuery("select * from t1;").Check(testkit.Rows("1 10", "1 2")) + tk.MustExec("insert into t1 values (40, 40)") + tk.MustExec("update t1 set a=default, b=default") + tk.MustQuery("select * from t1;").Check(testkit.Rows("1 2", "1 2", "1 2")) + tk.MustExec("update t1 set a=default(b), b=default(a)") + tk.MustQuery("select * from t1;").Check(testkit.Rows("2 1", "2 1", "2 1")) + // With generated columns + tk.MustExec("create table t2 (a int default 1, b int generated always as (-a) virtual, c int generated always as (-a) stored);") + tk.MustExec("insert into t2 values (10, default, default), (20, default, default)") + tk.MustExec("update t2 set b=default;") + tk.MustQuery("select * from t2;").Check(testkit.Rows("10 -10 -10", "20 -20 -20")) + tk.MustExec("update t2 set a=30, b=default where a=10;") + tk.MustQuery("select * from t2;").Check(testkit.Rows("30 -30 -30", "20 -20 -20")) + tk.MustExec("update t2 set c=default, a=40 where c=-20;") + tk.MustQuery("select * from t2;").Check(testkit.Rows("30 -30 -30", "40 -40 -40")) + tk.MustExec("update t2 set a=default, b=default, c=default where b=-30;") + tk.MustQuery("select * from t2;").Check(testkit.Rows("1 -1 -1", "40 -40 -40")) + tk.MustExec("update t2 set a=default(a), b=default, c=default;") + tk.MustQuery("select * from t2;").Check(testkit.Rows("1 -1 -1", "1 -1 -1")) + tk.MustGetErrCode("update t2 set b=default(a);", mysql.ErrBadGeneratedColumn) + tk.MustGetErrCode("update t2 set a=default(b), b=default(b);", mysql.ErrBadGeneratedColumn) + tk.MustGetErrCode("update t2 set a=default(a), c=default(c);", mysql.ErrBadGeneratedColumn) + tk.MustGetErrCode("update t2 set 
a=default(a), c=default(a);", mysql.ErrBadGeneratedColumn) + tk.MustExec("drop table t1, t2") +} + +func TestListColumnsPartitionWithGlobalIndex(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@session.tidb_enable_list_partition = ON") + // Test generated column with global index + restoreConfig := config.RestoreFunc() + defer restoreConfig() + config.UpdateGlobal(func(conf *config.Config) { + conf.EnableGlobalIndex = true + }) + tableDefs := []string{ + // Test for virtual generated column with global index + `create table t (a varchar(10), b varchar(1) GENERATED ALWAYS AS (substr(a,1,1)) VIRTUAL) partition by list columns(b) (partition p0 values in ('a','c'), partition p1 values in ('b','d'));`, + // Test for stored generated column with global index + `create table t (a varchar(10), b varchar(1) GENERATED ALWAYS AS (substr(a,1,1)) STORED) partition by list columns(b) (partition p0 values in ('a','c'), partition p1 values in ('b','d'));`, + } + for _, tbl := range tableDefs { + tk.MustExec("drop table if exists t") + tk.MustExec(tbl) + tk.MustExec("alter table t add unique index (a)") + tk.MustExec("insert into t (a) values ('aaa'),('abc'),('acd')") + tk.MustQuery("select a from t partition (p0) order by a").Check(testkit.Rows("aaa", "abc", "acd")) + tk.MustQuery("select * from t where a = 'abc' order by a").Check(testkit.Rows("abc a")) + tk.MustExec("update t set a='bbb' where a = 'aaa'") + tk.MustExec("admin check table t") + tk.MustQuery("select a from t order by a").Check(testkit.Rows("abc", "acd", "bbb")) + // TODO: fix below test. + //tk.MustQuery("select a from t partition (p0) order by a").Check(testkit.Rows("abc", "acd")) + //tk.MustQuery("select a from t partition (p1) order by a").Check(testkit.Rows("bbb")) + tk.MustQuery("select * from t where a = 'bbb' order by a").Check(testkit.Rows("bbb b")) + // Test insert meet duplicate error. + _, err := tk.Exec("insert into t (a) values ('abc')") + require.Error(t, err) + // Test insert on duplicate update + tk.MustExec("insert into t (a) values ('abc') on duplicate key update a='bbc'") + tk.MustQuery("select a from t order by a").Check(testkit.Rows("acd", "bbb", "bbc")) + tk.MustQuery("select * from t where a = 'bbc'").Check(testkit.Rows("bbc b")) + // TODO: fix below test. 
+ //tk.MustQuery("select a from t partition (p0) order by a").Check(testkit.Rows("acd")) + //tk.MustQuery("select a from t partition (p1) order by a").Check(testkit.Rows("bbb", "bbc")) + } +} + +func TestIssue20724(t *testing.T) { + collate.SetNewCollationEnabledForTest(true) + defer collate.SetNewCollationEnabledForTest(false) + + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1(a varchar(10) collate utf8mb4_general_ci)") + tk.MustExec("insert into t1 values ('a')") + tk.MustExec("update t1 set a = 'A'") + tk.MustQuery("select * from t1").Check(testkit.Rows("A")) + tk.MustExec("drop table t1") +} + +func TestIssue20840(t *testing.T) { + collate.SetNewCollationEnabledForTest(true) + defer collate.SetNewCollationEnabledForTest(false) + + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1") + tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly + tk.MustExec("create table t1 (i varchar(20) unique key) collate=utf8mb4_general_ci") + tk.MustExec("insert into t1 values ('a')") + tk.MustExec("replace into t1 values ('A')") + tk.MustQuery("select * from t1").Check(testkit.Rows("A")) + tk.MustExec("drop table t1") +} + +func TestIssueInsertPrefixIndexForNonUTF8Collation(t *testing.T) { + collate.SetNewCollationEnabledForTest(true) + defer collate.SetNewCollationEnabledForTest(false) + + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1, t2, t3") + tk.MustExec("create table t1 ( c_int int, c_str varchar(40) character set ascii collate ascii_bin, primary key(c_int, c_str(8)) clustered , unique key(c_str))") + tk.MustExec("create table t2 ( c_int int, c_str varchar(40) character set latin1 collate latin1_bin, primary key(c_int, c_str(8)) clustered , unique key(c_str))") + tk.MustExec("insert into t1 values (3, 'fervent brattain')") + tk.MustExec("insert into t2 values (3, 'fervent brattain')") + tk.MustExec("admin check table t1") + tk.MustExec("admin check table t2") + + tk.MustExec("create table t3 (x varchar(40) CHARACTER SET ascii COLLATE ascii_bin, UNIQUE KEY uk(x(4)))") + tk.MustExec("insert into t3 select 'abc '") + tk.MustGetErrCode("insert into t3 select 'abc d'", 1062) +} From 0c7528a42509e08d8cfe697555260fdd662273ff Mon Sep 17 00:00:00 2001 From: Zhou Kunqin <25057648+time-and-fate@users.noreply.github.com> Date: Wed, 15 Dec 2021 20:28:35 +0800 Subject: [PATCH 03/15] statistics: avoid deadlock when create/drop extended stats and analyze at the same time (#30566) --- statistics/handle/handle.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/statistics/handle/handle.go b/statistics/handle/handle.go index 16bf3bbcb7b7c..0889d00e431e5 100644 --- a/statistics/handle/handle.go +++ b/statistics/handle/handle.go @@ -1481,6 +1481,15 @@ func (h *Handle) InsertExtendedStats(statsName string, colIDs []int64, tp int, t return errors.Errorf("extended statistics '%s' with same type on same columns already exists", statsName) } } + txn, err := h.mu.ctx.Txn(true) + if err != nil { + return errors.Trace(err) + } + version := txn.StartTS() + // Bump version in `mysql.stats_meta` to trigger stats cache refresh. 
+ if _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_meta SET version = %? WHERE table_id = %?", version, tableID); err != nil {
+ return err
+ }
 // Remove the existing 'deleted' records.
 if _, err = exec.ExecuteInternal(ctx, "DELETE FROM mysql.stats_extended WHERE name = %? and table_id = %?", statsName, tableID); err != nil {
 return err
 }
@@ -1491,17 +1500,10 @@ func (h *Handle) InsertExtendedStats(statsName string, colIDs []int64, tp int, t
 // the record from the table, tidb-b should delete the cached item synchronously. While for tidb-c, it has to wait for
 // next `Update()` to remove the cached item then.
 h.removeExtendedStatsItem(tableID, statsName)
- txn, err := h.mu.ctx.Txn(true)
- if err != nil {
- return errors.Trace(err)
- }
- version := txn.StartTS()
 const sql = "INSERT INTO mysql.stats_extended(name, type, table_id, column_ids, version, status) VALUES (%?, %?, %?, %?, %?, %?)"
 if _, err = exec.ExecuteInternal(ctx, sql, statsName, tp, tableID, strColIDs, version, StatsStatusInited); err != nil {
 return err
 }
- // Bump version in `mysql.stats_meta` to trigger stats cache refresh.
- _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_meta SET version = %? WHERE table_id = %?", version, tableID)
 return
 }
@@ -1541,10 +1543,10 @@ func (h *Handle) MarkExtendedStatsDeleted(statsName string, tableID int64, ifExi
 return errors.Trace(err)
 }
 version := txn.StartTS()
- if _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_extended SET version = %?, status = %? WHERE name = %? and table_id = %?", version, StatsStatusDeleted, statsName, tableID); err != nil {
+ if _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_meta SET version = %? WHERE table_id = %?", version, tableID); err != nil {
 return err
 }
- if _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_meta SET version = %? WHERE table_id = %?", version, tableID); err != nil {
+ if _, err = exec.ExecuteInternal(ctx, "UPDATE mysql.stats_extended SET version = %?, status = %? WHERE name = %? and table_id = %?", version, StatsStatusDeleted, statsName, tableID); err != nil {
 return err
 }
 return nil

From d660e483c2cf3df13d891a38fa29bcae53c52a08 Mon Sep 17 00:00:00 2001
From: Yiding Cui
Date: Wed, 15 Dec 2021 21:12:34 +0800
Subject: [PATCH 04/15] sessionctx: fix the value of analyze_version when upgrading 4.x to 5.… (#30743)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 session/bootstrap.go             | 27 +++++++++++++++++-
 session/bootstrap_serial_test.go | 49 ++++++++++++++++++++++++++++++++
 2 files changed, 75 insertions(+), 1 deletion(-)

diff --git a/session/bootstrap.go b/session/bootstrap.go
index 815e921c4a6fb..2244e3eaa5484 100644
--- a/session/bootstrap.go
+++ b/session/bootstrap.go
@@ -538,11 +538,14 @@ const (
 version78 = 78
 // version79 adds the mysql.table_cache_meta table
 version79 = 79
+ // version80 fixes the issue https://github.com/pingcap/tidb/issues/25422.
+ // If TiDB is upgrading from 4.x to a newer version, we keep tidb_analyze_version at 1.
+ version80 = 80
 )

 // currentBootstrapVersion is defined as a variable, so we can modify its value for testing. 
// please make sure this is the largest version
-var currentBootstrapVersion int64 = version79
+var currentBootstrapVersion int64 = version80

 var (
 bootstrapVersion = []func(Session, int64){
@@ -625,6 +628,7 @@ var (
 upgradeToVer77,
 upgradeToVer78,
 upgradeToVer79,
+ upgradeToVer80,
 }
 )

@@ -1630,6 +1634,27 @@ func upgradeToVer79(s Session, ver int64) {
 doReentrantDDL(s, CreateTableCacheMetaTable)
 }

+func upgradeToVer80(s Session, ver int64) {
+ if ver >= version80 {
+ return
+ }
+ // Check if tidb_analyze_version exists in mysql.GLOBAL_VARIABLES.
+ // If not, insert "tidb_analyze_version | 1" since this was the behavior before this variable was introduced.
+ ctx := context.Background()
+ rs, err := s.ExecuteInternal(ctx, "SELECT VARIABLE_VALUE FROM %n.%n WHERE VARIABLE_NAME=%?;",
+ mysql.SystemDB, mysql.GlobalVariablesTable, variable.TiDBAnalyzeVersion)
+ terror.MustNil(err)
+ req := rs.NewChunk(nil)
+ err = rs.Next(ctx, req)
+ terror.MustNil(err)
+ if req.NumRows() != 0 {
+ return
+ }
+
+ mustExecute(s, "INSERT HIGH_PRIORITY IGNORE INTO %n.%n VALUES (%?, %?);",
+ mysql.SystemDB, mysql.GlobalVariablesTable, variable.TiDBAnalyzeVersion, 1)
+}
+
 func writeOOMAction(s Session) {
 comment := "oom-action is `log` by default in v3.0.x, `cancel` by default in v4.0.11+"
 mustExecute(s, `INSERT HIGH_PRIORITY INTO %n.%n VALUES (%?, %?, %?) ON DUPLICATE KEY UPDATE VARIABLE_VALUE= %?`,
diff --git a/session/bootstrap_serial_test.go b/session/bootstrap_serial_test.go
index 235cf9556ace2..1de04a5ca91f3 100644
--- a/session/bootstrap_serial_test.go
+++ b/session/bootstrap_serial_test.go
@@ -879,3 +879,52 @@ func TestReferencesPrivilegeOnColumn(t *testing.T) {
 mustExec(t, se, "create table t1 (a int)")
 mustExec(t, se, "GRANT select (a), update (a),insert(a), references(a) on t1 to issue28531")
 }
+
+func TestAnalyzeVersionUpgradeFrom300To500(t *testing.T) {
+ ctx := context.Background()
+ store, _ := createStoreAndBootstrap(t)
+ defer func() { require.NoError(t, store.Close()) }()
+
+ // Upgrade from 3.0.0 to 5.1 or above.
+ ver300 := 33
+ seV3 := createSessionAndSetID(t, store)
+ txn, err := store.Begin()
+ require.NoError(t, err)
+ m := meta.NewMeta(txn)
+ err = m.FinishBootstrap(int64(ver300))
+ require.NoError(t, err)
+ err = txn.Commit(context.Background())
+ require.NoError(t, err)
+ mustExec(t, seV3, fmt.Sprintf("update mysql.tidb set variable_value=%d where variable_name='tidb_server_version'", ver300))
+ mustExec(t, seV3, fmt.Sprintf("delete from mysql.GLOBAL_VARIABLES where variable_name='%s'", variable.TiDBAnalyzeVersion))
+ mustExec(t, seV3, "commit")
+ unsetStoreBootstrapped(store.UUID())
+ ver, err := getBootstrapVersion(seV3)
+ require.NoError(t, err)
+ require.Equal(t, int64(ver300), ver)
+
+ // We are now in 3.0.0; check that tidb_analyze_version does not exist.
+ res := mustExec(t, seV3, fmt.Sprintf("select * from mysql.GLOBAL_VARIABLES where variable_name='%s'", variable.TiDBAnalyzeVersion))
+ chk := res.NewChunk(nil)
+ err = res.Next(ctx, chk)
+ require.NoError(t, err)
+ require.Equal(t, 0, chk.NumRows())
+
+ domCurVer, err := BootstrapSession(store)
+ require.NoError(t, err)
+ defer domCurVer.Close()
+ seCurVer := createSessionAndSetID(t, store)
+ ver, err = getBootstrapVersion(seCurVer)
+ require.NoError(t, err)
+ require.Equal(t, currentBootstrapVersion, ver)
+
+ // We are now in a version no lower than 5.x; tidb_analyze_version should be 1. 
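+ // (The expected value 1 comes from upgradeToVer80: the variable was deleted
+ // above to simulate a pre-5.x cluster, so the upgrade path re-inserts it as 1
+ // to preserve the old analyze behavior.)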
+ res = mustExec(t, seCurVer, "select @@tidb_analyze_version")
+ chk = res.NewChunk(nil)
+ err = res.Next(ctx, chk)
+ require.NoError(t, err)
+ require.Equal(t, 1, chk.NumRows())
+ row := chk.GetRow(0)
+ require.Equal(t, 1, row.Len())
+ require.Equal(t, "1", row.GetString(0))
+}

From 05b99603901ae0981aa10027dc95e4d595a54c1e Mon Sep 17 00:00:00 2001
From: bb7133
Date: Thu, 16 Dec 2021 10:06:35 +0800
Subject: [PATCH 05/15] server: disable socket listener for `basicHTTPHandlerTestSuite` (#30680)

---
 server/http_handler_test.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/server/http_handler_test.go b/server/http_handler_test.go
index ac57db6425891..48205c192c11e 100644
--- a/server/http_handler_test.go
+++ b/server/http_handler_test.go
@@ -448,7 +448,6 @@ func (ts *basicHTTPHandlerTestSuite) startServer(t *testing.T) {
 cfg.Port = 0
 cfg.Status.StatusPort = 0
 cfg.Status.ReportStatus = true
- cfg.Socket = fmt.Sprintf("/tmp/%s.sock", t.Name())

 server, err := NewServer(cfg, ts.tidbdrv)
 require.NoError(t, err)

From af259faacfa25d7516920ac4bd976eaeeaabbd82 Mon Sep 17 00:00:00 2001
From: Chengpeng Yan <41809508+Reminiscent@users.noreply.github.com>
Date: Thu, 16 Dec 2021 10:18:35 +0800
Subject: [PATCH 06/15] planner: support the plan cache aware of bindings (#30169)

---
 bindinfo/bind_serial_test.go  | 312 ++++++++++++++++++++++++++++++++++
 executor/explainfor_test.go   |  14 +-
 executor/prepared.go          |   3 +-
 planner/core/cache.go         |   5 +-
 planner/core/cache_test.go    |   2 +-
 planner/core/common_plans.go  |  17 +-
 planner/core/optimizer.go     |   3 +
 planner/optimize.go           |  54 ++++--
 server/driver_tidb.go         |   5 +-
 session/session.go            |   3 +-
 sessionctx/stmtctx/stmtctx.go |   3 +
 testkit/testkit.go            |  24 +++
 12 files changed, 415 insertions(+), 30 deletions(-)

diff --git a/bindinfo/bind_serial_test.go b/bindinfo/bind_serial_test.go
index 7cf49c087278f..53b674a8ec715 100644
--- a/bindinfo/bind_serial_test.go
+++ b/bindinfo/bind_serial_test.go
@@ -16,7 +16,9 @@ package bindinfo_test

 import (
 "context"
+ "crypto/tls"
 "fmt"
+ "strconv"
 "testing"

 "github.com/pingcap/tidb/bindinfo"
@@ -26,10 +28,320 @@ import (
 "github.com/pingcap/tidb/parser/auth"
 "github.com/pingcap/tidb/parser/model"
 "github.com/pingcap/tidb/parser/terror"
+ plannercore "github.com/pingcap/tidb/planner/core"
+ "github.com/pingcap/tidb/session/txninfo"
 "github.com/pingcap/tidb/testkit"
+ "github.com/pingcap/tidb/util"
 "github.com/stretchr/testify/require"
 )

+// mockSessionManager1 is a mocked session manager used for testing.
+type mockSessionManager1 struct {
+ PS []*util.ProcessInfo
+}
+
+func (msm *mockSessionManager1) ShowTxnList() []*txninfo.TxnInfo {
+ return nil
+}
+
+// ShowProcessList implements the SessionManager.ShowProcessList interface.
+func (msm *mockSessionManager1) ShowProcessList() map[uint64]*util.ProcessInfo {
+ ret := make(map[uint64]*util.ProcessInfo)
+ for _, item := range msm.PS {
+ ret[item.ID] = item
+ }
+ return ret
+}
+
+func (msm *mockSessionManager1) GetProcessInfo(id uint64) (*util.ProcessInfo, bool) {
+ for _, item := range msm.PS {
+ if item.ID == id {
+ return item, true
+ }
+ }
+ return &util.ProcessInfo{}, false
+}
+
+// Kill implements the SessionManager.Kill interface. 
+func (msm *mockSessionManager1) Kill(cid uint64, query bool) { +} + +func (msm *mockSessionManager1) KillAllConnections() { +} + +func (msm *mockSessionManager1) UpdateTLSConfig(cfg *tls.Config) { +} + +func (msm *mockSessionManager1) ServerID() uint64 { + return 1 +} + +func TestPrepareCacheWithBinding(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1, t2") + tk.MustExec("create table t1(a int, b int, c int, key idx_b(b), key idx_c(c))") + tk.MustExec("create table t2(a int, b int, c int, key idx_b(b), key idx_c(c))") + + // TestDMLSQLBind + tk.MustExec("prepare stmt1 from 'delete from t1 where b = 1 and c > 1';") + tk.MustExec("execute stmt1;") + require.Equal(t, "t1:idx_b", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess := tk.Session().ShowProcess() + ps := []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res := tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_b(b)"), res.Rows()) + tk.MustExec("execute stmt1;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for delete from t1 where b = 1 and c > 1 using delete /*+ use_index(t1,idx_c) */ from t1 where b = 1 and c > 1") + + tk.MustExec("execute stmt1;") + require.Equal(t, "t1:idx_c", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_c(c)"), res.Rows()) + + tk.MustExec("prepare stmt2 from 'delete t1, t2 from t1 inner join t2 on t1.b = t2.b';") + tk.MustExec("execute stmt2;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "HashJoin"), res.Rows()) + tk.MustExec("execute stmt2;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for delete t1, t2 from t1 inner join t2 on t1.b = t2.b using delete /*+ inl_join(t1) */ t1, t2 from t1 inner join t2 on t1.b = t2.b") + + tk.MustExec("execute stmt2;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "IndexJoin"), res.Rows()) + + tk.MustExec("prepare stmt3 from 'update t1 set a = 1 where b = 1 and c > 1';") + tk.MustExec("execute stmt3;") + require.Equal(t, "t1:idx_b", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_b(b)"), 
res.Rows()) + tk.MustExec("execute stmt3;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for update t1 set a = 1 where b = 1 and c > 1 using update /*+ use_index(t1,idx_c) */ t1 set a = 1 where b = 1 and c > 1") + + tk.MustExec("execute stmt3;") + require.Equal(t, "t1:idx_c", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_c(c)"), res.Rows()) + + tk.MustExec("prepare stmt4 from 'update t1, t2 set t1.a = 1 where t1.b = t2.b';") + tk.MustExec("execute stmt4;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "HashJoin"), res.Rows()) + tk.MustExec("execute stmt4;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for update t1, t2 set t1.a = 1 where t1.b = t2.b using update /*+ inl_join(t1) */ t1, t2 set t1.a = 1 where t1.b = t2.b") + + tk.MustExec("execute stmt4;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "IndexJoin"), res.Rows()) + + tk.MustExec("prepare stmt5 from 'insert into t1 select * from t2 where t2.b = 2 and t2.c > 2';") + tk.MustExec("execute stmt5;") + require.Equal(t, "t2:idx_b", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_b(b)"), res.Rows()) + tk.MustExec("execute stmt5;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for insert into t1 select * from t2 where t2.b = 1 and t2.c > 1 using insert /*+ use_index(t2,idx_c) */ into t1 select * from t2 where t2.b = 1 and t2.c > 1") + + tk.MustExec("execute stmt5;") + require.Equal(t, "t2:idx_b", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_b(b)"), res.Rows()) + + tk.MustExec("drop global binding for insert into t1 select * from t2 where t2.b = 1 and t2.c > 1") + tk.MustExec("create global binding for insert into t1 select * from t2 where t2.b = 1 and t2.c > 1 using insert into t1 select /*+ use_index(t2,idx_c) */ * from t2 where t2.b = 1 and t2.c > 1") + + tk.MustExec("execute stmt5;") + require.Equal(t, "t2:idx_c", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain 
for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_c(c)"), res.Rows()) + + tk.MustExec("prepare stmt6 from 'replace into t1 select * from t2 where t2.b = 2 and t2.c > 2';") + tk.MustExec("execute stmt6;") + require.Equal(t, "t2:idx_b", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_b(b)"), res.Rows()) + tk.MustExec("execute stmt6;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for replace into t1 select * from t2 where t2.b = 1 and t2.c > 1 using replace into t1 select /*+ use_index(t2,idx_c) */ * from t2 where t2.b = 1 and t2.c > 1") + + tk.MustExec("execute stmt6;") + require.Equal(t, "t2:idx_c", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "idx_c(c)"), res.Rows()) + + // TestExplain + tk.MustExec("drop table if exists t1") + tk.MustExec("drop table if exists t2") + tk.MustExec("create table t1(id int)") + tk.MustExec("create table t2(id int)") + + tk.MustExec("prepare stmt1 from 'SELECT * from t1,t2 where t1.id = t2.id';") + tk.MustExec("execute stmt1;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "HashJoin")) + tk.MustExec("execute stmt1;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("prepare stmt2 from 'SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1,t2 where t1.id = t2.id';") + tk.MustExec("execute stmt2;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "MergeJoin")) + tk.MustExec("execute stmt2;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for SELECT * from t1,t2 where t1.id = t2.id using SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1,t2 where t1.id = t2.id") + + tk.MustExec("execute stmt1;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "MergeJoin")) + + tk.MustExec("drop global binding for SELECT * from t1,t2 where t1.id = t2.id") + + tk.MustExec("create index index_id on t1(id)") + tk.MustExec("prepare stmt1 from 'SELECT * from t1 use index(index_id)';") + tk.MustExec("execute stmt1;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + 
require.True(t, tk.HasPlan4ExplainFor(res, "IndexReader")) + tk.MustExec("execute stmt1;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for SELECT * from t1 using SELECT * from t1 ignore index(index_id)") + tk.MustExec("execute stmt1;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.False(t, tk.HasPlan4ExplainFor(res, "IndexReader")) + tk.MustExec("execute stmt1;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + // Add test for SetOprStmt + tk.MustExec("prepare stmt1 from 'SELECT * from t1 union SELECT * from t1';") + tk.MustExec("execute stmt1;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.False(t, tk.HasPlan4ExplainFor(res, "IndexReader")) + tk.MustExec("execute stmt1;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("prepare stmt2 from 'SELECT * from t1 use index(index_id) union SELECT * from t1';") + tk.MustExec("execute stmt2;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "IndexReader")) + tk.MustExec("execute stmt2;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec("create global binding for SELECT * from t1 union SELECT * from t1 using SELECT * from t1 use index(index_id) union SELECT * from t1") + + tk.MustExec("execute stmt1;") + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.HasPlan4ExplainFor(res, "IndexReader")) + + tk.MustExec("drop global binding for SELECT * from t1 union SELECT * from t1") + + // TestBindingSymbolList + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, b int, INDEX ia (a), INDEX ib (b));") + tk.MustExec("insert into t value(1, 1);") + tk.MustExec("prepare stmt1 from 'select a, b from t where a = 3 limit 1, 100';") + tk.MustExec("execute stmt1;") + require.Equal(t, "t:ia", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "ia(a)"), res.Rows()) + tk.MustExec("execute stmt1;") + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + + tk.MustExec(`create global binding for select a, b from t where a = 1 limit 0, 1 using select a, b from t use index (ib) where a = 1 limit 0, 1`) + + // after binding + tk.MustExec("execute stmt1;") + require.Equal(t, "t:ib", tk.Session().GetSessionVars().StmtCtx.IndexNames[0]) + tkProcess = tk.Session().ShowProcess() + ps = []*util.ProcessInfo{tkProcess} + tk.Session().SetSessionManager(&mockSessionManager1{PS: ps}) + res = 
tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) + require.True(t, tk.MustUseIndex4ExplainFor(res, "ib(b)"), res.Rows()) +} + func TestExplain(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() diff --git a/executor/explainfor_test.go b/executor/explainfor_test.go index 078ed7e6bb548..5befb576c132a 100644 --- a/executor/explainfor_test.go +++ b/executor/explainfor_test.go @@ -1124,16 +1124,20 @@ func (s *testPrepareSerialSuite) TestSPM4PlanCache(c *C) { tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("1")) tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + // The bindSQL has changed, the previous cache is invalid. + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) tk.MustQuery("execute stmt;").Check(testkit.Rows()) tkProcess = tk.Se.ShowProcess() ps = []*util.ProcessInfo{tkProcess} tk.Se.SetSessionManager(&mockSessionManager1{PS: ps}) res = tk.MustQuery("explain for connection " + strconv.FormatUint(tkProcess.ID, 10)) - // The binding does not take effect for caches that have been cached. - c.Assert(res.Rows()[0][0], Matches, ".*TableReader.*") - c.Assert(res.Rows()[1][0], Matches, ".*TableFullScan.*") - tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("0")) + // We can use the new binding. + c.Assert(res.Rows()[0][0], Matches, ".*IndexReader.*") + c.Assert(res.Rows()[1][0], Matches, ".*IndexFullScan.*") + tk.MustQuery("execute stmt;").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) + tk.MustQuery("execute stmt;").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("1")) tk.MustExec("delete from mysql.bind_info where default_db='test';") tk.MustExec("admin reload bindings;") diff --git a/executor/prepared.go b/executor/prepared.go index 3013aba0de9cd..82a030e76b6c1 100644 --- a/executor/prepared.go +++ b/executor/prepared.go @@ -314,8 +314,9 @@ func (e *DeallocateExec) Next(ctx context.Context, req *chunk.Chunk) error { prepared := preparedObj.PreparedAst delete(vars.PreparedStmtNameToID, e.Name) if plannercore.PreparedPlanCacheEnabled() { + bindSQL := planner.GetBindSQL4PlanCache(e.ctx, prepared.Stmt) e.ctx.PreparedPlanCache().Delete(plannercore.NewPSTMTPlanCacheKey( - vars, id, prepared.SchemaVersion, + vars, id, prepared.SchemaVersion, bindSQL, )) } vars.RemovePreparedStmt(id) diff --git a/planner/core/cache.go b/planner/core/cache.go index ea6d0c32e3b39..a386c4a5a3649 100644 --- a/planner/core/cache.go +++ b/planner/core/cache.go @@ -74,6 +74,7 @@ type pstmtPlanCacheKey struct { timezoneOffset int isolationReadEngines map[kv.StoreType]struct{} selectLimit uint64 + bindSQL string hash []byte } @@ -104,6 +105,7 @@ func (key *pstmtPlanCacheKey) Hash() []byte { key.hash = append(key.hash, kv.TiFlash.Name()...) } key.hash = codec.EncodeInt(key.hash, int64(key.selectLimit)) + key.hash = append(key.hash, hack.Slice(key.bindSQL)...) } return key.hash } @@ -125,7 +127,7 @@ func SetPstmtIDSchemaVersion(key kvcache.Key, pstmtID uint32, schemaVersion int6 } // NewPSTMTPlanCacheKey creates a new pstmtPlanCacheKey object. 
-func NewPSTMTPlanCacheKey(sessionVars *variable.SessionVars, pstmtID uint32, schemaVersion int64) kvcache.Key { +func NewPSTMTPlanCacheKey(sessionVars *variable.SessionVars, pstmtID uint32, schemaVersion int64, bindSQL string) kvcache.Key { timezoneOffset := 0 if sessionVars.TimeZone != nil { _, timezoneOffset = time.Now().In(sessionVars.TimeZone).Zone() @@ -139,6 +141,7 @@ func NewPSTMTPlanCacheKey(sessionVars *variable.SessionVars, pstmtID uint32, sch timezoneOffset: timezoneOffset, isolationReadEngines: make(map[kv.StoreType]struct{}), selectLimit: sessionVars.SelectLimit, + bindSQL: bindSQL, } for k, v := range sessionVars.IsolationReadEngines { key.isolationReadEngines[k] = v diff --git a/planner/core/cache_test.go b/planner/core/cache_test.go index ff0ab53fa558b..074d1e4cf2828 100644 --- a/planner/core/cache_test.go +++ b/planner/core/cache_test.go @@ -28,6 +28,6 @@ func TestCacheKey(t *testing.T) { ctx.GetSessionVars().SQLMode = mysql.ModeNone ctx.GetSessionVars().TimeZone = time.UTC ctx.GetSessionVars().ConnectionID = 0 - key := NewPSTMTPlanCacheKey(ctx.GetSessionVars(), 1, 1) + key := NewPSTMTPlanCacheKey(ctx.GetSessionVars(), 1, 1, "") require.Equal(t, []byte{0x74, 0x65, 0x73, 0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x74, 0x69, 0x64, 0x62, 0x74, 0x69, 0x6b, 0x76, 0x74, 0x69, 0x66, 0x6c, 0x61, 0x73, 0x68, 0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, key.Hash()) } diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index 9af659ef6c972..d3e56600b2c25 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -401,8 +401,10 @@ func (e *Execute) getPhysicalPlan(ctx context.Context, sctx sessionctx.Context, } stmtCtx.UseCache = prepared.UseCache + var bindSQL string if prepared.UseCache { - cacheKey = NewPSTMTPlanCacheKey(sctx.GetSessionVars(), e.ExecID, prepared.SchemaVersion) + bindSQL = GetBindSQL4PlanCache(sctx, prepared.Stmt) + cacheKey = NewPSTMTPlanCacheKey(sctx.GetSessionVars(), e.ExecID, prepared.SchemaVersion, bindSQL) } tps := make([]*types.FieldType, len(e.UsingVars)) for i, param := range e.UsingVars { @@ -468,6 +470,14 @@ func (e *Execute) getPhysicalPlan(ctx context.Context, sctx sessionctx.Context, if err != nil { return err } + if len(bindSQL) > 0 { + // When the `len(bindSQL) > 0`, it means we use the binding. + // So we need to record this. + err = sessVars.SetSystemVar(variable.TiDBFoundInBinding, variable.BoolToOnOff(true)) + if err != nil { + return err + } + } if metrics.ResettablePlanCacheCounterFortTest { metrics.PlanCacheCounter.WithLabelValues("prepare").Inc() } else { @@ -500,8 +510,11 @@ REBUILD: // rebuild key to exclude kv.TiFlash when stmt is not read only if _, isolationReadContainTiFlash := sessVars.IsolationReadEngines[kv.TiFlash]; isolationReadContainTiFlash && !IsReadOnly(stmt, sessVars) { delete(sessVars.IsolationReadEngines, kv.TiFlash) - cacheKey = NewPSTMTPlanCacheKey(sctx.GetSessionVars(), e.ExecID, prepared.SchemaVersion) + cacheKey = NewPSTMTPlanCacheKey(sessVars, e.ExecID, prepared.SchemaVersion, sessVars.StmtCtx.BindSQL) sessVars.IsolationReadEngines[kv.TiFlash] = struct{}{} + } else { + // We need to reconstruct the plan cache key based on the bindSQL. 
+ cacheKey = NewPSTMTPlanCacheKey(sessVars, e.ExecID, prepared.SchemaVersion, sessVars.StmtCtx.BindSQL) } cached := NewPSTMTPlanCacheValue(p, names, stmtCtx.TblInfo2UnionScan, tps) preparedStmt.NormalizedPlan, preparedStmt.PlanDigest = NormalizePlan(p) diff --git a/planner/core/optimizer.go b/planner/core/optimizer.go index 5103ea4fdf38d..89b156e632cea 100644 --- a/planner/core/optimizer.go +++ b/planner/core/optimizer.go @@ -44,6 +44,9 @@ import ( // OptimizeAstNode optimizes the query to a physical plan directly. var OptimizeAstNode func(ctx context.Context, sctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema) (Plan, types.NameSlice, error) +// GetBindSQL4PlanCache get the bindSQL for the ast.StmtNode +var GetBindSQL4PlanCache func(sctx sessionctx.Context, stmtNode ast.StmtNode) (bindSQL string) + // AllowCartesianProduct means whether tidb allows cartesian join without equal conditions. var AllowCartesianProduct = atomic.NewBool(true) diff --git a/planner/optimize.go b/planner/optimize.go index 363c3e6f5374a..b16fc09a238f0 100644 --- a/planner/optimize.go +++ b/planner/optimize.go @@ -99,6 +99,28 @@ func GetExecuteForUpdateReadIS(node ast.Node, sctx sessionctx.Context) infoschem return nil } +// GetBindSQL4PlanCache used to get the bindSQL for plan cache to build the plan cache key. +func GetBindSQL4PlanCache(sctx sessionctx.Context, stmtNode ast.StmtNode) (bindSQL string) { + bindRecord, _, match := matchSQLBinding(sctx, stmtNode) + if match { + bindSQL = bindRecord.Bindings[0].BindSQL + } + return bindSQL +} + +func matchSQLBinding(sctx sessionctx.Context, stmtNode ast.StmtNode) (bindRecord *bindinfo.BindRecord, scope string, matched bool) { + useBinding := sctx.GetSessionVars().UsePlanBaselines + if !useBinding || stmtNode == nil { + return nil, "", false + } + var err error + bindRecord, scope, err = getBindRecord(sctx, stmtNode) + if err != nil || bindRecord == nil || len(bindRecord.Bindings) == 0 { + return nil, "", false + } + return bindRecord, scope, true +} + // Optimize does optimization and creates a Plan. // The node must be prepared first. func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema) (plannercore.Plan, types.NameSlice, error) { @@ -149,16 +171,9 @@ func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in if !ok { useBinding = false } - var ( - bindRecord *bindinfo.BindRecord - scope string - err error - ) - if useBinding { - bindRecord, scope, err = getBindRecord(sctx, stmtNode) - if err != nil || bindRecord == nil || len(bindRecord.Bindings) == 0 { - useBinding = false - } + bindRecord, scope, match := matchSQLBinding(sctx, stmtNode) + if !match { + useBinding = false } if ok { // add the extra Limit after matching the bind record @@ -166,14 +181,15 @@ func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in node = stmtNode } - var names types.NameSlice - var bestPlan, bestPlanFromBind plannercore.Plan + var ( + names types.NameSlice + bestPlan, bestPlanFromBind plannercore.Plan + chosenBinding bindinfo.Binding + err error + ) if useBinding { minCost := math.MaxFloat64 - var ( - bindStmtHints stmtctx.StmtHints - chosenBinding bindinfo.Binding - ) + var bindStmtHints stmtctx.StmtHints originHints := hint.CollectHint(stmtNode) // bindRecord must be not nil when coming here, try to find the best binding. 
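
Whether a bindRecord exists at all is decided by matchSQLBinding above, which works on the statement's normalized text, so literal values do not matter. A deliberately naive, self-contained model of that lookup (the real normalizer lives in the TiDB parser and is far more thorough):

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    // normalize is a stand-in for the parser's SQL normalization: it lowercases
    // the text and replaces integer literals with '?', so "b = 1" and "b = 7"
    // resolve to the same binding.
    var literal = regexp.MustCompile(`\b\d+\b`)

    func normalize(sql string) string {
        return literal.ReplaceAllString(strings.ToLower(sql), "?")
    }

    func main() {
        bindings := map[string]string{
            normalize("delete from t1 where b = 1 and c > 1"): "delete /*+ use_index(t1,idx_c) */ from t1 where b = 1 and c > 1",
        }
        bindSQL, matched := bindings[normalize("DELETE FROM t1 WHERE b = 7 AND c > 9")]
        fmt.Println(matched, bindSQL)
    }

With a match in hand, the loop below plans each binding and keeps the cheapest result.
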
for _, binding := range bindRecord.Bindings { @@ -206,7 +222,7 @@ func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in for _, warn := range warns { sessVars.StmtCtx.AppendWarning(warn) } - if err := setFoundInBinding(sctx, true); err != nil { + if err := setFoundInBinding(sctx, true, chosenBinding.BindSQL); err != nil { logutil.BgLogger().Warn("set tidb_found_in_binding failed", zap.Error(err)) } if sessVars.StmtCtx.InVerboseExplain { @@ -694,13 +710,15 @@ func handleStmtHints(hints []*ast.TableOptimizerHint) (stmtHints stmtctx.StmtHin return } -func setFoundInBinding(sctx sessionctx.Context, opt bool) error { +func setFoundInBinding(sctx sessionctx.Context, opt bool, bindSQL string) error { vars := sctx.GetSessionVars() + vars.StmtCtx.BindSQL = bindSQL err := vars.SetSystemVar(variable.TiDBFoundInBinding, variable.BoolToOnOff(opt)) return err } func init() { plannercore.OptimizeAstNode = Optimize + plannercore.GetBindSQL4PlanCache = GetBindSQL4PlanCache plannercore.IsReadOnly = IsReadOnly } diff --git a/server/driver_tidb.go b/server/driver_tidb.go index 6dae49084eeee..9a13eea632962 100644 --- a/server/driver_tidb.go +++ b/server/driver_tidb.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/tidb/parser/charset" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" + "github.com/pingcap/tidb/planner" "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx/stmtctx" @@ -164,8 +165,10 @@ func (ts *TiDBStatement) Close() error { if !ok { return errors.Errorf("invalid CachedPrepareStmt type") } + preparedAst := preparedObj.PreparedAst + bindSQL := planner.GetBindSQL4PlanCache(ts.ctx, preparedAst.Stmt) ts.ctx.PreparedPlanCache().Delete(core.NewPSTMTPlanCacheKey( - ts.ctx.GetSessionVars(), ts.id, preparedObj.PreparedAst.SchemaVersion)) + ts.ctx.GetSessionVars(), ts.id, preparedObj.PreparedAst.SchemaVersion, bindSQL)) } ts.ctx.GetSessionVars().RemovePreparedStmt(ts.id) } diff --git a/session/session.go b/session/session.go index bfc5288a7ff4f..465de576b37c7 100644 --- a/session/session.go +++ b/session/session.go @@ -309,7 +309,8 @@ func (s *session) cleanRetryInfo() { preparedObj, ok := preparedPointer.(*plannercore.CachedPrepareStmt) if ok { preparedAst = preparedObj.PreparedAst - cacheKey = plannercore.NewPSTMTPlanCacheKey(s.sessionVars, firstStmtID, preparedAst.SchemaVersion) + bindSQL := planner.GetBindSQL4PlanCache(s, preparedAst.Stmt) + cacheKey = plannercore.NewPSTMTPlanCacheKey(s.sessionVars, firstStmtID, preparedAst.SchemaVersion, bindSQL) } } } diff --git a/sessionctx/stmtctx/stmtctx.go b/sessionctx/stmtctx/stmtctx.go index 3125ae419641e..e41eb4766b47b 100644 --- a/sessionctx/stmtctx/stmtctx.go +++ b/sessionctx/stmtctx/stmtctx.go @@ -153,6 +153,9 @@ type StatementContext struct { normalized string digest *parser.Digest } + // BindSQL used to construct the key for plan cache. It records the binding used by the stmt. + // If the binding is not used by the stmt, the value is empty + BindSQL string // planNormalized use for cache the normalized plan, avoid duplicate builds. planNormalized string planDigest *parser.Digest diff --git a/testkit/testkit.go b/testkit/testkit.go index 50af5e0178a1b..c99791efe369a 100644 --- a/testkit/testkit.go +++ b/testkit/testkit.go @@ -128,6 +128,16 @@ func (tk *TestKit) HasPlan(sql string, plan string, args ...interface{}) bool { return false } +// HasPlan4ExplainFor checks if the result execution plan contains specific plan. 
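
The driver, deallocate, and retry-cleanup changes above all exist for one reason: a plan cache entry can only be evicted with exactly the key it was inserted under, so every delete path has to recompute the bindSQL as well. A toy cache makes the failure mode obvious (hypothetical types, not the TiDB cache):

    package main

    import "fmt"

    type key struct {
        stmtID  uint32
        bindSQL string
    }

    type planCache map[key]string

    func (c planCache) put(k key, plan string) { c[k] = plan }
    func (c planCache) del(k key)              { delete(c, k) }

    func main() {
        c := planCache{}
        k := key{stmtID: 1, bindSQL: "select /*+ hint */ ..."}
        c.put(k, "plan-A")
        // Deleting with a key that omits the binding misses the entry and leaks it:
        c.del(key{stmtID: 1})
        fmt.Println(len(c)) // 1: still cached
        // Recomputing the bindSQL first, as the patch does, hits the right entry:
        c.del(k)
        fmt.Println(len(c)) // 0
    }

The testkit helpers below are what the new binding tests use to assert plans through `explain for connection`.
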
+func (tk *TestKit) HasPlan4ExplainFor(result *Result, plan string) bool { + for i := range result.rows { + if strings.Contains(result.rows[i][0], plan) { + return true + } + } + return false +} + // Exec executes a sql statement using the prepared stmt API func (tk *TestKit) Exec(sql string, args ...interface{}) (sqlexec.RecordSet, error) { ctx := context.Background() @@ -228,6 +238,20 @@ func (tk *TestKit) MustUseIndex(sql string, index string, args ...interface{}) b return false } +// MustUseIndex4ExplainFor checks if the result execution plan contains specific index(es). +func (tk *TestKit) MustUseIndex4ExplainFor(result *Result, index string) bool { + for i := range result.rows { + // It depends on whether we enable to collect the execution info. + if strings.Contains(result.rows[i][3], "index:"+index) { + return true + } + if strings.Contains(result.rows[i][4], "index:"+index) { + return true + } + } + return false +} + // CheckExecResult checks the affected rows and the insert id after executing MustExec. func (tk *TestKit) CheckExecResult(affectedRows, insertID int64) { tk.require.Equal(int64(tk.Session().AffectedRows()), affectedRows) From 5d62c2a92fe5b9a527db2e16e44d9a946a31bc9e Mon Sep 17 00:00:00 2001 From: Xiaoju Wu Date: Thu, 16 Dec 2021 10:46:35 +0800 Subject: [PATCH 07/15] planner: fix early set of plan's statisticsTable (#30754) --- planner/core/logical_plan_builder.go | 7 +------ planner/core/rule_partition_processor.go | 3 --- planner/core/stats.go | 2 +- 3 files changed, 2 insertions(+), 10 deletions(-) diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index c3b1239d4ffac..84149462965e1 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -4052,11 +4052,6 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as } else { columns = tbl.Cols() } - var statisticTable *statistics.Table - if _, ok := tbl.(table.PartitionedTable); !ok || b.ctx.GetSessionVars().UseDynamicPartitionPrune() { - statisticTable = getStatsTable(b.ctx, tbl.Meta(), tbl.Meta().ID) - } - // extract the IndexMergeHint var indexMergeHints []indexHintInfo if hints := b.TableHints(); hints != nil { @@ -4101,7 +4096,7 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as TableAsName: asName, table: tbl, tableInfo: tableInfo, - statisticTable: statisticTable, + physicalTableID: tableInfo.ID, astIndexHints: tn.IndexHints, IndexHints: b.TableHints().indexHintList, indexMergeHints: indexMergeHints, diff --git a/planner/core/rule_partition_processor.go b/planner/core/rule_partition_processor.go index bb57b0fac33da..1264a47ac97bc 100644 --- a/planner/core/rule_partition_processor.go +++ b/planner/core/rule_partition_processor.go @@ -1415,9 +1415,6 @@ func (s *partitionProcessor) makeUnionAllChildren(ds *DataSource, pi *model.Part // id as FromID. So we set the id of the newDataSource with the original one to // avoid traversing the whole plan tree to update the references. 
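
As the hunks show, this patch stops resolving statisticTable eagerly in buildDataSource and instead resolves it on first use, keyed by physicalTableID. The lazy pattern in isolation (toy types, not the planner's):

    package main

    import "fmt"

    type statsTable struct{ physicalID int64 }

    // fetchStats stands in for getStatsTable; it only runs on first use.
    func fetchStats(physicalID int64) *statsTable {
        fmt.Println("loading stats for", physicalID)
        return &statsTable{physicalID: physicalID}
    }

    type dataSource struct {
        physicalTableID int64
        statisticTable  *statsTable
    }

    // initStats resolves stats lazily, after partition rewriting has fixed the
    // final physical table ID, instead of at plan-building time.
    func (ds *dataSource) initStats() *statsTable {
        if ds.statisticTable == nil {
            ds.statisticTable = fetchStats(ds.physicalTableID)
        }
        return ds.statisticTable
    }

    func main() {
        ds := &dataSource{physicalTableID: 42}
        ds.initStats()
        ds.initStats() // reuses the cached pointer; prints once
    }

Hence makeUnionAllChildren, continued below, only copies the DataSource id and drops its statisticTable assignment.
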
newDataSource.id = ds.id - if !ds.ctx.GetSessionVars().UseDynamicPartitionPrune() { - newDataSource.statisticTable = getStatsTable(ds.SCtx(), ds.table.Meta(), pi.Definitions[i].ID) - } err := s.resolveOptimizeHint(&newDataSource, pi.Definitions[i].Name) partitionNameSet.Insert(pi.Definitions[i].Name.L) if err != nil { diff --git a/planner/core/stats.go b/planner/core/stats.go index 2e7fd14a67b8d..d3f23427b2f40 100644 --- a/planner/core/stats.go +++ b/planner/core/stats.go @@ -226,7 +226,7 @@ func (ds *DataSource) initStats(colGroups [][]*expression.Column) { return } if ds.statisticTable == nil { - ds.statisticTable = getStatsTable(ds.ctx, ds.tableInfo, ds.table.Meta().ID) + ds.statisticTable = getStatsTable(ds.ctx, ds.tableInfo, ds.physicalTableID) } tableStats := &property.StatsInfo{ RowCount: float64(ds.statisticTable.Count), From 6eb11b300ff6a80c0eac19c2852b1a6392516ef3 Mon Sep 17 00:00:00 2001 From: tiancaiamao Date: Thu, 16 Dec 2021 12:12:35 +0800 Subject: [PATCH 08/15] *: implement renew write lock lease for cached table (#30206) --- go.mod | 2 +- go.sum | 4 +- kv/option.go | 3 + session/session.go | 96 +++++++++++++++++++++++++++++++ session/session_test.go | 32 +++++++++++ sessionctx/variable/session.go | 3 + store/driver/txn/txn_driver.go | 2 + table/tables/cache.go | 50 +++++----------- table/tables/state_remote.go | 95 ++++++++++++++++++++---------- table/tables/state_remote_test.go | 40 +++++-------- 10 files changed, 235 insertions(+), 92 deletions(-) diff --git a/go.mod b/go.mod index e61a95e5a0a2d..3c2868df8b118 100644 --- a/go.mod +++ b/go.mod @@ -65,7 +65,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.0 github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 - github.com/tikv/client-go/v2 v2.0.0-rc.0.20211213075151-b147ced35a14 + github.com/tikv/client-go/v2 v2.0.0-rc.0.20211214093715-605f49d3ba50 github.com/tikv/pd v1.1.0-beta.0.20211118054146-02848d2660ee github.com/twmb/murmur3 v1.1.3 github.com/uber/jaeger-client-go v2.22.1+incompatible diff --git a/go.sum b/go.sum index 56b154bd4a38f..a6b2d60e60a01 100644 --- a/go.sum +++ b/go.sum @@ -712,8 +712,8 @@ github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfK github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tikv/client-go/v2 v2.0.0-rc.0.20211213075151-b147ced35a14 h1:l2T+gfgYpwmLRY5geDq1zM4Lz4X2mi1ruO18/bDGo70= -github.com/tikv/client-go/v2 v2.0.0-rc.0.20211213075151-b147ced35a14/go.mod h1:wRuh+W35daKTiYBld0oBlT6PSkzEVr+pB/vChzJZk+8= +github.com/tikv/client-go/v2 v2.0.0-rc.0.20211214093715-605f49d3ba50 h1:B+cAIm2P1/SNsVV1vL9/mRaGUVl/vdgV8MU03O0vY28= +github.com/tikv/client-go/v2 v2.0.0-rc.0.20211214093715-605f49d3ba50/go.mod h1:wRuh+W35daKTiYBld0oBlT6PSkzEVr+pB/vChzJZk+8= github.com/tikv/pd v1.1.0-beta.0.20211029083450-e65f0c55b6ae/go.mod h1:varH0IE0jJ9E9WN2Ei/N6pajMlPkcXdDEf7f5mmsUVQ= github.com/tikv/pd v1.1.0-beta.0.20211118054146-02848d2660ee h1:rAAdvQ8Hh36syHr92g0VmZEpkH+40RGQBpFL2121xMs= github.com/tikv/pd v1.1.0-beta.0.20211118054146-02848d2660ee/go.mod h1:lRbwxBAhnTQR5vqbTzeI/Bj62bD2OvYYuFezo2vrmeI= diff --git a/kv/option.go b/kv/option.go index 683f5fcb8e389..2a7a17fedcb6c 100644 --- a/kv/option.go +++ b/kv/option.go @@ -71,6 +71,9 @@ const ( // SnapInterceptor is used for setting the interceptor for snapshot SnapInterceptor + 
// CommitTSUpperBoundCheck is used by cached tables.
+	// The commitTS must be greater than all the write lock leases of the visited cached tables.
+	CommitTSUpperBoundCheck
 )
 
 // ReplicaReadType is the type of replica to read data from
diff --git a/session/session.go b/session/session.go
index 465de576b37c7..16fecfa3b0121 100644
--- a/session/session.go
+++ b/session/session.go
@@ -44,6 +44,7 @@ import (
 	"github.com/pingcap/tidb/parser/model"
 	"github.com/pingcap/tidb/parser/mysql"
 	"github.com/pingcap/tidb/parser/terror"
+	"github.com/pingcap/tidb/table/tables"
 	"github.com/pingcap/tidb/table/temptable"
 	"github.com/pingcap/tidb/util/topsql"
 	"github.com/pingcap/tipb/go-binlog"
@@ -89,6 +90,7 @@ import (
 	"github.com/pingcap/tidb/util/tableutil"
 	"github.com/pingcap/tidb/util/timeutil"
 	tikvstore "github.com/tikv/client-go/v2/kv"
+	"github.com/tikv/client-go/v2/oracle"
 	"github.com/tikv/client-go/v2/tikv"
 	tikvutil "github.com/tikv/client-go/v2/util"
 )
@@ -559,10 +561,104 @@ func (s *session) doCommit(ctx context.Context) error {
 	if tables := sessVars.TxnCtx.TemporaryTables; len(tables) > 0 {
 		s.txn.SetOption(kv.KVFilter, temporaryTableKVFilter(tables))
 	}
+	if tables := sessVars.TxnCtx.CachedTables; len(tables) > 0 {
+		c := cachedTableRenewLease{tables: tables}
+		now := time.Now()
+		err := c.start(ctx)
+		defer c.stop(ctx)
+		sessVars.StmtCtx.WaitLockLeaseTime += time.Since(now)
+		if err != nil {
+			return errors.Trace(err)
+		}
+		s.txn.SetOption(kv.CommitTSUpperBoundCheck, c.commitTSCheck)
+	}
 	return s.commitTxnWithTemporaryData(tikvutil.SetSessionID(ctx, sessVars.ConnectionID), &s.txn)
 }
 
+type cachedTableRenewLease struct {
+	tables map[int64]interface{}
+	lease  []uint64 // Lease for each visited cached table.
+	exit   chan struct{}
+}
+
+func (c *cachedTableRenewLease) start(ctx context.Context) error {
+	c.exit = make(chan struct{})
+	c.lease = make([]uint64, len(c.tables))
+	wg := make(chan error)
+	ith := 0
+	for tid, raw := range c.tables {
+		go c.keepAlive(ctx, wg, raw.(tables.StateRemote), tid, &c.lease[ith])
+		ith++
+	}
+
+	// Wait for all LockForWrite() calls to return before this function returns.
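
start() is a plain fan-out/fan-in: one keepAlive goroutine per cached table, then one receive per goroutine on an unbuffered channel, keeping the last error seen. The same shape in a self-contained sketch (hypothetical helper, not the session code):

    package main

    import (
        "errors"
        "fmt"
    )

    // fanIn launches one goroutine per unit of work, then collects exactly one
    // result per goroutine, remembering the last non-nil error.
    func fanIn(n int, work func(i int) error) error {
        results := make(chan error)
        for i := 0; i < n; i++ {
            go func(i int) { results <- work(i) }(i)
        }
        var err error
        for ; n > 0; n-- {
            if e := <-results; e != nil {
                err = e
            }
        }
        return err
    }

    func main() {
        err := fanIn(3, func(i int) error {
            if i == 1 {
                return errors.New("lock table 1 failed")
            }
            return nil
        })
        fmt.Println(err)
    }

The wait loop that follows is exactly this receive loop.
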
+ var err error + for ; ith > 0; ith-- { + tmp := <-wg + if tmp != nil { + err = tmp + } + } + return err +} + +const cacheTableWriteLease = 5 * time.Second + +func (c *cachedTableRenewLease) keepAlive(ctx context.Context, wg chan error, handle tables.StateRemote, tid int64, leasePtr *uint64) { + writeLockLease, err := handle.LockForWrite(ctx, tid) + atomic.StoreUint64(leasePtr, writeLockLease) + wg <- err + if err != nil { + logutil.Logger(ctx).Warn("[cached table] lock for write lock fail", zap.Error(err)) + return + } + + t := time.NewTicker(cacheTableWriteLease) + defer t.Stop() + for { + select { + case <-t.C: + if err := c.renew(ctx, handle, tid, leasePtr); err != nil { + logutil.Logger(ctx).Warn("[cached table] renew write lock lease fail", zap.Error(err)) + return + } + case <-c.exit: + return + } + } +} + +func (c *cachedTableRenewLease) renew(ctx context.Context, handle tables.StateRemote, tid int64, leasePtr *uint64) error { + oldLease := atomic.LoadUint64(leasePtr) + physicalTime := oracle.GetTimeFromTS(oldLease) + newLease := oracle.GoTimeToTS(physicalTime.Add(cacheTableWriteLease)) + + succ, err := handle.RenewLease(ctx, tid, newLease, tables.RenewWriteLease) + if err != nil { + return errors.Trace(err) + } + if succ { + atomic.StoreUint64(leasePtr, newLease) + } + return nil +} + +func (c *cachedTableRenewLease) stop(ctx context.Context) { + close(c.exit) +} + +func (c *cachedTableRenewLease) commitTSCheck(commitTS uint64) bool { + for i := 0; i < len(c.lease); i++ { + lease := atomic.LoadUint64(&c.lease[i]) + if commitTS >= lease { + // Txn fails to commit because the write lease is expired. + return false + } + } + return true +} + func (s *session) commitTxnWithTemporaryData(ctx context.Context, txn kv.Transaction) error { sessVars := s.sessionVars txnTempTables := sessVars.TxnCtx.TemporaryTables diff --git a/session/session_test.go b/session/session_test.go index acbc889102fe1..7b5febe0d18e0 100644 --- a/session/session_test.go +++ b/session/session_test.go @@ -5883,3 +5883,35 @@ func (s *testSessionSuite) TestSameNameObjectWithLocalTemporaryTable(c *C) { " `cs1` int(11) DEFAULT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) } + +func (s *testSessionSuite) TestWriteOnMultipleCachedTable(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists ct1, ct2") + tk.MustExec("create table ct1 (id int, c int)") + tk.MustExec("create table ct2 (id int, c int)") + tk.MustExec("alter table ct1 cache") + tk.MustExec("alter table ct2 cache") + tk.MustQuery("select * from ct1").Check(testkit.Rows()) + tk.MustQuery("select * from ct2").Check(testkit.Rows()) + + cached := false + for i := 0; i < 50; i++ { + if tk.HasPlan("select * from ct1", "Union") { + if tk.HasPlan("select * from ct2", "Union") { + cached = true + break + } + } + time.Sleep(100 * time.Millisecond) + } + c.Assert(cached, IsTrue) + + tk.MustExec("begin") + tk.MustExec("insert into ct1 values (3, 4)") + tk.MustExec("insert into ct2 values (5, 6)") + tk.MustExec("commit") + + tk.MustQuery("select * from ct1").Check(testkit.Rows("3 4")) + tk.MustQuery("select * from ct2").Check(testkit.Rows("5 6")) +} diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 680e6f2c1367b..8169eaa5c2d66 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -180,6 +180,9 @@ type TransactionContext struct { // TemporaryTables is used to store transaction-specific information for global temporary tables. 
// It can also be stored in sessionCtx with local temporary tables, but it's easier to clean this data after transaction ends. TemporaryTables map[int64]tableutil.TempTable + + // CachedTables is not nil if the transaction write on cached table. + CachedTables map[int64]interface{} } // GetShard returns the shard prefix for the next `count` rowids. diff --git a/store/driver/txn/txn_driver.go b/store/driver/txn/txn_driver.go index 823d33ac88f59..717bf3b154761 100644 --- a/store/driver/txn/txn_driver.go +++ b/store/driver/txn/txn_driver.go @@ -230,6 +230,8 @@ func (txn *tikvTxn) SetOption(opt int, val interface{}) { txn.KVTxn.SetKVFilter(val.(tikv.KVFilter)) case kv.SnapInterceptor: txn.snapshotInterceptor = val.(kv.SnapshotInterceptor) + case kv.CommitTSUpperBoundCheck: + txn.KVTxn.SetCommitTSUpperBoundCheck(val.(func(commitTS uint64) bool)) } } diff --git a/table/tables/cache.go b/table/tables/cache.go index c95379593c066..7e3eb7c40b9a9 100644 --- a/table/tables/cache.go +++ b/table/tables/cache.go @@ -183,51 +183,31 @@ func (c *cachedTable) UpdateLockForRead(ctx context.Context, store kv.Storage, t } // AddRecord implements the AddRecord method for the table.Table interface. -func (c *cachedTable) AddRecord(ctx sessionctx.Context, r []types.Datum, opts ...table.AddRecordOption) (recordID kv.Handle, err error) { - txn, err := ctx.Txn(true) - if err != nil { - return nil, err +func (c *cachedTable) AddRecord(sctx sessionctx.Context, r []types.Datum, opts ...table.AddRecordOption) (recordID kv.Handle, err error) { + txnCtxAddCachedTable(sctx, c.Meta().ID, c.handle) + return c.TableCommon.AddRecord(sctx, r, opts...) +} + +func txnCtxAddCachedTable(sctx sessionctx.Context, tid int64, handle StateRemote) { + txnCtx := sctx.GetSessionVars().TxnCtx + if txnCtx.CachedTables == nil { + txnCtx.CachedTables = make(map[int64]interface{}) } - now := txn.StartTS() - start := time.Now() - err = c.handle.LockForWrite(context.Background(), c.Meta().ID, leaseFromTS(now)) - if err != nil { - return nil, errors.Trace(err) + if _, ok := txnCtx.CachedTables[tid]; !ok { + txnCtx.CachedTables[tid] = handle } - ctx.GetSessionVars().StmtCtx.WaitLockLeaseTime += time.Since(start) - return c.TableCommon.AddRecord(ctx, r, opts...) } // UpdateRecord implements table.Table func (c *cachedTable) UpdateRecord(ctx context.Context, sctx sessionctx.Context, h kv.Handle, oldData, newData []types.Datum, touched []bool) error { - txn, err := sctx.Txn(true) - if err != nil { - return err - } - now := txn.StartTS() - start := time.Now() - err = c.handle.LockForWrite(ctx, c.Meta().ID, leaseFromTS(now)) - if err != nil { - return errors.Trace(err) - } - sctx.GetSessionVars().StmtCtx.WaitLockLeaseTime += time.Since(start) + txnCtxAddCachedTable(sctx, c.Meta().ID, c.handle) return c.TableCommon.UpdateRecord(ctx, sctx, h, oldData, newData, touched) } // RemoveRecord implements table.Table RemoveRecord interface. 
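
All three write paths now only tag the transaction context; the actual locking moved to commit time, where doCommit wires c.commitTSCheck in through kv.CommitTSUpperBoundCheck. The check itself is simple: the commit timestamp must stay below every write lock lease taken on a visited cached table, otherwise a lease expired mid-commit and the write must fail. A self-contained sketch of that predicate (toy signature, mirroring commitTSCheck above):

    package main

    import "fmt"

    // commitTSCheck models the callback registered via kv.CommitTSUpperBoundCheck:
    // a commit timestamp is acceptable only while every write lock lease still
    // covers it.
    func commitTSCheck(commitTS uint64, leases []uint64) bool {
        for _, lease := range leases {
            if commitTS >= lease {
                return false // lease expired before commit; the write must fail
            }
        }
        return true
    }

    func main() {
        leases := []uint64{100, 120}
        fmt.Println(commitTSCheck(99, leases))  // true: both leases still cover it
        fmt.Println(commitTSCheck(105, leases)) // false: the first lease ran out
    }

Meanwhile the keepAlive goroutines keep pushing those leases forward, so only a commit that outlives its renewals is rejected. RemoveRecord below gets the same one-line treatment as AddRecord and UpdateRecord.
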
-func (c *cachedTable) RemoveRecord(ctx sessionctx.Context, h kv.Handle, r []types.Datum) error {
-	txn, err := ctx.Txn(true)
-	if err != nil {
-		return err
-	}
-	now := txn.StartTS()
-	start := time.Now()
-	err = c.handle.LockForWrite(context.Background(), c.Meta().ID, leaseFromTS(now))
-	if err != nil {
-		return errors.Trace(err)
-	}
-	ctx.GetSessionVars().StmtCtx.WaitLockLeaseTime += time.Since(start)
-	return c.TableCommon.RemoveRecord(ctx, h, r)
+func (c *cachedTable) RemoveRecord(sctx sessionctx.Context, h kv.Handle, r []types.Datum) error {
+	txnCtxAddCachedTable(sctx, c.Meta().ID, c.handle)
+	return c.TableCommon.RemoveRecord(sctx, h, r)
 }
 
 func (c *cachedTable) renewLease(ts uint64, op RenewLeaseType, data *cacheData) func() {
diff --git a/table/tables/state_remote.go b/table/tables/state_remote.go
index 83fd06f7e410e..aeddd5b972ab2 100644
--- a/table/tables/state_remote.go
+++ b/table/tables/state_remote.go
@@ -67,7 +67,7 @@ type StateRemote interface {
 	LockForRead(ctx context.Context, tid int64, lease uint64) (bool, error)
 
 	// LockForWrite try to add a write lock to the table with the specified tableID
-	LockForWrite(ctx context.Context, tid int64, lease uint64) error
+	LockForWrite(ctx context.Context, tid int64) (uint64, error)
 
 	// RenewLease attempt to renew the read / write lock on the table with the specified tableID
 	RenewLease(ctx context.Context, tid int64, newTs uint64, op RenewLeaseType) (bool, error)
@@ -132,28 +132,32 @@ func (h *stateRemoteHandle) LockForRead(ctx context.Context, tid int64, ts uint6
 	return succ, err
 }
 
-func (h *stateRemoteHandle) LockForWrite(ctx context.Context, tid int64, ts uint64) error {
+// LockForWrite tries to add a write lock to the table with the specified tableID, and returns the write lock lease.
+func (h *stateRemoteHandle) LockForWrite(ctx context.Context, tid int64) (uint64, error) {
 	h.Lock()
 	defer h.Unlock()
+	var ret uint64
 	for {
-		waitAndRetry, err := h.lockForWriteOnce(ctx, tid, ts)
+		waitAndRetry, lease, err := h.lockForWriteOnce(ctx, tid)
 		if err != nil {
-			return err
+			return 0, err
 		}
 		if waitAndRetry == 0 {
+			ret = lease
 			break
 		}
 		time.Sleep(waitAndRetry)
 	}
-	return nil
+	return ret, nil
 }
 
-func (h *stateRemoteHandle) lockForWriteOnce(ctx context.Context, tid int64, ts uint64) (waitAndRetry time.Duration, err error) {
+func (h *stateRemoteHandle) lockForWriteOnce(ctx context.Context, tid int64) (waitAndRetry time.Duration, ts uint64, err error) {
 	err = h.runInTxn(ctx, func(ctx context.Context, now uint64) error {
 		lockType, lease, oldReadLease, err := h.loadRow(ctx, tid)
 		if err != nil {
 			return errors.Trace(err)
 		}
+		ts = leaseFromTS(now)
 		// The lease is outdated, so lock is invalid, clear orphan lock of any kind.
if now > lease { if err := h.updateRow(ctx, tid, "WRITE", ts); err != nil { @@ -214,36 +218,69 @@ func (h *stateRemoteHandle) RenewLease(ctx context.Context, tid int64, newLease h.Lock() defer h.Unlock() + switch op { + case RenewReadLease: + return h.renewReadLease(ctx, tid, newLease) + case RenewWriteLease: + return h.renewWriteLease(ctx, tid, newLease) + } + return false, errors.New("wrong renew lease type") +} + +func (h *stateRemoteHandle) renewReadLease(ctx context.Context, tid int64, newLease uint64) (bool, error) { var succ bool - if op == RenewReadLease { - err := h.runInTxn(ctx, func(ctx context.Context, now uint64) error { - lockType, oldLease, _, err := h.loadRow(ctx, tid) + err := h.runInTxn(ctx, func(ctx context.Context, now uint64) error { + lockType, oldLease, _, err := h.loadRow(ctx, tid) + if err != nil { + return errors.Trace(err) + } + if now >= oldLease { + // read lock had already expired, fail to renew + return nil + } + if lockType != CachedTableLockRead { + // Not read lock, fail to renew + return nil + } + + if newLease > oldLease { // lease should never decrease! + err = h.updateRow(ctx, tid, "READ", newLease) if err != nil { return errors.Trace(err) } - if now >= oldLease { - // read lock had already expired, fail to renew - return nil - } - if lockType != CachedTableLockRead { - // Not read lock, fail to renew - return nil - } + } + succ = true + return nil + }) + return succ, err +} - if newLease > oldLease { // lease should never decrease! - err = h.updateRow(ctx, tid, "READ", newLease) - if err != nil { - return errors.Trace(err) - } - } - succ = true +func (h *stateRemoteHandle) renewWriteLease(ctx context.Context, tid int64, newLease uint64) (bool, error) { + var succ bool + err := h.runInTxn(ctx, func(ctx context.Context, now uint64) error { + lockType, oldLease, _, err := h.loadRow(ctx, tid) + if err != nil { + return errors.Trace(err) + } + if now >= oldLease { + // write lock had already expired, fail to renew return nil - }) - return succ, err - } + } + if lockType != CachedTableLockWrite { + // Not write lock, fail to renew + return nil + } - // TODO: renew for write lease - return false, errors.New("not implement yet") + if newLease > oldLease { // lease should never decrease! + err = h.updateRow(ctx, tid, "WRITE", newLease) + if err != nil { + return errors.Trace(err) + } + } + succ = true + return nil + }) + return succ, err } func (h *stateRemoteHandle) beginTxn(ctx context.Context) error { diff --git a/table/tables/state_remote_test.go b/table/tables/state_remote_test.go index b854388aef2c3..dc4e9272b1830 100644 --- a/table/tables/state_remote_test.go +++ b/table/tables/state_remote_test.go @@ -27,19 +27,7 @@ import ( "github.com/tikv/client-go/v2/oracle" ) -// CreateMetaLockForCachedTable initializes the cached table meta lock information. -func createMetaLockForCachedTable(h session.Session) error { - createTable := "CREATE TABLE IF NOT EXISTS `mysql`.`table_cache_meta` (" + - "`tid` int(11) NOT NULL DEFAULT 0," + - "`lock_type` enum('NONE','READ', 'INTEND', 'WRITE') NOT NULL DEFAULT 'NONE'," + - "`lease` bigint(20) NOT NULL DEFAULT 0," + - "`oldReadLease` bigint(20) NOT NULL DEFAULT 0," + - "PRIMARY KEY (`tid`))" - _, err := h.ExecuteInternal(context.Background(), createTable) - return err -} - -// InitRow add a new record into the cached table meta lock table. +// initRow add a new record into the cached table meta lock table. 
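
Leases here are TSO timestamps, so renewal is clock arithmetic on the physical part, which is exactly what keepAlive's renew step does with the client-go oracle helpers. In isolation (assuming the same client-go/v2 oracle package used above):

    package main

    import (
        "fmt"
        "time"

        "github.com/tikv/client-go/v2/oracle"
    )

    const writeLease = 5 * time.Second

    // nextLease pushes a TSO lease forward by one write-lease interval, the same
    // arithmetic the renewal loop applies on every tick.
    func nextLease(oldLease uint64) uint64 {
        physical := oracle.GetTimeFromTS(oldLease)
        return oracle.GoTimeToTS(physical.Add(writeLease))
    }

    func main() {
        start := oracle.GoTimeToTS(time.Now())
        fmt.Println(nextLease(start) > start) // true: leases only move forward
    }

The reworked test below pins down this renew-only-forward behavior for write locks.
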
func initRow(ctx context.Context, exec session.Session, tid int) error { _, err := exec.ExecuteInternal(ctx, "insert ignore into mysql.table_cache_meta values (%?, 'NONE', 0, 0)", tid) return err @@ -54,9 +42,6 @@ func TestStateRemote(t *testing.T) { ctx := context.Background() h := tables.NewStateRemote(se) - err := createMetaLockForCachedTable(se) - require.NoError(t, err) - require.Equal(t, tables.CachedTableLockNone, tables.CachedTableLockType(0)) // Check the initial value. require.NoError(t, initRow(ctx, se, 5)) @@ -104,17 +89,18 @@ func TestStateRemote(t *testing.T) { require.Equal(t, lease, leaseVal) // Check write lock. - leaseVal = oracle.GoTimeToTS(physicalTime.Add(700 * time.Millisecond)) - require.NoError(t, h.LockForWrite(ctx, 5, leaseVal)) + writeLease, err := h.LockForWrite(ctx, 5) + require.NoError(t, err) lockType, lease, err = h.Load(ctx, 5) require.NoError(t, err) require.Equal(t, lockType, tables.CachedTableLockWrite) require.Equal(t, lockType.String(), "WRITE") - require.Equal(t, lease, leaseVal) + require.Equal(t, writeLease, lease) + require.Greater(t, writeLease, leaseVal) // Lock for write again - leaseVal = oracle.GoTimeToTS(physicalTime.Add(800 * time.Millisecond)) - require.NoError(t, h.LockForWrite(ctx, 5, leaseVal)) + writeLease, err = h.LockForWrite(ctx, 5) + require.NoError(t, err) lockType, _, err = h.Load(ctx, 5) require.NoError(t, err) require.Equal(t, lockType, tables.CachedTableLockWrite) @@ -130,10 +116,14 @@ func TestStateRemote(t *testing.T) { require.NoError(t, err) require.False(t, succ) - // But clear orphan write lock should success. - time.Sleep(time.Second) - leaseVal = oracle.GoTimeToTS(physicalTime.Add(2 * time.Second)) - succ, err = h.LockForRead(ctx, 5, leaseVal) + // Renew write lease. + succ, err = h.RenewLease(ctx, 5, writeLease+1, tables.RenewWriteLease) require.NoError(t, err) require.True(t, succ) + + lockType, lease, err = h.Load(ctx, 5) + require.NoError(t, err) + require.Equal(t, lockType, tables.CachedTableLockWrite) + require.Equal(t, lockType.String(), "WRITE") + require.Equal(t, lease, writeLease+1) } From bb8774bf21940db7bfa7ee4013a6bf822eb534e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E8=B6=85?= Date: Thu, 16 Dec 2021 12:44:36 +0800 Subject: [PATCH 09/15] *: Modify placement rule index to reserve some indexes for future work (#30737) --- ddl/placement/common.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/ddl/placement/common.go b/ddl/placement/common.go index 7b385daf508ae..6a17c13388d6a 100644 --- a/ddl/placement/common.go +++ b/ddl/placement/common.go @@ -31,16 +31,10 @@ func GroupID(id int64) string { } const ( - // RuleIndexDefault is the default index for a rule, check Rule.Index. - RuleIndexDefault int = iota - // RuleIndexDatabase is the index for a rule of database. - RuleIndexDatabase // RuleIndexTable is the index for a rule of table. - RuleIndexTable + RuleIndexTable = 40 // RuleIndexPartition is the index for a rule of partition. - RuleIndexPartition - // RuleIndexIndex is the index for a rule of index. 
- RuleIndexIndex + RuleIndexPartition = 80 ) const ( From 8cf847a57514b8b87fe02a73ebd242b7f59051bf Mon Sep 17 00:00:00 2001 From: HuaiyuXu <391585975@qq.com> Date: Thu, 16 Dec 2021 13:04:35 +0800 Subject: [PATCH 10/15] executor: add an unit test case for unreasonable invoking Close (#30696) --- executor/builder.go | 17 +++++ executor/cte.go | 6 +- executor/distsql.go | 6 +- executor/executor_test.go | 130 ++++++++++++++++++++++++++++++++++++++ executor/join.go | 9 ++- executor/merge_join.go | 5 ++ 6 files changed, 167 insertions(+), 6 deletions(-) diff --git a/executor/builder.go b/executor/builder.go index 109cc49915d58..b7fdbbc313143 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -124,6 +124,23 @@ type MockPhysicalPlan interface { GetExecutor() Executor } +// MockExecutorBuilder is a wrapper for executorBuilder. +// ONLY used in test. +type MockExecutorBuilder struct { + *executorBuilder +} + +// NewMockExecutorBuilderForTest is ONLY used in test. +func NewMockExecutorBuilderForTest(ctx sessionctx.Context, is infoschema.InfoSchema, ti *TelemetryInfo, snapshotTS uint64, isStaleness bool, replicaReadScope string) *MockExecutorBuilder { + return &MockExecutorBuilder{ + executorBuilder: newExecutorBuilder(ctx, is, ti, snapshotTS, isStaleness, replicaReadScope)} +} + +// Build builds an executor tree according to `p`. +func (b *MockExecutorBuilder) Build(p plannercore.Plan) Executor { + return b.build(p) +} + func (b *executorBuilder) build(p plannercore.Plan) Executor { switch v := p.(type) { case nil: diff --git a/executor/cte.go b/executor/cte.go index 3ce82c3920559..8345bf5e57f5d 100644 --- a/executor/cte.go +++ b/executor/cte.go @@ -204,8 +204,10 @@ func (e *CTEExec) Close() (err error) { } // `iterInTbl` and `resTbl` are shared by multiple operators, // so will be closed when the SQL finishes. - if err = e.iterOutTbl.DerefAndClose(); err != nil { - return err + if e.iterOutTbl != nil { + if err = e.iterOutTbl.DerefAndClose(); err != nil { + return err + } } } diff --git a/executor/distsql.go b/executor/distsql.go index 3edb1dd709168..1c31130dc53b5 100644 --- a/executor/distsql.go +++ b/executor/distsql.go @@ -197,12 +197,14 @@ type IndexReaderExecutor struct { } // Close clears all resources hold by current object. -func (e *IndexReaderExecutor) Close() error { +func (e *IndexReaderExecutor) Close() (err error) { if e.table != nil && e.table.Meta().TempTableType != model.TempTableNone { return nil } - err := e.result.Close() + if e.result != nil { + err = e.result.Close() + } e.result = nil e.ctx.StoreQueryFeedback(e.feedback) return err diff --git a/executor/executor_test.go b/executor/executor_test.go index c93aa09e6ccdf..ef4f434a9fb67 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -24,6 +24,8 @@ import ( "net" "os" "path/filepath" + "reflect" + "runtime" "strconv" "strings" "sync" @@ -80,6 +82,7 @@ import ( "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/rowcodec" "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/tidb/util/testutil" "github.com/pingcap/tidb/util/timeutil" "github.com/pingcap/tipb/go-tipb" @@ -9501,3 +9504,130 @@ func (s *testSerialSuite) TestIssue30289(c *C) { err := tk.QueryToErr("select /*+ hash_join(t1) */ * from t t1 join t t2 on t1.a=t2.a") c.Assert(err.Error(), Matches, "issue30289 build return error") } + +// Test invoke Close without invoking Open before for each operators. 
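
The test introduced below tracks operator coverage with a bitmask: every operator it cares about is registered in a slice, bit k is set whenever operator k appears in some optimized plan, and the expected full mask is 1<<len(ops) - 1. The idiom in a self-contained form:

    package main

    import "fmt"

    func main() {
        ops := []string{"HashJoin", "MergeJoin", "IndexReader", "Sort"}

        var needed uint64 = 1<<len(ops) - 1 // one bit per registered operator
        var covered uint64

        // Pretend the walked plans contained these operators.
        for _, seen := range []int{0, 2, 3} {
            covered |= 1 << seen
        }

        if covered != needed {
            for i := range ops {
                if covered&(1<<i) == 0 {
                    fmt.Println("not covered:", ops[i]) // MergeJoin
                }
            }
        }
    }

A final equality check then names any operator that no test statement exercised.
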
+func (s *testSerialSuite) TestUnreasonablyClose(c *C) { + defer testleak.AfterTest(c)() + + is := infoschema.MockInfoSchema([]*model.TableInfo{plannercore.MockSignedTable(), plannercore.MockUnsignedTable()}) + se, err := session.CreateSession4Test(s.store) + c.Assert(err, IsNil) + _, err = se.Execute(context.Background(), "use test") + c.Assert(err, IsNil) + // To enable the shuffleExec operator. + _, err = se.Execute(context.Background(), "set @@tidb_merge_join_concurrency=4") + c.Assert(err, IsNil) + + var opsNeedsCovered = []plannercore.PhysicalPlan{ + &plannercore.PhysicalHashJoin{}, + &plannercore.PhysicalMergeJoin{}, + &plannercore.PhysicalIndexJoin{}, + &plannercore.PhysicalIndexHashJoin{}, + &plannercore.PhysicalTableReader{}, + &plannercore.PhysicalIndexReader{}, + &plannercore.PhysicalIndexLookUpReader{}, + &plannercore.PhysicalIndexMergeReader{}, + &plannercore.PhysicalApply{}, + &plannercore.PhysicalHashAgg{}, + &plannercore.PhysicalStreamAgg{}, + &plannercore.PhysicalLimit{}, + &plannercore.PhysicalSort{}, + &plannercore.PhysicalTopN{}, + &plannercore.PhysicalCTE{}, + &plannercore.PhysicalCTETable{}, + &plannercore.PhysicalMaxOneRow{}, + &plannercore.PhysicalProjection{}, + &plannercore.PhysicalSelection{}, + &plannercore.PhysicalTableDual{}, + &plannercore.PhysicalWindow{}, + &plannercore.PhysicalShuffle{}, + &plannercore.PhysicalUnionAll{}, + } + executorBuilder := executor.NewMockExecutorBuilderForTest(se, is, nil, math.MaxUint64, false, "global") + + var opsNeedsCoveredMask uint64 = 1< t1.a) AS a from t as t1) t", + "select /*+ hash_agg() */ count(f) from t group by a", + "select /*+ stream_agg() */ count(f) from t group by a", + "select * from t order by a, f", + "select * from t order by a, f limit 1", + "select * from t limit 1", + "select (select t1.a from t t1 where t1.a > t2.a) as a from t t2;", + "select a + 1 from t", + "select count(*) a from t having a > 1", + "select * from t where a = 1.1", + "with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 0) select * from cte1", + "select /*+use_index_merge(t, c_d_e, f)*/ * from t where c < 1 or f > 2", + "select sum(f) over (partition by f) from t", + "select /*+ merge_join(t1)*/ * from t t1 join t t2 on t1.d = t2.d", + "select a from t union all select a from t", + } { + comment := Commentf("case:%v sql:%s", i, tc) + c.Assert(err, IsNil, comment) + stmt, err := s.ParseOneStmt(tc, "", "") + c.Assert(err, IsNil, comment) + + err = se.NewTxn(context.Background()) + c.Assert(err, IsNil, comment) + p, _, err := planner.Optimize(context.TODO(), se, stmt, is) + c.Assert(err, IsNil, comment) + // This for loop level traverses the plan tree to get which operators are covered. + for child := []plannercore.PhysicalPlan{p.(plannercore.PhysicalPlan)}; len(child) != 0; { + newChild := make([]plannercore.PhysicalPlan, 0, len(child)) + for _, ch := range child { + found := false + for k, t := range opsNeedsCovered { + if reflect.TypeOf(t) == reflect.TypeOf(ch) { + opsAlreadyCoveredMask |= 1 << k + found = true + break + } + } + c.Assert(found, IsTrue, Commentf("case: %v sql: %s operator %v is not registered in opsNeedsCoveredMask", i, tc, reflect.TypeOf(ch))) + switch x := ch.(type) { + case *plannercore.PhysicalCTE: + newChild = append(newChild, x.RecurPlan) + newChild = append(newChild, x.SeedPlan) + continue + case *plannercore.PhysicalShuffle: + newChild = append(newChild, x.DataSources...) + newChild = append(newChild, x.Tails...) + continue + } + newChild = append(newChild, ch.Children()...) 
+ } + child = newChild + } + + e := executorBuilder.Build(p) + + func() { + defer func() { + r := recover() + buf := make([]byte, 4096) + stackSize := runtime.Stack(buf, false) + buf = buf[:stackSize] + c.Assert(r, IsNil, Commentf("case: %v\n sql: %s\n error stack: %v", i, tc, string(buf))) + }() + c.Assert(e.Close(), IsNil, comment) + }() + } + // The following code is used to make sure all the operators registered + // in opsNeedsCoveredMask are covered. + commentBuf := strings.Builder{} + if opsAlreadyCoveredMask != opsNeedsCoveredMask { + for i := range opsNeedsCovered { + if opsAlreadyCoveredMask&(1< Date: Thu, 16 Dec 2021 13:42:35 +0800 Subject: [PATCH 11/15] planner: fix wrong subquery's coercibility (#30750) --- .../r/collation_check_use_collation.result | 25 +++++++++++++++++++ .../t/collation_check_use_collation.test | 23 +++++++++++++++++ planner/core/expression_rewriter.go | 15 ++++++++--- 3 files changed, 59 insertions(+), 4 deletions(-) create mode 100644 cmd/explaintest/r/collation_check_use_collation.result create mode 100644 cmd/explaintest/t/collation_check_use_collation.test diff --git a/cmd/explaintest/r/collation_check_use_collation.result b/cmd/explaintest/r/collation_check_use_collation.result new file mode 100644 index 0000000000000..ffd787a4cef43 --- /dev/null +++ b/cmd/explaintest/r/collation_check_use_collation.result @@ -0,0 +1,25 @@ +create database collation_check_use_collation; +use collation_check_use_collation; +CREATE TABLE `t` ( +`a` char(10) DEFAULT NULL +); +CREATE TABLE `t1` ( +`a` char(10) COLLATE utf8mb4_general_ci DEFAULT NULL +); +insert into t values ("a"); +insert into t1 values ("A"); +select a as a_col from t where t.a = all (select a collate utf8mb4_general_ci from t1); +a_col +a +select a as a_col from t where t.a != any (select a collate utf8mb4_general_ci from t1); +a_col +select a as a_col from t where t.a <= all (select a collate utf8mb4_general_ci from t1); +a_col +a +select a as a_col from t where t.a <= any (select a collate utf8mb4_general_ci from t1); +a_col +a +select a as a_col from t where t.a = (select a collate utf8mb4_general_ci from t1); +a_col +a +use test diff --git a/cmd/explaintest/t/collation_check_use_collation.test b/cmd/explaintest/t/collation_check_use_collation.test new file mode 100644 index 0000000000000..67e75f32e38f9 --- /dev/null +++ b/cmd/explaintest/t/collation_check_use_collation.test @@ -0,0 +1,23 @@ +# These tests check that the used collation is correct. + +# prepare database +create database collation_check_use_collation; +use collation_check_use_collation; + +# Check subquery. 
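
These cases exist because a scalar or quantified subquery result used to come back as a bare constant with default coercibility, so a comparison against it could settle on the wrong collation; the rewriter changes below copy the schema column's coercibility onto the constant. A miniature model of the collation choice (coercibility values simplified in MySQL's style, lower wins; not TiDB's expression package):

    package main

    import "fmt"

    type expr struct {
        collation    string
        coercibility int
    }

    // collationForCompare picks the collation of the side with the lower
    // (stronger) coercibility, mirroring MySQL's rule in miniature.
    func collationForCompare(a, b expr) string {
        if a.coercibility <= b.coercibility {
            return a.collation
        }
        return b.collation
    }

    func main() {
        col := expr{collation: "utf8mb4_bin", coercibility: 2}

        // Before the fix: the subquery result behaved like a weak constant, so
        // the outer column's binary collation won and "a" never matched "A".
        weak := expr{collation: "utf8mb4_general_ci", coercibility: 5}
        fmt.Println(collationForCompare(col, weak)) // utf8mb4_bin

        // After the fix: the explicit COLLATE inside the subquery survives, so
        // the case-insensitive collation is chosen.
        explicit := expr{collation: "utf8mb4_general_ci", coercibility: 0}
        fmt.Println(collationForCompare(col, explicit)) // utf8mb4_general_ci
    }

The tables and queries that follow pin the case-insensitive behavior down.
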
+CREATE TABLE `t` ( + `a` char(10) DEFAULT NULL +); +CREATE TABLE `t1` ( + `a` char(10) COLLATE utf8mb4_general_ci DEFAULT NULL +); +insert into t values ("a"); +insert into t1 values ("A"); +select a as a_col from t where t.a = all (select a collate utf8mb4_general_ci from t1); +select a as a_col from t where t.a != any (select a collate utf8mb4_general_ci from t1); +select a as a_col from t where t.a <= all (select a collate utf8mb4_general_ci from t1); +select a as a_col from t where t.a <= any (select a collate utf8mb4_general_ci from t1); +select a as a_col from t where t.a = (select a collate utf8mb4_general_ci from t1); + +# cleanup environment +use test diff --git a/planner/core/expression_rewriter.go b/planner/core/expression_rewriter.go index 8f950b3d3ece5..775aeda6a880f 100644 --- a/planner/core/expression_rewriter.go +++ b/planner/core/expression_rewriter.go @@ -618,6 +618,7 @@ func (er *expressionRewriter) handleOtherComparableSubq(lexpr, rexpr expression. UniqueID: er.sctx.GetSessionVars().AllocPlanColumnID(), RetType: funcMaxOrMin.RetTp, } + colMaxOrMin.SetCoercibility(rexpr.Coercibility()) schema := expression.NewSchema(colMaxOrMin) plan4Agg.names = append(plan4Agg.names, types.EmptyName) @@ -735,6 +736,7 @@ func (er *expressionRewriter) handleNEAny(lexpr, rexpr expression.Expression, np UniqueID: er.sctx.GetSessionVars().AllocPlanColumnID(), RetType: maxFunc.RetTp, } + maxResultCol.SetCoercibility(rexpr.Coercibility()) count := &expression.Column{ UniqueID: er.sctx.GetSessionVars().AllocPlanColumnID(), RetType: countFunc.RetTp, @@ -772,6 +774,7 @@ func (er *expressionRewriter) handleEQAll(lexpr, rexpr expression.Expression, np UniqueID: er.sctx.GetSessionVars().AllocPlanColumnID(), RetType: firstRowFunc.RetTp, } + firstRowResultCol.SetCoercibility(rexpr.Coercibility()) plan4Agg.names = append(plan4Agg.names, types.EmptyName) count := &expression.Column{ UniqueID: er.sctx.GetSessionVars().AllocPlanColumnID(), @@ -1008,9 +1011,11 @@ func (er *expressionRewriter) handleScalarSubquery(ctx context.Context, v *ast.S if np.Schema().Len() > 1 { newCols := make([]expression.Expression, 0, np.Schema().Len()) for i, data := range row { - newCols = append(newCols, &expression.Constant{ + constant := &expression.Constant{ Value: data, - RetType: np.Schema().Columns[i].GetType()}) + RetType: np.Schema().Columns[i].GetType()} + constant.SetCoercibility(np.Schema().Columns[i].Coercibility()) + newCols = append(newCols, constant) } expr, err1 := er.newFunction(ast.RowFunc, newCols[0].GetType(), newCols...) 
if err1 != nil { @@ -1019,10 +1024,12 @@ func (er *expressionRewriter) handleScalarSubquery(ctx context.Context, v *ast.S } er.ctxStackAppend(expr, types.EmptyName) } else { - er.ctxStackAppend(&expression.Constant{ + constant := &expression.Constant{ Value: row[0], RetType: np.Schema().Columns[0].GetType(), - }, types.EmptyName) + } + constant.SetCoercibility(np.Schema().Columns[0].Coercibility()) + er.ctxStackAppend(constant, types.EmptyName) } return v, true } From 4069b106350f9b0748f6ea6ed47e8c2b202fd9b2 Mon Sep 17 00:00:00 2001 From: guo-shaoge Date: Thu, 16 Dec 2021 14:54:36 +0800 Subject: [PATCH 12/15] executor: add more testcases for index merge (#30497) --- cmd/explaintest/r/index_merge.result | 863 +++++++++++++++++++++++++++ cmd/explaintest/t/index_merge.test | 238 ++++++++ executor/index_merge_reader_test.go | 33 + 3 files changed, 1134 insertions(+) create mode 100644 cmd/explaintest/r/index_merge.result create mode 100644 cmd/explaintest/t/index_merge.test diff --git a/cmd/explaintest/r/index_merge.result b/cmd/explaintest/r/index_merge.result new file mode 100644 index 0000000000000..f790569635b28 --- /dev/null +++ b/cmd/explaintest/r/index_merge.result @@ -0,0 +1,863 @@ +///// SUBQUERY +drop table if exists t1; +create table t1(c1 int, c2 int, c3 int, key(c1), key(c2)); +insert into t1 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5); +drop table if exists t2; +create table t2(c1 int, c2 int, c3 int, key(c1), key(c2)); +insert into t2 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5); +// IN +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 in (select c3 from t1) order by 1; +id estRows task access object operator info +Sort_8 4433.77 root test.t1.c1 +└─Projection_10 4433.77 root test.t1.c1, test.t1.c2, test.t1.c3 + └─Selection_11 4433.77 root or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), Column#9)) + └─HashJoin_12 5542.21 root CARTESIAN left outer semi join, other cond:eq(test.t1.c3, test.t1.c3) + ├─TableReader_18(Build) 10000.00 root data:TableFullScan_17 + │ └─TableFullScan_17 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─IndexMerge_16(Probe) 5542.21 root + ├─IndexRangeScan_13(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_14(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─TableRowIDScan_15(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 in (select c3 from t1) order by 1; +c1 c2 c3 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +// NOT IN +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 not in (select c3 from t1) order by 1; +id estRows task access object operator info +Sort_8 4433.77 root test.t1.c1 +└─Projection_10 4433.77 root test.t1.c1, test.t1.c2, test.t1.c3 + └─Selection_11 4433.77 root or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), Column#9)) + └─HashJoin_12 5542.21 root CARTESIAN anti left outer semi join, other cond:eq(test.t1.c3, test.t1.c3) + ├─TableReader_18(Build) 10000.00 root data:TableFullScan_17 + │ └─TableFullScan_17 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─IndexMerge_16(Probe) 5542.21 root + ├─IndexRangeScan_13(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_14(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + 
└─TableRowIDScan_15(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo
+select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 not in (select c3 from t1) order by 1;
+c1 c2 c3
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+// MAX
+explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 = (select max(c3) from t1) order by 1;
+id estRows task access object operator info
+Sort_33 3325.55 root test.t1.c1
+└─IndexMerge_40 1843.09 root
+ ├─IndexRangeScan_36(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo
+ ├─IndexRangeScan_37(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo
+ └─Selection_39(Probe) 1843.09 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), eq(test.t1.c3, 5)))
+ └─TableRowIDScan_38 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo
+select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 = (select max(c3) from t1) order by 1;
+c1 c2 c3
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+// EXISTS
+explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and EXISTS(select 1 from t2 where t2.c1 = t1.c1) order by 1;
+id estRows task access object operator info
+Sort_9 4433.77 root test.t1.c1
+└─Projection_11 4433.77 root test.t1.c1, test.t1.c2, test.t1.c3
+ └─Selection_12 4433.77 root or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), Column#10))
+ └─HashJoin_22 5542.21 root left outer semi join, equal:[eq(test.t1.c1, test.t2.c1)]
+ ├─IndexReader_30(Build) 10000.00 root index:IndexFullScan_29
+ │ └─IndexFullScan_29 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:false, stats:pseudo
+ └─IndexMerge_26(Probe) 5542.21 root
+ ├─IndexRangeScan_23(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo
+ ├─IndexRangeScan_24(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo
+ └─TableRowIDScan_25(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo
+select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and EXISTS(select 1 from t2 where t2.c1 = t1.c1) order by 1;
+c1 c2 c3
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+// NOT EXISTS
+explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and NOT EXISTS(select 1 from t2 where t2.c1 = t1.c1) order by 1;
+id estRows task access object operator info
+Sort_9 4433.77 root test.t1.c1
+└─Projection_11 4433.77 root test.t1.c1, test.t1.c2, test.t1.c3
+ └─Selection_12 4433.77 root or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), Column#10))
+ └─HashJoin_22 5542.21 root anti left outer semi join, equal:[eq(test.t1.c1, test.t2.c1)]
+ ├─IndexReader_30(Build) 10000.00 root index:IndexFullScan_29
+ │ └─IndexFullScan_29 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:false, stats:pseudo
+ └─IndexMerge_26(Probe) 5542.21 root
+ ├─IndexRangeScan_23(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo
+ ├─IndexRangeScan_24(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo
+ └─TableRowIDScan_25(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo
+select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and NOT EXISTS(select 1 from t2 where t2.c1 = t1.c1) order by 1;
+c1 c2 c3
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+// Non-Correlated
+explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 = (select count(1) from t2) order by 1;
+id estRows task access object operator info +Sort_38 3325.55 root test.t1.c1 +└─IndexMerge_45 1843.09 root + ├─IndexRangeScan_41(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_42(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_44(Probe) 1843.09 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), eq(test.t1.c3, 5))) + └─TableRowIDScan_43 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 = (select count(1) from t2) order by 1; +c1 c2 c3 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +// ANY +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 > ANY(select count(1) from t2) order by 1; +id estRows task access object operator info +Sort_11 5098.44 root test.t1.c1 +└─HashJoin_15 5098.44 root CARTESIAN inner join, other cond:or(lt(test.t1.c1, 10), and(and(lt(test.t1.c2, 10), or(gt(test.t1.c3, Column#10), if(ne(Column#11, 0), NULL, 0))), and(ne(Column#12, 0), if(isnull(test.t1.c3), NULL, 1)))) + ├─StreamAgg_23(Build) 1.00 root funcs:min(Column#9)->Column#10, funcs:sum(0)->Column#11, funcs:count(1)->Column#12 + │ └─StreamAgg_43 1.00 root funcs:count(Column#25)->Column#9 + │ └─IndexReader_44 1.00 root index:StreamAgg_27 + │ └─StreamAgg_27 1.00 cop[tikv] funcs:count(1)->Column#25 + │ └─IndexFullScan_41 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:false, stats:pseudo + └─IndexMerge_21(Probe) 2825.66 root + ├─IndexRangeScan_17(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_18(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_20(Probe) 2825.66 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), if(isnull(test.t1.c3), NULL, 1))) + └─TableRowIDScan_19 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 > ANY(select count(1) from t2) order by 1; +c1 c2 c3 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +// SOME +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 > SOME(select count(1) from t2) order by 1; +id estRows task access object operator info +Sort_11 5098.44 root test.t1.c1 +└─HashJoin_15 5098.44 root CARTESIAN inner join, other cond:or(lt(test.t1.c1, 10), and(and(lt(test.t1.c2, 10), or(gt(test.t1.c3, Column#10), if(ne(Column#11, 0), NULL, 0))), and(ne(Column#12, 0), if(isnull(test.t1.c3), NULL, 1)))) + ├─StreamAgg_23(Build) 1.00 root funcs:min(Column#9)->Column#10, funcs:sum(0)->Column#11, funcs:count(1)->Column#12 + │ └─StreamAgg_43 1.00 root funcs:count(Column#25)->Column#9 + │ └─IndexReader_44 1.00 root index:StreamAgg_27 + │ └─StreamAgg_27 1.00 cop[tikv] funcs:count(1)->Column#25 + │ └─IndexFullScan_41 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:false, stats:pseudo + └─IndexMerge_21(Probe) 2825.66 root + ├─IndexRangeScan_17(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_18(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_20(Probe) 2825.66 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), if(isnull(test.t1.c3), NULL, 1))) + └─TableRowIDScan_19 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 
> SOME(select count(1) from t2) order by 1; +c1 c2 c3 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +// ALL +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 > ALL(select count(1) from t2) order by 1; +id estRows task access object operator info +Sort_11 5542.21 root test.t1.c1 +└─HashJoin_15 5542.21 root CARTESIAN inner join, other cond:or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), or(and(gt(test.t1.c3, Column#10), if(ne(Column#11, 0), NULL, 1)), or(eq(Column#12, 0), if(isnull(test.t1.c3), NULL, 0))))) + ├─StreamAgg_22(Build) 1.00 root funcs:max(Column#9)->Column#10, funcs:sum(0)->Column#11, funcs:count(1)->Column#12 + │ └─StreamAgg_42 1.00 root funcs:count(Column#25)->Column#9 + │ └─IndexReader_43 1.00 root index:StreamAgg_26 + │ └─StreamAgg_26 1.00 cop[tikv] funcs:count(1)->Column#25 + │ └─IndexFullScan_40 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:false, stats:pseudo + └─IndexMerge_20(Probe) 5542.21 root + ├─IndexRangeScan_17(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_18(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─TableRowIDScan_19(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 > ALL(select count(1) from t2) order by 1; +c1 c2 c3 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +// SELECT FIELD +explain select /*+ use_index_merge(t1) */ c1, (select sum(c2) from t2) from t1 where c1 < 10 or c2 < 10 and c3 > ALL(select count(1) from t2) order by 1; +id estRows task access object operator info +Sort_39 5542.21 root test.t1.c1 +└─Projection_41 5542.21 root test.t1.c1, 15->Column#25 + └─HashJoin_43 5542.21 root CARTESIAN inner join, other cond:or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), or(and(gt(test.t1.c3, Column#14), if(ne(Column#15, 0), NULL, 1)), or(eq(Column#16, 0), if(isnull(test.t1.c3), NULL, 0))))) + ├─StreamAgg_50(Build) 1.00 root funcs:max(Column#13)->Column#14, funcs:sum(0)->Column#15, funcs:count(1)->Column#16 + │ └─StreamAgg_70 1.00 root funcs:count(Column#38)->Column#13 + │ └─IndexReader_71 1.00 root index:StreamAgg_54 + │ └─StreamAgg_54 1.00 cop[tikv] funcs:count(1)->Column#38 + │ └─IndexFullScan_68 10000.00 cop[tikv] table:t2, index:c1(c1) keep order:false, stats:pseudo + └─IndexMerge_48(Probe) 5542.21 root + ├─IndexRangeScan_45(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_46(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─TableRowIDScan_47(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ c1, (select sum(c2) from t2) from t1 where c1 < 10 or c2 < 10 and c3 > ALL(select count(1) from t2) order by 1; +c1 (select sum(c2) from t2) +1 15 +2 15 +3 15 +4 15 +5 15 +// MULTIPLE LEVEL +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 IN (select c1 from t2 where c2 in (select c3 from t2)) order by 1; +id estRows task access object operator info +Sort_14 4433.77 root test.t1.c1 +└─Projection_16 4433.77 root test.t1.c1, test.t1.c2, test.t1.c3 + └─Selection_17 4433.77 root or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), Column#13)) + └─HashJoin_18 5542.21 root CARTESIAN left outer semi join, other cond:eq(test.t1.c3, test.t2.c1) + ├─HashJoin_37(Build) 9990.00 root inner join, equal:[eq(test.t2.c2, test.t2.c3)] + │ ├─HashAgg_41(Build) 7992.00 
root group by:test.t2.c3, funcs:firstrow(test.t2.c3)->test.t2.c3 + │ │ └─TableReader_48 9990.00 root data:Selection_47 + │ │ └─Selection_47 9990.00 cop[tikv] not(isnull(test.t2.c3)) + │ │ └─TableFullScan_46 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + │ └─TableReader_51(Probe) 9990.00 root data:Selection_50 + │ └─Selection_50 9990.00 cop[tikv] not(isnull(test.t2.c2)) + │ └─TableFullScan_49 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─IndexMerge_22(Probe) 5542.21 root + ├─IndexRangeScan_19(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_20(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─TableRowIDScan_21(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 IN (select c1 from t2 where c2 in (select c3 from t2)) order by 1; +c1 c2 c3 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +///// Generated Column +drop table if exists t1; +create table t1(c1 int, c2 int, c3 int as (c1 + c2), key(c1), key(c2)); +insert into t1(c1, c2) values(1, 1), (2, 2), (3, 3), (4, 4), (5, 5); +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +id estRows task access object operator info +Sort_5 4060.74 root test.t1.c1 +└─IndexMerge_12 2250.55 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 2250.55 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +c1 c2 c3 +1 1 2 +2 2 4 +3 3 6 +4 4 8 +5 5 10 +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 = c1 + c2 order by 1; +id estRows task access object operator info +Sort_5 5098.44 root test.t1.c1 +└─IndexMerge_12 2825.66 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 2825.66 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), eq(test.t1.c3, plus(test.t1.c1, test.t1.c2)))) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 = c1 + c2 order by 1; +c1 c2 c3 +1 1 2 +2 2 4 +3 3 6 +4 4 8 +5 5 10 +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and substring(c3, c2) order by 1; +id estRows task access object operator info +Sort_5 5098.44 root test.t1.c1 +└─IndexMerge_12 2825.66 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 2825.66 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), istrue_with_null(cast(substring(cast(test.t1.c3, var_string(20)), test.t1.c2), double BINARY)))) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from 
t1 where c1 < 10 or c2 < 10 and substring(c3, c2) order by 1; +c1 c2 c3 +1 1 2 +2 2 4 +3 3 6 +4 4 8 +5 5 10 +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 order by 1; +id estRows task access object operator info +Sort_5 4800.37 root test.t1.c1 +└─IndexMerge_12 2660.47 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 2660.47 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), test.t1.c3)) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 order by 1; +c1 c2 c3 +1 1 2 +2 2 4 +3 3 6 +4 4 8 +5 5 10 +///// SQL Binding +create global binding for +select * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1 +using +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +explain select * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +id estRows task access object operator info +Sort_5 4060.74 root test.t1.c1 +└─IndexMerge_12 2250.55 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 2250.55 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +c1 c2 c3 +1 1 2 +2 2 4 +3 3 6 +4 4 8 +5 5 10 +///// CREATE TABLE/VIEW +drop table if exists t1; +create table t1(c1 int, c2 int, c3 int, key(c1), key(c2)); +insert into t1 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5); +drop view if exists v2; +create view v2 as select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10; +show create view v2; +View Create View character_set_client collation_connection +v2 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`%` SQL SECURITY DEFINER VIEW `v2` (`c1`, `c2`, `c3`) AS SELECT /*+ USE_INDEX_MERGE(`t1` )*/ `test`.`t1`.`c1` AS `c1`,`test`.`t1`.`c2` AS `c2`,`test`.`t1`.`c3` AS `c3` FROM `test`.`t1` WHERE `c1`<10 OR `c2`<10 AND `c3`<10 utf8mb4 utf8mb4_general_ci +select * from v2 order by 1; +c1 c2 c3 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +///// DROP/ALTER INDEX +drop table if exists t1; +create table t1(c1 int, c2 int, c3 int, key(c1), key(c2)); +insert into t1 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5); +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +id estRows task access object operator info +Sort_5 4060.74 root test.t1.c1 +└─IndexMerge_12 2250.55 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 2250.55 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +c1 c2 c3 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +drop index c1 on t1; +explain 
select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +id estRows task access object operator info +Sort_5 4060.74 root test.t1.c1 +└─TableReader_10 4060.74 root data:Selection_9 + └─Selection_9 4060.74 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) + └─TableFullScan_8 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +c1 c2 c3 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +alter table t1 add index c1(c1); +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +id estRows task access object operator info +Sort_5 4060.74 root test.t1.c1 +└─IndexMerge_12 2250.55 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 2250.55 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +c1 c2 c3 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +///// DELETE +drop table if exists t1; +create table t1(c1 int, c2 int, c3 int, key(c1), key(c2)); +insert into t1 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5); +explain delete from t1 where c1 in (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10) order by 1; +id estRows task access object operator info +Delete_10 N/A root N/A +└─Sort_14 4056.68 root test.t1.c1 + └─HashJoin_31 4056.68 root inner join, equal:[eq(test.t1.c1, test.t1.c1)] + ├─HashAgg_34(Build) 3245.34 root group by:test.t1.c1, funcs:firstrow(test.t1.c1)->test.t1.c1 + │ └─IndexMerge_39 2248.30 root + │ ├─IndexRangeScan_35(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + │ ├─IndexRangeScan_36(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + │ └─Selection_38(Probe) 2248.30 cop[tikv] not(isnull(test.t1.c1)), or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) + │ └─TableRowIDScan_37 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo + └─TableReader_42(Probe) 9990.00 root data:Selection_41 + └─Selection_41 9990.00 cop[tikv] not(isnull(test.t1.c1)) + └─TableFullScan_40 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +delete from t1 where c1 in (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10) order by 1; +select * from t1; +c1 c2 c3 +///// UPDATE +explain update t1 set c1 = 100, c2 = 100, c3 = 100 where c1 in (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10); +id estRows task access object operator info +Update_9 N/A root N/A +└─HashJoin_28 4056.68 root inner join, equal:[eq(test.t1.c1, test.t1.c1)] + ├─HashAgg_31(Build) 3245.34 root group by:test.t1.c1, funcs:firstrow(test.t1.c1)->test.t1.c1 + │ └─IndexMerge_36 2248.30 root + │ ├─IndexRangeScan_32(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + │ ├─IndexRangeScan_33(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + │ └─Selection_35(Probe) 2248.30 cop[tikv] not(isnull(test.t1.c1)), or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 
10), lt(test.t1.c3, 10))) + │ └─TableRowIDScan_34 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo + └─TableReader_39(Probe) 9990.00 root data:Selection_38 + └─Selection_38 9990.00 cop[tikv] not(isnull(test.t1.c1)) + └─TableFullScan_37 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +update t1 set c1 = 100, c2 = 100, c3 = 100 where c1 in (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10); +select * from t1; +c1 c2 c3 +///// FOR UPDATE +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1 for update; +id estRows task access object operator info +Sort_6 4060.74 root test.t1.c1 +└─Projection_8 4060.74 root test.t1.c1, test.t1.c2, test.t1.c3 + └─SelectLock_9 4060.74 root for update 0 + └─IndexMerge_14 2250.55 root + ├─IndexRangeScan_10(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_11(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_13(Probe) 2250.55 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) + └─TableRowIDScan_12 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1 for update; +c1 c2 c3 +///// TEMPORARY Table. Not support for now. +drop table if exists t1; +create temporary table t1(c1 int, c2 int, c3 int, key(c1), key(c2)); +insert into t1 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5); +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +id estRows task access object operator info +Sort_6 4060.74 root test.t1.c1 +└─Projection_8 4060.74 root test.t1.c1, test.t1.c2, test.t1.c3 + └─UnionScan_9 4060.74 root or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) + └─TableReader_12 4060.74 root data:Selection_11 + └─Selection_11 4060.74 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) + └─TableFullScan_10 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +c1 c2 c3 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +///// MEMORY Table +explain select count(c1) from (select /*+ use_index_merge(t_alias), stream_agg() */ count(1) c1 from information_schema.statements_summary where sum_latency >= 0 or max_latency >= 0 order by 1) dt; +id estRows task access object operator info +StreamAgg_10 1.00 root funcs:count(Column#92)->Column#93 +└─Sort_11 1.00 root Column#92 + └─StreamAgg_14 1.00 root funcs:count(1)->Column#92 + └─MemTableScan_18 10000.00 root table:STATEMENTS_SUMMARY +show warnings; +Level Code Message +select count(c1) from (select /*+ use_index_merge(t_alias), stream_agg() */ count(1) c1 from information_schema.statements_summary where sum_latency >= 0 or max_latency >= 0 order by 1) dt; +count(c1) +1 +///// Limit +drop table if exists t1; +create table t1(c1 int, c2 int, c3 int, key(c1), key(c2)); +insert into t1 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5); +explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and c3 < 10 order by 1 limit 1 offset 2; +id estRows task access object operator info +TopN_10 1.00 root test.t1.c1, offset:2, count:1 +└─IndexMerge_19 1841.86 root + ├─IndexRangeScan_15(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + 
├─IndexRangeScan_16(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_18(Probe) 1841.86 cop[tikv] lt(test.t1.c3, 10) + └─TableRowIDScan_17 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and c3 < 10 order by 1 limit 1 offset 2; +c1 c2 c3 +3 3 3 +///// GROUP BY +explain select /*+ use_index_merge(t1) */ sum(c1) from t1 where (c1 < 10 or c2 < 10) and c3 < 10 group by c1 order by 1; +id estRows task access object operator info +Sort_6 1473.49 root Column#5 +└─HashAgg_11 1473.49 root group by:Column#10, funcs:sum(Column#9)->Column#5 + └─Projection_18 1841.86 root cast(test.t1.c1, decimal(32,0) BINARY)->Column#9, test.t1.c1 + └─IndexMerge_16 1841.86 root + ├─IndexRangeScan_12(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_13(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_15(Probe) 1841.86 cop[tikv] lt(test.t1.c3, 10) + └─TableRowIDScan_14 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ sum(c1) from t1 where (c1 < 10 or c2 < 10) and c3 < 10 group by c1 order by 1; +sum(c1) +1 +2 +3 +4 +5 +///// Apply +drop table if exists t2; +create table t2(c1 int, c2 int, c3 int, key(c1), key(c2)); +insert into t2 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5); +explain select /*+ use_index_merge(t1) */ * from t1 where t1.c1 = (select avg(t2.c1) from t2 where t1.c1 = t2.c1 group by t2.c1) and (c1 < 10 or c2 < -1) and c3 < 10 order by 1; +id estRows task access object operator info +Sort_12 1841.86 root test.t1.c1 +└─Projection_14 1841.86 root test.t1.c1, test.t1.c2, test.t1.c3 + └─Apply_16 1841.86 root inner join, equal:[eq(Column#10, Column#9)] + ├─Projection_17(Build) 1841.86 root test.t1.c1, test.t1.c2, test.t1.c3, cast(test.t1.c1, decimal(20,0) BINARY)->Column#10 + │ └─IndexMerge_22 1841.86 root + │ ├─IndexRangeScan_18(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + │ ├─IndexRangeScan_19(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,-1), keep order:false, stats:pseudo + │ └─Selection_21(Probe) 1841.86 cop[tikv] lt(test.t1.c3, 10) + │ └─TableRowIDScan_20 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo + └─MaxOneRow_23(Probe) 1.00 root + └─StreamAgg_35 2.00 root group by:test.t2.c1, funcs:avg(Column#17, Column#18)->Column#9 + └─IndexReader_36 2.00 root index:StreamAgg_27 + └─StreamAgg_27 2.00 cop[tikv] group by:test.t2.c1, funcs:count(test.t2.c1)->Column#17, funcs:sum(test.t2.c1)->Column#18 + └─IndexRangeScan_34 2.50 cop[tikv] table:t2, index:c1(c1) range: decided by [eq(test.t1.c1, test.t2.c1)], keep order:true, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where t1.c1 = (select avg(t2.c1) from t2 where t1.c1 = t2.c1 group by t2.c1) and (c1 < 10 or c2 < -1) and c3 < 10 order by 1; +c1 c2 c3 +1 1 1 +2 2 2 +3 3 3 +4 4 4 +5 5 5 +explain select /*+ use_index_merge(t1) */ * from t1 where t1.c1 = (select /*+ use_index_merge(t2) */ avg(t2.c1) from t2 where t1.c1 = t2.c1 and t2.c1 < 10 or t2.c2 < 10 group by t2.c1 order by c1 limit 1 offset 2) and (c1 < 10 or c2 < -1) and c3 < 10 order by 1; +id estRows task access object operator info +Sort_16 1841.86 root test.t1.c1 +└─Projection_18 1841.86 root test.t1.c1, test.t1.c2, test.t1.c3 + └─Apply_20 1841.86 root inner join, equal:[eq(Column#11, Column#9)] + 
├─Projection_21(Build) 1841.86 root test.t1.c1, test.t1.c2, test.t1.c3, cast(test.t1.c1, decimal(20,0) BINARY)->Column#11 + │ └─IndexMerge_26 1841.86 root + │ ├─IndexRangeScan_22(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + │ ├─IndexRangeScan_23(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,-1), keep order:false, stats:pseudo + │ └─Selection_25(Probe) 1841.86 cop[tikv] lt(test.t1.c3, 10) + │ └─TableRowIDScan_24 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo + └─TopN_29(Probe) 1.00 root test.t2.c1, offset:2, count:1 + └─HashAgg_36 2660.44 root group by:Column#21, funcs:avg(Column#19)->Column#9, funcs:firstrow(Column#20)->test.t2.c1 + └─Projection_48 3325.55 root cast(test.t2.c1, decimal(15,4) BINARY)->Column#19, test.t2.c1, test.t2.c1 + └─IndexMerge_41 3325.55 root + ├─Selection_38(Build) 3.32 cop[tikv] eq(test.t1.c1, test.t2.c1) + │ └─IndexRangeScan_37 3323.33 cop[tikv] table:t2, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_39(Build) 3323.33 cop[tikv] table:t2, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─TableRowIDScan_40(Probe) 3325.55 cop[tikv] table:t2 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where t1.c1 = (select /*+ use_index_merge(t2) */ avg(t2.c1) from t2 where t1.c1 = t2.c1 and t2.c1 < 10 or t2.c2 < 10 group by t2.c1 order by c1 limit 1 offset 2) and (c1 < 10 or c2 < -1) and c3 < 10 order by 1; +c1 c2 c3 +3 3 3 +///// Nested filters +drop table if exists t1; +create table t1(c1 int, c2 int, c3 int, c4 int, c5 int, key(c1), key(c2), key(c3), key(c4)); +insert into t1 values(1, 1, 1, 1, 1), (2, 2, 2, 2, 2), (3, 3, 3, 3, 3), (4, 4, 4, 4, 4), (5, 5, 5, 5, 5); +explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and (c3 < 10 or c4 < 10) order by 1; +id estRows task access object operator info +Sort_5 3071.61 root test.t1.c1 +└─IndexMerge_12 3071.61 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 3071.61 cop[tikv] or(lt(test.t1.c3, 10), lt(test.t1.c4, 10)) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and (c3 < 10 or c4 < 10) order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 +4 4 4 4 4 +5 5 5 5 5 +explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 and c2 < 10) or (c3 < 10 and c4 < 10) order by 1; +id estRows task access object operator info +Sort_5 2086.93 root test.t1.c1 +└─IndexMerge_12 1156.62 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c3(c3) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 1156.62 cop[tikv] or(and(lt(test.t1.c1, 10), lt(test.t1.c2, 10)), and(lt(test.t1.c3, 10), lt(test.t1.c4, 10))) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 and c2 < 10) or (c3 < 10 and c4 < 10) order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 +4 4 4 4 4 +5 5 5 5 5 +explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 and c2 < 10) or (c3 < 10 and c4 < 10) and c5 < 10 order by 1; +id 
estRows task access object operator info +Sort_5 1430.96 root test.t1.c1 +└─IndexMerge_12 793.07 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c3(c3) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 793.07 cop[tikv] or(and(lt(test.t1.c1, 10), lt(test.t1.c2, 10)), and(lt(test.t1.c3, 10), and(lt(test.t1.c4, 10), lt(test.t1.c5, 10)))) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 and c2 < 10) or (c3 < 10 and c4 < 10) and c4 < 10 order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 +4 4 4 4 4 +5 5 5 5 5 +explain select /*+ use_index_merge(t1) */ * from t1 where ((c1 < 10 and c4 < 10) or c2 < 10) and (c3 < 10 or c5 < 10) order by 1; +id estRows task access object operator info +Sort_5 2250.55 root test.t1.c1 +└─IndexMerge_12 1247.30 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 1247.30 cop[tikv] or(and(lt(test.t1.c1, 10), lt(test.t1.c4, 10)), lt(test.t1.c2, 10)), or(lt(test.t1.c3, 10), lt(test.t1.c5, 10)) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where ((c1 < 10 and c4 < 10) or c2 < 10) and (c3 < 10 or c4 < 10) order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 +4 4 4 4 4 +5 5 5 5 5 +explain select /*+ use_index_merge(t1) */ * from t1 where (((c1 < 10 or c3 < 10) and (c1 < 10 or c4 < 10)) or c2 < 10) and (c3 < 10 or c5 < 10) order by 1; +id estRows task access object operator info +Sort_5 2978.47 root test.t1.c1 +└─TableReader_10 2978.47 root data:Selection_9 + └─Selection_9 2978.47 cop[tikv] or(and(or(lt(test.t1.c1, 10), lt(test.t1.c3, 10)), or(lt(test.t1.c1, 10), lt(test.t1.c4, 10))), lt(test.t1.c2, 10)), or(lt(test.t1.c3, 10), lt(test.t1.c5, 10)) + └─TableFullScan_8 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +show warnings; +Level Code Message +select /*+ use_index_merge(t1) */ * from t1 where (((c1 < 10 or c3 < 10) and (c1 < 10 or c4 < 10)) or c2 < 10) and (c3 < 10 or c5 < 10) order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 +4 4 4 4 4 +5 5 5 5 5 +explain select /*+ use_index_merge(t1) */ * from t1 where (((c1 < 10 or c3 < 10) and c1 < 10) or c2 < 10) and (c3 < 10 or c5 < 10) order by 1; +id estRows task access object operator info +Sort_5 2523.42 root test.t1.c1 +└─IndexMerge_12 1398.53 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 1398.53 cop[tikv] or(and(or(lt(test.t1.c1, 10), lt(test.t1.c3, 10)), lt(test.t1.c1, 10)), lt(test.t1.c2, 10)), or(lt(test.t1.c3, 10), lt(test.t1.c5, 10)) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (((c1 < 10 or c3 < 10) and c1 < 10) or c2 < 10) and (c3 < 10 or c5 < 10) order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 +4 4 4 4 4 +5 5 5 5 5 +///// All kinds of expressions +// common functions +explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or 
c2 < 10) and coalesce(c1, c2, c4) = 1 order by 1; +id estRows task access object operator info +Sort_5 4433.77 root test.t1.c1 +└─IndexMerge_12 4433.77 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 4433.77 cop[tikv] eq(coalesce(test.t1.c1, test.t1.c2, test.t1.c4), 1) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and coalesce(c1, c2, c4) = 1 order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and greatest(c1, c2, c4) = 1 order by 1; +id estRows task access object operator info +Sort_5 4433.77 root test.t1.c1 +└─Selection_8 4433.77 root eq(greatest(test.t1.c1, test.t1.c2, test.t1.c4), 1) + └─IndexMerge_12 5542.21 root + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_10(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─TableRowIDScan_11(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and greatest(c1, c2, c4) = 1 order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +// math functions +explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and abs(c1) = 1 order by 1; +id estRows task access object operator info +Sort_5 4433.77 root test.t1.c1 +└─IndexMerge_12 4433.77 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 4433.77 cop[tikv] eq(abs(test.t1.c1), 1) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and abs(c1) = 1 order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and pi() order by 1; +id estRows task access object operator info +Sort_5 5542.21 root test.t1.c1 +└─IndexMerge_11 5542.21 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─TableRowIDScan_10(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and pi() order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 +4 4 4 4 4 +5 5 5 5 5 +explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and ceil(c1) order by 1; +id estRows task access object operator info +Sort_5 4433.77 root test.t1.c1 +└─IndexMerge_12 4433.77 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 4433.77 cop[tikv] ceil(test.t1.c1) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 
or c2 < 10) and ceil(c1) order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 +4 4 4 4 4 +5 5 5 5 5 +explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and truncate(c1, 1) = 1 order by 1; +id estRows task access object operator info +Sort_5 4433.77 root test.t1.c1 +└─Selection_8 4433.77 root eq(truncate(test.t1.c1, 1), 1) + └─IndexMerge_12 5542.21 root + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_10(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─TableRowIDScan_11(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and truncate(c1, 1) = 1 order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and sqrt(-1) order by 1; +id estRows task access object operator info +TableDual_11 0.00 root rows:0 +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and sqrt(-1) order by 1; +c1 c2 c3 c4 c5 +// string functions +explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and substring(c3, 1, 1) = '1' order by 1; +id estRows task access object operator info +Sort_5 4433.77 root test.t1.c1 +└─IndexMerge_12 4433.77 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 4433.77 cop[tikv] eq(substring(cast(test.t1.c3, var_string(20)), 1, 1), "1") + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and substring(c3, 1, 1) = '1' order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +// control functions +explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and ifnull(c1, c2) order by 1; +id estRows task access object operator info +Sort_5 4433.77 root test.t1.c1 +└─IndexMerge_12 4433.77 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 4433.77 cop[tikv] ifnull(test.t1.c1, test.t1.c2) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and ifnull(c1, c2) order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 +4 4 4 4 4 +5 5 5 5 5 +explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and if(c1, c2, c3) order by 1; +id estRows task access object operator info +Sort_5 4433.77 root test.t1.c1 +└─IndexMerge_12 4433.77 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 4433.77 cop[tikv] if(test.t1.c1, test.t1.c2, test.t1.c3) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and if(c1, c2, c3) order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 +4 4 4 4 4 +5 5 5 5 5 +explain select 
/*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and (c1 between 1 and 2) order by 1; +id estRows task access object operator info +Sort_5 138.56 root test.t1.c1 +└─IndexMerge_12 138.56 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 138.56 cop[tikv] ge(test.t1.c1, 1), le(test.t1.c1, 2) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and (c1 between 1 and 2) order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +2 2 2 2 2 +// mixed usage +set @a = 1; +explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and length(substring(sqrt(c3), @a, 1)) = char_length(if(c1, c2, c3)) order by 1; +id estRows task access object operator info +Sort_5 4433.77 root test.t1.c1 +└─Selection_8 4433.77 root eq(length(substring(cast(sqrt(cast(test.t1.c3, double BINARY)), var_string(5)), getvar("a"), 1)), char_length(cast(if(test.t1.c1, test.t1.c2, test.t1.c3), var_string(20)))) + └─IndexMerge_12 5542.21 root + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_10(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─TableRowIDScan_11(Probe) 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and length(substring(sqrt(c3), @a, 1)) = char_length(if(c1, c2, c3)) order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 +4 4 4 4 4 +5 5 5 5 5 +///// CTE +drop table if exists t1; +create table t1(c1 int, c2 int, c3 int, c4 int, c5 int, key(c1), key(c2), key(c3), key(c4)); +insert into t1 values(1, 1, 1, 1, 1), (2, 2, 2, 2, 2), (3, 3, 3, 3, 3), (4, 4, 4, 4, 4), (5, 5, 5, 5, 5); +explain with cte1 as (select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10) select * from cte1 order by 1; +id estRows task access object operator info +Sort_13 2250.55 root test.t1.c1 +└─CTEFullScan_16 2250.55 root CTE:cte1 data:CTE_0 +CTE_0 2250.55 root Non-Recursive CTE +└─IndexMerge_12(Seed Part) 2250.55 root + ├─IndexRangeScan_8(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo + └─Selection_11(Probe) 2250.55 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10))) + └─TableRowIDScan_10 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo +with cte1 as (select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10) select * from cte1 order by 1; +c1 c2 c3 c4 c5 +1 1 1 1 1 +2 2 2 2 2 +3 3 3 3 3 +4 4 4 4 4 +5 5 5 5 5 +explain with recursive cte1 as (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10 UNION ALL select c1 + 100 from cte1 where c1 < 10) select * from cte1 order by 1; +id estRows task access object operator info +Sort_23 7309.33 root test.t1.c1 +└─CTEFullScan_26 7309.33 root CTE:cte1 data:CTE_0 +CTE_0 7309.33 root Recursive CTE +├─Projection_14(Seed Part) 4060.74 root test.t1.c1 +│ └─IndexMerge_19 2250.55 root +│ ├─IndexRangeScan_15(Build) 3323.33 cop[tikv] table:t1, index:c1(c1) range:[-inf,10), keep order:false, 
stats:pseudo
+│ ├─IndexRangeScan_16(Build) 3323.33 cop[tikv] table:t1, index:c2(c2) range:[-inf,10), keep order:false, stats:pseudo
+│ └─Selection_18(Probe) 2250.55 cop[tikv] or(lt(test.t1.c1, 10), and(lt(test.t1.c2, 10), lt(test.t1.c3, 10)))
+│ └─TableRowIDScan_17 5542.21 cop[tikv] table:t1 keep order:false, stats:pseudo
+└─Projection_20(Recursive Part) 3248.59 root cast(plus(test.t1.c1, 100), int(11))->test.t1.c1
+ └─Selection_21 3248.59 root lt(test.t1.c1, 10)
+ └─CTETable_22 4060.74 root Scan on CTE_0
+with recursive cte1 as (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10 UNION ALL select c1 + 100 from cte1 where c1 < 10) select * from cte1 order by 1;
+c1
+1
+2
+3
+4
+5
+101
+102
+103
+104
+105
+explain with recursive cte1 as (select 1 c1, 1 c2, 1 c3 UNION ALL select /*+ use_index_merge(t_alias) */ c1 + 1, c2 + 1, c3 + 1 from cte1 t_alias where c1 < 10 or c2 < 10 and c3 < 10) select * from cte1 order by 1;
+id estRows task access object operator info
+Sort_17 1.80 root Column#16
+└─CTEFullScan_20 1.80 root CTE:cte1 data:CTE_0
+CTE_0 1.80 root Recursive CTE
+├─Projection_12(Seed Part) 1.00 root 1->Column#4, 1->Column#5, 1->Column#6
+│ └─TableDual_13 1.00 root rows:1
+└─Projection_14(Recursive Part) 0.80 root cast(plus(Column#7, 1), bigint(1) BINARY)->Column#13, cast(plus(Column#8, 1), bigint(1) BINARY)->Column#14, cast(plus(Column#9, 1), bigint(1) BINARY)->Column#15
+ └─Selection_15 0.80 root or(lt(Column#7, 10), and(lt(Column#8, 10), lt(Column#9, 10)))
+ └─CTETable_16 1.00 root Scan on CTE_0
+show warnings;
+Level Code Message
+with recursive cte1 as (select 1 c1, 1 c2, 1 c3 UNION ALL select /*+ use_index_merge(t_alias) */ c1 + 1, c2 + 1, c3 + 1 from cte1 t_alias where c1 < 10 or c2 < 10 and c3 < 10) select * from cte1 order by 1;
+c1 c2 c3
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+8 8 8
+9 9 9
+10 10 10
diff --git a/cmd/explaintest/t/index_merge.test b/cmd/explaintest/t/index_merge.test
new file mode 100644
index 0000000000000..e89af1b613c27
--- /dev/null
+++ b/cmd/explaintest/t/index_merge.test
@@ -0,0 +1,238 @@
+--echo ///// SUBQUERY
+drop table if exists t1;
+create table t1(c1 int, c2 int, c3 int, key(c1), key(c2));
+insert into t1 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5);
+drop table if exists t2;
+create table t2(c1 int, c2 int, c3 int, key(c1), key(c2));
+insert into t2 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5);
+
+--echo // IN
+explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 in (select c3 from t1) order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 in (select c3 from t1) order by 1;
+
+--echo // NOT IN
+explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 not in (select c3 from t1) order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 not in (select c3 from t1) order by 1;
+
+--echo // MAX
+explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 = (select max(c3) from t1) order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 = (select max(c3) from t1) order by 1;
+
+--echo // EXISTS
+explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and EXISTS(select 1 from t2 where t2.c1 = t1.c1) order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and EXISTS(select 1 from t2 where t2.c1 = t1.c1) order by 1;
+
+--echo // NOT EXISTS
+explain select /*+
use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and NOT EXISTS(select 1 from t2 where t2.c1 = t1.c1) order by 1; +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and NOT EXISTS(select 1 from t2 where t2.c1 = t1.c1) order by 1; + +--echo // Non-Correlated +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 = (select count(1) from t2) order by 1; +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 = (select count(1) from t2) order by 1; + +--echo // ANY +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 > ANY(select count(1) from t2) order by 1; +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 > ANY(select count(1) from t2) order by 1; + +--echo // SOME +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 > SOME(select count(1) from t2) order by 1; +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 > SOME(select count(1) from t2) order by 1; + +--echo // ALL +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 > ALL(select count(1) from t2) order by 1; +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 > ALL(select count(1) from t2) order by 1; + +--echo // SELECT FIELD +explain select /*+ use_index_merge(t1) */ c1, (select sum(c2) from t2) from t1 where c1 < 10 or c2 < 10 and c3 > ALL(select count(1) from t2) order by 1; +select /*+ use_index_merge(t1) */ c1, (select sum(c2) from t2) from t1 where c1 < 10 or c2 < 10 and c3 > ALL(select count(1) from t2) order by 1; + +--echo // MULTIPLE LEVEL +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 IN (select c1 from t2 where c2 in (select c3 from t2)) order by 1; +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 IN (select c1 from t2 where c2 in (select c3 from t2)) order by 1; + +--echo ///// Generated Column +drop table if exists t1; +create table t1(c1 int, c2 int, c3 int as (c1 + c2), key(c1), key(c2)); +insert into t1(c1, c2) values(1, 1), (2, 2), (3, 3), (4, 4), (5, 5); + +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; + +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 = c1 + c2 order by 1; +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 = c1 + c2 order by 1; + +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and substring(c3, c2) order by 1; +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and substring(c3, c2) order by 1; + +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 order by 1; +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 order by 1; + +--echo ///// SQL Binding +create global binding for + select * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1 +using + select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +explain select * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +select * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; + +--echo ///// CREATE TABLE/VIEW +drop table if exists t1; +create table t1(c1 int, c2 int, c3 int, key(c1), key(c2)); +insert into t1 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5); 
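+# The hint should survive in the stored view definition: the SHOW CREATE VIEW
+# output below is expected to still contain the USE_INDEX_MERGE hint comment.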
+ +drop view if exists v2; +create view v2 as select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10; +show create view v2; +select * from v2 order by 1; + +--echo ///// DROP/ALTER INDEX +drop table if exists t1; +create table t1(c1 int, c2 int, c3 int, key(c1), key(c2)); +insert into t1 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5); + +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; + +drop index c1 on t1; + +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; + +alter table t1 add index c1(c1); + +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1; + +--echo ///// DELETE +drop table if exists t1; +create table t1(c1 int, c2 int, c3 int, key(c1), key(c2)); +insert into t1 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5); +explain delete from t1 where c1 in (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10) order by 1; +delete from t1 where c1 in (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10) order by 1; +select * from t1; + +--echo ///// UPDATE +explain update t1 set c1 = 100, c2 = 100, c3 = 100 where c1 in (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10); +update t1 set c1 = 100, c2 = 100, c3 = 100 where c1 in (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10); +select * from t1; + +--echo ///// FOR UPDATE +explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1 for update; +select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1 for update; + +--echo ///// TEMPORARY Table. Not support for now. 
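+# Reads from a temporary table must go through a UnionScan to merge the rows
+# buffered in memory, and index merge does not work under UnionScan yet, so
+# the plan below is expected to fall back to a TableFullScan.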
+drop table if exists t1;
+create temporary table t1(c1 int, c2 int, c3 int, key(c1), key(c2));
+insert into t1 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5);
+explain select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10 order by 1;
+
+--echo ///// MEMORY Table
+explain select count(c1) from (select /*+ use_index_merge(t_alias), stream_agg() */ count(1) c1 from information_schema.statements_summary where sum_latency >= 0 or max_latency >= 0 order by 1) dt;
+show warnings;
+select count(c1) from (select /*+ use_index_merge(t_alias), stream_agg() */ count(1) c1 from information_schema.statements_summary where sum_latency >= 0 or max_latency >= 0 order by 1) dt;
+
+--echo ///// Limit
+drop table if exists t1;
+create table t1(c1 int, c2 int, c3 int, key(c1), key(c2));
+insert into t1 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5);
+
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and c3 < 10 order by 1 limit 1 offset 2;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and c3 < 10 order by 1 limit 1 offset 2;
+
+--echo ///// GROUP BY
+explain select /*+ use_index_merge(t1) */ sum(c1) from t1 where (c1 < 10 or c2 < 10) and c3 < 10 group by c1 order by 1;
+select /*+ use_index_merge(t1) */ sum(c1) from t1 where (c1 < 10 or c2 < 10) and c3 < 10 group by c1 order by 1;
+
+--echo ///// Apply
+drop table if exists t2;
+create table t2(c1 int, c2 int, c3 int, key(c1), key(c2));
+insert into t2 values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5);
+
+explain select /*+ use_index_merge(t1) */ * from t1 where t1.c1 = (select avg(t2.c1) from t2 where t1.c1 = t2.c1 group by t2.c1) and (c1 < 10 or c2 < -1) and c3 < 10 order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where t1.c1 = (select avg(t2.c1) from t2 where t1.c1 = t2.c1 group by t2.c1) and (c1 < 10 or c2 < -1) and c3 < 10 order by 1;
+
+explain select /*+ use_index_merge(t1) */ * from t1 where t1.c1 = (select /*+ use_index_merge(t2) */ avg(t2.c1) from t2 where t1.c1 = t2.c1 and t2.c1 < 10 or t2.c2 < 10 group by t2.c1 order by c1 limit 1 offset 2) and (c1 < 10 or c2 < -1) and c3 < 10 order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where t1.c1 = (select /*+ use_index_merge(t2) */ avg(t2.c1) from t2 where t1.c1 = t2.c1 and t2.c1 < 10 or t2.c2 < 10 group by t2.c1 order by c1 limit 1 offset 2) and (c1 < 10 or c2 < -1) and c3 < 10 order by 1;
+
+--echo ///// Nested filters
+drop table if exists t1;
+create table t1(c1 int, c2 int, c3 int, c4 int, c5 int, key(c1), key(c2), key(c3), key(c4));
+insert into t1 values(1, 1, 1, 1, 1), (2, 2, 2, 2, 2), (3, 3, 3, 3, 3), (4, 4, 4, 4, 4), (5, 5, 5, 5, 5);
+
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and (c3 < 10 or c4 < 10) order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and (c3 < 10 or c4 < 10) order by 1;
+
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 and c2 < 10) or (c3 < 10 and c4 < 10) order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 and c2 < 10) or (c3 < 10 and c4 < 10) order by 1;
+
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 and c2 < 10) or (c3 < 10 and c4 < 10) and c5 < 10 order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 and c2 < 10) or (c3 < 10 and c4 < 10) and c5 < 10 order by 1;
+
+explain select /*+ use_index_merge(t1) */ * from t1 where ((c1 < 10 and c4 < 10) or c2 < 10) and (c3 < 10 or c5 < 10) order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where ((c1 < 10 and c4 < 10) or c2 < 10) and (c3 < 10 or c5 < 10) order by 1;
+
+explain select /*+ use_index_merge(t1) */ * from t1 where (((c1 < 10 or c3 < 10) and (c1 < 10 or c4 < 10)) or c2 < 10) and (c3 < 10 or c5 < 10) order by 1;
+show warnings;
+select /*+ use_index_merge(t1) */ * from t1 where (((c1 < 10 or c3 < 10) and (c1 < 10 or c4 < 10)) or c2 < 10) and (c3 < 10 or c5 < 10) order by 1;
+
+explain select /*+ use_index_merge(t1) */ * from t1 where (((c1 < 10 or c3 < 10) and c1 < 10) or c2 < 10) and (c3 < 10 or c5 < 10) order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (((c1 < 10 or c3 < 10) and c1 < 10) or c2 < 10) and (c3 < 10 or c5 < 10) order by 1;
+
+--echo ///// All kinds of expressions
+--echo // common functions
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and coalesce(c1, c2, c4) = 1 order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and coalesce(c1, c2, c4) = 1 order by 1;
+
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and greatest(c1, c2, c4) = 1 order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and greatest(c1, c2, c4) = 1 order by 1;
+
+--echo // math functions
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and abs(c1) = 1 order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and abs(c1) = 1 order by 1;
+
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and pi() order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and pi() order by 1;
+
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and ceil(c1) order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and ceil(c1) order by 1;
+
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and truncate(c1, 1) = 1 order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and truncate(c1, 1) = 1 order by 1;
+
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and sqrt(-1) order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and sqrt(-1) order by 1;
+
+--echo // string functions
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and substring(c3, 1, 1) = '1' order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and substring(c3, 1, 1) = '1' order by 1;
+
+--echo // control functions
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and ifnull(c1, c2) order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and ifnull(c1, c2) order by 1;
+
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and if(c1, c2, c3) order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and if(c1, c2, c3) order by 1;
+
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and (c1 between 1 and 2) order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and (c1 between 1 and 2) order by 1;
+
+--echo // mixed usage
+set @a = 1;
+explain select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and length(substring(sqrt(c3), @a, 1)) = char_length(if(c1, c2, c3)) order by 1;
+select /*+ use_index_merge(t1) */ * from t1 where (c1 < 10 or c2 < 10) and length(substring(sqrt(c3), @a, 1)) = char_length(if(c1, c2, c3)) order by 1;
+
+--echo ///// CTE
+drop table if exists t1;
+create table t1(c1 int, c2 int, c3 int, c4 int, c5 int, key(c1), key(c2), key(c3), key(c4));
+insert into t1 values(1, 1, 1, 1, 1), (2, 2, 2, 2, 2), (3, 3, 3, 3, 3), (4, 4, 4, 4, 4), (5, 5, 5, 5, 5);
+
+explain with cte1 as (select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10) select * from cte1 order by 1;
+with cte1 as (select /*+ use_index_merge(t1) */ * from t1 where c1 < 10 or c2 < 10 and c3 < 10) select * from cte1 order by 1;
+
+explain with recursive cte1 as (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10 UNION ALL select c1 + 100 from cte1 where c1 < 10) select * from cte1 order by 1;
+with recursive cte1 as (select /*+ use_index_merge(t1) */ c1 from t1 where c1 < 10 or c2 < 10 and c3 < 10 UNION ALL select c1 + 100 from cte1 where c1 < 10) select * from cte1 order by 1;
+
+explain with recursive cte1 as (select 1 c1, 1 c2, 1 c3 UNION ALL select /*+ use_index_merge(t_alias) */ c1 + 1, c2 + 1, c3 + 1 from cte1 t_alias where c1 < 10 or c2 < 10 and c3 < 10) select * from cte1 order by 1;
+show warnings;
+with recursive cte1 as (select 1 c1, 1 c2, 1 c3 UNION ALL select /*+ use_index_merge(t_alias) */ c1 + 1, c2 + 1, c3 + 1 from cte1 t_alias where c1 < 10 or c2 < 10 and c3 < 10) select * from cte1 order by 1;
diff --git a/executor/index_merge_reader_test.go b/executor/index_merge_reader_test.go
index 7fc2ac15e9473..15f8228bbccc0 100644
--- a/executor/index_merge_reader_test.go
+++ b/executor/index_merge_reader_test.go
@@ -22,6 +22,7 @@ import (
 	"strings"
 
 	. "github.com/pingcap/check"
+	"github.com/pingcap/tidb/util"
 	"github.com/pingcap/tidb/util/israce"
 	"github.com/pingcap/tidb/util/testkit"
 )
@@ -173,6 +174,38 @@ func (s *testSuite1) TestPartitionTableRandomIndexMerge(c *C) {
 	}
 }
 
+func (s *testSuite1) TestIndexMergeWithPreparedStmt(c *C) {
+	tk := testkit.NewTestKit(c, s.store)
+	tk.MustExec("use test;")
+	tk.MustExec("drop table if exists t1;")
+	tk.MustExec("create table t1(c1 int, c2 int, c3 int, key(c1), key(c2));")
+	insertStr := "insert into t1 values(0, 0, 0)"
+	for i := 1; i < 100; i++ {
+		insertStr += fmt.Sprintf(", (%d, %d, %d)", i, i, i)
+	}
+	tk.MustExec(insertStr)
+
+	tk.MustExec("prepare stmt1 from 'select /*+ use_index_merge(t1) */ count(1) from t1 where c1 < ? or c2 < ?';")
+	tk.MustExec("set @a = 10;")
+	tk.MustQuery("execute stmt1 using @a, @a;").Check(testkit.Rows("10"))
+	tk.Se.SetSessionManager(&mockSessionManager1{
+		PS: []*util.ProcessInfo{tk.Se.ShowProcess()},
+	})
+	explainStr := "explain for connection " + strconv.FormatUint(tk.Se.ShowProcess().ID, 10)
+	res := tk.MustQuery(explainStr)
+	indexMergeLine := res.Rows()[1][0].(string)
+	re, err := regexp.Compile(".*IndexMerge.*")
+	c.Assert(err, IsNil)
+	c.Assert(re.MatchString(indexMergeLine), IsTrue)
+
+	tk.MustExec("prepare stmt1 from 'select /*+ use_index_merge(t1) */ count(1) from t1 where c1 < ? or c2 < ? and c3';")
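+	// Re-prepare with an extra `and c3` filter; `explain for connection` below
+	// reads the plan this session actually executed, so the assertions verify
+	// that the cached plan still uses IndexMerge.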
and c3';") + tk.MustExec("set @a = 10;") + tk.MustQuery("execute stmt1 using @a, @a;").Check(testkit.Rows("10")) + res = tk.MustQuery(explainStr) + indexMergeLine = res.Rows()[1][0].(string) + c.Assert(re.MatchString(indexMergeLine), IsTrue) +} + func (s *testSuite1) TestIndexMergeInTransaction(c *C) { tk := testkit.NewTestKitWithInit(c, s.store) From eed16a6cb46b105f28d2b3447c761f48ab1fc407 Mon Sep 17 00:00:00 2001 From: Zhenchi Date: Thu, 16 Dec 2021 15:18:36 +0800 Subject: [PATCH 13/15] server: add grpc server config for a suitable behavior (#30774) --- config/config.go | 27 ++++++++++++++++++++++----- config/config_test.go | 11 +++++++++++ server/rpc_server.go | 12 +++++++++++- 3 files changed, 44 insertions(+), 6 deletions(-) diff --git a/config/config.go b/config/config.go index 53141fadd093a..2319d953286c7 100644 --- a/config/config.go +++ b/config/config.go @@ -445,6 +445,18 @@ type Status struct { MetricsInterval uint `toml:"metrics-interval" json:"metrics-interval"` ReportStatus bool `toml:"report-status" json:"report-status"` RecordQPSbyDB bool `toml:"record-db-qps" json:"record-db-qps"` + // After a duration of this time in seconds if the server doesn't see any activity it pings + // the client to see if the transport is still alive. + GRPCKeepAliveTime uint `toml:"grpc-keepalive-time" json:"grpc-keepalive-time"` + // After having pinged for keepalive check, the server waits for a duration of timeout in seconds + // and if no activity is seen even after that the connection is closed. + GRPCKeepAliveTimeout uint `toml:"grpc-keepalive-timeout" json:"grpc-keepalive-timeout"` + // The number of max concurrent streams/requests on a client connection. + GRPCConcurrentStreams uint `toml:"grpc-concurrent-streams" json:"grpc-concurrent-streams"` + // Sets window size for stream. The default value is 2MB. + GRPCInitialWindowSize int `toml:"grpc-initial-window-size" json:"grpc-initial-window-size"` + // Set maximum message length in bytes that gRPC can send. `-1` means unlimited. The default value is 10MB. + GRPCMaxSendMsgSize int `toml:"grpc-max-send-msg-size" json:"grpc-max-send-msg-size"` } // Performance is the performance section of the config. 
@@ -658,11 +670,16 @@ var defaultConf = Config{
 		EnableSlowLog:       *NewAtomicBool(logutil.DefaultTiDBEnableSlowLog),
 	},
 	Status: Status{
-		ReportStatus:    true,
-		StatusHost:      DefStatusHost,
-		StatusPort:      DefStatusPort,
-		MetricsInterval: 15,
-		RecordQPSbyDB:   false,
+		ReportStatus:          true,
+		StatusHost:            DefStatusHost,
+		StatusPort:            DefStatusPort,
+		MetricsInterval:       15,
+		RecordQPSbyDB:         false,
+		GRPCKeepAliveTime:     10,
+		GRPCKeepAliveTimeout:  3,
+		GRPCConcurrentStreams: 1024,
+		GRPCInitialWindowSize: 2 * 1024 * 1024,
+		GRPCMaxSendMsgSize:    10 * 1024 * 1024,
 	},
 	Performance: Performance{
 		MaxMemory: 0,
diff --git a/config/config_test.go b/config/config_test.go
index ab3edc62a0fc4..25c79dc40ebe3 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -261,6 +261,12 @@ deadlock-history-capacity = 123
 deadlock-history-collect-retryable = true
 [top-sql]
 receiver-address = "127.0.0.1:10100"
+[status]
+grpc-keepalive-time = 20
+grpc-keepalive-timeout = 10
+grpc-concurrent-streams = 2048
+grpc-initial-window-size = 10240
+grpc-max-send-msg-size = 40960
 `)
 	require.NoError(t, err)
 
@@ -318,6 +324,11 @@ receiver-address = "127.0.0.1:10100"
 	require.False(t, conf.Experimental.EnableNewCharset)
 	require.Equal(t, "127.0.0.1:10100", conf.TopSQL.ReceiverAddress)
 	require.True(t, conf.Experimental.AllowsExpressionIndex)
+	require.Equal(t, uint(20), conf.Status.GRPCKeepAliveTime)
+	require.Equal(t, uint(10), conf.Status.GRPCKeepAliveTimeout)
+	require.Equal(t, uint(2048), conf.Status.GRPCConcurrentStreams)
+	require.Equal(t, 10240, conf.Status.GRPCInitialWindowSize)
+	require.Equal(t, 40960, conf.Status.GRPCMaxSendMsgSize)
 
 	err = f.Truncate(0)
 	require.NoError(t, err)
diff --git a/server/rpc_server.go b/server/rpc_server.go
index 67965ac381f4d..674047781a6bd 100644
--- a/server/rpc_server.go
+++ b/server/rpc_server.go
@@ -18,6 +18,7 @@ import (
 	"context"
 	"fmt"
 	"net"
+	"time"
 
 	"github.com/pingcap/kvproto/pkg/coprocessor"
 	"github.com/pingcap/kvproto/pkg/diagnosticspb"
@@ -34,6 +35,7 @@ import (
 	"github.com/pingcap/tidb/util/memory"
 	"go.uber.org/zap"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/peer"
 )
 
@@ -46,7 +48,15 @@ func NewRPCServer(config *config.Config, dom *domain.Domain, sm util.SessionMan
 		}
 	}()
 
-	s := grpc.NewServer()
+	s := grpc.NewServer(
+		grpc.KeepaliveParams(keepalive.ServerParameters{
+			Time:    time.Duration(config.Status.GRPCKeepAliveTime) * time.Second,
+			Timeout: time.Duration(config.Status.GRPCKeepAliveTimeout) * time.Second,
+		}),
+		grpc.MaxConcurrentStreams(uint32(config.Status.GRPCConcurrentStreams)),
+		grpc.InitialWindowSize(int32(config.Status.GRPCInitialWindowSize)),
+		grpc.MaxSendMsgSize(config.Status.GRPCMaxSendMsgSize),
+	)
 	rpcSrv := &rpcServer{
 		DiagnosticsServer: sysutil.NewDiagnosticsServer(config.Log.File.Filename),
 		dom:               dom,

From 43caa02f701e3eee14a273641949b8c219c620eb Mon Sep 17 00:00:00 2001
From: xiongjiwei
Date: Thu, 16 Dec 2021 15:32:35 +0800
Subject: [PATCH 14/15] config, charset: make charset config not affected by collation config (#30572)

---
 executor/executor_test.go | 17 +++++++++++++++++
 session/session.go        |  6 +++---
 2 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/executor/executor_test.go b/executor/executor_test.go
index ef4f434a9fb67..ce2d25b8d8a91 100644
--- a/executor/executor_test.go
+++ b/executor/executor_test.go
@@ -5797,6 +5797,23 @@ func (s *testSuiteWithCliBaseCharset) TestCharsetFeature(c *C) {
 		"  `a` char(10) DEFAULT NULL\n" +
 			") ENGINE=InnoDB DEFAULT CHARSET=gbk COLLATE=gbk_chinese_ci",
 	))
+
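+	// Turning new collations off must not hide the gbk charset: `show charset`
+	// below still lists gbk, while `show collation` shrinks to the legacy set.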
+	collate.SetNewCollationEnabledForTest(false)
+	tk.MustQuery("show charset").Check(testkit.Rows(
+		"ascii US ASCII ascii_bin 1",
+		"binary binary binary 1",
+		"gbk Chinese Internal Code Specification gbk_chinese_ci 2",
+		"latin1 Latin1 latin1_bin 1",
+		"utf8 UTF-8 Unicode utf8_bin 3",
+		"utf8mb4 UTF-8 Unicode utf8mb4_bin 4",
+	))
+	tk.MustQuery("show collation").Check(testkit.Rows(
+		"utf8mb4_bin utf8mb4 46 Yes Yes 1",
+		"latin1_bin latin1 47 Yes Yes 1",
+		"binary binary 63 Yes Yes 1",
+		"ascii_bin ascii 65 Yes Yes 1",
+		"utf8_bin utf8 83 Yes Yes 1",
+	))
 }
 
 func (s *testSuiteWithCliBaseCharset) TestCharsetFeatureCollation(c *C) {
diff --git a/session/session.go b/session/session.go
index 16fecfa3b0121..c57628d090f44 100644
--- a/session/session.go
+++ b/session/session.go
@@ -2535,9 +2535,9 @@ func BootstrapSession(store kv.Storage) (*domain.Domain, error) {
 
 	if newCollationEnabled {
 		collate.EnableNewCollations()
-		if cfg.Experimental.EnableNewCharset {
-			collate.EnableNewCharset()
-		}
+	}
+	if cfg.Experimental.EnableNewCharset {
+		collate.EnableNewCharset()
 	}
 
 	newMemoryQuotaQuery, err := loadDefMemQuotaQuery(se)

From 5eac82b0521de127ab3d7380dcf7506e5daee729 Mon Sep 17 00:00:00 2001
From: glorv
Date: Thu, 16 Dec 2021 15:48:36 +0800
Subject: [PATCH 15/15] lightning: emit tidb log by changing FilterCore to only allow matched packages (#30700)

---
 br/pkg/lightning/backend/local/engine.go |  2 +-
 br/pkg/lightning/log/filter.go           | 12 ++++++------
 br/pkg/lightning/log/filter_test.go      | 13 +++++--------
 br/pkg/lightning/log/log.go              |  2 +-
 4 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/br/pkg/lightning/backend/local/engine.go b/br/pkg/lightning/backend/local/engine.go
index a4ba47ac42a43..1091bb7a58c0e 100644
--- a/br/pkg/lightning/backend/local/engine.go
+++ b/br/pkg/lightning/backend/local/engine.go
@@ -883,7 +883,7 @@ func (e *Engine) loadEngineMeta() error {
 	jsonBytes, closer, err := e.db.Get(engineMetaKey)
 	if err != nil {
 		if err == pebble.ErrNotFound {
-			log.L().Debug("local db missing engine meta", zap.Stringer("uuid", e.UUID), zap.Error(err))
+			log.L().Debug("local db missing engine meta", zap.Stringer("uuid", e.UUID), log.ShortError(err))
 			return nil
 		}
 		return err
diff --git a/br/pkg/lightning/log/filter.go b/br/pkg/lightning/log/filter.go
index 2ed88708afa6b..50dea0711b9d3 100644
--- a/br/pkg/lightning/log/filter.go
+++ b/br/pkg/lightning/log/filter.go
@@ -17,14 +17,14 @@ type FilterCore struct {
 	filters []string
 }
 
-// NewFilterCore returns a FilterCore.
+// NewFilterCore returns a FilterCore; only logs under allowPackages will be written.
 //
-// Example, filter TiDB's log, `NewFilterCore(core, "github.com/pingcap/tidb/")`.
+// Example, only write br's log and ignore any others, `NewFilterCore(core, "github.com/pingcap/tidb/br/")`.
 // Note, must set AddCaller() to the logger.
-func NewFilterCore(core zapcore.Core, filteredPackages ...string) *FilterCore {
+func NewFilterCore(core zapcore.Core, allowPackages ...string) *FilterCore {
 	return &FilterCore{
 		Core:    core,
-		filters: filteredPackages,
+		filters: allowPackages,
 	}
 }
 
@@ -50,8 +50,8 @@ func (f *FilterCore) Write(entry zapcore.Entry, fields []zapcore.Field) error {
 	for i := range f.filters {
 		// Caller.Function is a package path-qualified function name.
 		if strings.Contains(entry.Caller.Function, f.filters[i]) {
-			return nil
+			return f.Core.Write(entry, fields)
 		}
 	}
-	return f.Core.Write(entry, fields)
+	return nil
 }
diff --git a/br/pkg/lightning/log/filter_test.go b/br/pkg/lightning/log/filter_test.go
index 21b08962558b7..6d1530ce9ee45 100644
--- a/br/pkg/lightning/log/filter_test.go
+++ b/br/pkg/lightning/log/filter_test.go
@@ -25,13 +25,13 @@ func (s *testFilterSuite) TestFilter(c *C) {
 	)
 
 	logger, buffer = log.MakeTestLogger(zap.WrapCore(func(c zapcore.Core) zapcore.Core {
-		return log.NewFilterCore(c, "github.com/pingcap/tidb/br/")
+		return log.NewFilterCore(c, "github.com/pingcap/br/")
 	}), zap.AddCaller())
 	logger.Warn("the message", zap.Int("number", 123456), zap.Ints("array", []int{7, 8, 9}))
 	c.Assert(buffer.Stripped(), HasLen, 0)
 
 	logger, buffer = log.MakeTestLogger(zap.WrapCore(func(c zapcore.Core) zapcore.Core {
-		return log.NewFilterCore(c, "github.com/pingcap/br/").With([]zap.Field{zap.String("a", "b")})
+		return log.NewFilterCore(c, "github.com/pingcap/tidb/br/").With([]zap.Field{zap.String("a", "b")})
 	}), zap.AddCaller())
 	logger.Warn("the message", zap.Int("number", 123456), zap.Ints("array", []int{7, 8, 9}))
 	c.Assert(
@@ -40,7 +40,7 @@ func (s *testFilterSuite) TestFilter(c *C) {
 
 	logger, buffer = log.MakeTestLogger(zap.WrapCore(func(c zapcore.Core) zapcore.Core {
-		return log.NewFilterCore(c, "github.com/pingcap/tidb/br/").With([]zap.Field{zap.String("a", "b")})
+		return log.NewFilterCore(c, "github.com/pingcap/br/").With([]zap.Field{zap.String("a", "b")})
 	}), zap.AddCaller())
 	logger.Warn("the message", zap.Int("number", 123456), zap.Ints("array", []int{7, 8, 9}))
 	c.Assert(buffer.Stripped(), HasLen, 0)
@@ -49,11 +49,8 @@ func (s *testFilterSuite) TestFilter(c *C) {
 	logger, buffer = log.MakeTestLogger(zap.WrapCore(func(c zapcore.Core) zapcore.Core {
 		return log.NewFilterCore(c, "github.com/pingcap/check/").With([]zap.Field{zap.String("a", "b")})
 	}), zap.AddCaller())
-	logger.Warn("the message", zap.String("stack", "github.com/pingcap/check/"))
-	c.Assert(
-		buffer.Stripped(), Equals,
-		`{"$lvl":"WARN","$msg":"the message","a":"b","stack":"github.com/pingcap/check/"}`,
-	)
+	logger.Warn("the message", zap.String("stack", "github.com/pingcap/tidb/br/"))
+	c.Assert(buffer.Stripped(), HasLen, 0)
 }
 
 // PASS: filter_test.go:82: testFilterSuite.BenchmarkFilterRegexMatchString 1000000 1163 ns/op
diff --git a/br/pkg/lightning/log/log.go b/br/pkg/lightning/log/log.go
index 8521cf85a6579..97cbfdd8c0457 100644
--- a/br/pkg/lightning/log/log.go
+++ b/br/pkg/lightning/log/log.go
@@ -91,7 +91,7 @@ func InitLogger(cfg *Config, tidbLoglevel string) error {
 	}
 	filterTiDBLog := zap.WrapCore(func(core zapcore.Core) zapcore.Core {
 		// Filter logs from TiDB and PD.
-		return NewFilterCore(core, "github.com/tikv/pd/")
+		return NewFilterCore(core, "github.com/pingcap/tidb/br/")
 	})
 	// "-" is a special config for log to stdout.
 	if len(cfg.File) > 0 && cfg.File != "-" {
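To make the new allow-list semantics concrete, here is a minimal sketch (not part of the patch) that wires NewFilterCore into a standalone zap logger. The encoder, sink, and level are illustrative; only NewFilterCore and the zap.AddCaller() requirement come from the code above:

    package main

    import (
    	"os"

    	"github.com/pingcap/tidb/br/pkg/lightning/log"
    	"go.uber.org/zap"
    	"go.uber.org/zap/zapcore"
    )

    func main() {
    	core := zapcore.NewCore(
    		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
    		zapcore.AddSync(os.Stdout),
    		zap.InfoLevel,
    	)
    	// After this patch the variadic argument is an allow-list: only entries
    	// whose caller sits under one of these package prefixes are written.
    	filtered := log.NewFilterCore(core, "github.com/pingcap/tidb/br/")
    	// AddCaller is mandatory; without it entry.Caller.Function is empty and
    	// every entry is dropped by the substring match in Write.
    	logger := zap.New(filtered, zap.AddCaller())
    	// This call originates from package main, so the filter drops it; the
    	// same call made from a package under br/ would be written.
    	logger.Info("not logged: caller is outside the allowed packages")
    }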