Merge branch 'master' of https://github.com/pingcap/tidb into fix_cluster_virtual_index
wjhuang2016 committed Jun 18, 2020
2 parents 836599c + 2293925 commit 500ee76
Showing 49 changed files with 1,093 additions and 263 deletions.
32 changes: 16 additions & 16 deletions .github/CODEOWNERS
@@ -1,20 +1,20 @@
/distsql @pingcap/co-exec
/executor @pingcap/co-exec
/expression @pingcap/co-exec
/types @pingcap/co-exec
/util/chunk @pingcap/co-exec
/util/disk @pingcap/co-exec
/util/execdetails @pingcap/co-exec
/util/expensivequery @pingcap/co-exec
/util/filesort @pingcap/co-exec
/util/memory @pingcap/co-exec
/util/sqlexec @pingcap/co-exec
/distsql @pingcap/exec-reviewers
/executor @pingcap/exec-reviewers
/expression @pingcap/exec-reviewers
/types @pingcap/exec-reviewers
/util/chunk @pingcap/exec-reviewers
/util/disk @pingcap/exec-reviewers
/util/execdetails @pingcap/exec-reviewers
/util/expensivequery @pingcap/exec-reviewers
/util/filesort @pingcap/exec-reviewers
/util/memory @pingcap/exec-reviewers
/util/sqlexec @pingcap/exec-reviewers

/planner @pingcap/co-planner
/statistics @pingcap/co-planner
/util/ranger @pingcap/co-planner
/util/plancodec @pingcap/co-planner
/bindinfo @pingcap/co-planner
/planner @pingcap/planner-reviewers
/statistics @pingcap/planner-reviewers
/util/ranger @pingcap/planner-reviewers
/util/plancodec @pingcap/planner-reviewers
/bindinfo @pingcap/planner-reviewers

/ddl @pingcap/co-ddl
/domain @pingcap/co-ddl
1 change: 1 addition & 0 deletions cmd/explaintest/config.toml
@@ -1,6 +1,7 @@
port = 4001
lease = "0"
mem-quota-query = 34359738368
nested-loop-join-cache-capacity = 20971520

[status]
status-port = 10081
32 changes: 17 additions & 15 deletions config/config.go
@@ -77,21 +77,22 @@ var (

// Config contains configuration options.
type Config struct {
Host string `toml:"host" json:"host"`
AdvertiseAddress string `toml:"advertise-address" json:"advertise-address"`
Port uint `toml:"port" json:"port"`
Cors string `toml:"cors" json:"cors"`
Store string `toml:"store" json:"store"`
Path string `toml:"path" json:"path"`
Socket string `toml:"socket" json:"socket"`
Lease string `toml:"lease" json:"lease"`
RunDDL bool `toml:"run-ddl" json:"run-ddl"`
SplitTable bool `toml:"split-table" json:"split-table"`
TokenLimit uint `toml:"token-limit" json:"token-limit"`
OOMUseTmpStorage bool `toml:"oom-use-tmp-storage" json:"oom-use-tmp-storage"`
TempStoragePath string `toml:"tmp-storage-path" json:"tmp-storage-path"`
OOMAction string `toml:"oom-action" json:"oom-action"`
MemQuotaQuery int64 `toml:"mem-quota-query" json:"mem-quota-query"`
Host string `toml:"host" json:"host"`
AdvertiseAddress string `toml:"advertise-address" json:"advertise-address"`
Port uint `toml:"port" json:"port"`
Cors string `toml:"cors" json:"cors"`
Store string `toml:"store" json:"store"`
Path string `toml:"path" json:"path"`
Socket string `toml:"socket" json:"socket"`
Lease string `toml:"lease" json:"lease"`
RunDDL bool `toml:"run-ddl" json:"run-ddl"`
SplitTable bool `toml:"split-table" json:"split-table"`
TokenLimit uint `toml:"token-limit" json:"token-limit"`
OOMUseTmpStorage bool `toml:"oom-use-tmp-storage" json:"oom-use-tmp-storage"`
TempStoragePath string `toml:"tmp-storage-path" json:"tmp-storage-path"`
OOMAction string `toml:"oom-action" json:"oom-action"`
MemQuotaQuery int64 `toml:"mem-quota-query" json:"mem-quota-query"`
NestedLoopJoinCacheCapacity int64 `toml:"nested-loop-join-cache-capacity" json:"nested-loop-join-cache-capacity"`
// TempStorageQuota describe the temporary storage Quota during query exector when OOMUseTmpStorage is enabled
// If the quota exceed the capacity of the TempStoragePath, the tidb-server would exit with fatal error
TempStorageQuota int64 `toml:"tmp-storage-quota" json:"tmp-storage-quota"` // Bytes
@@ -559,6 +560,7 @@ var defaultConf = Config{
TempStoragePath: tempStorageDirName,
OOMAction: OOMActionCancel,
MemQuotaQuery: 1 << 30,
NestedLoopJoinCacheCapacity: 20971520,
EnableStreaming: false,
EnableBatchDML: false,
CheckMb4ValueInUTF8: true,
3 changes: 3 additions & 0 deletions config/config.toml.example
@@ -34,6 +34,9 @@ token-limit = 1000
# The maximum memory available for a single SQL statement. Default: 1GB
mem-quota-query = 1073741824

# The maximum memory available for the nested-loop join (NLJ) cache of a single SQL statement. Default: 20MB
nested-loop-join-cache-capacity = 20971520

# Controls whether to enable the temporary storage for some operators when a single SQL statement exceeds the memory quota specified by mem-quota-query.
oom-use-tmp-storage = true

2 changes: 2 additions & 0 deletions config/config_test.go
@@ -188,6 +188,7 @@ server-version = "test_version"
repair-mode = true
max-server-connections = 200
mem-quota-query = 10000
nested-loop-join-cache-capacity = 100
max-index-length = 3080
[performance]
txn-total-size-limit=2000
@@ -242,6 +243,7 @@ engines = ["tiflash"]
c.Assert(conf.RepairMode, Equals, true)
c.Assert(conf.MaxServerConnections, Equals, uint32(200))
c.Assert(conf.MemQuotaQuery, Equals, int64(10000))
c.Assert(conf.NestedLoopJoinCacheCapacity, Equals, int64(100))
c.Assert(conf.Experimental.AllowsExpressionIndex, IsTrue)
c.Assert(conf.IsolationRead.Engines, DeepEquals, []string{"tiflash"})
c.Assert(conf.MaxIndexLength, Equals, 3080)
55 changes: 48 additions & 7 deletions ddl/db_change_test.go
@@ -562,13 +562,57 @@ func (s *testStateChangeSuite) TestWriteOnlyForAddColumns(c *C) {
s.runTestInSchemaState(c, model.StateWriteOnly, true, addColumnsSQL, sqls, nil)
}

// TestDeletaOnly tests whether the correct columns is used in PhysicalIndexScan's ToPB function.
// TestDeleteOnly tests whether the correct columns is used in PhysicalIndexScan's ToPB function.
func (s *testStateChangeSuite) TestDeleteOnly(c *C) {
sqls := make([]sqlWithErr, 1)
_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), `create table tt (c varchar(64), c4 int)`)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into tt (c, c4) values('a', 8)")
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table tt")

sqls := make([]sqlWithErr, 5)
sqls[0] = sqlWithErr{"insert t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1",
errors.Errorf("Can't find column c1")}
sqls[1] = sqlWithErr{"update t set c1 = 'c1_insert', c3 = '2018-02-12', c4 = 1",
errors.Errorf("[planner:1054]Unknown column 'c1' in 'field list'")}
sqls[2] = sqlWithErr{"delete from t where c1='a'",
errors.Errorf("[planner:1054]Unknown column 'c1' in 'where clause'")}
sqls[3] = sqlWithErr{"delete t, tt from tt inner join t on t.c4=tt.c4 where tt.c='a' and t.c1='a'",
errors.Errorf("[planner:1054]Unknown column 't.c1' in 'where clause'")}
sqls[4] = sqlWithErr{"delete t, tt from tt inner join t on t.c1=tt.c where tt.c='a'",
errors.Errorf("[planner:1054]Unknown column 't.c1' in 'on clause'")}
query := &expectQuery{sql: "select * from t;", rows: []string{"N 2017-07-01 00:00:00 8"}}
dropColumnSQL := "alter table t drop column c1"
s.runTestInSchemaState(c, model.StateDeleteOnly, true, dropColumnSQL, sqls, nil)
s.runTestInSchemaState(c, model.StateDeleteOnly, true, dropColumnSQL, sqls, query)
}

// TestDeleteOnlyForDropExpressionIndex tests deleting data while the hidden column is in the delete-only state.
func (s *serialTestStateChangeSuite) TestDeleteOnlyForDropExpressionIndex(c *C) {
originalVal := config.GetGlobalConfig().Experimental.AllowsExpressionIndex
config.GetGlobalConfig().Experimental.AllowsExpressionIndex = true
defer func() {
config.GetGlobalConfig().Experimental.AllowsExpressionIndex = originalVal
}()

_, err := s.se.Execute(context.Background(), "use test_db_state")
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), `create table tt (a int, b int)`)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), `alter table tt add index expr_idx((a+1))`)
c.Assert(err, IsNil)
_, err = s.se.Execute(context.Background(), "insert into tt (a, b) values(8, 8)")
c.Assert(err, IsNil)
defer s.se.Execute(context.Background(), "drop table tt")

sqls := make([]sqlWithErr, 1)
sqls[0] = sqlWithErr{"delete from tt where b=8", nil}
dropIdxSQL := "alter table tt drop index expr_idx"
s.runTestInSchemaState(c, model.StateDeleteOnly, true, dropIdxSQL, sqls, nil)

_, err = s.se.Execute(context.Background(), "admin check table tt")
c.Assert(err, IsNil)
}

// TestDeleteOnlyForDropColumns tests whether the correct columns is used in PhysicalIndexScan's ToPB function.
@@ -655,10 +699,7 @@ func (s *testStateChangeSuiteBase) runTestInSchemaState(c *C, state model.Schema
for _, sqlWithErr := range sqlWithErrs {
_, err = se.Execute(context.Background(), sqlWithErr.sql)
if !terror.ErrorEqual(err, sqlWithErr.expectErr) {
checkErr = err
if checkErr == nil {
checkErr = errors.New("err can't be nil")
}
checkErr = errors.Errorf("sql: %s, expect err: %v, got err: %v", sqlWithErr.sql, sqlWithErr.expectErr, err)
break
}
}
4 changes: 4 additions & 0 deletions ddl/sequence.go
@@ -144,6 +144,10 @@ func validateSequenceOptions(seqInfo *model.SequenceInfo) bool {
// Increment shouldn't be set as 0.
return false
}
if seqInfo.CacheValue <= 0 {
// Cache value should be bigger than 0.
return false
}
maxIncrement = math2.Abs(seqInfo.Increment)

return seqInfo.MaxValue >= seqInfo.Start &&
27 changes: 27 additions & 0 deletions ddl/sequence_test.go
@@ -972,3 +972,30 @@ func (s *testSequenceSuite) TestSequenceDefaultLogic(c *C) {
s.tk.MustGetErrMsg("alter table t add column b int default next value for seq", "[ddl:8230]Unsupported using sequence as default value in add column 'b'")
s.tk.MustQuery("select * from t").Check(testkit.Rows("-1", "-1", "-1"))
}

// Close issue #17945, sequence cache shouldn't be negative.
func (s *testSequenceSuite) TestSequenceCacheShouldNotBeNegative(c *C) {
s.tk = testkit.NewTestKit(c, s.store)
s.tk.MustExec("use test")

s.tk.MustExec("drop sequence if exists seq")
_, err := s.tk.Exec("create sequence seq cache -1")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[ddl:4136]Sequence 'test.seq' values are conflicting")

_, err = s.tk.Exec("create sequence seq cache 0")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[ddl:4136]Sequence 'test.seq' values are conflicting")

// This will error because
// 1: maxvalue = -1 by default
// 2: minvalue = -9223372036854775807 by default
// 3: increment = -9223372036854775807 by user
// `seqInfo.CacheValue < (math.MaxInt64-absIncrement)/absIncrement` will
// ensure there is enough value for one cache allocation at least.
_, err = s.tk.Exec("create sequence seq INCREMENT -9223372036854775807 cache 1")
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "[ddl:4136]Sequence 'test.seq' values are conflicting")

s.tk.MustExec("create sequence seq cache 1")
}
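
For reference, the cache-value constraint exercised by this test can be sketched as a small, self-contained check. This is a simplified approximation of the validation described in the test comment above (CacheValue > 0, and one cache round must leave room within int64); the seqOptions type and cacheValueOK name are illustrative stand-ins, not TiDB's actual validateSequenceOptions signature.

package main

import (
	"fmt"
	"math"
)

// seqOptions is an illustrative stand-in for the relevant fields of model.SequenceInfo.
type seqOptions struct {
	Increment  int64
	CacheValue int64
}

// cacheValueOK sketches the cache-related checks: the cache size must be
// positive, and one full cache round (cache * |increment|) must fit in int64.
func cacheValueOK(o seqOptions) bool {
	if o.Increment == 0 {
		return false // an increment of 0 is rejected earlier in the real validation
	}
	if o.CacheValue <= 0 {
		return false // cache must be greater than 0
	}
	absIncrement := o.Increment
	if absIncrement < 0 {
		absIncrement = -absIncrement
	}
	// Leave room for at least one cache allocation:
	// CacheValue < (MaxInt64 - |increment|) / |increment|.
	return o.CacheValue < (math.MaxInt64-absIncrement)/absIncrement
}

func main() {
	fmt.Println(cacheValueOK(seqOptions{Increment: 1, CacheValue: 1000}))                 // true
	fmt.Println(cacheValueOK(seqOptions{Increment: 1, CacheValue: 0}))                    // false: rejected, as in the new test
	fmt.Println(cacheValueOK(seqOptions{Increment: -9223372036854775807, CacheValue: 1})) // false: no room for one cache round
}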
1 change: 1 addition & 0 deletions errno/errcode.go
@@ -894,6 +894,7 @@ const (
ErrGeneratedColumnNonPrior = 3107
ErrDependentByGeneratedColumn = 3108
ErrGeneratedColumnRefAutoInc = 3109
ErrWarnConflictingHint = 3126
ErrInvalidJSONText = 3140
ErrInvalidJSONPath = 3143
ErrInvalidTypeForJSON = 3146
1 change: 1 addition & 0 deletions errno/errname.go
@@ -887,6 +887,7 @@ var MySQLErrName = map[uint16]string{
ErrGeneratedColumnNonPrior: "Generated column can refer only to generated columns defined prior to it.",
ErrDependentByGeneratedColumn: "Column '%s' has a generated column dependency.",
ErrGeneratedColumnRefAutoInc: "Generated column '%s' cannot refer to auto-increment column.",
ErrWarnConflictingHint: "Hint %s is ignored as conflicting/duplicated.",
ErrInvalidFieldSize: "Invalid size for column '%s'.",
ErrIncorrectType: "Incorrect type for argument %s in function %s.",
ErrInvalidJSONData: "Invalid JSON data provided to function %s: %s",
86 changes: 86 additions & 0 deletions executor/apply_cache.go
@@ -0,0 +1,86 @@
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package executor

import (
"github.com/cznic/mathutil"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/kvcache"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/stringutil"
)

// applyCache is used in the apply executor. When the same outer-row value is seen again,
// the cached inner rows are returned instead of being fetched from the inner executor again.
type applyCache struct {
cache *kvcache.SimpleLRUCache
memCapacity int64
memTracker *memory.Tracker // track memory usage.
}

type applyCacheKey []byte

func (key applyCacheKey) Hash() []byte {
return key
}

func applyCacheKVMem(key applyCacheKey, value *chunk.List) int64 {
return int64(len(key)) + value.GetMemTracker().BytesConsumed()
}

func newApplyCache(ctx sessionctx.Context) (*applyCache, error) {
// since applyCache controls the memory usage by itself, set the capacity of
// the underlying LRUCache to max so that its own memory control is effectively disabled
cache := kvcache.NewSimpleLRUCache(mathutil.MaxUint, 0.1, 0)
c := applyCache{
cache: cache,
memCapacity: ctx.GetSessionVars().NestedLoopJoinCacheCapacity,
memTracker: memory.NewTracker(stringutil.StringerStr("applyCache"), -1),
}
return &c, nil
}

// Get gets a cache item according to cache key.
func (c *applyCache) Get(key applyCacheKey) (*chunk.List, error) {
value, hit := c.cache.Get(&key)
if !hit {
return nil, nil
}
typedValue := value.(*chunk.List)
return typedValue, nil
}

// Set inserts an item to the cache.
func (c *applyCache) Set(key applyCacheKey, value *chunk.List) (bool, error) {
mem := applyCacheKVMem(key, value)
if mem > c.memCapacity { // ignore this kv pair if its size is too large
return false, nil
}
for mem+c.memTracker.BytesConsumed() > c.memCapacity {
evictedKey, evictedValue, evicted := c.cache.RemoveOldest()
if !evicted {
return false, nil
}
c.memTracker.Consume(-applyCacheKVMem(evictedKey.(applyCacheKey), evictedValue.(*chunk.List)))
}
c.memTracker.Consume(mem)
c.cache.Put(key, value)
return true, nil
}

// GetMemTracker returns the memory tracker of this apply cache.
func (c *applyCache) GetMemTracker() *memory.Tracker {
return c.memTracker
}
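
As an editorial aside, the eviction logic in Set above follows a byte-bounded LRU pattern: reject any entry larger than the whole budget, otherwise evict the oldest entries until the new one fits. Below is a minimal, self-contained sketch of that pattern; the byteLRU type, entry layout, and string keys are illustrative stand-ins, not the kvcache.SimpleLRUCache and memory.Tracker machinery used in apply_cache.go.

package main

import (
	"container/list"
	"fmt"
)

// entry pairs a key with its cached value; its size is charged against the byte budget.
type entry struct {
	key   string
	value []byte
}

func entrySize(e entry) int64 { return int64(len(e.key) + len(e.value)) }

// byteLRU is a toy LRU cache bounded by total bytes rather than entry count,
// mirroring how applyCache.Set evicts the oldest entries until a new one fits.
type byteLRU struct {
	capacity int64
	used     int64
	order    *list.List               // front = most recently used, back = oldest
	items    map[string]*list.Element // key -> element holding an entry
}

func newByteLRU(capacity int64) *byteLRU {
	return &byteLRU{capacity: capacity, order: list.New(), items: make(map[string]*list.Element)}
}

// Get returns the cached value and moves the entry to the front.
func (c *byteLRU) Get(key string) ([]byte, bool) {
	el, ok := c.items[key]
	if !ok {
		return nil, false
	}
	c.order.MoveToFront(el)
	return el.Value.(entry).value, true
}

// Set stores the value, evicting the oldest entries as needed; entries larger
// than the whole budget are refused, as applyCache.Set does.
func (c *byteLRU) Set(key string, value []byte) bool {
	e := entry{key: key, value: value}
	sz := entrySize(e)
	if sz > c.capacity {
		return false
	}
	if el, ok := c.items[key]; ok { // replace an existing entry for the same key
		c.used -= entrySize(el.Value.(entry))
		c.order.Remove(el)
		delete(c.items, key)
	}
	for c.used+sz > c.capacity {
		oldest := c.order.Back()
		old := oldest.Value.(entry)
		c.order.Remove(oldest)
		delete(c.items, old.key)
		c.used -= entrySize(old)
	}
	c.items[key] = c.order.PushFront(e)
	c.used += sz
	return true
}

func main() {
	c := newByteLRU(32)
	fmt.Println(c.Set("outer=1", []byte("inner rows for 1"))) // true
	fmt.Println(c.Set("outer=2", []byte("inner rows for 2"))) // true; evicts "outer=1" to stay under 32 bytes
	_, hit := c.Get("outer=1")
	fmt.Println("outer=1 still cached:", hit) // false
}

In the executor itself, the cache is keyed by the outer-row value and stores the inner rows as a chunk.List, so repeated outer values can skip re-running the inner side of the nested-loop join.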