From 2ee726264b6c4182b4b403f72eb3db5267db71f0 Mon Sep 17 00:00:00 2001
From: Song Gao
Date: Mon, 13 Feb 2023 14:04:08 +0800
Subject: [PATCH] This is an automated cherry-pick of #41319

Signed-off-by: ti-chi-bot
---
 executor/index_advise_test.go          | 131 ++++++++++++++
 planner/core/exhaust_physical_plans.go |  67 +++++++
 sessionctx/variable/session.go         | 207 ++++++++++++++++++++
 sessionctx/variable/sysvar.go          | 252 +++++++++++++++++++++++
 sessionctx/variable/tidb_vars.go       |  70 +++++++
 5 files changed, 727 insertions(+)

diff --git a/executor/index_advise_test.go b/executor/index_advise_test.go
index 5371ecd051bc1..66f571fae1d63 100644
--- a/executor/index_advise_test.go
+++ b/executor/index_advise_test.go
@@ -69,3 +69,134 @@ func TestIndexAdvise(t *testing.T) {
 	require.Equal(t, uint64(4), ia.MaxIndexNum.PerTable)
 	require.Equal(t, uint64(5), ia.MaxIndexNum.PerDB)
 }
+
+func TestIndexJoinProjPattern(t *testing.T) {
+	store := testkit.CreateMockStore(t)
+	tk := testkit.NewTestKit(t, store)
+	tk.MustExec("use test")
+	tk.MustExec(`create table t1(
+pnbrn_cnaps varchar(5) not null,
+new_accno varchar(18) not null,
+primary key(pnbrn_cnaps,new_accno) nonclustered
+);`)
+	tk.MustExec(`create table t2(
+pnbrn_cnaps varchar(5) not null,
+txn_accno varchar(18) not null,
+txn_dt date not null,
+yn_frz varchar(1) default null
+);`)
+	tk.MustExec(`insert into t1(pnbrn_cnaps,new_accno) values ("40001","123")`)
+	tk.MustExec(`insert into t2(pnbrn_cnaps, txn_accno, txn_dt, yn_frz) values ("40001","123","20221201","0");`)
+
+	sql := `update
+/*+ inl_join(a) */
+t2 b,
+(
+select t1.pnbrn_cnaps,
+t1.new_accno
+from t1
+where t1.pnbrn_cnaps = '40001'
+) a
+set b.yn_frz = '1'
+where b.txn_dt = str_to_date('20221201', '%Y%m%d')
+and b.pnbrn_cnaps = a.pnbrn_cnaps
+and b.txn_accno = a.new_accno;`
+	rows := [][]interface{}{
+		{"Update_8"},
+		{"└─IndexJoin_14"},
+		{" ├─TableReader_25(Build)"},
+		{" │ └─Selection_24"},
+		{" │ └─TableFullScan_23"},
+		{" └─IndexReader_12(Probe)"},
+		{" └─Selection_11"},
+		{" └─IndexRangeScan_10"},
+	}
+	tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'")
+	tk.MustQuery("explain "+sql).CheckAt([]int{0}, rows)
+	rows = [][]interface{}{
+		{"Update_8"},
+		{"└─HashJoin_10"},
+		{" ├─IndexReader_17(Build)"},
+		{" │ └─IndexRangeScan_16"},
+		{" └─TableReader_14(Probe)"},
+		{" └─Selection_13"},
+		{" └─TableFullScan_12"},
+	}
+	tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'")
+	tk.MustQuery("explain "+sql).CheckAt([]int{0}, rows)
+
+	tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'")
+	tk.MustExec(sql)
+	tk.MustQuery("select yn_frz from t2").Check(testkit.Rows("1"))
+}
+
+func TestIndexJoinSelPattern(t *testing.T) {
+	store := testkit.CreateMockStore(t)
+	tk := testkit.NewTestKit(t, store)
+	tk.MustExec("use test")
+	tk.MustExec(` create table tbl_miss(
+id bigint(20) unsigned not null
+,txn_dt date default null
+,perip_sys_uuid varchar(32) not null
+,rvrs_idr varchar(1) not null
+,primary key(id) clustered
+,key idx1 (txn_dt, perip_sys_uuid, rvrs_idr)
+);
+`)
+	tk.MustExec(`insert into tbl_miss (id,txn_dt,perip_sys_uuid,rvrs_idr) values (1,"20221201","123","1");`)
+	tk.MustExec(`create table tbl_src(
+txn_dt date default null
+,uuid varchar(32) not null
+,rvrs_idr char(1)
+,expd_inf varchar(5000)
+,primary key(uuid,rvrs_idr) nonclustered
+);
+`)
+	tk.MustExec(`insert into tbl_src (txn_dt,uuid,rvrs_idr) values ("20221201","123","1");`)
+	sql := `select /*+ use_index(mis,) inl_join(src) */
+	*
+	from tbl_miss mis
+	,tbl_src src
+	where src.txn_dt >= str_to_date('20221201', '%Y%m%d')
+	and mis.id between 1 and 10000
+	and mis.perip_sys_uuid = src.uuid
+	and mis.rvrs_idr = src.rvrs_idr
+	and mis.txn_dt = src.txn_dt
+	and (
+	case when isnull(src.expd_inf) = 1 then ''
+	else
+	substr(concat_ws('',src.expd_inf,'~~'),
+	instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4,
+	instr(substr(concat_ws('',src.expd_inf,'~~'),
+	instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1)
+	end
+	) != '01';`
+	rows := [][]interface{}{
+		{"HashJoin_9"},
+		{"├─TableReader_12(Build)"},
+		{"│ └─Selection_11"},
+		{"│ └─TableRangeScan_10"},
+		{"└─Selection_13(Probe)"},
+		{" └─TableReader_16"},
+		{" └─Selection_15"},
+		{" └─TableFullScan_14"},
+	}
+	tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'")
+	tk.MustQuery("explain "+sql).CheckAt([]int{0}, rows)
+	rows = [][]interface{}{
+		{"IndexJoin_13"},
+		{"├─TableReader_25(Build)"},
+		{"│ └─Selection_24"},
+		{"│ └─TableRangeScan_23"},
+		{"└─Selection_12(Probe)"},
+		{" └─IndexLookUp_11"},
+		{" ├─IndexRangeScan_8(Build)"},
+		{" └─Selection_10(Probe)"},
+		{" └─TableRowIDScan_9"},
+	}
+	tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'")
+	tk.MustQuery("explain "+sql).CheckAt([]int{0}, rows)
+	tk.MustQuery(sql).Check(testkit.Rows("1 2022-12-01 123 1 2022-12-01 123 1 <nil>"))
+	tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'")
+	tk.MustQuery(sql).Check(testkit.Rows("1 2022-12-01 123 1 2022-12-01 123 1 <nil>"))
+}
diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go
index fecce00ced731..f7a2dfa3aa15a 100644
--- a/planner/core/exhaust_physical_plans.go
+++ b/planner/core/exhaust_physical_plans.go
@@ -702,6 +702,32 @@ func (p *LogicalJoin) getIndexJoinByOuterIdx(prop *property.PhysicalProperty, ou
 			return nil
 		}
 	}
+	case *LogicalProjection:
+		if !p.ctx.GetSessionVars().EnableINLJoinInnerMultiPattern {
+			return nil
+		}
+		// For now, only a projection whose expressions are all simple columns may be the inner side of an index join.
+		for _, expr := range child.Exprs {
+			if _, ok := expr.(*expression.Column); !ok {
+				return nil
+			}
+		}
+		wrapper.proj = child
+		ds, isDataSource := wrapper.proj.Children()[0].(*DataSource)
+		if !isDataSource {
+			return nil
+		}
+		wrapper.ds = ds
+	case *LogicalSelection:
+		if !p.ctx.GetSessionVars().EnableINLJoinInnerMultiPattern {
+			return nil
+		}
+		wrapper.sel = child
+		ds, isDataSource := wrapper.sel.Children()[0].(*DataSource)
+		if !isDataSource {
+			return nil
+		}
+		wrapper.ds = ds
 	}
 	var avgInnerRowCnt float64
 	if outerChild.statsInfo().RowCount > 0 {
@@ -1001,6 +1027,47 @@ func (p *LogicalJoin) constructInnerTableScanTask(
 	return t
 }
 
+func (p *LogicalJoin) constructInnerByWrapper(wrapper *indexJoinInnerChildWrapper, child PhysicalPlan) PhysicalPlan {
+	if !p.ctx.GetSessionVars().EnableINLJoinInnerMultiPattern {
+		if wrapper.us != nil {
+			return p.constructInnerUnionScan(wrapper.us, child)
+		}
+		return child
+	}
+	if wrapper.us != nil {
+		return p.constructInnerUnionScan(wrapper.us, child)
+	} else if wrapper.proj != nil {
+		return p.constructInnerProj(wrapper.proj, child)
+	} else if wrapper.sel != nil {
+		return p.constructInnerSel(wrapper.sel, child)
+	}
+	return child
+}
+
+func (p *LogicalJoin) constructInnerSel(sel *LogicalSelection, child PhysicalPlan) PhysicalPlan {
+	if sel == nil {
+		return child
+	}
+	physicalSel := PhysicalSelection{
+		Conditions: sel.Conditions,
+	}.Init(sel.ctx, sel.stats, sel.blockOffset, nil)
+	physicalSel.SetChildren(child)
+	return physicalSel
+}
+
+func (p *LogicalJoin) constructInnerProj(proj *LogicalProjection, child PhysicalPlan) PhysicalPlan {
+	if proj == nil {
+		return child
+	}
+	physicalProj := PhysicalProjection{
+		Exprs:                proj.Exprs,
+		CalculateNoDelay:     proj.CalculateNoDelay,
+		AvoidColumnEvaluator: proj.AvoidColumnEvaluator,
+	}.Init(proj.ctx, proj.stats, proj.blockOffset, nil)
+	physicalProj.SetChildren(child)
+	return physicalProj
+}
+
 func (p *LogicalJoin) constructInnerUnionScan(us *LogicalUnionScan, reader PhysicalPlan) PhysicalPlan {
 	if us == nil {
 		return reader
diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go
index f480365321b94..950db5d68665e 100644
--- a/sessionctx/variable/session.go
+++ b/sessionctx/variable/session.go
@@ -1055,6 +1055,213 @@ type SessionVars struct {
 	// When it is false, ANALYZE reads the latest data.
 	// When it is true, ANALYZE reads data on the snapshot at the beginning of ANALYZE.
 	EnableAnalyzeSnapshot bool
+
+	// DefaultStrMatchSelectivity adjusts the estimation strategy for string matching expressions
+	// that can't be estimated by building them into ranges.
+	// When > 0: it is the selectivity for the expression.
+	// When = 0: try to use TopN to evaluate the like expression and estimate the selectivity.
+	DefaultStrMatchSelectivity float64
+
+	// TiFlashFastScan indicates whether to use fast scan in TiFlash.
+	TiFlashFastScan bool
+
+	// PrimaryKeyRequired indicates if the sql_require_primary_key sysvar is set.
+	PrimaryKeyRequired bool
+
+	// EnablePreparedPlanCache indicates whether to enable the prepared plan cache.
+	EnablePreparedPlanCache bool
+
+	// PreparedPlanCacheSize controls the size of the prepared plan cache.
+	PreparedPlanCacheSize uint64
+
+	// EnablePreparedPlanCacheMemoryMonitor indicates whether to enable the memory monitor for the prepared plan cache.
+	EnablePreparedPlanCacheMemoryMonitor bool
+
+	// EnablePlanCacheForParamLimit controls whether a prepared statement with a parameterized limit can be cached.
+	EnablePlanCacheForParamLimit bool
+
+	// EnableNonPreparedPlanCache indicates whether to enable the non-prepared plan cache.
+	EnableNonPreparedPlanCache bool
+
+	// NonPreparedPlanCacheSize controls the size of the non-prepared plan cache.
+	NonPreparedPlanCacheSize uint64
+
+	// ConstraintCheckInPlacePessimistic controls whether to skip locking some keys in pessimistic transactions,
+	// postponing the conflict check and constraint check to prewrite or to later pessimistic locking requests.
+	ConstraintCheckInPlacePessimistic bool
+
+	// EnableTiFlashReadForWriteStmt indicates whether to enable TiFlash reads for write statements.
+	EnableTiFlashReadForWriteStmt bool
+
+	// EnableUnsafeSubstitute indicates whether generated columns may take unsafe substitutions.
+	EnableUnsafeSubstitute bool
+
+	// ForeignKeyChecks indicates whether to enable the foreign key constraint check.
+	ForeignKeyChecks bool
+
+	// RangeMaxSize is the max memory limit for ranges. When the optimizer estimates that the memory usage of complete
+	// ranges would exceed the limit, it chooses less accurate ranges such as full range. 0 indicates that there is no
+	// memory limit for ranges.
+	RangeMaxSize int64
+
+	// LastPlanReplayerToken records the last plan replayer token.
+	LastPlanReplayerToken string
+
+	// AnalyzePartitionConcurrency is the concurrency for analyzing partitions in ANALYZE.
+	AnalyzePartitionConcurrency int
+	// AnalyzePartitionMergeConcurrency is the concurrency for merging partition stats.
+	AnalyzePartitionMergeConcurrency int
+
+	// EnableExternalTSRead indicates whether reads through the external timestamp are enabled.
+	EnableExternalTSRead bool
+
+	HookContext
+
+	// MemTracker indicates the memory tracker of the current session.
+	MemTracker *memory.Tracker
+	// MemDBFootprint tracks the memory footprint of the memdb, and is attached to `MemTracker`.
+	MemDBFootprint *memory.Tracker
+	DiskTracker *memory.Tracker
+
+	// OptPrefixIndexSingleScan indicates whether to do some optimizations to avoid a double scan for prefix indexes.
+	// When set to true, `col is (not) null` (`col` is an index prefix column) is regarded as an index filter rather than a table filter.
+	OptPrefixIndexSingleScan bool
+
+	// ChunkPool caches chunks and columns for reuse.
+	ChunkPool ReuseChunkPool
+	// EnableReuseCheck indicates whether chunk requests use the chunk allocator.
+	EnableReuseCheck bool
+
+	// preUseChunkAlloc indicates whether the previous statement used the chunk allocator,
+	// e.g. select @@last_sql_use_alloc
+	preUseChunkAlloc bool
+
+	// EnablePlanReplayerCapture indicates whether plan replayer capture is enabled.
+	EnablePlanReplayerCapture bool
+
+	// EnablePlanReplayedContinuesCapture indicates whether plan replayer continuous capture is enabled.
+	EnablePlanReplayedContinuesCapture bool
+
+	// PlanReplayerFinishedTaskKey records the finished plan replayer task keys so that duplicate tasks
+	// are not recorded by plan replayer continuous capture.
+	PlanReplayerFinishedTaskKey map[replayer.PlanReplayerTaskKey]struct{}
+
+	// StoreBatchSize indicates the batch size limit of store batch; set this field to 0 to disable store batching.
+	StoreBatchSize int
+
+	// shardRand is used by TxnCtx, for the GetCurrentShard() method.
+	shardRand *rand.Rand
+
+	// ResourceGroupName is the resource group name.
+	ResourceGroupName string
+
+	// ProtectedTSList holds a list of timestamps that should delay GC.
+	ProtectedTSList protectedTSList
+
+	// PessimisticTransactionAggressiveLocking controls whether aggressive locking for pessimistic transactions
+	// is enabled.
+	PessimisticTransactionAggressiveLocking bool
+
+	// EnableINLJoinInnerMultiPattern indicates whether to enable multiple patterns on the inner side of an index join.
+	// For now it is not public to users.
+	EnableINLJoinInnerMultiPattern bool
+}
+
+// planReplayerSessionFinishedTaskKeyLen limits the number of finished plan replayer task keys kept
+// in the session, in order to bound the memory used.
+const planReplayerSessionFinishedTaskKeyLen = 128
+
+// AddPlanReplayerFinishedTaskKey records a finished task key in the session.
+func (s *SessionVars) AddPlanReplayerFinishedTaskKey(key replayer.PlanReplayerTaskKey) {
+	if len(s.PlanReplayerFinishedTaskKey) >= planReplayerSessionFinishedTaskKeyLen {
+		s.initializePlanReplayerFinishedTaskKey()
+	}
+	s.PlanReplayerFinishedTaskKey[key] = struct{}{}
+}
+
+func (s *SessionVars) initializePlanReplayerFinishedTaskKey() {
+	s.PlanReplayerFinishedTaskKey = make(map[replayer.PlanReplayerTaskKey]struct{}, planReplayerSessionFinishedTaskKeyLen)
+}
+
+// CheckPlanReplayerFinishedTaskKey checks whether the key exists.
+func (s *SessionVars) CheckPlanReplayerFinishedTaskKey(key replayer.PlanReplayerTaskKey) bool {
+	if s.PlanReplayerFinishedTaskKey == nil {
+		s.initializePlanReplayerFinishedTaskKey()
+		return false
+	}
+	_, ok := s.PlanReplayerFinishedTaskKey[key]
+	return ok
+}
+
+// IsPlanReplayerCaptureEnabled reports whether capture or continuous capture is enabled.
+func (s *SessionVars) IsPlanReplayerCaptureEnabled() bool {
+	return s.EnablePlanReplayerCapture || s.EnablePlanReplayedContinuesCapture
+}
+
+// GetNewChunkWithCapacity attempts to allocate a chunk from the chunk pool. It is thread-safe.
+func (s *SessionVars) GetNewChunkWithCapacity(fields []*types.FieldType, capacity int, maxCachesize int, pool chunk.Allocator) *chunk.Chunk {
+	if pool == nil {
+		return chunk.New(fields, capacity, maxCachesize)
+	}
+	s.ChunkPool.mu.Lock()
+	defer s.ChunkPool.mu.Unlock()
+	if pool.CheckReuseAllocSize() && (!s.GetUseChunkAlloc()) {
+		s.StmtCtx.SetUseChunkAlloc()
+	}
+	chk := pool.Alloc(fields, capacity, maxCachesize)
+	return chk
+}
+
+// ExchangeChunkStatus saves the current chunk-alloc status into preUseChunkAlloc.
+func (s *SessionVars) ExchangeChunkStatus() {
+	s.preUseChunkAlloc = s.GetUseChunkAlloc()
+}
+
+// GetUseChunkAlloc returns the useChunkAlloc status.
+func (s *SessionVars) GetUseChunkAlloc() bool {
+	return s.StmtCtx.GetUseChunkAllocStatus()
+}
+
+// SetAlloc attempts to set the buffer pool allocator.
+func (s *SessionVars) SetAlloc(alloc chunk.Allocator) {
+	if !s.EnableReuseCheck {
+		return
+	}
+	s.ChunkPool.Alloc = alloc
+}
+
+// ClearAlloc stops chunk reuse.
+func (s *SessionVars) ClearAlloc(alloc *chunk.Allocator, b bool) {
+	if !b {
+		s.ChunkPool.Alloc = nil
+		return
+	}
+
+	// If an error is reported, re-create the allocator so that goroutines left over from the
+	// previous statement cannot affect the execution of the next SQL statement. See issue 38918.
+	s.ChunkPool.mu.Lock()
+	s.ChunkPool.Alloc = nil
+	s.ChunkPool.mu.Unlock()
+	*alloc = chunk.NewAllocator()
+}
+
+// GetPreparedStmtByName returns the prepared statement specified by stmtName.
+func (s *SessionVars) GetPreparedStmtByName(stmtName string) (interface{}, error) {
+	stmtID, ok := s.PreparedStmtNameToID[stmtName]
+	if !ok {
+		return nil, ErrStmtNotFound
+	}
+	return s.GetPreparedStmtByID(stmtID)
+}
+
+// GetPreparedStmtByID returns the prepared statement specified by stmtID.
+func (s *SessionVars) GetPreparedStmtByID(stmtID uint32) (interface{}, error) {
+	stmt, ok := s.PreparedStmts[stmtID]
+	if !ok {
+		return nil, ErrStmtNotFound
+	}
+	return stmt, nil
 }
 
 // InitStatementContext initializes a StatementContext, the object is reused to reduce allocation.
diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go
index 1a1944f7abd01..d1681cc089aad 100644
--- a/sessionctx/variable/sysvar.go
+++ b/sessionctx/variable/sysvar.go
@@ -1584,6 +1584,258 @@ var defaultSysVars = []*SysVar{
 		s.EnableAnalyzeSnapshot = TiDBOptOn(val)
 		return nil
 	}},
+	{Scope: ScopeGlobal, Name: TiDBGenerateBinaryPlan, Value: BoolToOnOff(DefTiDBGenerateBinaryPlan), Type: TypeBool, SetGlobal: func(_ context.Context, s *SessionVars, val string) error {
+		GenerateBinaryPlan.Store(TiDBOptOn(val))
+		return nil
+	}},
+	{Scope: ScopeGlobal | ScopeSession, Name: TiDBDefaultStrMatchSelectivity, Value: strconv.FormatFloat(DefTiDBDefaultStrMatchSelectivity, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: 1,
+		SetSession: func(s *SessionVars, val string) error {
+			s.DefaultStrMatchSelectivity = tidbOptFloat64(val, DefTiDBDefaultStrMatchSelectivity)
+			return nil
+		}},
+	{Scope: ScopeGlobal, Name: TiDBDDLEnableFastReorg, Value: BoolToOnOff(DefTiDBEnableFastReorg), Type: TypeBool, GetGlobal: func(_ context.Context, sv *SessionVars) (string, error) {
+		return BoolToOnOff(EnableFastReorg.Load()), nil
+	}, SetGlobal: func(_ context.Context, s *SessionVars, val string) error {
+		EnableFastReorg.Store(TiDBOptOn(val))
+		return nil
+	}},
+	// This system variable sets the disk quota for the lightning sort directory, from 100 GB to 1 PB.
+	{Scope: ScopeGlobal, Name: TiDBDDLDiskQuota, Value: strconv.Itoa(DefTiDBDDLDiskQuota), Type: TypeInt, MinValue: DefTiDBDDLDiskQuota, MaxValue: 1024 * 1024 * DefTiDBDDLDiskQuota / 100, GetGlobal: func(_ context.Context, sv *SessionVars) (string, error) {
+		return strconv.FormatUint(DDLDiskQuota.Load(), 10), nil
+	}, SetGlobal: func(_ context.Context, s *SessionVars, val string) error {
+		DDLDiskQuota.Store(TidbOptUint64(val, DefTiDBDDLDiskQuota))
+		return nil
+	}},
+	{Scope: ScopeSession, Name: TiDBConstraintCheckInPlacePessimistic, Value: BoolToOnOff(config.GetGlobalConfig().PessimisticTxn.ConstraintCheckInPlacePessimistic), Type: TypeBool,
+		SetSession: func(s *SessionVars, val string) error {
+			s.ConstraintCheckInPlacePessimistic = TiDBOptOn(val)
+			if !s.ConstraintCheckInPlacePessimistic {
+				metrics.LazyPessimisticUniqueCheckSetCount.Inc()
+			}
+			return nil
+		}},
+	{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableTiFlashReadForWriteStmt, Value: BoolToOnOff(DefTiDBEnableTiFlashReadForWriteStmt), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
+		s.EnableTiFlashReadForWriteStmt = TiDBOptOn(val)
+		return nil
+	}},
+	{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableUnsafeSubstitute, Value: BoolToOnOff(false), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
+		s.EnableUnsafeSubstitute = TiDBOptOn(val)
+		return nil
+	}},
+	{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptRangeMaxSize, Value: strconv.FormatInt(DefTiDBOptRangeMaxSize, 10), Type: TypeInt, MinValue: 0, MaxValue: math.MaxInt64, SetSession: func(s *SessionVars, val string) error {
+		s.RangeMaxSize = TidbOptInt64(val, DefTiDBOptRangeMaxSize)
+		return nil
+	}},
+	{Scope: ScopeGlobal | ScopeSession, Name: TiDBAnalyzePartitionConcurrency, Value: strconv.FormatInt(DefTiDBAnalyzePartitionConcurrency, 10),
+		MinValue: 1, MaxValue: uint64(config.GetGlobalConfig().Performance.AnalyzePartitionConcurrencyQuota), SetSession: func(s *SessionVars, val string) error {
+			s.AnalyzePartitionConcurrency = int(TidbOptInt64(val, DefTiDBAnalyzePartitionConcurrency))
+			return nil
+		}},
+	{
+		Scope: ScopeGlobal | ScopeSession, Name: TiDBMergePartitionStatsConcurrency, Value: strconv.FormatInt(DefTiDBMergePartitionStatsConcurrency, 10), Type: TypeInt, MinValue: 1, MaxValue: MaxConfigurableConcurrency,
+		SetSession: func(s *SessionVars, val string) error {
+			s.AnalyzePartitionMergeConcurrency = TidbOptInt(val, DefTiDBMergePartitionStatsConcurrency)
+			return nil
+		},
+	},
+
+	{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptPrefixIndexSingleScan, Value: BoolToOnOff(DefTiDBOptPrefixIndexSingleScan), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
+		s.OptPrefixIndexSingleScan = TiDBOptOn(val)
+		return nil
+	}},
+	{Scope: ScopeGlobal, Name: TiDBExternalTS, Value: strconv.FormatInt(DefTiDBExternalTS, 10), SetGlobal: func(ctx context.Context, s *SessionVars, val string) error {
+		ts, err := parseTSFromNumberOrTime(s, val)
+		if err != nil {
+			return err
+		}
+		return SetExternalTimestamp(ctx, ts)
+	}, GetGlobal: func(ctx context.Context, s *SessionVars) (string, error) {
+		ts, err := GetExternalTimestamp(ctx)
+		if err != nil {
+			return "", err
+		}
+		return strconv.Itoa(int(ts)), err
+	}},
+	{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableExternalTSRead, Value: BoolToOnOff(false), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
+		s.EnableExternalTSRead = TiDBOptOn(val)
+		return nil
+	}},
+	{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableReusechunk, Value: BoolToOnOff(DefTiDBEnableReusechunk), Type: TypeBool,
+		SetSession: func(s *SessionVars, val string) error {
+			s.EnableReuseCheck = TiDBOptOn(val)
+			return nil
+		}},
+	{Scope: ScopeGlobal, Name: TiDBTTLJobEnable, Value: BoolToOnOff(DefTiDBTTLJobEnable), Type: TypeBool, SetGlobal: func(ctx context.Context, vars *SessionVars, s string) error {
+		EnableTTLJob.Store(TiDBOptOn(s))
+		return nil
+	}, GetGlobal: func(ctx context.Context, vars *SessionVars) (string, error) {
+		return BoolToOnOff(EnableTTLJob.Load()), nil
+	}},
+	{Scope: ScopeGlobal, Name: TiDBTTLScanBatchSize, Value: strconv.Itoa(DefTiDBTTLScanBatchSize), Type: TypeInt, MinValue: DefTiDBTTLScanBatchMinSize, MaxValue: DefTiDBTTLScanBatchMaxSize, SetGlobal: func(ctx context.Context, vars *SessionVars, s string) error {
+		val, err := strconv.ParseInt(s, 10, 64)
+		if err != nil {
+			return err
+		}
+		TTLScanBatchSize.Store(val)
+		return nil
+	}, GetGlobal: func(ctx context.Context, vars *SessionVars) (string, error) {
+		val := TTLScanBatchSize.Load()
+		return strconv.FormatInt(val, 10), nil
+	}},
+	{Scope: ScopeGlobal, Name: TiDBTTLDeleteBatchSize, Value: strconv.Itoa(DefTiDBTTLDeleteBatchSize), Type: TypeInt, MinValue: DefTiDBTTLDeleteBatchMinSize, MaxValue: DefTiDBTTLDeleteBatchMaxSize, SetGlobal: func(ctx context.Context, vars *SessionVars, s string) error {
+		val, err := strconv.ParseInt(s, 10, 64)
+		if err != nil {
+			return err
+		}
+		TTLDeleteBatchSize.Store(val)
+		return nil
+	}, GetGlobal: func(ctx context.Context, vars *SessionVars) (string, error) {
+		val := TTLDeleteBatchSize.Load()
+		return strconv.FormatInt(val, 10), nil
+	}},
+	{Scope: ScopeGlobal, Name: TiDBTTLDeleteRateLimit, Value: strconv.Itoa(DefTiDBTTLDeleteRateLimit), Type: TypeInt, MinValue: 0, MaxValue: math.MaxInt64, SetGlobal: func(ctx context.Context, vars *SessionVars, s string) error {
+		val, err := strconv.ParseInt(s, 10, 64)
+		if err != nil {
+			return err
+		}
+		TTLDeleteRateLimit.Store(val)
+		return nil
+	}, GetGlobal: func(ctx context.Context, vars *SessionVars) (string, error) {
+		val := TTLDeleteRateLimit.Load()
+		return strconv.FormatInt(val, 10), nil
+	}},
+	{
+		Scope: ScopeGlobal | ScopeSession, Name: TiDBStoreBatchSize, Value: strconv.FormatInt(DefTiDBStoreBatchSize, 10),
+		Type: TypeInt, MinValue: 0, MaxValue: 25000, SetSession: func(s *SessionVars, val string) error {
+			s.StoreBatchSize = TidbOptInt(val, DefTiDBStoreBatchSize)
+			return nil
+		},
+	},
+	{Scope: ScopeGlobal | ScopeSession, Name: MppExchangeCompressionMode, Type: TypeStr, Value: DefaultExchangeCompressionMode.Name(),
+		Validation: func(_ *SessionVars, normalizedValue string, originalValue string, _ ScopeFlag) (string, error) {
+			_, ok := kv.ToExchangeCompressionMode(normalizedValue)
+			if !ok {
+				var msg string
+				for m := kv.ExchangeCompressionModeNONE; m <= kv.ExchangeCompressionModeUnspecified; m += 1 {
+					if m == 0 {
+						msg = m.Name()
+					} else {
+						msg = fmt.Sprintf("%s, %s", msg, m.Name())
+					}
+				}
+				err := fmt.Errorf("incorrect value: `%s`. %s options: %s",
+					originalValue,
+					MppExchangeCompressionMode, msg)
+				return normalizedValue, err
+			}
+			return normalizedValue, nil
+		},
+		SetSession: func(s *SessionVars, val string) error {
+			s.mppExchangeCompressionMode, _ = kv.ToExchangeCompressionMode(val)
+			if s.ChooseMppVersion() == kv.MppVersionV0 && s.mppExchangeCompressionMode != kv.ExchangeCompressionModeUnspecified {
+				s.StmtCtx.AppendWarning(fmt.Errorf("mpp exchange compression won't work under current mpp version %d", kv.MppVersionV0))
+			}
+
+			return nil
+		},
+	},
+	{Scope: ScopeGlobal | ScopeSession, Name: MppVersion, Type: TypeStr, Value: kv.MppVersionUnspecifiedName,
+		Validation: func(_ *SessionVars, normalizedValue string, originalValue string, _ ScopeFlag) (string, error) {
+			_, ok := kv.ToMppVersion(normalizedValue)
+			if ok {
+				return normalizedValue, nil
+			}
+			errMsg := fmt.Sprintf("incorrect value: %s. %s options: %d (unspecified)",
+				originalValue, MppVersion, kv.MppVersionUnspecified)
+			for i := kv.MppVersionV0; i <= kv.GetNewestMppVersion(); i += 1 {
+				errMsg = fmt.Sprintf("%s, %d", errMsg, i)
+			}
+
+			return normalizedValue, errors.New(errMsg)
+		},
+		SetSession: func(s *SessionVars, val string) error {
+			version, _ := kv.ToMppVersion(val)
+			s.mppVersion = version
+			return nil
+		},
+	},
+	{
+		Scope: ScopeGlobal, Name: TiDBTTLJobScheduleWindowStartTime, Value: DefTiDBTTLJobScheduleWindowStartTime, Type: TypeTime, SetGlobal: func(ctx context.Context, vars *SessionVars, s string) error {
+			startTime, err := time.ParseInLocation(FullDayTimeFormat, s, time.UTC)
+			if err != nil {
+				return err
+			}
+			TTLJobScheduleWindowStartTime.Store(startTime)
+			return nil
+		}, GetGlobal: func(ctx context.Context, vars *SessionVars) (string, error) {
+			startTime := TTLJobScheduleWindowStartTime.Load()
+			return startTime.Format(FullDayTimeFormat), nil
+		},
+	},
+	{
+		Scope: ScopeGlobal, Name: TiDBTTLJobScheduleWindowEndTime, Value: DefTiDBTTLJobScheduleWindowEndTime, Type: TypeTime, SetGlobal: func(ctx context.Context, vars *SessionVars, s string) error {
+			endTime, err := time.ParseInLocation(FullDayTimeFormat, s, time.UTC)
+			if err != nil {
+				return err
+			}
+			TTLJobScheduleWindowEndTime.Store(endTime)
+			return nil
+		}, GetGlobal: func(ctx context.Context, vars *SessionVars) (string, error) {
+			endTime := TTLJobScheduleWindowEndTime.Load()
+			return endTime.Format(FullDayTimeFormat), nil
+		},
+	},
+	{
+		Scope: ScopeGlobal, Name: TiDBTTLScanWorkerCount, Value: strconv.Itoa(DefTiDBTTLScanWorkerCount), Type: TypeUnsigned, MinValue: 1, MaxValue: 256, SetGlobal: func(ctx context.Context, vars *SessionVars, s string) error {
+			val, err := strconv.ParseInt(s, 10, 64)
+			if err != nil {
+				return err
+			}
+			TTLScanWorkerCount.Store(int32(val))
+			return nil
+		}, GetGlobal: func(ctx context.Context, vars *SessionVars) (string, error) {
+			return strconv.Itoa(int(TTLScanWorkerCount.Load())), nil
+		},
+	},
+	{
+		Scope: ScopeGlobal, Name: TiDBTTLDeleteWorkerCount, Value: strconv.Itoa(DefTiDBTTLDeleteWorkerCount), Type: TypeUnsigned, MinValue: 1, MaxValue: 256, SetGlobal: func(ctx context.Context, vars *SessionVars, s string) error {
+			val, err := strconv.ParseInt(s, 10, 64)
+			if err != nil {
+				return err
+			}
+			TTLDeleteWorkerCount.Store(int32(val))
+			return nil
+		}, GetGlobal: func(ctx context.Context, vars *SessionVars) (string, error) {
+			return strconv.Itoa(int(TTLDeleteWorkerCount.Load())), nil
+		},
+	},
+	{Scope: ScopeGlobal, Name: TiDBEnableResourceControl, Value: BoolToOnOff(DefTiDBEnableResourceControl), Type: TypeBool, SetGlobal: func(ctx context.Context, vars *SessionVars, s string) error {
+		if TiDBOptOn(s) != EnableResourceControl.Load() {
+			EnableResourceControl.Store(TiDBOptOn(s))
+			(*SetGlobalResourceControl.Load())(TiDBOptOn(s))
+			logutil.BgLogger().Info("set resource control", zap.Bool("enable", TiDBOptOn(s)))
+		}
+		return nil
+	}, GetGlobal: func(ctx context.Context, vars *SessionVars) (string, error) {
+		return BoolToOnOff(EnableResourceControl.Load()), nil
+	}},
+	{Scope: ScopeGlobal | ScopeSession, Name: TiDBPessimisticTransactionAggressiveLocking, Value: BoolToOnOff(DefTiDBPessimisticTransactionAggressiveLocking), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
+		s.PessimisticTransactionAggressiveLocking = TiDBOptOn(val)
+		return nil
+	}},
+	{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnablePlanCacheForParamLimit, Value: BoolToOnOff(DefTiDBEnablePlanCacheForParamLimit), Type: TypeBool, SetSession: func(s *SessionVars, val string) error {
+		s.EnablePlanCacheForParamLimit = TiDBOptOn(val)
+		return nil
+	}},
+	{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableINLJoinInnerMultiPattern, Value: BoolToOnOff(false), Type: TypeBool,
+		SetSession: func(s *SessionVars, val string) error {
+			s.EnableINLJoinInnerMultiPattern = TiDBOptOn(val)
+			return nil
+		},
+		GetSession: func(s *SessionVars) (string, error) {
+			return BoolToOnOff(s.EnableINLJoinInnerMultiPattern), nil
+		},
+	},
 }
 
 // FeedbackProbability points to the FeedbackProbability in statistics package.
diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go
index 647ce42218658..3b6530b566bb8 100644
--- a/sessionctx/variable/tidb_vars.go
+++ b/sessionctx/variable/tidb_vars.go
@@ -652,6 +652,76 @@ const (
 	// When set to false, ANALYZE reads the latest data.
 	// When set to true, ANALYZE reads data on the snapshot at the beginning of ANALYZE.
 	TiDBEnableAnalyzeSnapshot = "tidb_enable_analyze_snapshot"
+
+	// TiDBDefaultStrMatchSelectivity controls the special cardinality estimation strategy for string
+	// match functions (like and regexp).
+	// When set to 0, Selectivity() will try to evaluate those functions with TopN and NULL in the stats
+	// to estimate, and the default selectivity and the selectivity for the histogram part will be 0.1.
+	// When set to (0, 1], Selectivity() will use the value of this variable as the default selectivity
+	// of those functions instead of the selectionFactor (0.8).
+	TiDBDefaultStrMatchSelectivity = "tidb_default_string_match_selectivity"
+
+	// TiDBEnablePrepPlanCache indicates whether to enable the prepared plan cache.
+	TiDBEnablePrepPlanCache = "tidb_enable_prepared_plan_cache"
+	// TiDBPrepPlanCacheSize indicates the number of cached statements.
+	TiDBPrepPlanCacheSize = "tidb_prepared_plan_cache_size"
+	// TiDBEnablePrepPlanCacheMemoryMonitor indicates whether to enable the memory monitor for the prepared plan cache.
+	TiDBEnablePrepPlanCacheMemoryMonitor = "tidb_enable_prepared_plan_cache_memory_monitor"
+
+	// TiDBEnableNonPreparedPlanCache indicates whether to enable the non-prepared plan cache.
+	TiDBEnableNonPreparedPlanCache = "tidb_enable_non_prepared_plan_cache"
+	// TiDBNonPreparedPlanCacheSize controls the size of the non-prepared plan cache.
+	TiDBNonPreparedPlanCacheSize = "tidb_non_prepared_plan_cache_size"
+
+	// TiDBConstraintCheckInPlacePessimistic controls whether to skip certain kinds of pessimistic locks.
+	TiDBConstraintCheckInPlacePessimistic = "tidb_constraint_check_in_place_pessimistic"
+
+	// TiDBEnableForeignKey indicates whether to enable the foreign key feature.
+	// TODO(crazycs520): remove this after foreign key GA.
+	TiDBEnableForeignKey = "tidb_enable_foreign_key"
+
+	// TiDBOptRangeMaxSize is the max memory limit for ranges. When the optimizer estimates that the memory usage of complete
+	// ranges would exceed the limit, it chooses less accurate ranges such as full range. 0 indicates that there is no memory
+	// limit for ranges.
+	TiDBOptRangeMaxSize = "tidb_opt_range_max_size"
+
+	// TiDBAnalyzePartitionConcurrency indicates the concurrency for saving/reading partition stats in ANALYZE.
+	TiDBAnalyzePartitionConcurrency = "tidb_analyze_partition_concurrency"
+	// TiDBMergePartitionStatsConcurrency indicates the concurrency used when merging partition stats into global stats.
+	TiDBMergePartitionStatsConcurrency = "tidb_merge_partition_stats_concurrency"
+
+	// TiDBOptPrefixIndexSingleScan indicates whether to do some optimizations to avoid a double scan for prefix indexes.
+	// When set to true, `col is (not) null` (`col` is an index prefix column) is regarded as an index filter rather than a table filter.
+	TiDBOptPrefixIndexSingleScan = "tidb_opt_prefix_index_single_scan"
+
+	// TiDBEnableExternalTSRead indicates whether to enable reads through an external timestamp.
+	TiDBEnableExternalTSRead = "tidb_enable_external_ts_read"
+
+	// TiDBEnablePlanReplayerCapture indicates whether to enable plan replayer capture.
+	TiDBEnablePlanReplayerCapture = "tidb_enable_plan_replayer_capture"
+
+	// TiDBEnablePlanReplayerContinuesCapture indicates whether to enable plan replayer continuous capture.
+	TiDBEnablePlanReplayerContinuesCapture = "tidb_enable_plan_replayer_continues_capture"
+	// TiDBEnableReusechunk indicates whether to enable chunk reuse.
+	TiDBEnableReusechunk = "tidb_enable_reuse_chunk"
+
+	// TiDBStoreBatchSize indicates the batch size of coprocessor requests in the same store.
+	TiDBStoreBatchSize = "tidb_store_batch_size"
+
+	// MppExchangeCompressionMode indicates the data compression method used by the mpp exchange operator.
+	MppExchangeCompressionMode = "mpp_exchange_compression_mode"
+
+	// MppVersion indicates the mpp-version used to build the mpp plan.
+	MppVersion = "mpp_version"
+
+	// TiDBPessimisticTransactionAggressiveLocking controls whether aggressive locking for pessimistic
+	// transactions is enabled.
+	TiDBPessimisticTransactionAggressiveLocking = "tidb_pessimistic_txn_aggressive_locking"
+
+	// TiDBEnablePlanCacheForParamLimit controls whether a prepared statement with a parameterized limit can be cached.
+	TiDBEnablePlanCacheForParamLimit = "tidb_enable_plan_cache_for_param_limit"
+
+	// TiDBEnableINLJoinInnerMultiPattern indicates whether to enable multiple patterns on the inner side of an index nested-loop join.
+	TiDBEnableINLJoinInnerMultiPattern = "tidb_enable_inl_join_inner_multi_pattern"
 )
 
 // TiDB vars that have only global scope
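
Usage sketch (not applied by the patch): a minimal illustration of the new switch, using only the tables, hint, and SET statements from the TestIndexJoinProjPattern test above; the expected plan shapes are the ones asserted by that test.

    -- Default is OFF: a Projection or Selection above the inner data source blocks
    -- index join, so the hinted UPDATE below falls back to a hash join.
    SET @@session.tidb_enable_inl_join_inner_multi_pattern = 'OFF';

    -- With the switch ON, the inner side may carry those extra patterns, so the
    -- INL_JOIN hint can take effect and EXPLAIN shows an IndexJoin at the root.
    SET @@session.tidb_enable_inl_join_inner_multi_pattern = 'ON';
    EXPLAIN UPDATE /*+ INL_JOIN(a) */ t2 b,
      (SELECT t1.pnbrn_cnaps, t1.new_accno FROM t1 WHERE t1.pnbrn_cnaps = '40001') a
    SET b.yn_frz = '1'
    WHERE b.txn_dt = STR_TO_DATE('20221201', '%Y%m%d')
      AND b.pnbrn_cnaps = a.pnbrn_cnaps
      AND b.txn_accno = a.new_accno;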