diff --git a/distsql/distsql_test.go b/distsql/distsql_test.go index 47ce2cc851d22..bb3324a291ba2 100644 --- a/distsql/distsql_test.go +++ b/distsql/distsql_test.go @@ -111,8 +111,13 @@ func TestSelectWithRuntimeStats(t *testing.T) { } func TestSelectResultRuntimeStats(t *testing.T) { +<<<<<<< HEAD t.Parallel() basic := &execdetails.BasicRuntimeStats{} +======= + stmtStats := execdetails.NewRuntimeStatsColl(nil) + basic := stmtStats.GetBasicRuntimeStats(1) +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) basic.Record(time.Second, 20) s1 := &selectResultRuntimeStats{ copRespTime: []time.Duration{time.Second, time.Millisecond}, @@ -124,8 +129,6 @@ func TestSelectResultRuntimeStats(t *testing.T) { } s2 := *s1 - stmtStats := execdetails.NewRuntimeStatsColl(nil) - stmtStats.RegisterStats(1, basic) stmtStats.RegisterStats(1, s1) stmtStats.RegisterStats(1, &s2) stats := stmtStats.GetRootStats(1) @@ -140,7 +143,11 @@ func TestSelectResultRuntimeStats(t *testing.T) { } stmtStats.RegisterStats(2, s1) stats = stmtStats.GetRootStats(2) +<<<<<<< HEAD expect = "cop_task: {num: 2, max: 1s, min: 1ms, avg: 500.5ms, p95: 1s, max_proc_keys: 200, p95_proc_keys: 200, tot_proc: 1s, tot_wait: 1s, rpc_num: 1, rpc_time: 1s, copr_cache_hit_ratio: 0.00}, backoff{RegionMiss: 1ms}" +======= + expect = "time:0s, loops:0, cop_task: {num: 2, max: 1s, min: 1ms, avg: 500.5ms, p95: 1s, max_proc_keys: 200, p95_proc_keys: 200, tot_proc: 1s, tot_wait: 1s, rpc_num: 1, rpc_time: 1s, copr_cache_hit_ratio: 0.00, distsql_concurrency: 15}, backoff{RegionMiss: 1ms}" +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) require.Equal(t, expect, stats.String()) // Test for idempotence. require.Equal(t, expect, stats.String()) diff --git a/distsql/select_result.go b/distsql/select_result.go index 3ac7f1db94a97..8a0eacf65b885 100644 --- a/distsql/select_result.go +++ b/distsql/select_result.go @@ -359,12 +359,10 @@ func (r *selectResult) updateCopRuntimeStats(ctx context.Context, copStats *copr } if r.stats == nil { - id := r.rootPlanID r.stats = &selectResultRuntimeStats{ backoffSleep: make(map[string]time.Duration), rpcStat: tikv.NewRegionRequestRuntimeStats(), } - r.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(id, r.stats) } r.stats.mergeCopRuntimeStats(copStats, respTime) @@ -455,6 +453,9 @@ func (r *selectResult) Close() error { if respSize > 0 { r.memConsume(-respSize) } + if r.stats != nil { + defer r.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(r.rootPlanID, r.stats) + } return r.resp.Close() } diff --git a/distsql/select_result_test.go b/distsql/select_result_test.go index 178d29bebeb22..a2eeebbd8240c 100644 --- a/distsql/select_result_test.go +++ b/distsql/select_result_test.go @@ -37,7 +37,7 @@ func TestUpdateCopRuntimeStats(t *testing.T) { require.Nil(t, ctx.GetSessionVars().StmtCtx.RuntimeStatsColl) sr.rootPlanID = 1234 - sr.updateCopRuntimeStats(context.Background(), &copr.CopRuntimeStats{ExecDetails: execdetails.ExecDetails{CalleeAddress: "a"}}, 0) + sr.updateCopRuntimeStats(context.Background(), &copr.CopRuntimeStats{ExecDetails: execdetails.ExecDetails{DetailsNeedP90: execdetails.DetailsNeedP90{CalleeAddress: "a"}}}, 0) ctx.GetSessionVars().StmtCtx.RuntimeStatsColl = execdetails.NewRuntimeStatsColl(nil) i := uint64(1) @@ -49,13 +49,13 @@ func TestUpdateCopRuntimeStats(t *testing.T) { require.NotEqual(t, len(sr.copPlanIDs), len(sr.selectResp.GetExecutionSummaries())) - 
sr.updateCopRuntimeStats(context.Background(), &copr.CopRuntimeStats{ExecDetails: execdetails.ExecDetails{CalleeAddress: "callee"}}, 0) + sr.updateCopRuntimeStats(context.Background(), &copr.CopRuntimeStats{ExecDetails: execdetails.ExecDetails{DetailsNeedP90: execdetails.DetailsNeedP90{CalleeAddress: "callee"}}}, 0) require.False(t, ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.ExistsCopStats(1234)) sr.copPlanIDs = []int{sr.rootPlanID} require.NotNil(t, ctx.GetSessionVars().StmtCtx.RuntimeStatsColl) require.Equal(t, len(sr.copPlanIDs), len(sr.selectResp.GetExecutionSummaries())) - sr.updateCopRuntimeStats(context.Background(), &copr.CopRuntimeStats{ExecDetails: execdetails.ExecDetails{CalleeAddress: "callee"}}, 0) + sr.updateCopRuntimeStats(context.Background(), &copr.CopRuntimeStats{ExecDetails: execdetails.ExecDetails{DetailsNeedP90: execdetails.DetailsNeedP90{CalleeAddress: "callee"}}}, 0) require.Equal(t, "tikv_task:{time:1ns, loops:1}", ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.GetOrCreateCopStats(1234, "tikv").String()) } diff --git a/executor/aggregate.go b/executor/aggregate.go index 8a6b83d089e58..3b9b0b76fc25c 100644 --- a/executor/aggregate.go +++ b/executor/aggregate.go @@ -250,6 +250,9 @@ func (d *HashAggIntermData) getPartialResultBatch(sc *stmtctx.StatementContext, // Close implements the Executor Close interface. func (e *HashAggExec) Close() error { + if e.stats != nil { + defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + } if e.isUnparallelExec { var firstErr error e.childResult = nil @@ -1102,7 +1105,6 @@ func (e *HashAggExec) initRuntimeStats() { stats.PartialStats = make([]*AggWorkerStat, 0, stats.PartialConcurrency) stats.FinalStats = make([]*AggWorkerStat, 0, stats.FinalConcurrency) e.stats = stats - e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) } } diff --git a/executor/batch_point_get.go b/executor/batch_point_get.go index 01efdf1097349..5040ee6be8c79 100644 --- a/executor/batch_point_get.go +++ b/executor/batch_point_get.go @@ -164,6 +164,9 @@ func (e *BatchPointGetExec) Open(context.Context) error { // Close implements the Executor interface. 
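The select_result.go and aggregate.go hunks above share one pattern: the executor accumulates its own runtime-stats object while it runs and hands it to the collector exactly once, from a deferred call in Close(), instead of registering it up front and mutating a shared entry. The standalone sketch below models that lifecycle; statsColl, copTaskStats, and selectResult are simplified stand-ins, not TiDB's actual RuntimeStatsColl or selectResultRuntimeStats API.

```go
package main

import (
	"fmt"
	"time"
)

// copTaskStats stands in for selectResultRuntimeStats: it accumulates
// per-coprocessor-response data entirely in the executor's own memory.
type copTaskStats struct {
	respTimes []time.Duration
}

func (s *copTaskStats) mergeCopResponse(respTime time.Duration) {
	s.respTimes = append(s.respTimes, respTime)
}

// statsColl stands in for a runtime-stats collector keyed by plan ID.
type statsColl struct {
	byPlan map[int]*copTaskStats
}

func (c *statsColl) RegisterStats(planID int, s *copTaskStats) {
	c.byPlan[planID] = s
}

// selectResult mirrors the lifecycle in the diff: stats are created lazily
// on the first response and registered exactly once, from Close.
type selectResult struct {
	rootPlanID int
	coll       *statsColl
	stats      *copTaskStats
}

func (r *selectResult) onCopResponse(respTime time.Duration) {
	if r.stats == nil {
		r.stats = &copTaskStats{} // lazy allocation, as in updateCopRuntimeStats
	}
	r.stats.mergeCopResponse(respTime)
}

func (r *selectResult) Close() error {
	if r.stats != nil {
		// Deferred so registration runs after the remaining cleanup,
		// mirroring the diff's `defer ... RegisterStats(...)` in Close.
		defer r.coll.RegisterStats(r.rootPlanID, r.stats)
	}
	return nil // the real code closes r.resp here
}

func main() {
	coll := &statsColl{byPlan: map[int]*copTaskStats{}}
	r := &selectResult{rootPlanID: 1234, coll: coll}
	r.onCopResponse(5 * time.Millisecond)
	r.onCopResponse(9 * time.Millisecond)
	_ = r.Close()
	fmt.Println(len(coll.byPlan[1234].respTimes)) // 2
}
```

Deferring the registration keeps it on every return path of Close while still letting the cleanup that follows run first, which is presumably why the diff uses `defer` rather than a plain call.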
func (e *BatchPointGetExec) Close() error { + if e.runtimeStats != nil { + defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + } if e.runtimeStats != nil && e.snapshot != nil { e.snapshot.SetOption(kv.CollectRuntimeStats, nil) } diff --git a/executor/builder.go b/executor/builder.go index be183f8ba99f7..6729da62f81ba 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -4390,6 +4390,19 @@ func (b *executorBuilder) buildBatchPointGet(plan *plannercore.BatchPointGetPlan b.err = err return nil } +<<<<<<< HEAD +======= + if e.ctx.GetSessionVars().IsReplicaReadClosestAdaptive() { + e.snapshot.SetOption(kv.ReplicaReadAdjuster, newReplicaReadAdjuster(e.ctx, plan.GetAvgRowSize())) + } + if e.runtimeStats != nil { + snapshotStats := &txnsnapshot.SnapshotRuntimeStats{} + e.stats = &runtimeStatsWithSnapshot{ + SnapshotRuntimeStats: snapshotStats, + } + e.snapshot.SetOption(kv.CollectRuntimeStats, snapshotStats) + } +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) startTS, err := b.getSnapshotTS() if err != nil { diff --git a/executor/distsql.go b/executor/distsql.go index bdcc7bd17c33b..259a8a31f02ef 100644 --- a/executor/distsql.go +++ b/executor/distsql.go @@ -669,7 +669,15 @@ func (e *IndexLookUpExecutor) buildTableReader(ctx context.Context, task *lookup // Close implements Exec Close interface. func (e *IndexLookUpExecutor) Close() error { +<<<<<<< HEAD if e.table.Meta().TempTableType != model.TempTableNone { +======= + if e.stats != nil { + defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + } + e.kvRanges = e.kvRanges[:0] + if e.dummy { +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) return nil } @@ -820,7 +828,7 @@ func (w *indexWorker) fetchHandles(ctx context.Context, result distsql.SelectRes idxID := w.idxLookup.getIndexPlanRootID() if w.idxLookup.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl != nil { if idxID != w.idxLookup.id && w.idxLookup.stats != nil { - w.idxLookup.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(idxID, w.idxLookup.stats.indexScanBasicStats) + w.idxLookup.stats.indexScanBasicStats = w.idxLookup.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.GetBasicRuntimeStats(idxID) } } for { diff --git a/executor/executor.go b/executor/executor.go index 84419ee68c915..18da6811cf4af 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -232,8 +232,7 @@ func newBaseExecutor(ctx sessionctx.Context, schema *expression.Schema, id int, } if ctx.GetSessionVars().StmtCtx.RuntimeStatsColl != nil { if e.id > 0 { - e.runtimeStats = &execdetails.BasicRuntimeStats{} - e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(id, e.runtimeStats) + e.runtimeStats = e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.GetBasicRuntimeStats(id) } } if schema != nil { diff --git a/executor/index_lookup_hash_join.go b/executor/index_lookup_hash_join.go index 9c16d1958493f..da01574428ea2 100644 --- a/executor/index_lookup_hash_join.go +++ b/executor/index_lookup_hash_join.go @@ -151,7 +151,6 @@ func (e *IndexNestedLoopHashJoin) Open(ctx context.Context) error { e.innerPtrBytes = make([][]byte, 0, 8) if e.runtimeStats != nil { e.stats = &indexLookUpJoinRuntimeStats{} - e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) } e.finished.Store(false) e.startWorkers(ctx) @@ -304,6 +303,9 @@ func (e *IndexNestedLoopHashJoin) isDryUpTasks(ctx context.Context) bool { // Close implements the 
IndexNestedLoopHashJoin Executor interface. func (e *IndexNestedLoopHashJoin) Close() error { + if e.stats != nil { + defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + } if e.cancelFunc != nil { e.cancelFunc() e.cancelFunc = nil diff --git a/executor/index_lookup_join.go b/executor/index_lookup_join.go index b06de10a06ce5..432f26be89788 100644 --- a/executor/index_lookup_join.go +++ b/executor/index_lookup_join.go @@ -190,7 +190,6 @@ func (e *IndexLookUpJoin) Open(ctx context.Context) error { e.finished.Store(false) if e.runtimeStats != nil { e.stats = &indexLookUpJoinRuntimeStats{} - e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) } e.startWorkers(ctx) return nil @@ -779,6 +778,9 @@ func (iw *innerWorker) hasNullInJoinKey(row chunk.Row) bool { // Close implements the Executor interface. func (e *IndexLookUpJoin) Close() error { + if e.stats != nil { + defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + } if e.cancelFunc != nil { e.cancelFunc() } diff --git a/executor/index_lookup_merge_join.go b/executor/index_lookup_merge_join.go index 746fc6a5733fc..a93fdb1e85861 100644 --- a/executor/index_lookup_merge_join.go +++ b/executor/index_lookup_merge_join.go @@ -736,6 +736,9 @@ func (imw *innerMergeWorker) fetchNextInnerResult(ctx context.Context, task *loo // Close implements the Executor interface. func (e *IndexLookUpMergeJoin) Close() error { + if e.runtimeStats != nil { + defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.runtimeStats) + } if e.cancelFunc != nil { e.cancelFunc() e.cancelFunc = nil diff --git a/executor/index_merge_reader.go b/executor/index_merge_reader.go index 60828bd514ac4..534ae18d226d6 100644 --- a/executor/index_merge_reader.go +++ b/executor/index_merge_reader.go @@ -398,7 +398,6 @@ func (e *IndexMergeReaderExecutor) initRuntimeStats() { e.stats = &IndexMergeRuntimeStat{ Concurrency: e.ctx.GetSessionVars().IndexLookupConcurrency(), } - e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) } } @@ -624,6 +623,9 @@ func (e *IndexMergeReaderExecutor) handleHandlesFetcherPanic(ctx context.Context // Close implements Exec Close interface. func (e *IndexMergeReaderExecutor) Close() error { + if e.stats != nil { + defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + } if e.finished == nil { return nil } @@ -741,8 +743,7 @@ func (w *partialIndexWorker) fetchHandles( var basicStats *execdetails.BasicRuntimeStats if w.stats != nil { if w.idxID != 0 { - basicStats = &execdetails.BasicRuntimeStats{} - w.sc.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(w.idxID, basicStats) + basicStats = w.sc.GetSessionVars().StmtCtx.RuntimeStatsColl.GetBasicRuntimeStats(w.idxID) } } for { diff --git a/executor/insert.go b/executor/insert.go index 8e689a40625b6..32b6f126e6381 100644 --- a/executor/insert.go +++ b/executor/insert.go @@ -315,6 +315,13 @@ func (e *InsertExec) Next(ctx context.Context, req *chunk.Chunk) error { // Close implements the Executor Close interface. 
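The executor.go and index-worker hunks above switch from "allocate a BasicRuntimeStats and register it" to asking the collector for the per-plan object via GetBasicRuntimeStats, so every caller records into one shared instance instead of creating copies that have to be merged later. A minimal sketch of that ownership model follows; basicStats and coll are simplified stand-ins for the TiDB types.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// basicStats stands in for execdetails.BasicRuntimeStats.
type basicStats struct {
	loops   int32
	consume time.Duration
	rows    int64
}

func (b *basicStats) Record(d time.Duration, rows int) {
	b.loops++
	b.consume += d
	b.rows += int64(rows)
}

// coll stands in for RuntimeStatsColl: it owns exactly one basicStats per
// plan ID and hands the same pointer to every caller.
type coll struct {
	mu   sync.Mutex
	byID map[int]*basicStats
}

// GetBasicRuntimeStats mirrors the method the diff adds: callers no longer
// allocate and register their own BasicRuntimeStats.
func (c *coll) GetBasicRuntimeStats(planID int) *basicStats {
	c.mu.Lock()
	defer c.mu.Unlock()
	s, ok := c.byID[planID]
	if !ok {
		s = &basicStats{}
		c.byID[planID] = s
	}
	return s
}

func main() {
	c := &coll{byID: map[int]*basicStats{}}

	// Two "executors" (say, a base executor and an index worker) fetch the
	// stats object for the same plan ID; both record into one place.
	a := c.GetBasicRuntimeStats(1)
	b := c.GetBasicRuntimeStats(1)
	a.Record(time.Second, 20)
	b.Record(2*time.Second, 30)

	s := c.GetBasicRuntimeStats(1)
	fmt.Println(s.loops, s.consume, s.rows) // 2 3s 50
}
```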
func (e *InsertExec) Close() error { +<<<<<<< HEAD +======= + if e.runtimeStats != nil && e.stats != nil { + defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + } + defer e.memTracker.ReplaceBytesUsed(0) +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) e.ctx.GetSessionVars().CurrInsertValues = chunk.Row{} e.ctx.GetSessionVars().CurrInsertBatchExtraCols = e.ctx.GetSessionVars().CurrInsertBatchExtraCols[0:0:0] e.setMessage() diff --git a/executor/insert_common.go b/executor/insert_common.go index 9c7adbb3c4d1a..86fad208201dc 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -1031,7 +1031,6 @@ func (e *InsertValues) collectRuntimeStatsEnabled() bool { SnapshotRuntimeStats: snapshotStats, AllocatorRuntimeStats: autoid.NewAllocatorRuntimeStats(), } - e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) } return true } diff --git a/executor/join.go b/executor/join.go index 26a7e5f706fc2..c753fcc091a5f 100644 --- a/executor/join.go +++ b/executor/join.go @@ -152,6 +152,9 @@ func (e *HashJoinExec) Close() error { if e.stats != nil && e.rowContainer != nil { e.stats.hashStat = *e.rowContainer.stat } + if e.stats != nil { + defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + } err := e.baseExecutor.Close() return err } @@ -183,7 +186,6 @@ func (e *HashJoinExec) Open(ctx context.Context) error { e.stats = &hashJoinRuntimeStats{ concurrent: cap(e.joiners), } - e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) } return nil } @@ -841,7 +843,6 @@ func (e *NestedLoopApplyExec) Close() error { e.memTracker = nil if e.runtimeStats != nil { runtimeStats := newJoinRuntimeStats() - e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, runtimeStats) if e.canUseCache { var hitRatio float64 if e.cacheAccessCounter > 0 { @@ -851,6 +852,11 @@ func (e *NestedLoopApplyExec) Close() error { } else { runtimeStats.setCacheInfo(false, 0) } +<<<<<<< HEAD +======= + runtimeStats.SetConcurrencyInfo(execdetails.NewConcurrencyInfo("Concurrency", 0)) + defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, runtimeStats) +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) } return e.outerExec.Close() } @@ -1108,6 +1114,17 @@ func (e *joinRuntimeStats) Tp() int { return execdetails.TpJoinRuntimeStats } +func (e *joinRuntimeStats) Clone() execdetails.RuntimeStats { + newJRS := &joinRuntimeStats{ + RuntimeStatsWithConcurrencyInfo: e.RuntimeStatsWithConcurrencyInfo, + applyCache: e.applyCache, + cache: e.cache, + hasHashStat: e.hasHashStat, + hashStat: e.hashStat, + } + return newJRS +} + type hashJoinRuntimeStats struct { fetchAndBuildHashTable time.Duration hashStat hashStatistic diff --git a/executor/load_data.go b/executor/load_data.go index 7d124c0cacf3d..b91c376e74cb0 100644 --- a/executor/load_data.go +++ b/executor/load_data.go @@ -83,6 +83,9 @@ func (e *LoadDataExec) Next(ctx context.Context, req *chunk.Chunk) error { // Close implements the Executor Close interface. 
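The new joinRuntimeStats.Clone method above exists because the collector now keeps its own copy of whatever is registered (see the RegisterStats rewrite later in this diff) and merges subsequent registrations into that copy. The sketch below illustrates why a stats type wants a Clone that duplicates reference-typed fields; cacheStats is an invented stand-in, not the real joinRuntimeStats.

```go
package main

import "fmt"

// RuntimeStats is a stand-in for the execdetails.RuntimeStats interface,
// reduced to the two methods relevant here.
type RuntimeStats interface {
	Clone() RuntimeStats
	String() string
}

// cacheStats stands in for joinRuntimeStats: a flag plus a slice that would
// be shared if Clone only did a shallow copy.
type cacheStats struct {
	useCache  bool
	hitRatios []float64
}

// Clone returns a copy the collector can keep; the slice is duplicated so
// the executor can keep mutating its own instance afterwards.
func (s *cacheStats) Clone() RuntimeStats {
	c := &cacheStats{useCache: s.useCache}
	c.hitRatios = append(c.hitRatios, s.hitRatios...)
	return c
}

func (s *cacheStats) String() string {
	return fmt.Sprintf("cache:%v, ratios:%v", s.useCache, s.hitRatios)
}

func main() {
	live := &cacheStats{useCache: true, hitRatios: []float64{0.5}}
	stored := live.Clone() // what a merging collector would retain

	live.hitRatios = append(live.hitRatios, 0.9) // later mutation by the executor
	fmt.Println(stored.String())                 // cache:true, ratios:[0.5]
	fmt.Println(live.String())                   // cache:true, ratios:[0.5 0.9]
}
```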
func (e *LoadDataExec) Close() error { + if e.runtimeStats != nil && e.loadDataInfo != nil && e.loadDataInfo.stats != nil { + defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.loadDataInfo.stats) + } return nil } diff --git a/executor/parallel_apply.go b/executor/parallel_apply.go index d0aa68af87d2b..0a01b9b4af3a8 100644 --- a/executor/parallel_apply.go +++ b/executor/parallel_apply.go @@ -176,7 +176,6 @@ func (e *ParallelNestedLoopApplyExec) Close() error { if e.runtimeStats != nil { runtimeStats := newJoinRuntimeStats() - e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, runtimeStats) if e.useCache { var hitRatio float64 if e.cacheAccessCounter > 0 { @@ -187,6 +186,7 @@ func (e *ParallelNestedLoopApplyExec) Close() error { runtimeStats.setCacheInfo(false, 0) } runtimeStats.SetConcurrencyInfo(execdetails.NewConcurrencyInfo("Concurrency", e.concurrency)) + defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, runtimeStats) } return err } diff --git a/executor/point_get.go b/executor/point_get.go index 0d1de79d0d1cd..97d084322f879 100644 --- a/executor/point_get.go +++ b/executor/point_get.go @@ -60,7 +60,51 @@ func (b *executorBuilder) buildPointGet(p *plannercore.PointGetPlan) Executor { } e.base().initCap = 1 e.base().maxChunkSize = 1 +<<<<<<< HEAD e.Init(p, startTS) +======= + e.Init(p) + + e.snapshot, err = b.getSnapshot() + if err != nil { + b.err = err + return nil + } + if b.ctx.GetSessionVars().IsReplicaReadClosestAdaptive() { + e.snapshot.SetOption(kv.ReplicaReadAdjuster, newReplicaReadAdjuster(e.ctx, p.GetAvgRowSize())) + } + if e.runtimeStats != nil { + snapshotStats := &txnsnapshot.SnapshotRuntimeStats{} + e.stats = &runtimeStatsWithSnapshot{ + SnapshotRuntimeStats: snapshotStats, + } + e.snapshot.SetOption(kv.CollectRuntimeStats, snapshotStats) + } + + if p.IndexInfo != nil { + sctx := b.ctx.GetSessionVars().StmtCtx + sctx.IndexNames = append(sctx.IndexNames, p.TblInfo.Name.O+":"+p.IndexInfo.Name.O) + } + + failpoint.Inject("assertPointReplicaOption", func(val failpoint.Value) { + assertScope := val.(string) + if e.ctx.GetSessionVars().GetReplicaRead().IsClosestRead() && assertScope != e.readReplicaScope { + panic("point get replica option fail") + } + }) + + snapshotTS, err := b.getSnapshotTS() + if err != nil { + b.err = err + return nil + } + if p.TblInfo.TableCacheStatusType == model.TableCacheStatusEnable { + if cacheTable := b.getCacheTable(p.TblInfo, snapshotTS); cacheTable != nil { + e.snapshot = cacheTableSnapshot{e.snapshot, cacheTable} + } + } + +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) if e.lock { b.hasLock = true } @@ -188,6 +232,9 @@ func (e *PointGetExecutor) Open(context.Context) error { // Close implements the Executor interface. func (e *PointGetExecutor) Close() error { + if e.stats != nil { + defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + } if e.runtimeStats != nil && e.snapshot != nil { e.snapshot.SetOption(kv.CollectRuntimeStats, nil) } diff --git a/executor/replace.go b/executor/replace.go index cd7a3d44a7bb4..3ccc7bd82f079 100644 --- a/executor/replace.go +++ b/executor/replace.go @@ -43,6 +43,9 @@ type ReplaceExec struct { // Close implements the Executor Close interface. 
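The point_get.go and batch_point_get.go changes attach a SnapshotRuntimeStats to the KV snapshot when runtime stats are enabled and, in Close, detach it and register the accumulated object under the plan ID. The sketch below models that attach/record/detach-and-register flow with invented stand-ins (snapshot, snapshotStats, pointGet); it is not the kv.Snapshot API.

```go
package main

import "fmt"

// snapshotStats stands in for txnsnapshot.SnapshotRuntimeStats.
type snapshotStats struct{ rpcCount int }

// snapshot stands in for the KV snapshot: it reports into whatever stats
// sink is currently attached, or drops the data when none is.
type snapshot struct{ sink *snapshotStats }

func (s *snapshot) attachStats(st *snapshotStats) { s.sink = st }
func (s *snapshot) detachStats()                  { s.sink = nil }
func (s *snapshot) get(key string) string {
	if s.sink != nil {
		s.sink.rpcCount++
	}
	return "value-of-" + key
}

// pointGet models the builder/Close split in the diff: stats are attached
// when the executor is built (only if collection is enabled) and the
// accumulated object is handed to the collector when the executor closes.
type pointGet struct {
	snap     *snapshot
	stats    *snapshotStats
	register func(*snapshotStats) // stand-in for RuntimeStatsColl.RegisterStats
}

func buildPointGet(collect bool, register func(*snapshotStats)) *pointGet {
	e := &pointGet{snap: &snapshot{}, register: register}
	if collect {
		e.stats = &snapshotStats{}
		e.snap.attachStats(e.stats)
	}
	return e
}

func (e *pointGet) Close() error {
	if e.stats != nil {
		defer e.register(e.stats)
	}
	e.snap.detachStats() // mirrors SetOption(kv.CollectRuntimeStats, nil)
	return nil
}

func main() {
	var registered *snapshotStats
	e := buildPointGet(true, func(s *snapshotStats) { registered = s })
	_ = e.snap.get("k1")
	_ = e.snap.get("k2")
	_ = e.Close()
	fmt.Println(registered.rpcCount) // 2
}
```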
func (e *ReplaceExec) Close() error { e.setMessage() + if e.runtimeStats != nil && e.stats != nil { + defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + } if e.SelectExec != nil { return e.SelectExec.Close() } diff --git a/executor/update.go b/executor/update.go index e51d11be62dcb..fc5b464e8dead 100644 --- a/executor/update.go +++ b/executor/update.go @@ -423,6 +423,7 @@ func (e *UpdateExec) Close() error { if err == nil && txn.Valid() && txn.GetSnapshot() != nil { txn.GetSnapshot().SetOption(kv.CollectRuntimeStats, nil) } + defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) } return e.children[0].Close() } @@ -452,7 +453,6 @@ func (e *UpdateExec) collectRuntimeStatsEnabled() bool { SnapshotRuntimeStats: &txnsnapshot.SnapshotRuntimeStats{}, AllocatorRuntimeStats: autoid.NewAllocatorRuntimeStats(), } - e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) } return true } diff --git a/sessionctx/stmtctx/stmtctx.go b/sessionctx/stmtctx/stmtctx.go index 85ea1bfa6b373..dac64603cb8c7 100644 --- a/sessionctx/stmtctx/stmtctx.go +++ b/sessionctx/stmtctx/stmtctx.go @@ -118,7 +118,7 @@ type StatementContext struct { warnings []SQLWarn errorCount uint16 execDetails execdetails.ExecDetails - allExecDetails []*execdetails.ExecDetails + allExecDetails []*execdetails.DetailsNeedP90 } // PrevAffectedRows is the affected-rows value(DDL is 0, DML is the number of affected rows). PrevAffectedRows int64 @@ -567,7 +567,7 @@ func (sc *StatementContext) resetMuForRetry() { sc.mu.errorCount = 0 sc.mu.warnings = nil sc.mu.execDetails = execdetails.ExecDetails{} - sc.mu.allExecDetails = make([]*execdetails.ExecDetails, 0, 4) + sc.mu.allExecDetails = make([]*execdetails.DetailsNeedP90, 0, 4) } // ResetForRetry resets the changed states during execution. 
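The StatementContext changes here and in the next hunk retain only a DetailsNeedP90 per coprocessor request (callee address, backoff info, time detail) instead of the whole ExecDetails, and the P90/max figures are read off after sorting that slice. The newer side of the diff sorts with slices.SortFunc; the sketch below uses sort.Slice so it does not depend on a particular Go version, and its types and field names are simplified stand-ins.

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// detailsNeedP90 stands in for execdetails.DetailsNeedP90: only the fields
// needed to compute per-statement percentiles are kept per request.
type detailsNeedP90 struct {
	calleeAddress string
	processTime   time.Duration
	waitTime      time.Duration
}

type stmtDetails struct {
	all []detailsNeedP90
}

// merge keeps only the slim per-request record, as MergeExecDetails now does.
func (s *stmtDetails) merge(addr string, process, wait time.Duration) {
	s.all = append(s.all, detailsNeedP90{calleeAddress: addr, processTime: process, waitTime: wait})
}

// copTaskSummary mirrors the CopTasksDetails computation: sort by process
// time, then read off P90 and the max entry (which also carries its address).
func (s *stmtDetails) copTaskSummary() (p90, max time.Duration, maxAddr string) {
	n := len(s.all)
	if n == 0 {
		return 0, 0, ""
	}
	sort.Slice(s.all, func(i, j int) bool {
		return s.all[i].processTime < s.all[j].processTime
	})
	return s.all[n*9/10].processTime, s.all[n-1].processTime, s.all[n-1].calleeAddress
}

func main() {
	var s stmtDetails
	for i := 1; i <= 10; i++ {
		s.merge(fmt.Sprintf("store-%d", i), time.Duration(i)*time.Second, time.Millisecond)
	}
	p90, max, addr := s.copTaskSummary()
	fmt.Println(p90, max, addr) // 10s 10s store-10
}
```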
@@ -591,7 +591,13 @@ func (sc *StatementContext) MergeExecDetails(details *execdetails.ExecDetails, c sc.mu.execDetails.RequestCount++ sc.MergeScanDetail(details.ScanDetail) sc.MergeTimeDetail(details.TimeDetail) - sc.mu.allExecDetails = append(sc.mu.allExecDetails, details) + sc.mu.allExecDetails = append(sc.mu.allExecDetails, + &execdetails.DetailsNeedP90{ + BackoffSleep: details.BackoffSleep, + BackoffTimes: details.BackoffTimes, + CalleeAddress: details.CalleeAddress, + TimeDetail: details.TimeDetail, + }) } if commitDetails != nil { if sc.mu.execDetails.CommitDetail == nil { @@ -707,15 +713,25 @@ func (sc *StatementContext) CopTasksDetails() *CopTasksDetails { d.AvgProcessTime = sc.mu.execDetails.TimeDetail.ProcessTime / time.Duration(n) d.AvgWaitTime = sc.mu.execDetails.TimeDetail.WaitTime / time.Duration(n) +<<<<<<< HEAD sort.Slice(sc.mu.allExecDetails, func(i, j int) bool { return sc.mu.allExecDetails[i].TimeDetail.ProcessTime < sc.mu.allExecDetails[j].TimeDetail.ProcessTime +======= + slices.SortFunc(sc.mu.allExecDetails, func(i, j *execdetails.DetailsNeedP90) bool { + return i.TimeDetail.ProcessTime < j.TimeDetail.ProcessTime +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) }) d.P90ProcessTime = sc.mu.allExecDetails[n*9/10].TimeDetail.ProcessTime d.MaxProcessTime = sc.mu.allExecDetails[n-1].TimeDetail.ProcessTime d.MaxProcessAddress = sc.mu.allExecDetails[n-1].CalleeAddress +<<<<<<< HEAD sort.Slice(sc.mu.allExecDetails, func(i, j int) bool { return sc.mu.allExecDetails[i].TimeDetail.WaitTime < sc.mu.allExecDetails[j].TimeDetail.WaitTime +======= + slices.SortFunc(sc.mu.allExecDetails, func(i, j *execdetails.DetailsNeedP90) bool { + return i.TimeDetail.WaitTime < j.TimeDetail.WaitTime +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) }) d.P90WaitTime = sc.mu.allExecDetails[n*9/10].TimeDetail.WaitTime d.MaxWaitTime = sc.mu.allExecDetails[n-1].TimeDetail.WaitTime diff --git a/sessionctx/stmtctx/stmtctx_test.go b/sessionctx/stmtctx/stmtctx_test.go index f5bf2cca866be..f3e4b8af3ebcb 100644 --- a/sessionctx/stmtctx/stmtctx_test.go +++ b/sessionctx/stmtctx/stmtctx_test.go @@ -30,12 +30,14 @@ func TestCopTasksDetails(t *testing.T) { backoffs := []string{"tikvRPC", "pdRPC", "regionMiss"} for i := 0; i < 100; i++ { d := &execdetails.ExecDetails{ - CalleeAddress: fmt.Sprintf("%v", i+1), - BackoffSleep: make(map[string]time.Duration), - BackoffTimes: make(map[string]int), - TimeDetail: util.TimeDetail{ - ProcessTime: time.Second * time.Duration(i+1), - WaitTime: time.Millisecond * time.Duration(i+1), + DetailsNeedP90: execdetails.DetailsNeedP90{ + CalleeAddress: fmt.Sprintf("%v", i+1), + BackoffSleep: make(map[string]time.Duration), + BackoffTimes: make(map[string]int), + TimeDetail: util.TimeDetail{ + ProcessTime: time.Second * time.Duration(i+1), + WaitTime: time.Millisecond * time.Duration(i+1), + }, }, } for _, backoff := range backoffs { diff --git a/sessionctx/variable/session_test.go b/sessionctx/variable/session_test.go index efbaa8906b214..7a1b4108ed27f 100644 --- a/sessionctx/variable/session_test.go +++ b/sessionctx/variable/session_test.go @@ -164,9 +164,11 @@ func TestSlowLogFormat(t *testing.T) { ProcessedKeys: 20001, TotalKeys: 10000, }, - TimeDetail: util.TimeDetail{ - ProcessTime: time.Second * time.Duration(2), - WaitTime: time.Minute, + DetailsNeedP90: execdetails.DetailsNeedP90{ + TimeDetail: util.TimeDetail{ + ProcessTime: time.Second * time.Duration(2), + WaitTime: 
time.Minute, + }, }, } statsInfos := make(map[string]uint64) diff --git a/util/execdetails/execdetails.go b/util/execdetails/execdetails.go index 4265145c2d66a..9e1bb47d89afc 100644 --- a/util/execdetails/execdetails.go +++ b/util/execdetails/execdetails.go @@ -32,6 +32,7 @@ import ( // ExecDetails contains execution detail information. type ExecDetails struct { +<<<<<<< HEAD CalleeAddress string CopTime time.Duration BackoffTime time.Duration @@ -43,6 +44,24 @@ type ExecDetails struct { LockKeysDetail *util.LockKeysDetails ScanDetail *util.ScanDetail TimeDetail util.TimeDetail +======= + DetailsNeedP90 + CommitDetail *util.CommitDetails + LockKeysDetail *util.LockKeysDetails + ScanDetail *util.ScanDetail + CopTime time.Duration + BackoffTime time.Duration + LockKeysDuration time.Duration + RequestCount int +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) +} + +// DetailsNeedP90 contains execution detail information which need calculate P90. +type DetailsNeedP90 struct { + BackoffSleep map[string]time.Duration + BackoffTimes map[string]int + CalleeAddress string + TimeDetail util.TimeDetail } type stmtExecDetailKeyType struct{} @@ -270,8 +289,14 @@ func (d ExecDetails) ToZapFields() (fields []zap.Field) { type basicCopRuntimeStats struct { BasicRuntimeStats +<<<<<<< HEAD threads int32 storeType string +======= + threads int32 + totalTasks int32 + procTimes []time.Duration +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) } // String implements the RuntimeStats interface. @@ -288,6 +313,8 @@ func (e *basicCopRuntimeStats) Clone() RuntimeStats { BasicRuntimeStats: BasicRuntimeStats{loop: e.loop, consume: e.consume, rows: e.rows}, threads: e.threads, storeType: e.storeType, + totalTasks: e.totalTasks, + procTimes: e.procTimes, } } @@ -301,6 +328,13 @@ func (e *basicCopRuntimeStats) Merge(rs RuntimeStats) { e.consume += tmp.consume e.rows += tmp.rows e.threads += tmp.threads + e.totalTasks += tmp.totalTasks + if len(tmp.procTimes) > 0 { + e.procTimes = append(e.procTimes, tmp.procTimes...) + } else { + e.procTimes = append(e.procTimes, time.Duration(tmp.consume)) + } + e.tiflashScanContext.Merge(tmp.tiflashScanContext) } // Tp implements the RuntimeStats interface. @@ -317,7 +351,7 @@ type CopRuntimeStats struct { // have many region leaders, several coprocessor tasks can be sent to the // same tikv-server instance. We have to use a list to maintain all tasks // executed on each instance. 
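The basicCopRuntimeStats changes above add totalTasks and a procTimes slice, and the hunk that follows turns the per-address slice into a single merged entry: CopRuntimeStats keeps one basicCopRuntimeStats per store address and folds every coprocessor task into it, so memory grows with the number of distinct stores rather than the number of tasks. A simplified model of RecordOneCopTask after this change, using stand-in types:

```go
package main

import (
	"fmt"
	"time"
)

// perStoreStats stands in for basicCopRuntimeStats after the change: one
// merged record per store address, with the per-task process times kept so
// max/p95 can still be derived later.
type perStoreStats struct {
	loops      int32
	consume    time.Duration
	rows       int64
	totalTasks int32
	procTimes  []time.Duration
}

func (s *perStoreStats) mergeOneTask(loops int32, consume time.Duration, rows int64) {
	s.loops += loops
	s.consume += consume
	s.rows += rows
	s.totalTasks++
	s.procTimes = append(s.procTimes, consume)
}

// copStats models the map[string]*basicCopRuntimeStats in the diff: the
// value is merged in place rather than appended to a per-address slice.
type copStats struct {
	stats map[string]*perStoreStats
}

func (c *copStats) recordOneCopTask(address string, loops int32, consume time.Duration, rows int64) {
	if c.stats[address] == nil {
		c.stats[address] = &perStoreStats{}
	}
	c.stats[address].mergeOneTask(loops, consume, rows)
}

func (c *copStats) actRows() (total int64) {
	for _, s := range c.stats {
		total += s.rows // no inner loop over tasks needed any more
	}
	return total
}

func main() {
	c := &copStats{stats: map[string]*perStoreStats{}}
	c.recordOneCopTask("8.8.8.8", 2, 3*time.Nanosecond, 10)
	c.recordOneCopTask("8.8.8.8", 5, 4*time.Nanosecond, 20)
	s := c.stats["8.8.8.8"]
	fmt.Println(s.totalTasks, s.loops, s.consume, c.actRows()) // 2 7 7ns 30
}
```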
- stats map[string][]*basicCopRuntimeStats + stats map[string]*basicCopRuntimeStats scanDetail *util.ScanDetail // do not use kv.StoreType because it will meet cycle import error storeType string @@ -327,20 +361,42 @@ type CopRuntimeStats struct { func (crs *CopRuntimeStats) RecordOneCopTask(address string, summary *tipb.ExecutorExecutionSummary) { crs.Lock() defer crs.Unlock() +<<<<<<< HEAD crs.stats[address] = append(crs.stats[address], &basicCopRuntimeStats{BasicRuntimeStats: BasicRuntimeStats{loop: int32(*summary.NumIterations), consume: int64(*summary.TimeProcessedNs), rows: int64(*summary.NumProducedRows)}, threads: int32(summary.GetConcurrency()), storeType: crs.storeType}) +======= + + if crs.stats[address] == nil { + crs.stats[address] = &basicCopRuntimeStats{ + storeType: crs.storeType, + } + } + crs.stats[address].Merge(&basicCopRuntimeStats{ + storeType: crs.storeType, + BasicRuntimeStats: BasicRuntimeStats{loop: int32(*summary.NumIterations), + consume: int64(*summary.TimeProcessedNs), + rows: int64(*summary.NumProducedRows), + tiflashScanContext: TiFlashScanContext{ + totalDmfileScannedPacks: summary.GetTiflashScanContext().GetTotalDmfileScannedPacks(), + totalDmfileSkippedPacks: summary.GetTiflashScanContext().GetTotalDmfileSkippedPacks(), + totalDmfileScannedRows: summary.GetTiflashScanContext().GetTotalDmfileScannedRows(), + totalDmfileSkippedRows: summary.GetTiflashScanContext().GetTotalDmfileSkippedRows(), + totalDmfileRoughSetIndexLoadTimeMs: summary.GetTiflashScanContext().GetTotalDmfileRoughSetIndexLoadTimeMs(), + totalDmfileReadTimeMs: summary.GetTiflashScanContext().GetTotalDmfileReadTimeMs(), + totalCreateSnapshotTimeMs: summary.GetTiflashScanContext().GetTotalCreateSnapshotTimeMs()}}, threads: int32(summary.GetConcurrency()), + totalTasks: 1, + }) +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) } // GetActRows return total rows of CopRuntimeStats. func (crs *CopRuntimeStats) GetActRows() (totalRows int64) { for _, instanceStats := range crs.stats { - for _, stat := range instanceStats { - totalRows += stat.rows - } + totalRows += instanceStats.rows } return totalRows } @@ -355,12 +411,21 @@ func (crs *CopRuntimeStats) String() string { var totalThreads int32 procTimes := make([]time.Duration, 0, 32) for _, instanceStats := range crs.stats { +<<<<<<< HEAD for _, stat := range instanceStats { procTimes = append(procTimes, time.Duration(stat.consume)*time.Nanosecond) totalIters += stat.loop totalThreads += stat.threads totalTasks++ } +======= + procTimes = append(procTimes, instanceStats.procTimes...) + totalTime += time.Duration(instanceStats.consume) + totalLoops += instanceStats.loop + totalThreads += instanceStats.threads + totalTiFlashScanContext.Merge(instanceStats.tiflashScanContext) + totalTasks += instanceStats.totalTasks +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) } isTiFlashCop := crs.storeType == "tiflash" @@ -479,10 +544,11 @@ func (e *BasicRuntimeStats) Tp() int { // RootRuntimeStats is the executor runtime stats that combine with multiple runtime stats. type RootRuntimeStats struct { - basics []*BasicRuntimeStats - groupRss [][]RuntimeStats + basic *BasicRuntimeStats + groupRss []RuntimeStats } +<<<<<<< HEAD // GetActRows return total rows of RootRuntimeStats. 
func (e *RootRuntimeStats) GetActRows() int64 { num := int64(0) @@ -490,6 +556,21 @@ func (e *RootRuntimeStats) GetActRows() int64 { num += basic.GetActRows() } return num +======= +// NewRootRuntimeStats returns a new RootRuntimeStats +func NewRootRuntimeStats() *RootRuntimeStats { + return &RootRuntimeStats{basic: &BasicRuntimeStats{}} +} + +// GetActRows return total rows of RootRuntimeStats. +func (e *RootRuntimeStats) GetActRows() int64 { + return e.basic.rows +} + +// MergeStats merges stats in the RootRuntimeStats and return the stats suitable for display directly. +func (e *RootRuntimeStats) MergeStats() (basic *BasicRuntimeStats, groups []RuntimeStats) { + return e.basic, e.groupRss +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) } // String implements the RuntimeStats interface. @@ -587,38 +668,46 @@ func (e *RuntimeStatsColl) RegisterStats(planID int, info RuntimeStats) { e.mu.Lock() stats, ok := e.rootStats[planID] if !ok { - stats = &RootRuntimeStats{} + stats = NewRootRuntimeStats() e.rootStats[planID] = stats } - if basic, ok := info.(*BasicRuntimeStats); ok { - stats.basics = append(stats.basics, basic) - } else { - tp := info.Tp() - found := false - for i, rss := range stats.groupRss { - if len(rss) == 0 { - continue - } - if rss[0].Tp() == tp { - stats.groupRss[i] = append(stats.groupRss[i], info) - found = true - break - } - } - if !found { - stats.groupRss = append(stats.groupRss, []RuntimeStats{info}) + tp := info.Tp() + found := false + for _, rss := range stats.groupRss { + if rss.Tp() == tp { + rss.Merge(info) + found = true + break } } + if !found { + stats.groupRss = append(stats.groupRss, info.Clone()) + } e.mu.Unlock() } +// GetBasicRuntimeStats gets basicRuntimeStats for a executor. +func (e *RuntimeStatsColl) GetBasicRuntimeStats(planID int) *BasicRuntimeStats { + e.mu.Lock() + defer e.mu.Unlock() + stats, ok := e.rootStats[planID] + if !ok { + stats = NewRootRuntimeStats() + e.rootStats[planID] = stats + } + if stats.basic == nil { + stats.basic = &BasicRuntimeStats{} + } + return stats.basic +} + // GetRootStats gets execStat for a executor. 
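The RegisterStats rewrite above is the core of the memory fix: instead of appending every registered object to a per-type slice, the collector stores a Clone of the first registration for each Tp and merges later ones into it in place. A compact standalone model of that behaviour follows; RuntimeStats, copWaitStats, and rootStats are reduced stand-ins for the execdetails types.

```go
package main

import (
	"fmt"
	"time"
)

// RuntimeStats is a reduced stand-in for execdetails.RuntimeStats.
type RuntimeStats interface {
	Tp() int
	Merge(RuntimeStats)
	Clone() RuntimeStats
}

// copWaitStats is one concrete stats type, used just for the demo.
type copWaitStats struct{ wait time.Duration }

func (s *copWaitStats) Tp() int { return 1 }
func (s *copWaitStats) Merge(other RuntimeStats) {
	if o, ok := other.(*copWaitStats); ok {
		s.wait += o.wait
	}
}
func (s *copWaitStats) Clone() RuntimeStats { c := *s; return &c }

// rootStats mirrors the reworked RootRuntimeStats: a flat list with at most
// one entry per stats type, instead of a slice of slices.
type rootStats struct{ groupRss []RuntimeStats }

// registerStats is the merging behaviour the diff introduces: same-type
// registrations are folded into the stored clone in place.
func (r *rootStats) registerStats(info RuntimeStats) {
	for _, rss := range r.groupRss {
		if rss.Tp() == info.Tp() {
			rss.Merge(info)
			return
		}
	}
	r.groupRss = append(r.groupRss, info.Clone())
}

func main() {
	r := &rootStats{}
	r.registerStats(&copWaitStats{wait: time.Second})
	r.registerStats(&copWaitStats{wait: 2 * time.Second}) // merged, not appended
	fmt.Println(len(r.groupRss), r.groupRss[0].(*copWaitStats).wait) // 1 3s
}
```

Merging at registration time keeps the per-plan entry a fixed size no matter how many times an executor or its workers register, which is presumably the "merge the runtime stats in time" behaviour named in the commit title.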
func (e *RuntimeStatsColl) GetRootStats(planID int) *RootRuntimeStats { e.mu.Lock() defer e.mu.Unlock() runtimeStats, exists := e.rootStats[planID] if !exists { - runtimeStats = &RootRuntimeStats{} + runtimeStats = NewRootRuntimeStats() e.rootStats[planID] = runtimeStats } return runtimeStats @@ -642,7 +731,7 @@ func (e *RuntimeStatsColl) GetOrCreateCopStats(planID int, storeType string) *Co copStats, ok := e.copStats[planID] if !ok { copStats = &CopRuntimeStats{ - stats: make(map[string][]*basicCopRuntimeStats), + stats: make(map[string]*basicCopRuntimeStats), scanDetail: &util.ScanDetail{}, storeType: storeType, } diff --git a/util/execdetails/execdetails_test.go b/util/execdetails/execdetails_test.go index 468f7dac1f54c..5a361d9d4a9de 100644 --- a/util/execdetails/execdetails_test.go +++ b/util/execdetails/execdetails_test.go @@ -63,10 +63,10 @@ func TestString(t *testing.T) { RocksdbBlockReadCount: 1, RocksdbBlockReadByte: 100, }, - TimeDetail: util.TimeDetail{ + DetailsNeedP90: DetailsNeedP90{TimeDetail: util.TimeDetail{ ProcessTime: 2*time.Second + 5*time.Millisecond, WaitTime: time.Second, - }, + }}, } expected := "Cop_time: 1.003 Process_time: 2.005 Wait_time: 1 Backoff_time: 1 Request_count: 1 Prewrite_time: 1 Commit_time: 1 " + "Get_commit_ts_time: 1 Commit_backoff_time: 1 Backoff_types: [backoff1 backoff2] Resolve_lock_time: 1 Local_latch_wait_time: 1 Write_keys: 1 Write_size: 1 Prewrite_region: 1 Txn_retry: 1 " + @@ -117,10 +117,19 @@ func TestCopRuntimeStats(t *testing.T) { copStats := cop.stats["8.8.8.8"] require.NotNil(t, copStats) +<<<<<<< HEAD copStats[0].SetRowNum(10) copStats[0].Record(time.Second, 10) require.Equal(t, "time:1s, loops:2", copStats[0].String()) require.Equal(t, "tikv_task:{proc max:4ns, min:3ns, p80:4ns, p95:4ns, iters:7, tasks:2}", stats.GetOrCreateCopStats(aggID, "tikv").String()) +======= + newCopStats := &basicCopRuntimeStats{} + newCopStats.SetRowNum(10) + newCopStats.Record(time.Second, 10) + copStats.Merge(newCopStats) + require.Equal(t, "time:1s, loops:2", copStats.String()) + require.Equal(t, "tikv_task:{proc max:4ns, min:3ns, avg: 3ns, p80:4ns, p95:4ns, iters:7, tasks:2}", stats.GetOrCreateCopStats(aggID, "tikv").String()) +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) rootStats := stats.GetRootStats(tableReaderID) require.NotNil(t, rootStats) @@ -131,8 +140,13 @@ func TestCopRuntimeStats(t *testing.T) { cop.scanDetail.RocksdbKeySkippedCount = 0 cop.scanDetail.RocksdbBlockReadCount = 0 // Print all fields even though the value of some fields is 0. 
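The updated test expectations below (an "avg: …" figure alongside max/min/p80/p95) show that the display string is now computed from the merged per-store data: process times collected across stores, totals divided by the task count. Here is a rough sketch of that summary computation; the percentile-index convention (n*4/5, n*19/20) is an assumption made for illustration, not necessarily the exact formula TiDB uses.

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// summarize derives the kind of figures printed in the cop-task string
// (max/min/avg/p80/p95) from the merged per-task process times.
func summarize(procTimes []time.Duration, totalTasks int) string {
	if len(procTimes) == 0 || totalTasks == 0 {
		return "tasks:0"
	}
	sort.Slice(procTimes, func(i, j int) bool { return procTimes[i] < procTimes[j] })
	n := len(procTimes)
	var total time.Duration
	for _, d := range procTimes {
		total += d
	}
	avg := total / time.Duration(totalTasks)
	// Index choice for p80/p95 is an assumption for this sketch.
	p80, p95 := procTimes[n*4/5], procTimes[n*19/20]
	return fmt.Sprintf("proc max:%v, min:%v, avg: %v, p80:%v, p95:%v, tasks:%d",
		procTimes[n-1], procTimes[0], avg, p80, p95, totalTasks)
}

func main() {
	// Two merged tasks, as in the TestCopRuntimeStats expectations.
	fmt.Println(summarize([]time.Duration{time.Nanosecond, time.Second}, 2))
	// proc max:1s, min:1ns, avg: 500ms, p80:1s, p95:1s, tasks:2
}
```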
+<<<<<<< HEAD str := "tikv_task:{proc max:1s, min:2ns, p80:1s, p95:1s, iters:4, tasks:2}, " + "scan_detail: {total_process_keys: 0, total_process_keys_size: 0, total_keys: 15, rocksdb: {delete_skipped_count: 5, key_skipped_count: 0, block: {cache_hit_count: 10, read_count: 0, read_byte: 100 Bytes}}}" +======= + str := "tikv_task:{proc max:1s, min:1ns, avg: 500ms, p80:1s, p95:1s, iters:4, tasks:2}, " + + "scan_detail: {total_keys: 15, rocksdb: {delete_skipped_count: 5, block: {cache_hit_count: 10, read_byte: 100 Bytes}}}" +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) require.Equal(t, str, cop.String()) zeroScanDetail := util.ScanDetail{} @@ -167,10 +181,17 @@ func TestCopRuntimeStatsForTiFlash(t *testing.T) { copStats := cop.stats["8.8.8.8"] require.NotNil(t, copStats) +<<<<<<< HEAD copStats[0].SetRowNum(10) copStats[0].Record(time.Second, 10) require.Equal(t, "time:1s, loops:2, threads:1", copStats[0].String()) expected := "tiflash_task:{proc max:4ns, min:3ns, p80:4ns, p95:4ns, iters:7, tasks:2, threads:2}" +======= + copStats.SetRowNum(10) + copStats.Record(time.Second, 10) + require.Equal(t, "time:1s, loops:2, threads:1, tiflash_scan:{dmfile:{total_scanned_packs:1, total_skipped_packs:0, total_scanned_rows:8192, total_skipped_rows:0, total_rough_set_index_load_time: 15ms, total_read_time: 200ms}, total_create_snapshot_time: 40ms}", copStats.String()) + expected := "tiflash_task:{proc max:4ns, min:3ns, avg: 3ns, p80:4ns, p95:4ns, iters:7, tasks:2, threads:2}, tiflash_scan:{dmfile:{total_scanned_packs:3, total_skipped_packs:11, total_scanned_rows:20192, total_skipped_rows:86000, total_rough_set_index_load_time: 100ms, total_read_time: 3000ms}, total_create_snapshot_time: 50ms}" +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) require.Equal(t, expected, stats.GetOrCreateCopStats(aggID, "tiflash").String()) rootStats := stats.GetRootStats(tableReaderID) @@ -230,18 +251,22 @@ func TestRuntimeStatsWithCommit(t *testing.T) { } func TestRootRuntimeStats(t *testing.T) { +<<<<<<< HEAD t.Parallel() basic1 := &BasicRuntimeStats{} basic2 := &BasicRuntimeStats{} basic1.Record(time.Second, 20) basic2.Record(time.Second*2, 30) +======= +>>>>>>> 23543a4805 (*: merge the runtime stats in time to avoid using too many memory (#39394)) pid := 1 stmtStats := NewRuntimeStatsColl(nil) - stmtStats.RegisterStats(pid, basic1) - stmtStats.RegisterStats(pid, basic2) + basic1 := stmtStats.GetBasicRuntimeStats(pid) + basic2 := stmtStats.GetBasicRuntimeStats(pid) + basic1.Record(time.Second, 20) + basic2.Record(time.Second*2, 30) concurrency := &RuntimeStatsWithConcurrencyInfo{} concurrency.SetConcurrencyInfo(NewConcurrencyInfo("worker", 15)) - stmtStats.RegisterStats(pid, concurrency) commitDetail := &util.CommitDetails{ GetCommitTsTime: time.Second, PrewriteTime: time.Second, @@ -251,6 +276,7 @@ func TestRootRuntimeStats(t *testing.T) { PrewriteRegionNum: 5, TxnRetry: 2, } + stmtStats.RegisterStats(pid, concurrency) stmtStats.RegisterStats(pid, &RuntimeStatsWithCommit{ Commit: commitDetail, }) diff --git a/util/stmtsummary/statement_summary_test.go b/util/stmtsummary/statement_summary_test.go index 000a7e0a9c087..df475253ca905 100644 --- a/util/stmtsummary/statement_summary_test.go +++ b/util/stmtsummary/statement_summary_test.go @@ -178,9 +178,8 @@ func TestAddStatement(t *testing.T) { MaxWaitTime: 2500, }, ExecDetail: &execdetails.ExecDetails{ - CalleeAddress: "202", - BackoffTime: 180, - RequestCount: 20, + 
BackoffTime: 180, + RequestCount: 20, CommitDetail: &util.CommitDetails{ GetCommitTsTime: 500, PrewriteTime: 50000, @@ -209,9 +208,11 @@ func TestAddStatement(t *testing.T) { RocksdbBlockReadCount: 10, RocksdbBlockReadByte: 1000, }, - TimeDetail: util.TimeDetail{ - ProcessTime: 1500, - WaitTime: 150, + DetailsNeedP90: execdetails.DetailsNeedP90{ + TimeDetail: util.TimeDetail{ + ProcessTime: 1500, + WaitTime: 150, + }, CalleeAddress: "202", }, }, StmtCtx: &stmtctx.StatementContext{ @@ -308,9 +309,8 @@ func TestAddStatement(t *testing.T) { MaxWaitTime: 250, }, ExecDetail: &execdetails.ExecDetails{ - CalleeAddress: "302", - BackoffTime: 18, - RequestCount: 2, + BackoffTime: 18, + RequestCount: 2, CommitDetail: &util.CommitDetails{ GetCommitTsTime: 50, PrewriteTime: 5000, @@ -339,9 +339,12 @@ func TestAddStatement(t *testing.T) { RocksdbBlockReadCount: 10, RocksdbBlockReadByte: 1000, }, - TimeDetail: util.TimeDetail{ - ProcessTime: 150, - WaitTime: 15, + DetailsNeedP90: execdetails.DetailsNeedP90{ + TimeDetail: util.TimeDetail{ + ProcessTime: 150, + WaitTime: 15, + }, + CalleeAddress: "302", }, }, StmtCtx: &stmtctx.StatementContext{ @@ -592,9 +595,8 @@ func generateAnyExecInfo() *StmtExecInfo { MaxWaitTime: 1500, }, ExecDetail: &execdetails.ExecDetails{ - CalleeAddress: "129", - BackoffTime: 80, - RequestCount: 10, + BackoffTime: 80, + RequestCount: 10, CommitDetail: &util.CommitDetails{ GetCommitTsTime: 100, PrewriteTime: 10000, @@ -623,9 +625,12 @@ func generateAnyExecInfo() *StmtExecInfo { RocksdbBlockReadCount: 10, RocksdbBlockReadByte: 1000, }, - TimeDetail: util.TimeDetail{ - ProcessTime: 500, - WaitTime: 50, + DetailsNeedP90: execdetails.DetailsNeedP90{ + TimeDetail: util.TimeDetail{ + ProcessTime: 500, + WaitTime: 50, + }, + CalleeAddress: "129", }, }, StmtCtx: &stmtctx.StatementContext{
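The statement-summary and stmtctx test fixtures above change shape only because ExecDetails now embeds DetailsNeedP90: field access stays promoted, but composite literals have to name the embedded type explicitly. A tiny illustration with simplified stand-in types:

```go
package main

import (
	"fmt"
	"time"
)

// detailsNeedP90 and execDetails are simplified stand-ins showing the shape
// the tests had to adopt once CalleeAddress/TimeDetail moved into an
// embedded struct.
type detailsNeedP90 struct {
	CalleeAddress string
	ProcessTime   time.Duration
}

type execDetails struct {
	detailsNeedP90 // embedded: fields are promoted for reads...
	RequestCount   int
}

func main() {
	// ...but a composite literal must spell out the embedded type, which is
	// exactly the mechanical change in the updated test fixtures.
	d := execDetails{
		detailsNeedP90: detailsNeedP90{
			CalleeAddress: "202",
			ProcessTime:   1500 * time.Nanosecond,
		},
		RequestCount: 20,
	}
	fmt.Println(d.CalleeAddress, d.ProcessTime, d.RequestCount) // promoted access still works
}
```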