From 7b13e7cfd768adc7f38cbdb854568af1a6333295 Mon Sep 17 00:00:00 2001 From: ekexium Date: Thu, 30 Mar 2023 15:15:12 +0800 Subject: [PATCH 1/6] update client-go; format Signed-off-by: ekexium --- go.mod | 2 + go.sum | 4 +- txnkv/transaction/pessimistic.go | 128 +++++++++++++++++++++---------- 3 files changed, 90 insertions(+), 44 deletions(-) diff --git a/go.mod b/go.mod index 2012ded3e..d3de8d350 100644 --- a/go.mod +++ b/go.mod @@ -60,3 +60,5 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) + +replace github.com/pingcap/kvproto => github.com/ekexium/kvproto v0.0.0-20230330070143-2647c215acdb diff --git a/go.sum b/go.sum index 669bfe683..a3f26df3f 100644 --- a/go.sum +++ b/go.sum @@ -34,6 +34,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/ekexium/kvproto v0.0.0-20230330070143-2647c215acdb h1:qWyo4c9992RlLL2zmnmcANJBiphtXleWymk/3W42LVs= +github.com/ekexium/kvproto v0.0.0-20230330070143-2647c215acdb/go.mod h1:RjuuhxITxwATlt5adgTedg3ehKk01M03L1U4jNHdeeQ= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -147,8 +149,6 @@ github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c h1:CgbKAHto5CQgW github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E= github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= -github.com/pingcap/kvproto v0.0.0-20230317010544-b47a4830141f h1:P4MWntrAwXARSLRVgnJ8W2zqIhHWvOSSLK4DjNyiN4A= -github.com/pingcap/kvproto v0.0.0-20230317010544-b47a4830141f/go.mod h1:KUrW1FGoznGMMTssYBu0czfAhn6vQcIrHyZoSC6T990= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw= github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/txnkv/transaction/pessimistic.go b/txnkv/transaction/pessimistic.go index fbdda2735..2adf2f45e 100644 --- a/txnkv/transaction/pessimistic.go +++ b/txnkv/transaction/pessimistic.go @@ -100,7 +100,9 @@ type diagnosticContext struct { reqDuration time.Duration } -func (action actionPessimisticLock) handleSingleBatch(c *twoPhaseCommitter, bo *retry.Backoffer, batch batchMutations) error { +func (action actionPessimisticLock) handleSingleBatch( + c *twoPhaseCommitter, bo *retry.Backoffer, batch batchMutations, +) error { convertMutationsToPb := func(committerMutations CommitterMutations) []*kvrpcpb.Mutation { mutations := make([]*kvrpcpb.Mutation, committerMutations.Len()) c.txn.GetMemBuffer().RLock() @@ -120,26 +122,28 @@ func (action actionPessimisticLock) handleSingleBatch(c *twoPhaseCommitter, bo * m := batch.mutations mutations := convertMutationsToPb(m) - req := 
tikvrpc.NewRequest(tikvrpc.CmdPessimisticLock, &kvrpcpb.PessimisticLockRequest{ - Mutations: mutations, - PrimaryLock: c.primary(), - StartVersion: c.startTS, - ForUpdateTs: c.forUpdateTS, - IsFirstLock: c.isFirstLock, - WaitTimeout: action.LockWaitTime(), - ReturnValues: action.ReturnValues, - CheckExistence: action.CheckExistence, - MinCommitTs: c.forUpdateTS + 1, - WakeUpMode: action.wakeUpMode, - LockOnlyIfExists: action.LockOnlyIfExists, - }, kvrpcpb.Context{ - Priority: c.priority, - SyncLog: c.syncLog, - ResourceGroupTag: action.LockCtx.ResourceGroupTag, - MaxExecutionDurationMs: uint64(client.MaxWriteExecutionTime.Milliseconds()), - RequestSource: c.txn.GetRequestSource(), - ResourceGroupName: c.resourceGroupName, - }) + req := tikvrpc.NewRequest( + tikvrpc.CmdPessimisticLock, &kvrpcpb.PessimisticLockRequest{ + Mutations: mutations, + PrimaryLock: c.primary(), + StartVersion: c.startTS, + ForUpdateTs: c.forUpdateTS, + IsFirstLock: c.isFirstLock, + WaitTimeout: action.LockWaitTime(), + ReturnValues: action.ReturnValues, + CheckExistence: action.CheckExistence, + MinCommitTs: c.forUpdateTS + 1, + WakeUpMode: action.wakeUpMode, + LockOnlyIfExists: action.LockOnlyIfExists, + }, kvrpcpb.Context{ + Priority: c.priority, + SyncLog: c.syncLog, + ResourceGroupTag: action.LockCtx.ResourceGroupTag, + MaxExecutionDurationMs: uint64(client.MaxWriteExecutionTime.Milliseconds()), + RequestSource: c.txn.GetRequestSource(), + ResourceGroupName: c.resourceGroupName, + }, + ) if action.LockCtx.ResourceGroupTag == nil && action.LockCtx.ResourceGroupTagger != nil { req.ResourceGroupTag = action.LockCtx.ResourceGroupTagger(req.Req.(*kvrpcpb.PessimisticLockRequest)) } @@ -168,8 +172,10 @@ func (action actionPessimisticLock) handleSingleBatch(c *twoPhaseCommitter, bo * for _, m := range mutations { keys = append(keys, hex.EncodeToString(m.Key)) } - logutil.BgLogger().Info("[failpoint] injected lock ttl = 1 on pessimistic lock", - zap.Uint64("txnStartTS", c.startTS), zap.Strings("keys", keys)) + logutil.BgLogger().Info( + "[failpoint] injected lock ttl = 1 on pessimistic lock", + zap.Uint64("txnStartTS", c.startTS), zap.Strings("keys", keys), + ) } req.PessimisticLock().LockTtl = ttl if _, err := util.EvalFailpoint("PessimisticLockErrWriteConflict"); err == nil { @@ -221,7 +227,9 @@ func (action actionPessimisticLock) handleSingleBatch(c *twoPhaseCommitter, bo * } } -func (action actionPessimisticLock) handleRegionError(c *twoPhaseCommitter, bo *retry.Backoffer, batch *batchMutations, regionErr *errorpb.Error) (finished bool, err error) { +func (action actionPessimisticLock) handleRegionError( + c *twoPhaseCommitter, bo *retry.Backoffer, batch *batchMutations, regionErr *errorpb.Error, +) (finished bool, err error) { // For other region error and the fake region error, backoff because // there's something wrong. // For the real EpochNotMatch error, don't backoff. 
@@ -242,7 +250,9 @@ func (action actionPessimisticLock) handleRegionError(c *twoPhaseCommitter, bo * return true, err } -func (action actionPessimisticLock) handleKeyError(c *twoPhaseCommitter, keyErrs []*kvrpcpb.KeyError) (locks []*txnlock.Lock, finished bool, err error) { +func (action actionPessimisticLock) handleKeyError( + c *twoPhaseCommitter, keyErrs []*kvrpcpb.KeyError, +) (locks []*txnlock.Lock, finished bool, err error) { for _, keyErr := range keyErrs { // Check already exists error if alreadyExist := keyErr.GetAlreadyExist(); alreadyExist != nil { @@ -263,7 +273,10 @@ func (action actionPessimisticLock) handleKeyError(c *twoPhaseCommitter, keyErrs return locks, false, nil } -func (action actionPessimisticLock) handlePessimisticLockResponseNormalMode(c *twoPhaseCommitter, bo *retry.Backoffer, batch *batchMutations, mutationsPb []*kvrpcpb.Mutation, resp *tikvrpc.Response, diagCtx *diagnosticContext) (finished bool, err error) { +func (action actionPessimisticLock) handlePessimisticLockResponseNormalMode( + c *twoPhaseCommitter, bo *retry.Backoffer, batch *batchMutations, mutationsPb []*kvrpcpb.Mutation, + resp *tikvrpc.Response, diagCtx *diagnosticContext, +) (finished bool, err error) { regionErr, err := resp.GetRegionError() if err != nil { return true, err @@ -283,7 +296,12 @@ func (action actionPessimisticLock) handlePessimisticLockResponseNormalMode(c *t if len(keyErrs) == 0 { if action.LockCtx.Stats != nil { - action.LockCtx.Stats.MergeReqDetails(diagCtx.reqDuration, batch.region.GetID(), diagCtx.sender.GetStoreAddr(), lockResp.ExecDetailsV2) + action.LockCtx.Stats.MergeReqDetails( + diagCtx.reqDuration, + batch.region.GetID(), + diagCtx.sender.GetStoreAddr(), + lockResp.ExecDetailsV2, + ) } if batch.isPrimary { @@ -314,6 +332,7 @@ func (action actionPessimisticLock) handlePessimisticLockResponseNormalMode(c *t } return true, nil } + locks, finished, err := action.handleKeyError(c, keyErrs) if err != nil { return finished, err @@ -360,7 +379,10 @@ func (action actionPessimisticLock) handlePessimisticLockResponseNormalMode(c *t return false, nil } -func (action actionPessimisticLock) handlePessimisticLockResponseForceLockMode(c *twoPhaseCommitter, bo *retry.Backoffer, batch *batchMutations, mutationsPb []*kvrpcpb.Mutation, resp *tikvrpc.Response, diagCtx *diagnosticContext) (finished bool, err error) { +func (action actionPessimisticLock) handlePessimisticLockResponseForceLockMode( + c *twoPhaseCommitter, bo *retry.Backoffer, batch *batchMutations, mutationsPb []*kvrpcpb.Mutation, + resp *tikvrpc.Response, diagCtx *diagnosticContext, +) (finished bool, err error) { regionErr, err := resp.GetRegionError() if err != nil { return true, err @@ -376,7 +398,9 @@ func (action actionPessimisticLock) handlePessimisticLockResponseForceLockMode(c if len(mutationsPb) > 1 || len(lockResp.Results) > 1 { panic("unreachable") } - if batch.isPrimary && len(lockResp.Results) > 0 && lockResp.Results[0].Type != kvrpcpb.PessimisticLockKeyResultType_LockResultFailed { + if batch.isPrimary && + len(lockResp.Results) > 0 && + lockResp.Results[0].Type != kvrpcpb.PessimisticLockKeyResultType_LockResultFailed { // After locking the primary key, we should protect the primary lock from expiring. 
c.run(c, action.LockCtx) } @@ -422,7 +446,12 @@ func (action actionPessimisticLock) handlePessimisticLockResponseForceLockMode(c if len(lockResp.Results) > 0 && !isMutationFailed { if action.LockCtx.Stats != nil { - action.LockCtx.Stats.MergeReqDetails(diagCtx.reqDuration, batch.region.GetID(), diagCtx.sender.GetStoreAddr(), lockResp.ExecDetailsV2) + action.LockCtx.Stats.MergeReqDetails( + diagCtx.reqDuration, + batch.region.GetID(), + diagCtx.sender.GetStoreAddr(), + lockResp.ExecDetailsV2, + ) } } @@ -497,16 +526,20 @@ func (action actionPessimisticLock) handlePessimisticLockResponseForceLockMode(c return true, nil } -func (actionPessimisticRollback) handleSingleBatch(c *twoPhaseCommitter, bo *retry.Backoffer, batch batchMutations) error { +func (actionPessimisticRollback) handleSingleBatch( + c *twoPhaseCommitter, bo *retry.Backoffer, batch batchMutations, +) error { forUpdateTS := c.forUpdateTS if c.maxLockedWithConflictTS > forUpdateTS { forUpdateTS = c.maxLockedWithConflictTS } - req := tikvrpc.NewRequest(tikvrpc.CmdPessimisticRollback, &kvrpcpb.PessimisticRollbackRequest{ - StartVersion: c.startTS, - ForUpdateTs: forUpdateTS, - Keys: batch.mutations.GetKeys(), - }) + req := tikvrpc.NewRequest( + tikvrpc.CmdPessimisticRollback, &kvrpcpb.PessimisticRollbackRequest{ + StartVersion: c.startTS, + ForUpdateTs: forUpdateTS, + Keys: batch.mutations.GetKeys(), + }, + ) req.RequestSource = util.RequestSourceFromCtx(bo.GetCtx()) req.MaxExecutionDurationMs = uint64(client.MaxWriteExecutionTime.Milliseconds()) resp, err := c.store.SendReq(bo, req, batch.region, client.ReadTimeoutShort) @@ -528,7 +561,10 @@ func (actionPessimisticRollback) handleSingleBatch(c *twoPhaseCommitter, bo *ret return nil } -func (c *twoPhaseCommitter) pessimisticLockMutations(bo *retry.Backoffer, lockCtx *kv.LockCtx, lockWaitMode kvrpcpb.PessimisticLockWakeUpMode, mutations CommitterMutations) error { +func (c *twoPhaseCommitter) pessimisticLockMutations( + bo *retry.Backoffer, lockCtx *kv.LockCtx, lockWaitMode kvrpcpb.PessimisticLockWakeUpMode, + mutations CommitterMutations, +) error { if c.sessionID > 0 { if val, err := util.EvalFailpoint("beforePessimisticLock"); err == nil { // Pass multiple instructions in one string, delimited by commas, to trigger multiple behaviors, like @@ -537,19 +573,27 @@ func (c *twoPhaseCommitter) pessimisticLockMutations(bo *retry.Backoffer, lockCt for _, action := range strings.Split(v, ",") { if action == "delay" { duration := time.Duration(rand.Int63n(int64(time.Second) * 5)) - logutil.Logger(bo.GetCtx()).Info("[failpoint] injected delay at pessimistic lock", - zap.Uint64("txnStartTS", c.startTS), zap.Duration("duration", duration)) + logutil.Logger(bo.GetCtx()).Info( + "[failpoint] injected delay at pessimistic lock", + zap.Uint64("txnStartTS", c.startTS), zap.Duration("duration", duration), + ) time.Sleep(duration) } else if action == "fail" { - logutil.Logger(bo.GetCtx()).Info("[failpoint] injected failure at pessimistic lock", - zap.Uint64("txnStartTS", c.startTS)) + logutil.Logger(bo.GetCtx()).Info( + "[failpoint] injected failure at pessimistic lock", + zap.Uint64("txnStartTS", c.startTS), + ) return errors.New("injected failure at pessimistic lock") } } } } } - return c.doActionOnMutations(bo, actionPessimisticLock{LockCtx: lockCtx, wakeUpMode: lockWaitMode, isInternal: c.txn.isInternal()}, mutations) + return c.doActionOnMutations( + bo, + actionPessimisticLock{LockCtx: lockCtx, wakeUpMode: lockWaitMode, isInternal: c.txn.isInternal()}, + mutations, + ) } func (c 
*twoPhaseCommitter) pessimisticRollbackMutations(bo *retry.Backoffer, mutations CommitterMutations) error {

From 380744c9b29313637b7d50b841d1b616406cd72f Mon Sep 17 00:00:00 2001
From: ekexium
Date: Thu, 30 Mar 2023 15:22:02 +0800
Subject: [PATCH 2/6] feat: do not resolve lock if duration_to_last_updated is short

Signed-off-by: ekexium
---
 txnkv/transaction/pessimistic.go | 24 +++++++++++++++++++++---
 1 file changed, 21 insertions(+), 3 deletions(-)

diff --git a/txnkv/transaction/pessimistic.go b/txnkv/transaction/pessimistic.go
index 2adf2f45e..7db258112 100644
--- a/txnkv/transaction/pessimistic.go
+++ b/txnkv/transaction/pessimistic.go
@@ -250,7 +250,11 @@ func (action actionPessimisticLock) handleRegionError(
 	return true, err
 }
 
-func (action actionPessimisticLock) handleKeyError(
+// When handling a wait timeout, if the current lock was updated within the threshold, do not try to resolve the lock.
+// The value is the same as the default timeout in TiKV.
+const skipResolveThresholdMs = 1000
+
+func (action actionPessimisticLock) handleKeyErrorForResolve(
 	c *twoPhaseCommitter, keyErrs []*kvrpcpb.KeyError,
 ) (locks []*txnlock.Lock, finished bool, err error) {
 	for _, keyErr := range keyErrs {
@@ -263,6 +267,17 @@ func (action actionPessimisticLock) handleKeyError(
 			return nil, true, errors.WithStack(&tikverr.ErrDeadlock{Deadlock: deadlock})
 		}
 
+		// Do not resolve the lock if it was updated recently, which indicates that the txn holding
+		// the lock is very likely alive.
+		// This should only happen on wait timeout.
+		if lockInfo := keyErr.GetLocked(); lockInfo != nil &&
+			lockInfo.DurationToLastUpdateMs > 0 &&
+			lockInfo.DurationToLastUpdateMs < skipResolveThresholdMs {
+			return nil, true, tikverr.NewErrWriteConflictWithArgs(
+				c.startTS, lockInfo.LockVersion, lockInfo.LockForUpdateTs, lockInfo.Key, kvrpcpb.WriteConflict_Unknown,
+			)
+		}
+
 		// Extract lock from key error
 		lock, err1 := txnlock.ExtractLockFromKeyErr(keyErr)
 		if err1 != nil {
@@ -270,6 +285,9 @@ func (action actionPessimisticLock) handleKeyError(
 		locks = append(locks, lock)
 	}
+	if len(locks) == 0 {
+		return nil, true, nil
+	}
 	return locks, false, nil
 }
 
@@ -333,7 +351,7 @@ func (action actionPessimisticLock) handlePessimisticLockResponseNormalMode(
 		return true, nil
 	}
 
-	locks, finished, err := action.handleKeyError(c, keyErrs)
+	locks, finished, err := action.handleKeyErrorForResolve(c, keyErrs)
 	if err != nil {
 		return finished, err
 	}
@@ -455,7 +473,7 @@ func (action actionPessimisticLock) handlePessimisticLockResponseForceLockMode(
 		}
 	}
 
-	locks, finished, err := action.handleKeyError(c, keyErrs)
+	locks, finished, err := action.handleKeyErrorForResolve(c, keyErrs)
 	if err != nil {
 		return finished, err
 	}

From 956ce1fcf87282d8bb654b9ec2fa680e58144543 Mon Sep 17 00:00:00 2001
From: ekexium
Date: Fri, 31 Mar 2023 15:02:36 +0800
Subject: [PATCH 3/6] adjust the threshold to 1200ms to allow for small deviations

Signed-off-by: ekexium
---
 txnkv/transaction/pessimistic.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/txnkv/transaction/pessimistic.go b/txnkv/transaction/pessimistic.go
index 7db258112..efd9c3231 100644
--- a/txnkv/transaction/pessimistic.go
+++ b/txnkv/transaction/pessimistic.go
@@ -252,7 +252,7 @@ func (action actionPessimisticLock) handleRegionError(
 
 // When handling a wait timeout, if the current lock was updated within the threshold, do not try to resolve the lock.
 // The value is the same as the default timeout in TiKV.
-const skipResolveThresholdMs = 1000 +const skipResolveThresholdMs = 1200 func (action actionPessimisticLock) handleKeyErrorForResolve( c *twoPhaseCommitter, keyErrs []*kvrpcpb.KeyError, From 0ea96510eac633893cff0548aec7c3b7f75ff7be Mon Sep 17 00:00:00 2001 From: ekexium Date: Fri, 31 Mar 2023 15:42:44 +0800 Subject: [PATCH 4/6] fix: don't treat it as WriteConflict, simply retry Signed-off-by: ekexium --- txnkv/transaction/pessimistic.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/txnkv/transaction/pessimistic.go b/txnkv/transaction/pessimistic.go index efd9c3231..95a725cba 100644 --- a/txnkv/transaction/pessimistic.go +++ b/txnkv/transaction/pessimistic.go @@ -273,9 +273,7 @@ func (action actionPessimisticLock) handleKeyErrorForResolve( if lockInfo := keyErr.GetLocked(); lockInfo != nil && lockInfo.DurationToLastUpdateMs > 0 && lockInfo.DurationToLastUpdateMs < skipResolveThresholdMs { - return nil, true, tikverr.NewErrWriteConflictWithArgs( - c.startTS, lockInfo.LockVersion, lockInfo.LockForUpdateTs, lockInfo.Key, kvrpcpb.WriteConflict_Unknown, - ) + continue } // Extract lock from key error @@ -286,7 +284,7 @@ func (action actionPessimisticLock) handleKeyErrorForResolve( locks = append(locks, lock) } if len(locks) == 0 { - return nil, true, nil + return nil, false, nil } return locks, false, nil } @@ -355,6 +353,9 @@ func (action actionPessimisticLock) handlePessimisticLockResponseNormalMode( if err != nil { return finished, err } + if len(locks) == 0 { + return false, nil + } // Because we already waited on tikv, no need to Backoff here. // tikv default will wait 3s(also the maximum wait value) when lock error occurs @@ -524,9 +525,9 @@ func (action actionPessimisticLock) handlePessimisticLockResponseForceLockMode( return false, nil } - // If the failedMutations is not empty and the error is not KeyIsLocked, the function should have already - // returned before. So this is an unreachable path. - return true, errors.New("Pessimistic lock response corrupted") + // This can be the situation where KeyIsLocked errors are generated by timeout, + // and we decide not to resolve them. 
Instead, just retry.
+	return false, nil
 	}
 
 	if len(locks) != 0 {

From 03a785caead1bdddc7bc59455ed45d631f36ffbf Mon Sep 17 00:00:00 2001
From: ekexium
Date: Thu, 6 Apr 2023 15:39:37 +0800
Subject: [PATCH 5/6] update kvproto

Signed-off-by: ekexium
---
 go.mod | 4 +---
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/go.mod b/go.mod
index b8481ce91..5acc25998 100644
--- a/go.mod
+++ b/go.mod
@@ -14,7 +14,7 @@ require (
 	github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c
 	github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c
 	github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989
-	github.com/pingcap/kvproto v0.0.0-20230317010544-b47a4830141f
+	github.com/pingcap/kvproto v0.0.0-20230331024443-349815129e6d
 	github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.14.0
@@ -60,5 +60,3 @@ require (
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
-
-replace github.com/pingcap/kvproto => github.com/ekexium/kvproto v0.0.0-20230330070143-2647c215acdb
diff --git a/go.sum b/go.sum
index 15546905c..8a8360f24 100644
--- a/go.sum
+++ b/go.sum
@@ -34,8 +34,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/ekexium/kvproto v0.0.0-20230330070143-2647c215acdb h1:qWyo4c9992RlLL2zmnmcANJBiphtXleWymk/3W42LVs=
-github.com/ekexium/kvproto v0.0.0-20230330070143-2647c215acdb/go.mod h1:RjuuhxITxwATlt5adgTedg3ehKk01M03L1U4jNHdeeQ=
 github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
 github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -149,6 +147,8 @@ github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c h1:CgbKAHto5CQgW
 github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew=
 github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E=
 github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw=
+github.com/pingcap/kvproto v0.0.0-20230331024443-349815129e6d h1:QR9Gk/Hi7DU399ec81cG7b3X/Umwv8FIcyx5WwD+O7M=
+github.com/pingcap/kvproto v0.0.0-20230331024443-349815129e6d/go.mod h1:RjuuhxITxwATlt5adgTedg3ehKk01M03L1U4jNHdeeQ=
 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw=
 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=

From a755dfa70bd82222626a239b8babdcecad064ede Mon Sep 17 00:00:00 2001
From: ekexium
Date: Wed, 12 Apr 2023 19:40:08 +0800
Subject: [PATCH 6/6] set the threshold to 300ms

Signed-off-by: ekexium
---
 txnkv/transaction/pessimistic.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/txnkv/transaction/pessimistic.go b/txnkv/transaction/pessimistic.go
index 65a400602..7f79ee0fd 100644
--- 
a/txnkv/transaction/pessimistic.go
+++ b/txnkv/transaction/pessimistic.go
@@ -251,8 +251,8 @@ func (action actionPessimisticLock) handleRegionError(
 }
 
 // When handling a wait timeout, if the current lock was updated within the threshold, do not try to resolve the lock.
-// The value is the same as the default timeout in TiKV.
-const skipResolveThresholdMs = 1200
+// The default timeout in TiKV is 1 second. A threshold of 300ms should be appropriate for common hot-update workloads.
+const skipResolveThresholdMs = 300
 
 func (action actionPessimisticLock) handleKeyErrorForResolve(
 	c *twoPhaseCommitter, keyErrs []*kvrpcpb.KeyError,
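
Notes for reviewers:

The net behavior this series converges on: when a pessimistic lock attempt times out waiting and the blocking lock was updated within the last skipResolveThresholdMs (300ms as of PATCH 6/6), the client neither resolves the lock nor reports a write conflict; it simply retries the batch. Below is a minimal, self-contained sketch of that gate. It uses simplified stand-in types: lockInfo and shouldResolveLock are illustrative names, not the real kvproto or client-go identifiers; the actual logic lives in handleKeyErrorForResolve above.

```go
package main

import "fmt"

// skipResolveThresholdMs mirrors the constant from PATCH 6/6: a lock whose
// holder updated it within the last 300ms is assumed to belong to a live
// transaction, so resolving it would almost certainly be wasted work.
const skipResolveThresholdMs = 300

// lockInfo is a stand-in for the kvproto LockInfo fields used by the series.
type lockInfo struct {
	durationToLastUpdateMs uint64
}

// shouldResolveLock reproduces the gating condition: resolve only when the
// lock has not been updated recently. A zero duration means the field is
// unavailable (e.g. an older TiKV), in which case we fall back to the old
// behavior and try to resolve.
func shouldResolveLock(l lockInfo) bool {
	if l.durationToLastUpdateMs > 0 && l.durationToLastUpdateMs < skipResolveThresholdMs {
		return false // the holder looks alive: skip resolution and retry locking
	}
	return true
}

func main() {
	fmt.Println(shouldResolveLock(lockInfo{durationToLastUpdateMs: 120}))  // false: skip and retry
	fmt.Println(shouldResolveLock(lockInfo{durationToLastUpdateMs: 5000})) // true: try to resolve
	fmt.Println(shouldResolveLock(lockInfo{durationToLastUpdateMs: 0}))    // true: no data, resolve
}
```

The 300ms figure sits well below TiKV's 1-second default lock-wait timeout, which the PATCH 6/6 comment deems appropriate for common hot-update workloads: a healthy transaction that keeps updating its lock is rarely disturbed, while a genuinely abandoned lock still gets resolved on a later timeout.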
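A second sketch, under the same caveats (toy names and simplified signatures, not the real client-go control flow), shows the contract that PATCH 4/6 relies on: returning finished=false with a nil error from the batch handler makes the caller retry the same batch, instead of surfacing a WriteConflict to the user as the earlier revision did.

```go
package main

import (
	"errors"
	"fmt"
)

// handleBatch is a toy stand-in for the (finished, err) contract of the
// response handlers in this series: finished=true ends the attempt, while
// finished=false with a nil error asks the caller to retry the batch.
func handleBatch(attempt int) (finished bool, err error) {
	if attempt < 2 {
		// PATCH 4/6 behavior: a recently updated lock is neither resolved nor
		// reported as a write conflict; the batch is simply retried.
		return false, nil
	}
	return true, nil // lock acquired (or a hard error would be returned)
}

func lockWithRetry(maxAttempts int) error {
	for attempt := 0; attempt < maxAttempts; attempt++ {
		finished, err := handleBatch(attempt)
		if err != nil {
			return err
		}
		if finished {
			return nil
		}
		// In the real client a Backoffer bounds this loop; a bare attempt
		// counter stands in for it here.
	}
	return errors.New("gave up after max attempts")
}

func main() {
	fmt.Println(lockWithRetry(5)) // <nil>: succeeded on the third attempt
}
```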