From 0a43d2a28aa1a2fd0a4504192981a2a7c58652f0 Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Wed, 22 Jan 2020 14:28:16 +0800 Subject: [PATCH 01/46] restore: merge tidb-tools/pkg/restore-util (#146) * restore-util: Implement split/scatter (#274) * implement split/scatter Signed-off-by: 5kbpers * init test Signed-off-by: 5kbpers * redesign output/input of the lib Signed-off-by: 5kbpers * update dependency Signed-off-by: 5kbpers * add commments and more tests Signed-off-by: 5kbpers * add ScanRegions interface to Client Signed-off-by: 5kbpers * fix potential data race Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * Apply suggestions from code review Co-Authored-By: kennytm * Update pkg/restore-util/client.go Co-Authored-By: kennytm * address comments Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * update dependency Signed-off-by: 5kbpers * resolve conflicts Signed-off-by: 5kbpers * fix prefix rewrite Signed-off-by: 5kbpers * add RewriteRule/skip failed scatter region/retry the SplitRegion Signed-off-by: 5kbpers * fix test Signed-off-by: 5kbpers * check if region has peer Signed-off-by: 5kbpers * more logs Signed-off-by: 5kbpers * restore-util: add split retry interval (#277) * reset dependencies to release-3.1 * add split retry interval Signed-off-by: 5kbpers * fix go.sum Signed-off-by: 5kbpers * restore-util: wait for scatter region sequentially (#279) * wait for scatter region sequentially Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * restore-util: add on split hook (#281) * restore-util: add on split hook Signed-off-by: Neil Shen * Nil check onSplit Co-Authored-By: kennytm * restore-util: fix returned new region is nil (#283) * restore-util: fix returned new region is nil Signed-off-by: 5kbpers * more logs Signed-off-by: 5kbpers * *: gofmt Signed-off-by: 5kbpers * Apply suggestions from code review Co-Authored-By: kennytm * fix log Signed-off-by: 5kbpers * restore-util: call onSplit on splitByRewriteRules (#285) Signed-off-by: Neil Shen * restore-util: fix overlapped error message (#293) * restore-util: fix overlapped error message Signed-off-by: 5kbpers * fix log message Signed-off-by: 5kbpers * reduce error trace Signed-off-by: 5kbpers * fix test Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * restore-util: log warning when cannot find matched rewrite rule (#299) * restore-util: add method to set placement rules and store labels (#301) * restore-util: add method to set placement rules and store labels Signed-off-by: disksing * minor fix Signed-off-by: disksing * address comment Signed-off-by: disksing * add GetPlacementRules Signed-off-by: disksing * fix test Signed-off-by: disksing * restore-util: support batch split (#300) * restore-util: support batch split Signed-off-by: 5kbpers * go fmt Signed-off-by: 5kbpers * Apply suggestions from code review Co-Authored-By: kennytm * address commits Signed-off-by: 5kbpers * Update pkg/restore-util/split.go Co-Authored-By: kennytm * add onSplit callback Signed-off-by: 5kbpers * fix test Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * restore-util: add upper bound time for waiting for scatter (#305) * restore: fix scatter regions failed Signed-off-by: 5kbpers * add log Signed-off-by: 5kbpers * stop waiting for scatter after 3min Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * restore-util: fix wrong url (#306) 
Signed-off-by: disksing * restore-util: add warning about unmatched table id (#313) * restore-util: support table partition Signed-off-by: 5kbpers * fix log Signed-off-by: 5kbpers * warn table id does not match Signed-off-by: 5kbpers * add unit tests Signed-off-by: 5kbpers * Apply suggestions from code review Co-Authored-By: Neil Shen * fix compile error Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * fix test Signed-off-by: 5kbpers Co-authored-by: Ian Co-authored-by: Neil Shen * *: prune tidb-tools Signed-off-by: Neil Shen * restore: address linters suggestions Signed-off-by: Neil Shen * restore: merge restoreutil into restore Signed-off-by: Neil Shen * address comment Signed-off-by: Neil Shen Co-authored-by: 5kbpers <20279863+5kbpers@users.noreply.github.com> Co-authored-by: kennytm Co-authored-by: disksing Co-authored-by: Ian --- .golangci.yml | 7 +- cmd/validate.go | 9 +- go.mod | 1 - go.sum | 3 +- pkg/restore/client.go | 13 +- pkg/restore/import.go | 33 ++-- pkg/restore/range.go | 148 +++++++++++++++ pkg/restore/range_test.go | 75 ++++++++ pkg/restore/split.go | 305 +++++++++++++++++++++++++++++++ pkg/restore/split_client.go | 353 ++++++++++++++++++++++++++++++++++++ pkg/restore/split_test.go | 301 ++++++++++++++++++++++++++++++ pkg/restore/util.go | 29 ++- pkg/restore/util_test.go | 3 +- 13 files changed, 1228 insertions(+), 52 deletions(-) create mode 100644 pkg/restore/range.go create mode 100644 pkg/restore/range_test.go create mode 100644 pkg/restore/split.go create mode 100644 pkg/restore/split_client.go create mode 100644 pkg/restore/split_test.go diff --git a/.golangci.yml b/.golangci.yml index 969cac759..1b025678e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -9,7 +9,8 @@ issues: text: "Potential HTTP request made with variable url" linters: - gosec - - path: .go - text: "Use of weak random number generator" + # TODO Remove it. + - path: split_client.go + text: "SA1019:" linters: - - gosec + - staticcheck diff --git a/cmd/validate.go b/cmd/validate.go index dd1e11fb0..8ba72b372 100644 --- a/cmd/validate.go +++ b/cmd/validate.go @@ -15,7 +15,6 @@ import ( "github.com/pingcap/log" "github.com/pingcap/parser/model" "github.com/pingcap/pd/pkg/mock/mockid" - restore_util "github.com/pingcap/tidb-tools/pkg/restore-util" "github.com/spf13/cobra" "go.uber.org/zap" @@ -187,15 +186,15 @@ func newBackupMetaCommand() *cobra.Command { tables = append(tables, db.Tables...) 
} // Check if the ranges of files overlapped - rangeTree := restore_util.NewRangeTree() + rangeTree := restore.NewRangeTree() for _, file := range files { - if out := rangeTree.InsertRange(restore_util.Range{ + if out := rangeTree.InsertRange(restore.Range{ StartKey: file.GetStartKey(), EndKey: file.GetEndKey(), }); out != nil { log.Error( "file ranges overlapped", - zap.Stringer("out", out.(*restore_util.Range)), + zap.Stringer("out", out.(*restore.Range)), zap.Stringer("file", file), ) } @@ -206,7 +205,7 @@ func newBackupMetaCommand() *cobra.Command { for offset := uint64(0); offset < tableIDOffset; offset++ { _, _ = tableIDAllocator.Alloc() // Ignore error } - rewriteRules := &restore_util.RewriteRules{ + rewriteRules := &restore.RewriteRules{ Table: make([]*import_sstpb.RewriteRule, 0), Data: make([]*import_sstpb.RewriteRule, 0), } diff --git a/go.mod b/go.mod index 8e50bbf35..9951c2922 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,6 @@ require ( github.com/pingcap/parser v0.0.0-20191210060830-bdf23a7ade01 github.com/pingcap/pd v1.1.0-beta.0.20191212045800-234784c7a9c5 github.com/pingcap/tidb v1.1.0-beta.0.20191213040028-9009da737834 - github.com/pingcap/tidb-tools v3.1.0-beta.0.20191223064326-e9c7a23a8dcb+incompatible github.com/pingcap/tipb v0.0.0-20191209145133-44f75c9bef33 github.com/prometheus/client_golang v1.0.0 github.com/sirupsen/logrus v1.4.2 diff --git a/go.sum b/go.sum index 696ccee81..085e00355 100644 --- a/go.sum +++ b/go.sum @@ -283,9 +283,8 @@ github.com/pingcap/sysutil v0.0.0-20191126040022-986c5b3ed9a3 h1:HCNif3lukL83gNC github.com/pingcap/sysutil v0.0.0-20191126040022-986c5b3ed9a3/go.mod h1:Futrrmuw98pEsbEmoPsjw8aKLCmixwHEmT2rF+AsXGw= github.com/pingcap/tidb v1.1.0-beta.0.20191213040028-9009da737834 h1:eNf7bDY39moIzzcs5+PhLLW0BM2D2yrzFbjW/X42y0s= github.com/pingcap/tidb v1.1.0-beta.0.20191213040028-9009da737834/go.mod h1:VWx47QOXISBHHtZeWrDQlBOdbvth9TE9gei6QpoqJ4g= +github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible h1:H1jg0aDWz2SLRh3hNBo2HFtnuHtudIUvBumU7syRkic= github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= -github.com/pingcap/tidb-tools v3.1.0-beta.0.20191223064326-e9c7a23a8dcb+incompatible h1:GxWxXVqA2aAZIgS+bEpasJkkspu9Jom1/oB2NmP7t/o= -github.com/pingcap/tidb-tools v3.1.0-beta.0.20191223064326-e9c7a23a8dcb+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= github.com/pingcap/tipb v0.0.0-20191209145133-44f75c9bef33 h1:cTSaVv1hue17BCPqt+sURADTFSMpSD26ZuvKRyYIjJs= github.com/pingcap/tipb v0.0.0-20191209145133-44f75c9bef33/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 9714edc2a..3030ba857 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -13,7 +13,6 @@ import ( "github.com/pingcap/log" "github.com/pingcap/parser/model" pd "github.com/pingcap/pd/client" - restore_util "github.com/pingcap/tidb-tools/pkg/restore-util" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/store/tikv/oracle" @@ -108,7 +107,7 @@ func (rc *Client) InitBackupMeta(backupMeta *backup.BackupMeta, backend *backup. 
rc.databases = databases rc.backupMeta = backupMeta - metaClient := restore_util.NewClient(rc.pdClient) + metaClient := NewSplitClient(rc.pdClient) importClient := NewImportClient(metaClient) rc.fileImporter = NewFileImporter(rc.ctx, metaClient, importClient, backend, rc.rateLimit) return nil @@ -189,8 +188,8 @@ func (rc *Client) CreateTables( dom *domain.Domain, tables []*utils.Table, newTS uint64, -) (*restore_util.RewriteRules, []*model.TableInfo, error) { - rewriteRules := &restore_util.RewriteRules{ +) (*RewriteRules, []*model.TableInfo, error) { + rewriteRules := &RewriteRules{ Table: make([]*import_sstpb.RewriteRule, 0), Data: make([]*import_sstpb.RewriteRule, 0), } @@ -232,7 +231,7 @@ func (rc *Client) setSpeedLimit() error { // RestoreTable tries to restore the data of a table. func (rc *Client) RestoreTable( table *utils.Table, - rewriteRules *restore_util.RewriteRules, + rewriteRules *RewriteRules, updateCh chan<- struct{}, ) (err error) { start := time.Now() @@ -300,7 +299,7 @@ func (rc *Client) RestoreTable( // RestoreDatabase tries to restore the data of a database func (rc *Client) RestoreDatabase( db *utils.Database, - rewriteRules *restore_util.RewriteRules, + rewriteRules *RewriteRules, updateCh chan<- struct{}, ) (err error) { start := time.Now() @@ -336,7 +335,7 @@ func (rc *Client) RestoreDatabase( // RestoreAll tries to restore all the data of backup files. func (rc *Client) RestoreAll( - rewriteRules *restore_util.RewriteRules, + rewriteRules *RewriteRules, updateCh chan<- struct{}, ) (err error) { start := time.Now() diff --git a/pkg/restore/import.go b/pkg/restore/import.go index fc09b7b16..77273ebab 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -12,7 +12,6 @@ import ( "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/log" "github.com/pingcap/pd/pkg/codec" - restore_util "github.com/pingcap/tidb-tools/pkg/restore-util" "go.uber.org/zap" "google.golang.org/grpc" @@ -60,12 +59,12 @@ type ImporterClient interface { type importClient struct { mu sync.Mutex - metaClient restore_util.Client + metaClient SplitClient clients map[uint64]import_sstpb.ImportSSTClient } // NewImportClient returns a new ImporterClient -func NewImportClient(metaClient restore_util.Client) ImporterClient { +func NewImportClient(metaClient SplitClient) ImporterClient { return &importClient{ metaClient: metaClient, clients: make(map[uint64]import_sstpb.ImportSSTClient), @@ -133,7 +132,7 @@ func (ic *importClient) getImportClient( // FileImporter used to import a file to TiKV. type FileImporter struct { - metaClient restore_util.Client + metaClient SplitClient importClient ImporterClient backend *backup.StorageBackend rateLimit uint64 @@ -145,7 +144,7 @@ type FileImporter struct { // NewFileImporter returns a new file importClient. func NewFileImporter( ctx context.Context, - metaClient restore_util.Client, + metaClient SplitClient, importClient ImporterClient, backend *backup.StorageBackend, rateLimit uint64, @@ -163,7 +162,7 @@ func NewFileImporter( // Import tries to import a file. // All rules must contain encoded keys. 
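// ("Encoded" here means the TiDB key-codec form produced by
// codec.EncodeBytes: rewriteFileKeys below returns the rewritten start and
// end keys already encoded, so they can be compared directly with the region
// boundaries that PD returns.)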
-func (importer *FileImporter) Import(file *backup.File, rewriteRules *restore_util.RewriteRules) error { +func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRules) error { log.Debug("import file", zap.Stringer("file", file)) // Rewrite the start key and end key of file to scan regions startKey, endKey, err := rewriteFileKeys(file, rewriteRules) @@ -179,9 +178,9 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *restore_ut ctx, cancel := context.WithTimeout(importer.ctx, importScanResgionTime) defer cancel() // Scan regions covered by the file range - regionInfos, err := importer.metaClient.ScanRegions(ctx, startKey, endKey, 0) - if err != nil { - return errors.Trace(err) + regionInfos, err1 := importer.metaClient.ScanRegions(ctx, startKey, endKey, 0) + if err1 != nil { + return errors.Trace(err1) } log.Debug("scan regions", zap.Stringer("file", file), zap.Int("count", len(regionInfos))) // Try to download and ingest the file in every region @@ -190,20 +189,20 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *restore_ut info := regionInfo // Try to download file. err = withRetry(func() error { - var err error + var err2 error var isEmpty bool - downloadMeta, isEmpty, err = importer.downloadSST(info, file, rewriteRules) - if err != nil { + downloadMeta, isEmpty, err2 = importer.downloadSST(info, file, rewriteRules) + if err2 != nil { if err != errRewriteRuleNotFound { log.Warn("download file failed", zap.Stringer("file", file), zap.Stringer("region", info.Region), zap.Binary("startKey", startKey), zap.Binary("endKey", endKey), - zap.Error(err), + zap.Error(err2), ) } - return err + return err2 } if isEmpty { log.Info( @@ -255,9 +254,9 @@ func (importer *FileImporter) setDownloadSpeedLimit(storeID uint64) error { } func (importer *FileImporter) downloadSST( - regionInfo *restore_util.RegionInfo, + regionInfo *RegionInfo, file *backup.File, - rewriteRules *restore_util.RewriteRules, + rewriteRules *RewriteRules, ) (*import_sstpb.SSTMeta, bool, error) { id, err := uuid.New().MarshalBinary() if err != nil { @@ -312,7 +311,7 @@ func (importer *FileImporter) downloadSST( func (importer *FileImporter) ingestSST( sstMeta *import_sstpb.SSTMeta, - regionInfo *restore_util.RegionInfo, + regionInfo *RegionInfo, ) error { leader := regionInfo.Leader if leader == nil { diff --git a/pkg/restore/range.go b/pkg/restore/range.go new file mode 100644 index 000000000..f3914539e --- /dev/null +++ b/pkg/restore/range.go @@ -0,0 +1,148 @@ +package restore + +import ( + "bytes" + "fmt" + + "github.com/google/btree" + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/import_sstpb" + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/log" + "github.com/pingcap/tidb/tablecodec" + "go.uber.org/zap" +) + +// Range represents a range of keys. +type Range struct { + StartKey []byte + EndKey []byte +} + +// String formats a range to a string +func (r *Range) String() string { + return fmt.Sprintf("[%x %x]", r.StartKey, r.EndKey) +} + +// Less compares a range with a btree.Item +func (r *Range) Less(than btree.Item) bool { + t := than.(*Range) + return len(r.EndKey) != 0 && bytes.Compare(r.EndKey, t.StartKey) <= 0 +} + +// contains returns if a key is included in the range. 
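+// For example (a sketch of the semantics below): Range{StartKey: []byte("a"),
+// EndKey: []byte("c")} contains "a" and "b" but not "c", and an empty EndKey
+// is treated as unbounded, so Range{StartKey: []byte("a")} contains every
+// key >= "a".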
+func (r *Range) contains(key []byte) bool {
+	start, end := r.StartKey, r.EndKey
+	return bytes.Compare(key, start) >= 0 &&
+		(len(end) == 0 || bytes.Compare(key, end) < 0)
+}
+
+// sortRanges rewrites the ranges with the given rewrite rules (if any),
+// checks that they do not overlap, and returns them sorted.
+func sortRanges(ranges []Range, rewriteRules *RewriteRules) ([]Range, error) {
+	rangeTree := NewRangeTree()
+	for _, rg := range ranges {
+		if rewriteRules != nil {
+			startID := tablecodec.DecodeTableID(rg.StartKey)
+			endID := tablecodec.DecodeTableID(rg.EndKey)
+			var rule *import_sstpb.RewriteRule
+			if startID == endID {
+				rg.StartKey, rule = replacePrefix(rg.StartKey, rewriteRules)
+				if rule == nil {
+					log.Warn("cannot find rewrite rule", zap.Binary("key", rg.StartKey))
+				} else {
+					log.Debug(
+						"rewrite start key",
+						zap.Binary("key", rg.StartKey),
+						zap.Stringer("rule", rule))
+				}
+				rg.EndKey, rule = replacePrefix(rg.EndKey, rewriteRules)
+				if rule == nil {
+					log.Warn("cannot find rewrite rule", zap.Binary("key", rg.EndKey))
+				} else {
+					log.Debug(
+						"rewrite end key",
+						zap.Binary("key", rg.EndKey),
+						zap.Stringer("rule", rule))
+				}
+			} else {
+				log.Warn("table id does not match",
+					zap.Binary("startKey", rg.StartKey),
+					zap.Binary("endKey", rg.EndKey),
+					zap.Int64("startID", startID),
+					zap.Int64("endID", endID))
+				return nil, errors.New("table id does not match")
+			}
+		}
+		if out := rangeTree.InsertRange(rg); out != nil {
+			return nil, errors.Errorf("ranges overlapped: %s, %s", out, rg)
+		}
+	}
+	sortedRanges := make([]Range, 0, len(ranges))
+	rangeTree.Ascend(func(rg *Range) bool {
+		if rg == nil {
+			return false
+		}
+		sortedRanges = append(sortedRanges, *rg)
+		return true
+	})
+	return sortedRanges, nil
+}
+
+// RangeTree stores the ranges in an orderly manner.
+// All the ranges it stored do not overlap.
+type RangeTree struct {
+	tree *btree.BTree
+}
+
+// NewRangeTree returns a new RangeTree.
+func NewRangeTree() *RangeTree {
+	return &RangeTree{tree: btree.New(32)}
+}
+
+// Find returns the stored range that contains the key, or nil if no such
+// range exists in the tree.
+func (rt *RangeTree) Find(key []byte) *Range {
+	var ret *Range
+	r := &Range{
+		StartKey: key,
+	}
+	rt.tree.DescendLessOrEqual(r, func(i btree.Item) bool {
+		ret = i.(*Range)
+		return false
+	})
+	if ret == nil || !ret.contains(key) {
+		return nil
+	}
+	return ret
+}
+
+// InsertRange inserts a range into the range tree. Overlapping ranges compare
+// as equal under Less, so it returns the previously stored range that the new
+// one overlaps (and replaces), or nil if there is no overlap.
+func (rt *RangeTree) InsertRange(rg Range) btree.Item {
+	return rt.tree.ReplaceOrInsert(&rg)
+}
+
+// RangeIterator allows callers of Ascend to iterate in-order over portions of
+// the tree. When this function returns false, iteration will stop and the
+// associated Ascend function will immediately return.
+type RangeIterator func(rg *Range) bool
+
+// Ascend calls the iterator for every value in the tree within [first, last],
+// until the iterator returns false.
+func (rt *RangeTree) Ascend(iterator RangeIterator) {
+	rt.tree.Ascend(func(i btree.Item) bool {
+		return iterator(i.(*Range))
+	})
+}
+
+// RegionInfo includes a region and the leader of the region.
+type RegionInfo struct {
+	Region *metapb.Region
+	Leader *metapb.Peer
+}
+
+// RewriteRules contains rules for rewriting keys of tables.
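+// Table rules carry whole-table prefixes while Data rules carry record/index
+// prefixes (see GetRewriteRules in util.go and the fixtures in range_test.go).
+// Since a table prefix is also a prefix of that table's record keys,
+// replacePrefix in split.go deliberately tries Data rules before Table rules.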
+type RewriteRules struct {
+	Table []*import_sstpb.RewriteRule
+	Data  []*import_sstpb.RewriteRule
+}
diff --git a/pkg/restore/range_test.go b/pkg/restore/range_test.go
new file mode 100644
index 000000000..a9edc5b82
--- /dev/null
+++ b/pkg/restore/range_test.go
@@ -0,0 +1,75 @@
+package restore
+
+import (
+	"bytes"
+
+	. "github.com/pingcap/check"
+	"github.com/pingcap/kvproto/pkg/import_sstpb"
+	"github.com/pingcap/tidb/tablecodec"
+)
+
+type testRangeSuite struct{}
+
+var _ = Suite(&testRangeSuite{})
+
+type rangeEquals struct {
+	*CheckerInfo
+}
+
+var RangeEquals Checker = &rangeEquals{
+	&CheckerInfo{Name: "RangeEquals", Params: []string{"obtained", "expected"}},
+}
+
+func (checker *rangeEquals) Check(params []interface{}, names []string) (result bool, error string) {
+	obtained := params[0].([]Range)
+	expected := params[1].([]Range)
+	if len(obtained) != len(expected) {
+		return false, ""
+	}
+	for i := range obtained {
+		if !bytes.Equal(obtained[i].StartKey, expected[i].StartKey) ||
+			!bytes.Equal(obtained[i].EndKey, expected[i].EndKey) {
+			return false, ""
+		}
+	}
+	return true, ""
+}
+
+func (s *testRangeSuite) TestSortRange(c *C) {
+	dataRules := []*import_sstpb.RewriteRule{
+		{OldKeyPrefix: tablecodec.GenTableRecordPrefix(1), NewKeyPrefix: tablecodec.GenTableRecordPrefix(4)},
+		{OldKeyPrefix: tablecodec.GenTableRecordPrefix(2), NewKeyPrefix: tablecodec.GenTableRecordPrefix(5)},
+	}
+	rewriteRules := &RewriteRules{
+		Table: make([]*import_sstpb.RewriteRule, 0),
+		Data:  dataRules,
+	}
+	ranges1 := []Range{
+		{append(tablecodec.GenTableRecordPrefix(1), []byte("aaa")...),
+			append(tablecodec.GenTableRecordPrefix(1), []byte("bbb")...)},
+	}
+	rs1, err := sortRanges(ranges1, rewriteRules)
+	c.Assert(err, IsNil, Commentf("sort range1 failed: %v", err))
+	c.Assert(rs1, RangeEquals, []Range{
+		{append(tablecodec.GenTableRecordPrefix(4), []byte("aaa")...),
+			append(tablecodec.GenTableRecordPrefix(4), []byte("bbb")...)},
+	})
+
+	ranges2 := []Range{
+		{append(tablecodec.GenTableRecordPrefix(1), []byte("aaa")...),
+			append(tablecodec.GenTableRecordPrefix(2), []byte("bbb")...)},
+	}
+	_, err = sortRanges(ranges2, rewriteRules)
+	c.Assert(err, ErrorMatches, ".*table id does not match.*")
+
+	ranges3 := initRanges()
+	rewriteRules1 := initRewriteRules()
+	rs3, err := sortRanges(ranges3, rewriteRules1)
+	c.Assert(err, IsNil, Commentf("sort range3 failed: %v", err))
+	c.Assert(rs3, RangeEquals, []Range{
+		{[]byte("bbd"), []byte("bbf")},
+		{[]byte("bbf"), []byte("bbj")},
+		{[]byte("xxa"), []byte("xxe")},
+		{[]byte("xxe"), []byte("xxz")},
+	})
+}
diff --git a/pkg/restore/split.go b/pkg/restore/split.go
new file mode 100644
index 000000000..31b23a60f
--- /dev/null
+++ b/pkg/restore/split.go
@@ -0,0 +1,305 @@
+package restore
+
+import (
+	"bytes"
+	"context"
+	"time"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/kvproto/pkg/import_sstpb"
+	"github.com/pingcap/kvproto/pkg/pdpb"
+	"github.com/pingcap/log"
+	"github.com/pingcap/tidb/util/codec"
+	"go.uber.org/zap"
+)
+
+// Constants for split retry machinery.
+const (
+	SplitRetryTimes       = 32
+	SplitRetryInterval    = 50 * time.Millisecond
+	SplitMaxRetryInterval = time.Second
+
+	SplitCheckMaxRetryTimes = 64
+	SplitCheckInterval      = 8 * time.Millisecond
+	SplitMaxCheckInterval   = time.Second
+
+	ScatterWaitMaxRetryTimes = 64
+	ScatterWaitInterval      = 50 * time.Millisecond
+	ScatterMaxWaitInterval   = time.Second
+
+	ScatterWaitUpperInterval = 180 * time.Second
+)
+
+// RegionSplitter is an executor of region split by rules.
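+//
+// A minimal usage sketch (pdClient is assumed to be an established pd.Client):
+//
+//	splitter := NewRegionSplitter(NewSplitClient(pdClient))
+//	err := splitter.Split(ctx, ranges, rewriteRules, func(keys [][]byte) {
+//		// progress callback, called once per batch of split keys
+//	})
+//
+// SplitRanges in util.go wires the splitter up in exactly this way.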
+type RegionSplitter struct { + client SplitClient +} + +// NewRegionSplitter returns a new RegionSplitter. +func NewRegionSplitter(client SplitClient) *RegionSplitter { + return &RegionSplitter{ + client: client, + } +} + +// OnSplitFunc is called before split a range. +type OnSplitFunc func(key [][]byte) + +// Split executes a region split. It will split regions by the rewrite rules, +// then it will split regions by the end key of each range. +// tableRules includes the prefix of a table, since some ranges may have +// a prefix with record sequence or index sequence. +// note: all ranges and rewrite rules must have raw key. +func (rs *RegionSplitter) Split( + ctx context.Context, + ranges []Range, + rewriteRules *RewriteRules, + onSplit OnSplitFunc, +) error { + if len(ranges) == 0 { + return nil + } + startTime := time.Now() + // Sort the range for getting the min and max key of the ranges + sortedRanges, err := sortRanges(ranges, rewriteRules) + if err != nil { + return errors.Trace(err) + } + minKey := codec.EncodeBytes([]byte{}, sortedRanges[0].StartKey) + maxKey := codec.EncodeBytes([]byte{}, sortedRanges[len(sortedRanges)-1].EndKey) + for _, rule := range rewriteRules.Table { + if bytes.Compare(minKey, rule.GetNewKeyPrefix()) > 0 { + minKey = rule.GetNewKeyPrefix() + } + if bytes.Compare(maxKey, rule.GetNewKeyPrefix()) < 0 { + maxKey = rule.GetNewKeyPrefix() + } + } + for _, rule := range rewriteRules.Data { + if bytes.Compare(minKey, rule.GetNewKeyPrefix()) > 0 { + minKey = rule.GetNewKeyPrefix() + } + if bytes.Compare(maxKey, rule.GetNewKeyPrefix()) < 0 { + maxKey = rule.GetNewKeyPrefix() + } + } + interval := SplitRetryInterval + scatterRegions := make([]*RegionInfo, 0) +SplitRegions: + for i := 0; i < SplitRetryTimes; i++ { + var regions []*RegionInfo + regions, err = rs.client.ScanRegions(ctx, minKey, maxKey, 0) + if err != nil { + return errors.Trace(err) + } + if len(regions) == 0 { + log.Warn("cannot scan any region") + return nil + } + splitKeyMap := getSplitKeys(rewriteRules, sortedRanges, regions) + regionMap := make(map[uint64]*RegionInfo) + for _, region := range regions { + regionMap[region.Region.GetId()] = region + } + for regionID, keys := range splitKeyMap { + var newRegions []*RegionInfo + newRegions, err = rs.splitAndScatterRegions(ctx, regionMap[regionID], keys) + if err != nil { + interval = 2 * interval + if interval > SplitMaxRetryInterval { + interval = SplitMaxRetryInterval + } + time.Sleep(interval) + if i > 3 { + log.Warn("splitting regions failed, retry it", zap.Error(err)) + } + continue SplitRegions + } + scatterRegions = append(scatterRegions, newRegions...) 
+ onSplit(keys) + } + break + } + if err != nil { + return errors.Trace(err) + } + log.Info("splitting regions done, wait for scattering regions", + zap.Int("regions", len(scatterRegions)), zap.Duration("take", time.Since(startTime))) + startTime = time.Now() + scatterCount := 0 + for _, region := range scatterRegions { + rs.waitForScatterRegion(ctx, region) + if time.Since(startTime) > ScatterWaitUpperInterval { + break + } + scatterCount++ + } + if scatterCount == len(scatterRegions) { + log.Info("waiting for scattering regions done", + zap.Int("regions", len(scatterRegions)), zap.Duration("take", time.Since(startTime))) + } else { + log.Warn("waiting for scattering regions timeout", + zap.Int("scatterCount", scatterCount), + zap.Int("regions", len(scatterRegions)), + zap.Duration("take", time.Since(startTime))) + } + return nil +} + +func (rs *RegionSplitter) hasRegion(ctx context.Context, regionID uint64) (bool, error) { + regionInfo, err := rs.client.GetRegionByID(ctx, regionID) + if err != nil { + return false, err + } + return regionInfo != nil, nil +} + +func (rs *RegionSplitter) isScatterRegionFinished(ctx context.Context, regionID uint64) (bool, error) { + resp, err := rs.client.GetOperator(ctx, regionID) + if err != nil { + return false, err + } + // Heartbeat may not be sent to PD + if respErr := resp.GetHeader().GetError(); respErr != nil { + if respErr.GetType() == pdpb.ErrorType_REGION_NOT_FOUND { + return true, nil + } + return false, errors.Errorf("get operator error: %s", respErr.GetType()) + } + retryTimes := ctx.Value(retryTimes).(int) + if retryTimes > 3 { + log.Warn("get operator", zap.Uint64("regionID", regionID), zap.Stringer("resp", resp)) + } + // If the current operator of the region is not 'scatter-region', we could assume + // that 'scatter-operator' has finished or timeout + ok := string(resp.GetDesc()) != "scatter-region" || resp.GetStatus() != pdpb.OperatorStatus_RUNNING + return ok, nil +} + +func (rs *RegionSplitter) waitForSplit(ctx context.Context, regionID uint64) { + interval := SplitCheckInterval + for i := 0; i < SplitCheckMaxRetryTimes; i++ { + ok, err := rs.hasRegion(ctx, regionID) + if err != nil { + log.Warn("wait for split failed", zap.Error(err)) + return + } + if ok { + break + } + interval = 2 * interval + if interval > SplitMaxCheckInterval { + interval = SplitMaxCheckInterval + } + time.Sleep(interval) + } +} + +type retryTimeKey struct{} + +var retryTimes = new(retryTimeKey) + +func (rs *RegionSplitter) waitForScatterRegion(ctx context.Context, regionInfo *RegionInfo) { + interval := ScatterWaitInterval + regionID := regionInfo.Region.GetId() + for i := 0; i < ScatterWaitMaxRetryTimes; i++ { + ctx1 := context.WithValue(ctx, retryTimes, i) + ok, err := rs.isScatterRegionFinished(ctx1, regionID) + if err != nil { + log.Warn("scatter region failed: do not have the region", + zap.Stringer("region", regionInfo.Region)) + return + } + if ok { + break + } + interval = 2 * interval + if interval > ScatterMaxWaitInterval { + interval = ScatterMaxWaitInterval + } + time.Sleep(interval) + } +} + +func (rs *RegionSplitter) splitAndScatterRegions( + ctx context.Context, regionInfo *RegionInfo, keys [][]byte, +) ([]*RegionInfo, error) { + newRegions, err := rs.client.BatchSplitRegions(ctx, regionInfo, keys) + if err != nil { + return nil, err + } + for _, region := range newRegions { + // Wait for a while until the regions successfully splits. 
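+		// waitForSplit polls GetRegionByID with exponential backoff
+		// (SplitCheckInterval doubling up to SplitMaxCheckInterval, at most
+		// SplitCheckMaxRetryTimes attempts); it gives up silently on errors,
+		// so a failed check only delays the restore instead of failing it.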
+ rs.waitForSplit(ctx, region.Region.Id) + if err = rs.client.ScatterRegion(ctx, region); err != nil { + log.Warn("scatter region failed", zap.Stringer("region", region.Region), zap.Error(err)) + } + } + return newRegions, nil +} + +// getSplitKeys checks if the regions should be split by the new prefix of the rewrites rule and the end key of +// the ranges, groups the split keys by region id +func getSplitKeys(rewriteRules *RewriteRules, ranges []Range, regions []*RegionInfo) map[uint64][][]byte { + splitKeyMap := make(map[uint64][][]byte) + checkKeys := make([][]byte, 0) + for _, rule := range rewriteRules.Table { + checkKeys = append(checkKeys, rule.GetNewKeyPrefix()) + } + for _, rule := range rewriteRules.Data { + checkKeys = append(checkKeys, rule.GetNewKeyPrefix()) + } + for _, rg := range ranges { + checkKeys = append(checkKeys, rg.EndKey) + } + for _, key := range checkKeys { + if region := needSplit(key, regions); region != nil { + splitKeys, ok := splitKeyMap[region.Region.GetId()] + if !ok { + splitKeys = make([][]byte, 0, 1) + } + splitKeyMap[region.Region.GetId()] = append(splitKeys, key) + } + } + return splitKeyMap +} + +// needSplit checks whether a key is necessary to split, if true returns the split region +func needSplit(splitKey []byte, regions []*RegionInfo) *RegionInfo { + // If splitKey is the max key. + if len(splitKey) == 0 { + return nil + } + splitKey = codec.EncodeBytes([]byte{}, splitKey) + for _, region := range regions { + // If splitKey is the boundary of the region + if bytes.Equal(splitKey, region.Region.GetStartKey()) { + return nil + } + // If splitKey is in a region + if bytes.Compare(splitKey, region.Region.GetStartKey()) > 0 && beforeEnd(splitKey, region.Region.GetEndKey()) { + return region + } + } + return nil +} + +func beforeEnd(key []byte, end []byte) bool { + return bytes.Compare(key, end) < 0 || len(end) == 0 +} + +func replacePrefix(s []byte, rewriteRules *RewriteRules) ([]byte, *import_sstpb.RewriteRule) { + // We should search the dataRules firstly. + for _, rule := range rewriteRules.Data { + if bytes.HasPrefix(s, rule.GetOldKeyPrefix()) { + return append(append([]byte{}, rule.GetNewKeyPrefix()...), s[len(rule.GetOldKeyPrefix()):]...), rule + } + } + for _, rule := range rewriteRules.Table { + if bytes.HasPrefix(s, rule.GetOldKeyPrefix()) { + return append(append([]byte{}, rule.GetNewKeyPrefix()...), s[len(rule.GetOldKeyPrefix()):]...), rule + } + } + + return s, nil +} diff --git a/pkg/restore/split_client.go b/pkg/restore/split_client.go new file mode 100644 index 000000000..8a618a191 --- /dev/null +++ b/pkg/restore/split_client.go @@ -0,0 +1,353 @@ +package restore + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "path" + "strconv" + "strings" + "sync" + + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/pingcap/kvproto/pkg/tikvpb" + pd "github.com/pingcap/pd/client" + "github.com/pingcap/pd/server/schedule/placement" + "google.golang.org/grpc" +) + +// SplitClient is an external client used by RegionSplitter. +type SplitClient interface { + // GetStore gets a store by a store id. + GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) + // GetRegion gets a region which includes a specified key. + GetRegion(ctx context.Context, key []byte) (*RegionInfo, error) + // GetRegionByID gets a region by a region id. 
+ GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error) + // SplitRegion splits a region from a key, if key is not included in the region, it will return nil. + // note: the key should not be encoded + SplitRegion(ctx context.Context, regionInfo *RegionInfo, key []byte) (*RegionInfo, error) + // BatchSplitRegions splits a region from a batch of keys. + // note: the keys should not be encoded + BatchSplitRegions(ctx context.Context, regionInfo *RegionInfo, keys [][]byte) ([]*RegionInfo, error) + // ScatterRegion scatters a specified region. + ScatterRegion(ctx context.Context, regionInfo *RegionInfo) error + // GetOperator gets the status of operator of the specified region. + GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) + // ScanRegion gets a list of regions, starts from the region that contains key. + // Limit limits the maximum number of regions returned. + ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error) + // GetPlacementRule loads a placement rule from PD. + GetPlacementRule(ctx context.Context, groupID, ruleID string) (placement.Rule, error) + // SetPlacementRule insert or update a placement rule to PD. + SetPlacementRule(ctx context.Context, rule placement.Rule) error + // DeletePlacementRule removes a placement rule from PD. + DeletePlacementRule(ctx context.Context, groupID, ruleID string) error + // SetStoreLabel add or update specified label of stores. If labelValue + // is empty, it clears the label. + SetStoresLabel(ctx context.Context, stores []uint64, labelKey, labelValue string) error +} + +// pdClient is a wrapper of pd client, can be used by RegionSplitter. +type pdClient struct { + mu sync.Mutex + client pd.Client + storeCache map[uint64]*metapb.Store +} + +// NewSplitClient returns a client used by RegionSplitter. 
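+// The returned client caches store metadata on first use (see GetStore) and
+// never invalidates the cache, which assumes store addresses stay stable for
+// the lifetime of a restore.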
+func NewSplitClient(client pd.Client) SplitClient { + return &pdClient{ + client: client, + storeCache: make(map[uint64]*metapb.Store), + } +} + +func (c *pdClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { + c.mu.Lock() + defer c.mu.Unlock() + store, ok := c.storeCache[storeID] + if ok { + return store, nil + } + store, err := c.client.GetStore(ctx, storeID) + if err != nil { + return nil, err + } + c.storeCache[storeID] = store + return store, nil + +} + +func (c *pdClient) GetRegion(ctx context.Context, key []byte) (*RegionInfo, error) { + region, leader, err := c.client.GetRegion(ctx, key) + if err != nil { + return nil, err + } + if region == nil { + return nil, nil + } + return &RegionInfo{ + Region: region, + Leader: leader, + }, nil +} + +func (c *pdClient) GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error) { + region, leader, err := c.client.GetRegionByID(ctx, regionID) + if err != nil { + return nil, err + } + if region == nil { + return nil, nil + } + return &RegionInfo{ + Region: region, + Leader: leader, + }, nil +} + +func (c *pdClient) SplitRegion(ctx context.Context, regionInfo *RegionInfo, key []byte) (*RegionInfo, error) { + var peer *metapb.Peer + if regionInfo.Leader != nil { + peer = regionInfo.Leader + } else { + if len(regionInfo.Region.Peers) == 0 { + return nil, errors.New("region does not have peer") + } + peer = regionInfo.Region.Peers[0] + } + storeID := peer.GetStoreId() + store, err := c.GetStore(ctx, storeID) + if err != nil { + return nil, err + } + conn, err := grpc.Dial(store.GetAddress(), grpc.WithInsecure()) + if err != nil { + return nil, err + } + defer conn.Close() + + client := tikvpb.NewTikvClient(conn) + resp, err := client.SplitRegion(ctx, &kvrpcpb.SplitRegionRequest{ + Context: &kvrpcpb.Context{ + RegionId: regionInfo.Region.Id, + RegionEpoch: regionInfo.Region.RegionEpoch, + Peer: peer, + }, + SplitKey: key, + }) + if err != nil { + return nil, err + } + if resp.RegionError != nil { + return nil, errors.Errorf("split region failed: region=%v, key=%x, err=%v", regionInfo.Region, key, resp.RegionError) + } + + // BUG: Left is deprecated, it may be nil even if split is succeed! + // Assume the new region is the left one. + newRegion := resp.GetLeft() + if newRegion == nil { + regions := resp.GetRegions() + for _, r := range regions { + if bytes.Equal(r.GetStartKey(), regionInfo.Region.GetStartKey()) { + newRegion = r + break + } + } + } + if newRegion == nil { + return nil, errors.New("split region failed: new region is nil") + } + var leader *metapb.Peer + // Assume the leaders will be at the same store. 
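+	// (The new region inherits its peer set from the parent, so the peer on
+	// the old leader's store is the most plausible leader; PD may still elect
+	// a different one, in which case the Leader recorded here is stale.)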
+ if regionInfo.Leader != nil { + for _, p := range newRegion.GetPeers() { + if p.GetStoreId() == regionInfo.Leader.GetStoreId() { + leader = p + break + } + } + } + return &RegionInfo{ + Region: newRegion, + Leader: leader, + }, nil +} + +func (c *pdClient) BatchSplitRegions( + ctx context.Context, regionInfo *RegionInfo, keys [][]byte, +) ([]*RegionInfo, error) { + var peer *metapb.Peer + if regionInfo.Leader != nil { + peer = regionInfo.Leader + } else { + if len(regionInfo.Region.Peers) == 0 { + return nil, errors.New("region does not have peer") + } + peer = regionInfo.Region.Peers[0] + } + + storeID := peer.GetStoreId() + store, err := c.GetStore(ctx, storeID) + if err != nil { + return nil, err + } + conn, err := grpc.Dial(store.GetAddress(), grpc.WithInsecure()) + if err != nil { + return nil, err + } + defer conn.Close() + client := tikvpb.NewTikvClient(conn) + resp, err := client.SplitRegion(ctx, &kvrpcpb.SplitRegionRequest{ + Context: &kvrpcpb.Context{ + RegionId: regionInfo.Region.Id, + RegionEpoch: regionInfo.Region.RegionEpoch, + Peer: peer, + }, + SplitKeys: keys, + }) + if err != nil { + return nil, err + } + if resp.RegionError != nil { + return nil, errors.Errorf("split region failed: region=%v, err=%v", regionInfo.Region, resp.RegionError) + } + + regions := resp.GetRegions() + newRegionInfos := make([]*RegionInfo, 0, len(regions)) + for _, region := range regions { + // Skip the original region + if region.GetId() == regionInfo.Region.GetId() { + continue + } + var leader *metapb.Peer + // Assume the leaders will be at the same store. + if regionInfo.Leader != nil { + for _, p := range region.GetPeers() { + if p.GetStoreId() == regionInfo.Leader.GetStoreId() { + leader = p + break + } + } + } + newRegionInfos = append(newRegionInfos, &RegionInfo{ + Region: region, + Leader: leader, + }) + } + return newRegionInfos, nil +} + +func (c *pdClient) ScatterRegion(ctx context.Context, regionInfo *RegionInfo) error { + return c.client.ScatterRegion(ctx, regionInfo.Region.GetId()) +} + +func (c *pdClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) { + return c.client.GetOperator(ctx, regionID) +} + +func (c *pdClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error) { + regions, leaders, err := c.client.ScanRegions(ctx, key, endKey, limit) + if err != nil { + return nil, err + } + regionInfos := make([]*RegionInfo, 0, len(regions)) + for i := range regions { + regionInfos = append(regionInfos, &RegionInfo{ + Region: regions[i], + Leader: leaders[i], + }) + } + return regionInfos, nil +} + +func (c *pdClient) GetPlacementRule(ctx context.Context, groupID, ruleID string) (placement.Rule, error) { + var rule placement.Rule + addr := c.getPDAPIAddr() + if addr == "" { + return rule, errors.New("failed to add stores labels: no leader") + } + req, _ := http.NewRequestWithContext(ctx, "GET", addr+path.Join("/pd/api/v1/config/rule", groupID, ruleID), nil) + res, err := http.DefaultClient.Do(req) + if err != nil { + return rule, errors.WithStack(err) + } + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return rule, errors.WithStack(err) + } + res.Body.Close() + err = json.Unmarshal(b, &rule) + if err != nil { + return rule, errors.WithStack(err) + } + return rule, nil +} + +func (c *pdClient) SetPlacementRule(ctx context.Context, rule placement.Rule) error { + addr := c.getPDAPIAddr() + if addr == "" { + return errors.New("failed to add stores labels: no leader") + } + m, _ := json.Marshal(rule) + 
req, _ := http.NewRequestWithContext(ctx, "POST", addr+path.Join("/pd/api/v1/config/rule"), bytes.NewReader(m)) + res, err := http.DefaultClient.Do(req) + if err != nil { + return errors.WithStack(err) + } + return errors.Trace(res.Body.Close()) +} + +func (c *pdClient) DeletePlacementRule(ctx context.Context, groupID, ruleID string) error { + addr := c.getPDAPIAddr() + if addr == "" { + return errors.New("failed to add stores labels: no leader") + } + req, _ := http.NewRequestWithContext(ctx, "DELETE", addr+path.Join("/pd/api/v1/config/rule", groupID, ruleID), nil) + res, err := http.DefaultClient.Do(req) + if err != nil { + return errors.WithStack(err) + } + return errors.Trace(res.Body.Close()) +} + +func (c *pdClient) SetStoresLabel( + ctx context.Context, stores []uint64, labelKey, labelValue string, +) error { + b := []byte(fmt.Sprintf(`{"%s": "%s"}`, labelKey, labelValue)) + addr := c.getPDAPIAddr() + if addr == "" { + return errors.New("failed to add stores labels: no leader") + } + for _, id := range stores { + req, _ := http.NewRequestWithContext( + ctx, "POST", + addr+path.Join("/pd/api/v1/store", strconv.FormatUint(id, 10), "label"), + bytes.NewReader(b), + ) + res, err := http.DefaultClient.Do(req) + if err != nil { + return errors.WithStack(err) + } + err = res.Body.Close() + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +func (c *pdClient) getPDAPIAddr() string { + addr := c.client.GetLeaderAddr() + if addr != "" && !strings.HasPrefix(addr, "http") { + addr = "http://" + addr + } + return strings.TrimRight(addr, "/") +} diff --git a/pkg/restore/split_test.go b/pkg/restore/split_test.go new file mode 100644 index 000000000..509c4cfa0 --- /dev/null +++ b/pkg/restore/split_test.go @@ -0,0 +1,301 @@ +package restore + +import ( + "bytes" + "context" + "sync" + + . 
"github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/import_sstpb" + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/pingcap/pd/server/schedule/placement" + "github.com/pingcap/tidb/util/codec" +) + +type testClient struct { + mu sync.RWMutex + stores map[uint64]*metapb.Store + regions map[uint64]*RegionInfo + nextRegionID uint64 +} + +func newTestClient(stores map[uint64]*metapb.Store, regions map[uint64]*RegionInfo, nextRegionID uint64) *testClient { + return &testClient{ + stores: stores, + regions: regions, + nextRegionID: nextRegionID, + } +} + +func (c *testClient) GetAllRegions() map[uint64]*RegionInfo { + c.mu.RLock() + defer c.mu.RUnlock() + return c.regions +} + +func (c *testClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { + c.mu.RLock() + defer c.mu.RUnlock() + store, ok := c.stores[storeID] + if !ok { + return nil, errors.Errorf("store not found") + } + return store, nil +} + +func (c *testClient) GetRegion(ctx context.Context, key []byte) (*RegionInfo, error) { + c.mu.RLock() + defer c.mu.RUnlock() + for _, region := range c.regions { + if bytes.Compare(key, region.Region.StartKey) >= 0 && + (len(region.Region.EndKey) == 0 || bytes.Compare(key, region.Region.EndKey) < 0) { + return region, nil + } + } + return nil, errors.Errorf("region not found: key=%s", string(key)) +} + +func (c *testClient) GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error) { + c.mu.RLock() + defer c.mu.RUnlock() + region, ok := c.regions[regionID] + if !ok { + return nil, errors.Errorf("region not found: id=%d", regionID) + } + return region, nil +} + +func (c *testClient) SplitRegion(ctx context.Context, regionInfo *RegionInfo, key []byte) (*RegionInfo, error) { + c.mu.Lock() + defer c.mu.Unlock() + var target *RegionInfo + splitKey := codec.EncodeBytes([]byte{}, key) + for _, region := range c.regions { + if bytes.Compare(splitKey, region.Region.StartKey) >= 0 && + (len(region.Region.EndKey) == 0 || bytes.Compare(splitKey, region.Region.EndKey) < 0) { + target = region + } + } + if target == nil { + return nil, errors.Errorf("region not found: key=%s", string(key)) + } + newRegion := &RegionInfo{ + Region: &metapb.Region{ + Peers: target.Region.Peers, + Id: c.nextRegionID, + StartKey: target.Region.StartKey, + EndKey: splitKey, + }, + } + c.regions[c.nextRegionID] = newRegion + c.nextRegionID++ + target.Region.StartKey = splitKey + c.regions[target.Region.Id] = target + return newRegion, nil +} + +func (c *testClient) BatchSplitRegions( + ctx context.Context, regionInfo *RegionInfo, keys [][]byte, +) ([]*RegionInfo, error) { + c.mu.Lock() + defer c.mu.Unlock() + newRegions := make([]*RegionInfo, 0) + for _, key := range keys { + var target *RegionInfo + splitKey := codec.EncodeBytes([]byte{}, key) + for _, region := range c.regions { + if bytes.Compare(splitKey, region.Region.GetStartKey()) > 0 && + beforeEnd(splitKey, region.Region.GetEndKey()) { + target = region + } + } + if target == nil { + continue + } + newRegion := &RegionInfo{ + Region: &metapb.Region{ + Peers: target.Region.Peers, + Id: c.nextRegionID, + StartKey: target.Region.StartKey, + EndKey: splitKey, + }, + } + c.regions[c.nextRegionID] = newRegion + c.nextRegionID++ + target.Region.StartKey = splitKey + c.regions[target.Region.Id] = target + newRegions = append(newRegions, newRegion) + } + return newRegions, nil +} + +func (c *testClient) ScatterRegion(ctx context.Context, regionInfo *RegionInfo) 
error { + return nil +} + +func (c *testClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) { + return &pdpb.GetOperatorResponse{ + Header: new(pdpb.ResponseHeader), + }, nil +} + +func (c *testClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error) { + regions := make([]*RegionInfo, 0) + for _, region := range c.regions { + if limit > 0 && len(regions) >= limit { + break + } + if (len(region.Region.GetEndKey()) != 0 && bytes.Compare(region.Region.GetEndKey(), key) <= 0) || + bytes.Compare(region.Region.GetStartKey(), endKey) > 0 { + continue + } + regions = append(regions, region) + } + return regions, nil +} + +func (c *testClient) GetPlacementRule(ctx context.Context, groupID, ruleID string) (r placement.Rule, err error) { + return +} + +func (c *testClient) SetPlacementRule(ctx context.Context, rule placement.Rule) error { + return nil +} + +func (c *testClient) DeletePlacementRule(ctx context.Context, groupID, ruleID string) error { + return nil +} + +func (c *testClient) SetStoresLabel(ctx context.Context, stores []uint64, labelKey, labelValue string) error { + return nil +} + +// region: [, aay), [aay, bba), [bba, bbh), [bbh, cca), [cca, ) +// range: [aaa, aae), [aae, aaz), [ccd, ccf), [ccf, ccj) +// rewrite rules: aa -> xx, cc -> bb +// expected regions after split: +// [, aay), [aay, bb), [bb, bba), [bba, bbf), [bbf, bbh), [bbh, bbj), +// [bbj, cca), [cca, xx), [xx, xxe), [xxe, xxz), [xxz, ) +func (s *testRestoreUtilSuite) TestSplit(c *C) { + client := initTestClient() + ranges := initRanges() + rewriteRules := initRewriteRules() + regionSplitter := NewRegionSplitter(client) + + ctx := context.Background() + err := regionSplitter.Split(ctx, ranges, rewriteRules, func(key [][]byte) {}) + if err != nil { + c.Assert(err, IsNil, Commentf("split regions failed: %v", err)) + } + regions := client.GetAllRegions() + if !validateRegions(regions) { + for _, region := range regions { + c.Logf("region: %v\n", region.Region) + } + c.Log("get wrong result") + c.Fail() + } +} + +// region: [, aay), [aay, bba), [bba, bbh), [bbh, cca), [cca, ) +func initTestClient() *testClient { + peers := make([]*metapb.Peer, 1) + peers[0] = &metapb.Peer{ + Id: 1, + StoreId: 1, + } + keys := [6]string{"", "aay", "bba", "bbh", "cca", ""} + regions := make(map[uint64]*RegionInfo) + for i := uint64(1); i < 6; i++ { + startKey := []byte(keys[i-1]) + if len(startKey) != 0 { + startKey = codec.EncodeBytes([]byte{}, startKey) + } + endKey := []byte(keys[i]) + if len(endKey) != 0 { + endKey = codec.EncodeBytes([]byte{}, endKey) + } + regions[i] = &RegionInfo{ + Region: &metapb.Region{ + Id: i, + Peers: peers, + StartKey: startKey, + EndKey: endKey, + }, + } + } + stores := make(map[uint64]*metapb.Store) + stores[1] = &metapb.Store{ + Id: 1, + } + return newTestClient(stores, regions, 6) +} + +// range: [aaa, aae), [aae, aaz), [ccd, ccf), [ccf, ccj) +func initRanges() []Range { + var ranges [4]Range + ranges[0] = Range{ + StartKey: []byte("aaa"), + EndKey: []byte("aae"), + } + ranges[1] = Range{ + StartKey: []byte("aae"), + EndKey: []byte("aaz"), + } + ranges[2] = Range{ + StartKey: []byte("ccd"), + EndKey: []byte("ccf"), + } + ranges[3] = Range{ + StartKey: []byte("ccf"), + EndKey: []byte("ccj"), + } + return ranges[:] +} + +func initRewriteRules() *RewriteRules { + var rules [2]*import_sstpb.RewriteRule + rules[0] = &import_sstpb.RewriteRule{ + OldKeyPrefix: []byte("aa"), + NewKeyPrefix: []byte("xx"), + } + rules[1] = 
&import_sstpb.RewriteRule{ + OldKeyPrefix: []byte("cc"), + NewKeyPrefix: []byte("bb"), + } + return &RewriteRules{ + Table: rules[:], + Data: rules[:], + } +} + +// expected regions after split: +// [, aay), [aay, bb), [bb, bba), [bba, bbf), [bbf, bbh), [bbh, bbj), +// [bbj, cca), [cca, xx), [xx, xxe), [xxe, xxz), [xxz, ) +func validateRegions(regions map[uint64]*RegionInfo) bool { + keys := [12]string{"", "aay", "bb", "bba", "bbf", "bbh", "bbj", "cca", "xx", "xxe", "xxz", ""} + if len(regions) != 11 { + return false + } +FindRegion: + for i := 1; i < 12; i++ { + for _, region := range regions { + startKey := []byte(keys[i-1]) + if len(startKey) != 0 { + startKey = codec.EncodeBytes([]byte{}, startKey) + } + endKey := []byte(keys[i]) + if len(endKey) != 0 { + endKey = codec.EncodeBytes([]byte{}, endKey) + } + if bytes.Equal(region.Region.GetStartKey(), startKey) && + bytes.Equal(region.Region.GetEndKey(), endKey) { + continue FindRegion + } + } + return false + } + return true +} diff --git a/pkg/restore/util.go b/pkg/restore/util.go index 126e864fd..a2e9e3e38 100644 --- a/pkg/restore/util.go +++ b/pkg/restore/util.go @@ -13,7 +13,6 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "github.com/pingcap/parser/model" - restore_util "github.com/pingcap/tidb-tools/pkg/restore-util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" @@ -76,7 +75,7 @@ func GetRewriteRules( newTable *model.TableInfo, oldTable *model.TableInfo, newTimeStamp uint64, -) *restore_util.RewriteRules { +) *RewriteRules { tableIDs := make(map[int64]int64) tableIDs[oldTable.ID] = newTable.ID if oldTable.Partition != nil { @@ -119,7 +118,7 @@ func GetRewriteRules( } } - return &restore_util.RewriteRules{ + return &RewriteRules{ Table: tableRules, Data: dataRules, } @@ -196,9 +195,9 @@ func withRetry( // ValidateFileRanges checks and returns the ranges of the files. 
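// It uses the fileAppended map below to avoid adding the same file's range
// twice, and rejects files whose start and end keys decode to different
// table IDs.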
func ValidateFileRanges( files []*backup.File, - rewriteRules *restore_util.RewriteRules, -) ([]restore_util.Range, error) { - ranges := make([]restore_util.Range, 0, len(files)) + rewriteRules *RewriteRules, +) ([]Range, error) { + ranges := make([]Range, 0, len(files)) fileAppended := make(map[string]bool) for _, file := range files { @@ -217,7 +216,7 @@ func ValidateFileRanges( zap.Stringer("file", file)) return nil, errors.New("table ids dont match") } - ranges = append(ranges, restore_util.Range{ + ranges = append(ranges, Range{ StartKey: file.GetStartKey(), EndKey: file.GetEndKey(), }) @@ -228,7 +227,7 @@ func ValidateFileRanges( } // ValidateFileRewriteRule uses rewrite rules to validate the ranges of a file -func ValidateFileRewriteRule(file *backup.File, rewriteRules *restore_util.RewriteRules) error { +func ValidateFileRewriteRule(file *backup.File, rewriteRules *RewriteRules) error { // Check if the start key has a matched rewrite key _, startRule := rewriteRawKey(file.GetStartKey(), rewriteRules) if rewriteRules != nil && startRule == nil { @@ -269,7 +268,7 @@ func ValidateFileRewriteRule(file *backup.File, rewriteRules *restore_util.Rewri } // Rewrites a raw key and returns a encoded key -func rewriteRawKey(key []byte, rewriteRules *restore_util.RewriteRules) ([]byte, *import_sstpb.RewriteRule) { +func rewriteRawKey(key []byte, rewriteRules *RewriteRules) ([]byte, *import_sstpb.RewriteRule) { if rewriteRules == nil { return codec.EncodeBytes([]byte{}, key), nil } @@ -281,7 +280,7 @@ func rewriteRawKey(key []byte, rewriteRules *restore_util.RewriteRules) ([]byte, return nil, nil } -func matchOldPrefix(key []byte, rewriteRules *restore_util.RewriteRules) *import_sstpb.RewriteRule { +func matchOldPrefix(key []byte, rewriteRules *RewriteRules) *import_sstpb.RewriteRule { for _, rule := range rewriteRules.Data { if bytes.HasPrefix(key, rule.GetOldKeyPrefix()) { return rule @@ -295,7 +294,7 @@ func matchOldPrefix(key []byte, rewriteRules *restore_util.RewriteRules) *import return nil } -func matchNewPrefix(key []byte, rewriteRules *restore_util.RewriteRules) *import_sstpb.RewriteRule { +func matchNewPrefix(key []byte, rewriteRules *RewriteRules) *import_sstpb.RewriteRule { for _, rule := range rewriteRules.Data { if bytes.HasPrefix(key, rule.GetNewKeyPrefix()) { return rule @@ -319,8 +318,8 @@ func truncateTS(key []byte) []byte { func SplitRanges( ctx context.Context, client *Client, - ranges []restore_util.Range, - rewriteRules *restore_util.RewriteRules, + ranges []Range, + rewriteRules *RewriteRules, updateCh chan<- struct{}, ) error { start := time.Now() @@ -328,7 +327,7 @@ func SplitRanges( elapsed := time.Since(start) summary.CollectDuration("split region", elapsed) }() - splitter := restore_util.NewRegionSplitter(restore_util.NewClient(client.GetPDClient())) + splitter := NewRegionSplitter(NewSplitClient(client.GetPDClient())) return splitter.Split(ctx, ranges, rewriteRules, func(keys [][]byte) { for range keys { updateCh <- struct{}{} @@ -336,7 +335,7 @@ func SplitRanges( }) } -func rewriteFileKeys(file *backup.File, rewriteRules *restore_util.RewriteRules) (startKey, endKey []byte, err error) { +func rewriteFileKeys(file *backup.File, rewriteRules *RewriteRules) (startKey, endKey []byte, err error) { startID := tablecodec.DecodeTableID(file.GetStartKey()) endID := tablecodec.DecodeTableID(file.GetEndKey()) var rule *import_sstpb.RewriteRule diff --git a/pkg/restore/util_test.go b/pkg/restore/util_test.go index 5da5c9ab7..bc4da9168 100644 --- a/pkg/restore/util_test.go +++ 
b/pkg/restore/util_test.go @@ -5,7 +5,6 @@ import ( "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" - restore_util "github.com/pingcap/tidb-tools/pkg/restore-util" "github.com/pingcap/tidb/tablecodec" ) @@ -34,7 +33,7 @@ func (s *testRestoreUtilSuite) TestGetSSTMetaFromFile(c *C) { } func (s *testRestoreUtilSuite) TestValidateFileRanges(c *C) { - rules := &restore_util.RewriteRules{ + rules := &RewriteRules{ Table: []*import_sstpb.RewriteRule{&import_sstpb.RewriteRule{ OldKeyPrefix: []byte(tablecodec.EncodeTablePrefix(1)), NewKeyPrefix: []byte(tablecodec.EncodeTablePrefix(2)), From 3863a3a477ee9875944ba851285a3fd42b34a5e5 Mon Sep 17 00:00:00 2001 From: Kolbe Kegel Date: Fri, 31 Jan 2020 23:40:50 -0800 Subject: [PATCH 02/46] Fixed handling for a dbName that do not exist in the backup being restored (#148) * Fixed handling for a dbName that do not exist in the backup being restored * Fixed handling for a dbName that do not exist in the backup being restored --- cmd/restore.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cmd/restore.go b/cmd/restore.go index eee65ba86..4f66e47de 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -111,6 +111,9 @@ func runRestore(flagSet *flag.FlagSet, cmdName, dbName, tableName string) error case len(dbName) != 0 && len(tableName) == 0: // database restore db := client.GetDatabase(dbName) + if db == nil { + return errors.Errorf("database %s not found in backup", dbName) + } err = client.CreateDatabase(db.Schema) if err != nil { return errors.Trace(err) @@ -122,6 +125,9 @@ func runRestore(flagSet *flag.FlagSet, cmdName, dbName, tableName string) error case len(dbName) != 0 && len(tableName) != 0: // table restore db := client.GetDatabase(dbName) + if db == nil { + return errors.Errorf("database %s not found in backup", dbName) + } err = client.CreateDatabase(db.Schema) if err != nil { return errors.Trace(err) From 6b65080b967947c6d816a8a1320ee594b921fb54 Mon Sep 17 00:00:00 2001 From: 3pointer Date: Wed, 5 Feb 2020 10:09:01 +0800 Subject: [PATCH 03/46] validate: fix debug meta test ci (#153) * validate: fix debug meta test ci --- tests/br_debug_meta/run.sh | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/tests/br_debug_meta/run.sh b/tests/br_debug_meta/run.sh index 1dcfccefe..8dc3ef5a3 100644 --- a/tests/br_debug_meta/run.sh +++ b/tests/br_debug_meta/run.sh @@ -15,28 +15,33 @@ set -eu DB="$TEST_NAME" +TABLE="usertable1" run_sql "CREATE DATABASE $DB;" -run_sql "CREATE TABLE $DB.usertable1 ( \ +run_sql "CREATE TABLE $DB.$TABLE( \ YCSB_KEY varchar(64) NOT NULL, \ FIELD0 varchar(1) DEFAULT NULL, \ PRIMARY KEY (YCSB_KEY) \ ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" -run_sql "INSERT INTO $DB.usertable1 VALUES (\"a\", \"b\");" -run_sql "INSERT INTO $DB.usertable1 VALUES (\"aa\", \"b\");" +run_sql "INSERT INTO $DB.$TABLE VALUES (\"a\", \"b\");" +run_sql "INSERT INTO $DB.$TABLE VALUES (\"aa\", \"b\");" + +row_count_ori=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') # backup table echo "backup start..." run_br --pd $PD_ADDR backup table --db $DB --table usertable1 -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 +run_sql "DROP DATABASE $DB;" + # Test validate decode run_br validate decode -s "local://$TEST_DIR/$DB" # should generate backupmeta.json if [ ! -f "$TEST_DIR/$DB/backupmeta.json" ]; then - echo "TEST: [$TEST_NAME] failed!" + echo "TEST: [$TEST_NAME] decode failed!" 
exit 1 fi @@ -45,14 +50,21 @@ run_br validate encode -s "local://$TEST_DIR/$DB" # should generate backupmeta_from_json if [ ! -f "$TEST_DIR/$DB/backupmeta_from_json" ]; then - echo "TEST: [$TEST_NAME] failed!" + echo "TEST: [$TEST_NAME] encode failed!" exit 1 fi -DIFF=$(diff $TEST_DIR/$DB/backupmeta_from_json $TEST_DIR/$DB/backupmeta) -if [ "$DIFF" != "" ] -then - echo "TEST: [$TEST_NAME] failed!" +# replace backupmeta +mv "$TEST_DIR/$DB/backupmeta_from_json" "$TEST_DIR/$DB/backupmeta" + +# restore table +echo "restore start..." +run_br --pd $PD_ADDR restore table --db $DB --table usertable1 -s "local://$TEST_DIR/$DB" + +row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') + +if [ "${row_count_ori}" != "${row_count_new}" ];then + echo "TEST: [$TEST_NAME] failed!, row count not equal after restore" exit 1 fi From 0dfe82d7cacc8553f8744b6b54852b9466852542 Mon Sep 17 00:00:00 2001 From: kennytm Date: Mon, 10 Feb 2020 14:36:31 +0800 Subject: [PATCH 04/46] *: extracts runBackup/runRestore in cmd into pkg/task (#156) * *: extracts runBackup/runRestore in cmd into pkg/task Defines a "Config" structure to store the parsed flags. Use the "black-white-list" structure to define what tables/databases to backup/restore. * go.mod: update tidb to v4.0.0-beta --- cmd/backup.go | 203 ++---------------------- cmd/cmd.go | 62 +------- cmd/restore.go | 322 ++------------------------------------ cmd/validate.go | 75 +++------ go.mod | 19 +-- go.sum | 96 ++++++++---- pkg/backup/client.go | 87 +++------- pkg/backup/client_test.go | 20 +-- pkg/backup/schema_test.go | 34 ++-- pkg/restore/db_test.go | 2 +- pkg/restore/util.go | 7 +- pkg/storage/flags.go | 46 +----- pkg/storage/gcs.go | 28 ++-- pkg/storage/gcs_test.go | 17 +- pkg/storage/s3.go | 29 ++-- pkg/storage/s3_test.go | 4 +- pkg/storage/storage.go | 6 +- pkg/task/backup.go | 157 +++++++++++++++++++ pkg/task/common.go | 236 ++++++++++++++++++++++++++++ pkg/task/restore.go | 254 ++++++++++++++++++++++++++++++ 20 files changed, 871 insertions(+), 833 deletions(-) create mode 100644 pkg/task/backup.go create mode 100644 pkg/task/common.go create mode 100644 pkg/task/restore.go diff --git a/cmd/backup.go b/cmd/backup.go index 73ae6106f..39aa4fd28 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -1,176 +1,21 @@ package cmd import ( - "context" - - "github.com/pingcap/errors" - "github.com/pingcap/log" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/session" "github.com/spf13/cobra" - "github.com/spf13/pflag" - "github.com/pingcap/br/pkg/backup" - "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) -const ( - flagBackupTimeago = "timeago" - flagBackupRateLimit = "ratelimit" - flagBackupRateLimitUnit = "ratelimit-unit" - flagBackupConcurrency = "concurrency" - flagBackupChecksum = "checksum" - flagLastBackupTS = "lastbackupts" -) - -func defineBackupFlags(flagSet *pflag.FlagSet) { - flagSet.StringP( - flagBackupTimeago, "", "", - "The history version of the backup task, e.g. 1m, 1h. Do not exceed GCSafePoint") - flagSet.Uint64P( - flagBackupRateLimit, "", 0, "The rate limit of the backup task, MB/s per node") - flagSet.Uint32P( - flagBackupConcurrency, "", 4, "The size of thread pool on each node that execute the backup task") - flagSet.BoolP(flagBackupChecksum, "", true, - "Run checksum after backup") - flagSet.Uint64P(flagLastBackupTS, "", 0, "the last time backup ts") - _ = flagSet.MarkHidden(flagLastBackupTS) - - // Test only flag. 
- flagSet.Uint64P( - flagBackupRateLimitUnit, "", utils.MB, "The unit of rate limit of the backup task") - _ = flagSet.MarkHidden(flagBackupRateLimitUnit) -} - -func runBackup(flagSet *pflag.FlagSet, cmdName, db, table string) error { - ctx, cancel := context.WithCancel(defaultContext) - defer cancel() - - mgr, err := GetDefaultMgr() - if err != nil { - return err - } - defer mgr.Close() - - timeago, err := flagSet.GetString(flagBackupTimeago) - if err != nil { - return err - } - - ratelimit, err := flagSet.GetUint64(flagBackupRateLimit) - if err != nil { - return err - } - ratelimitUnit, err := flagSet.GetUint64(flagBackupRateLimitUnit) - if err != nil { - return err - } - ratelimit *= ratelimitUnit - - concurrency, err := flagSet.GetUint32(flagBackupConcurrency) - if err != nil { - return err - } - if concurrency == 0 { - err = errors.New("at least one thread required") +func runBackupCommand(command *cobra.Command, cmdName string) error { + cfg := task.BackupConfig{Config: task.Config{LogProgress: HasLogFile()}} + if err := cfg.ParseFromFlags(command.Flags()); err != nil { return err } - - checksum, err := flagSet.GetBool(flagBackupChecksum) - if err != nil { - return err - } - - lastBackupTS, err := flagSet.GetUint64(flagLastBackupTS) - if err != nil { - return nil - } - - u, err := storage.ParseBackendFromFlags(flagSet, FlagStorage) - if err != nil { - return err - } - - client, err := backup.NewBackupClient(ctx, mgr) - if err != nil { - return nil - } - - err = client.SetStorage(ctx, u) - if err != nil { - return err - } - - backupTS, err := client.GetTS(ctx, timeago) - if err != nil { - return err - } - - defer summary.Summary(cmdName) - - ranges, backupSchemas, err := backup.BuildBackupRangeAndSchema( - mgr.GetDomain(), mgr.GetTiKV(), backupTS, db, table) - if err != nil { - return err - } - - // The number of regions need to backup - approximateRegions := 0 - for _, r := range ranges { - var regionCount int - regionCount, err = mgr.GetRegionCount(ctx, r.StartKey, r.EndKey) - if err != nil { - return err - } - approximateRegions += regionCount - } - - summary.CollectInt("backup total regions", approximateRegions) - // Backup - // Redirect to log if there is no log file to avoid unreadable output. - updateCh := utils.StartProgress( - ctx, cmdName, int64(approximateRegions), !HasLogFile()) - err = client.BackupRanges( - ctx, ranges, lastBackupTS, backupTS, ratelimit, concurrency, updateCh) - if err != nil { - return err - } - // Backup has finished - close(updateCh) - - // Checksum - backupSchemasConcurrency := backup.DefaultSchemaConcurrency - if backupSchemas.Len() < backupSchemasConcurrency { - backupSchemasConcurrency = backupSchemas.Len() - } - updateCh = utils.StartProgress( - ctx, "Checksum", int64(backupSchemas.Len()), !HasLogFile()) - backupSchemas.SetSkipChecksum(!checksum) - backupSchemas.Start( - ctx, mgr.GetTiKV(), backupTS, uint(backupSchemasConcurrency), updateCh) - - err = client.CompleteMeta(backupSchemas) - if err != nil { - return err - } - - valid, err := client.FastChecksum() - if err != nil { - return err - } - if !valid { - log.Error("backup FastChecksum failed!") - } - // Checksum has finished - close(updateCh) - - err = client.SaveBackupMeta(ctx) - if err != nil { - return err - } - return nil + return task.RunBackup(GetDefaultContext(), cmdName, &cfg) } // NewBackupCommand return a full backup subcommand. 
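To see the wiring this patch introduces in one place: each subcommand is now a thin cobra shim over pkg/task. The sketch below is assembled from the hunks above purely as an illustration; newExampleBackupCommand and the "example" strings are hypothetical, while task.BackupConfig, ParseFromFlags, task.RunBackup, task.DefineBackupFlags, HasLogFile, and GetDefaultContext all appear in this patch.

    // Sketch only, as if it lived in cmd/backup.go next to the imports above.
    func newExampleBackupCommand() *cobra.Command {
    	command := &cobra.Command{
    		Use:   "example",
    		Short: "illustrative subcommand wired through pkg/task",
    		RunE: func(command *cobra.Command, _ []string) error {
    			// Flags are parsed into a typed config instead of being read
    			// one by one from the FlagSet as the removed code did.
    			cfg := task.BackupConfig{Config: task.Config{LogProgress: HasLogFile()}}
    			if err := cfg.ParseFromFlags(command.Flags()); err != nil {
    				return err
    			}
    			// All backup logic sits behind a single pkg/task entry point.
    			return task.RunBackup(GetDefaultContext(), "Example backup", &cfg)
    		},
    	}
    	task.DefineBackupFlags(command.PersistentFlags())
    	return command
    }

The payoff of the split is that cmd/ keeps only command wiring, while flag definitions and execution live in pkg/task and are shared by the backup, restore, and validate subcommands below.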
@@ -200,7 +45,7 @@ func NewBackupCommand() *cobra.Command { newTableBackupCommand(), ) - defineBackupFlags(command.PersistentFlags()) + task.DefineBackupFlags(command.PersistentFlags()) return command } @@ -211,7 +56,7 @@ func newFullBackupCommand() *cobra.Command { Short: "backup all database", RunE: func(command *cobra.Command, _ []string) error { // empty db/table means full backup. - return runBackup(command.Flags(), "Full backup", "", "") + return runBackupCommand(command, "Full backup") }, } return command @@ -223,19 +68,10 @@ func newDbBackupCommand() *cobra.Command { Use: "db", Short: "backup a database", RunE: func(command *cobra.Command, _ []string) error { - db, err := command.Flags().GetString(flagDatabase) - if err != nil { - return err - } - if len(db) == 0 { - return errors.Errorf("empty database name is not allowed") - } - return runBackup(command.Flags(), "Database backup", db, "") + return runBackupCommand(command, "Database backup") }, } - command.Flags().StringP(flagDatabase, "", "", "backup a table in the specific db") - _ = command.MarkFlagRequired(flagDatabase) - + task.DefineDatabaseFlags(command) return command } @@ -245,26 +81,9 @@ func newTableBackupCommand() *cobra.Command { Use: "table", Short: "backup a table", RunE: func(command *cobra.Command, _ []string) error { - db, err := command.Flags().GetString(flagDatabase) - if err != nil { - return err - } - if len(db) == 0 { - return errors.Errorf("empty database name is not allowed") - } - table, err := command.Flags().GetString(flagTable) - if err != nil { - return err - } - if len(table) == 0 { - return errors.Errorf("empty table name is not allowed") - } - return runBackup(command.Flags(), "Table backup", db, table) + return runBackupCommand(command, "Table backup") }, } - command.Flags().StringP(flagDatabase, "", "", "backup a table in the specific db") - command.Flags().StringP(flagTable, "t", "", "backup the specific table") - _ = command.MarkFlagRequired(flagDatabase) - _ = command.MarkFlagRequired(flagTable) + task.DefineTableFlags(command) return command } diff --git a/cmd/cmd.go b/cmd/cmd.go index 468c35232..fdadaa6f8 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -2,47 +2,28 @@ package cmd import ( "context" - "fmt" "net/http" "net/http/pprof" "sync" "sync/atomic" - "github.com/pingcap/errors" "github.com/pingcap/log" - "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/util/logutil" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "go.uber.org/zap" - "github.com/pingcap/br/pkg/conn" - "github.com/pingcap/br/pkg/storage" + "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) var ( initOnce = sync.Once{} defaultContext context.Context - pdAddress string hasLogFile uint64 - - connOnce = sync.Once{} - defaultMgr *conn.Mgr ) const ( - // FlagPD is the name of url flag. - FlagPD = "pd" - // FlagCA is the name of CA flag. - FlagCA = "ca" - // FlagCert is the name of cert flag. - FlagCert = "cert" - // FlagKey is the name of key flag. - FlagKey = "key" - // FlagStorage is the name of storage flag. - FlagStorage = "storage" // FlagLogLevel is the name of log-level flag. FlagLogLevel = "log-level" // FlagLogFile is the name of log-file flag. @@ -52,9 +33,6 @@ const ( // FlagSlowLogFile is the name of slow-log-file flag. 
FlagSlowLogFile = "slow-log-file" - flagDatabase = "db" - flagTable = "table" - flagVersion = "version" flagVersionShort = "V" ) @@ -65,19 +43,13 @@ func AddFlags(cmd *cobra.Command) { cmd.Flags().BoolP(flagVersion, flagVersionShort, false, "Display version information about BR") cmd.SetVersionTemplate("{{printf \"%s\" .Version}}\n") - cmd.PersistentFlags().StringP(FlagPD, "u", "127.0.0.1:2379", "PD address") - cmd.PersistentFlags().String(FlagCA, "", "CA certificate path for TLS connection") - cmd.PersistentFlags().String(FlagCert, "", "Certificate path for TLS connection") - cmd.PersistentFlags().String(FlagKey, "", "Private key path for TLS connection") - cmd.PersistentFlags().StringP(FlagStorage, "s", "", - `specify the url where backup storage, eg, "local:///path/to/save"`) cmd.PersistentFlags().StringP(FlagLogLevel, "L", "info", "Set the log level") cmd.PersistentFlags().String(FlagLogFile, "", "Set the log file path. If not set, logs will output to stdout") cmd.PersistentFlags().String(FlagStatusAddr, "", "Set the HTTP listening address for the status report service. Set to empty string to disable") - storage.DefineFlags(cmd.PersistentFlags()) + task.DefineCommonFlags(cmd.PersistentFlags()) cmd.PersistentFlags().StringP(FlagSlowLogFile, "", "", "Set the slow log file path. If not set, discard slow logs") @@ -140,12 +112,6 @@ func Init(cmd *cobra.Command) (err error) { } } }() - // Set the PD server address. - pdAddress, e = cmd.Flags().GetString(FlagPD) - if e != nil { - err = e - return - } }) return err } @@ -155,30 +121,6 @@ func HasLogFile() bool { return atomic.LoadUint64(&hasLogFile) != uint64(0) } -// GetDefaultMgr returns the default mgr for command line usage. -func GetDefaultMgr() (*conn.Mgr, error) { - if pdAddress == "" { - return nil, errors.New("pd address can not be empty") - } - - // Lazy initialize and defaultMgr - var err error - connOnce.Do(func() { - var storage kv.Storage - storage, err = tikv.Driver{}.Open( - // Disable GC because TiDB enables GC already. - fmt.Sprintf("tikv://%s?disableGC=true", pdAddress)) - if err != nil { - return - } - defaultMgr, err = conn.NewMgr(defaultContext, pdAddress, storage.(tikv.Storage)) - }) - if err != nil { - return nil, err - } - return defaultMgr, nil -} - // SetDefaultContext sets the default context for command line usage. 
func SetDefaultContext(ctx context.Context) { defaultContext = ctx diff --git a/cmd/restore.go b/cmd/restore.go index 4f66e47de..2dfec9846 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -1,33 +1,20 @@ package cmd import ( - "context" - "strings" - - "github.com/gogo/protobuf/proto" - "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/backup" - "github.com/pingcap/log" "github.com/pingcap/tidb/session" "github.com/spf13/cobra" - flag "github.com/spf13/pflag" - "go.uber.org/zap" - "github.com/pingcap/br/pkg/conn" - "github.com/pingcap/br/pkg/restore" - "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) -var schedulers = map[string]struct{}{ - "balance-leader-scheduler": {}, - "balance-hot-region-scheduler": {}, - "balance-region-scheduler": {}, - - "shuffle-leader-scheduler": {}, - "shuffle-region-scheduler": {}, - "shuffle-hot-region-scheduler": {}, +func runRestoreCommand(command *cobra.Command, cmdName string) error { + cfg := task.RestoreConfig{Config: task.Config{LogProgress: HasLogFile()}} + if err := cfg.ParseFromFlags(command.Flags()); err != nil { + return err + } + return task.RunRestore(GetDefaultContext(), cmdName, &cfg) } // NewRestoreCommand returns a restore subcommand @@ -54,172 +41,17 @@ func NewRestoreCommand() *cobra.Command { newDbRestoreCommand(), newTableRestoreCommand(), ) - - command.PersistentFlags().Uint("concurrency", 128, - "The size of thread pool that execute the restore task") - command.PersistentFlags().Uint64("ratelimit", 0, - "The rate limit of the restore task, MB/s per node. Set to 0 for unlimited speed.") - command.PersistentFlags().BoolP("checksum", "", true, - "Run checksum after restore") - command.PersistentFlags().BoolP("online", "", false, - "Whether online when restore") - // TODO remove hidden flag if it's stable - _ = command.PersistentFlags().MarkHidden("online") + task.DefineRestoreFlags(command.PersistentFlags()) return command } -func runRestore(flagSet *flag.FlagSet, cmdName, dbName, tableName string) error { - ctx, cancel := context.WithCancel(GetDefaultContext()) - defer cancel() - - mgr, err := GetDefaultMgr() - if err != nil { - return err - } - defer mgr.Close() - - client, err := restore.NewRestoreClient( - ctx, mgr.GetPDClient(), mgr.GetTiKV()) - if err != nil { - return errors.Trace(err) - } - defer client.Close() - err = initRestoreClient(ctx, client, flagSet) - if err != nil { - return errors.Trace(err) - } - - files := make([]*backup.File, 0) - tables := make([]*utils.Table, 0) - - defer summary.Summary(cmdName) - - switch { - case len(dbName) == 0 && len(tableName) == 0: - // full restore - for _, db := range client.GetDatabases() { - err = client.CreateDatabase(db.Schema) - if err != nil { - return errors.Trace(err) - } - for _, table := range db.Tables { - files = append(files, table.Files...) - } - tables = append(tables, db.Tables...) - } - case len(dbName) != 0 && len(tableName) == 0: - // database restore - db := client.GetDatabase(dbName) - if db == nil { - return errors.Errorf("database %s not found in backup", dbName) - } - err = client.CreateDatabase(db.Schema) - if err != nil { - return errors.Trace(err) - } - for _, table := range db.Tables { - files = append(files, table.Files...) 
- } - tables = db.Tables - case len(dbName) != 0 && len(tableName) != 0: - // table restore - db := client.GetDatabase(dbName) - if db == nil { - return errors.Errorf("database %s not found in backup", dbName) - } - err = client.CreateDatabase(db.Schema) - if err != nil { - return errors.Trace(err) - } - table := db.GetTable(tableName) - files = table.Files - tables = append(tables, table) - default: - return errors.New("must set db when table was set") - } - var newTS uint64 - if client.IsIncremental() { - newTS, err = client.GetTS(ctx) - if err != nil { - return err - } - } - summary.CollectInt("restore files", len(files)) - rewriteRules, newTables, err := client.CreateTables(mgr.GetDomain(), tables, newTS) - if err != nil { - return errors.Trace(err) - } - ranges, err := restore.ValidateFileRanges(files, rewriteRules) - if err != nil { - return err - } - summary.CollectInt("restore ranges", len(ranges)) - - // Redirect to log if there is no log file to avoid unreadable output. - updateCh := utils.StartProgress( - ctx, - cmdName, - // Split/Scatter + Download/Ingest - int64(len(ranges)+len(files)), - !HasLogFile()) - - err = restore.SplitRanges(ctx, client, ranges, rewriteRules, updateCh) - if err != nil { - log.Error("split regions failed", zap.Error(err)) - return errors.Trace(err) - } - - if !client.IsIncremental() { - var pdAddr string - pdAddr, err = flagSet.GetString(FlagPD) - if err != nil { - return errors.Trace(err) - } - pdAddrs := strings.Split(pdAddr, ",") - err = client.ResetTS(pdAddrs) - if err != nil { - log.Error("reset pd TS failed", zap.Error(err)) - return errors.Trace(err) - } - } - - removedSchedulers, err := RestorePrepareWork(ctx, client, mgr) - if err != nil { - return errors.Trace(err) - } - - err = client.RestoreAll(rewriteRules, updateCh) - if err != nil { - return errors.Trace(err) - } - - err = RestorePostWork(ctx, client, mgr, removedSchedulers) - if err != nil { - return errors.Trace(err) - } - // Restore has finished. 
- close(updateCh) - - // Checksum - updateCh = utils.StartProgress( - ctx, "Checksum", int64(len(newTables)), !HasLogFile()) - err = client.ValidateChecksum( - ctx, mgr.GetTiKV().GetClient(), tables, newTables, updateCh) - if err != nil { - return err - } - close(updateCh) - - return nil -} - func newFullRestoreCommand() *cobra.Command { command := &cobra.Command{ Use: "full", Short: "restore all tables", RunE: func(cmd *cobra.Command, _ []string) error { - return runRestore(cmd.Flags(), "Full Restore", "", "") + return runRestoreCommand(cmd, "Full restore") }, } return command @@ -230,18 +62,10 @@ func newDbRestoreCommand() *cobra.Command { Use: "db", Short: "restore tables in a database", RunE: func(cmd *cobra.Command, _ []string) error { - db, err := cmd.Flags().GetString(flagDatabase) - if err != nil { - return err - } - if len(db) == 0 { - return errors.New("empty database name is not allowed") - } - return runRestore(cmd.Flags(), "Database Restore", db, "") + return runRestoreCommand(cmd, "Database restore") }, } - command.Flags().String(flagDatabase, "", "database name") - _ = command.MarkFlagRequired(flagDatabase) + task.DefineDatabaseFlags(command) return command } @@ -250,129 +74,9 @@ func newTableRestoreCommand() *cobra.Command { Use: "table", Short: "restore a table", RunE: func(cmd *cobra.Command, _ []string) error { - db, err := cmd.Flags().GetString(flagDatabase) - if err != nil { - return err - } - if len(db) == 0 { - return errors.New("empty database name is not allowed") - } - table, err := cmd.Flags().GetString(flagTable) - if err != nil { - return err - } - if len(table) == 0 { - return errors.New("empty table name is not allowed") - } - return runRestore(cmd.Flags(), "Table Restore", db, table) + return runRestoreCommand(cmd, "Table restore") }, } - - command.Flags().String(flagDatabase, "", "database name") - command.Flags().String(flagTable, "", "table name") - - _ = command.MarkFlagRequired(flagDatabase) - _ = command.MarkFlagRequired(flagTable) + task.DefineTableFlags(command) return command } - -func initRestoreClient(ctx context.Context, client *restore.Client, flagSet *flag.FlagSet) error { - u, err := storage.ParseBackendFromFlags(flagSet, FlagStorage) - if err != nil { - return err - } - rateLimit, err := flagSet.GetUint64("ratelimit") - if err != nil { - return err - } - client.SetRateLimit(rateLimit * utils.MB) - s, err := storage.Create(ctx, u) - if err != nil { - return errors.Trace(err) - } - metaData, err := s.Read(ctx, utils.MetaFile) - if err != nil { - return errors.Trace(err) - } - backupMeta := &backup.BackupMeta{} - err = proto.Unmarshal(metaData, backupMeta) - if err != nil { - return errors.Trace(err) - } - err = client.InitBackupMeta(backupMeta, u) - if err != nil { - return errors.Trace(err) - } - - concurrency, err := flagSet.GetUint("concurrency") - if err != nil { - return err - } - client.SetConcurrency(concurrency) - - isOnline, err := flagSet.GetBool("online") - if err != nil { - return err - } - if isOnline { - client.EnableOnline() - } - - return nil -} - -// RestorePrepareWork execute some prepare work before restore -func RestorePrepareWork(ctx context.Context, client *restore.Client, mgr *conn.Mgr) ([]string, error) { - if client.IsOnline() { - return nil, nil - } - err := client.SwitchToImportMode(ctx) - if err != nil { - return nil, errors.Trace(err) - } - existSchedulers, err := mgr.ListSchedulers(ctx) - if err != nil { - return nil, errors.Trace(err) - } - needRemoveSchedulers := make([]string, 0, len(existSchedulers)) - for _, s 
:= range existSchedulers { - if _, ok := schedulers[s]; ok { - needRemoveSchedulers = append(needRemoveSchedulers, s) - } - } - return removePDLeaderScheduler(ctx, mgr, needRemoveSchedulers) -} - -func removePDLeaderScheduler(ctx context.Context, mgr *conn.Mgr, existSchedulers []string) ([]string, error) { - removedSchedulers := make([]string, 0, len(existSchedulers)) - for _, scheduler := range existSchedulers { - err := mgr.RemoveScheduler(ctx, scheduler) - if err != nil { - return nil, err - } - removedSchedulers = append(removedSchedulers, scheduler) - } - return removedSchedulers, nil -} - -// RestorePostWork execute some post work after restore -func RestorePostWork(ctx context.Context, client *restore.Client, mgr *conn.Mgr, removedSchedulers []string) error { - if client.IsOnline() { - return nil - } - err := client.SwitchToNormalMode(ctx) - if err != nil { - return errors.Trace(err) - } - return addPDLeaderScheduler(ctx, mgr, removedSchedulers) -} - -func addPDLeaderScheduler(ctx context.Context, mgr *conn.Mgr, removedSchedulers []string) error { - for _, scheduler := range removedSchedulers { - err := mgr.AddScheduler(ctx, scheduler) - if err != nil { - return err - } - } - return nil -} diff --git a/cmd/validate.go b/cmd/validate.go index 8ba72b372..559cb9983 100644 --- a/cmd/validate.go +++ b/cmd/validate.go @@ -19,7 +19,7 @@ import ( "go.uber.org/zap" "github.com/pingcap/br/pkg/restore" - "github.com/pingcap/br/pkg/storage" + "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) @@ -54,24 +54,14 @@ func newCheckSumCommand() *cobra.Command { ctx, cancel := context.WithCancel(GetDefaultContext()) defer cancel() - u, err := storage.ParseBackendFromFlags(cmd.Flags(), FlagStorage) - if err != nil { + var cfg task.Config + if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { return err } - s, err := storage.Create(ctx, u) - if err != nil { - return errors.Trace(err) - } - - metaData, err := s.Read(ctx, utils.MetaFile) - if err != nil { - return errors.Trace(err) - } - backupMeta := &backup.BackupMeta{} - err = proto.Unmarshal(metaData, backupMeta) + _, s, backupMeta, err := task.ReadBackupMeta(ctx, &cfg) if err != nil { - return errors.Trace(err) + return err } dbs, err := utils.LoadBackupTables(backupMeta) @@ -152,24 +142,14 @@ func newBackupMetaCommand() *cobra.Command { if err != nil { return err } - u, err := storage.ParseBackendFromFlags(cmd.Flags(), FlagStorage) - if err != nil { - return err - } - s, err := storage.Create(ctx, u) - if err != nil { - log.Error("create storage failed", zap.Error(err)) - return errors.Trace(err) - } - data, err := s.Read(ctx, utils.MetaFile) - if err != nil { - log.Error("load backupmeta failed", zap.Error(err)) + + var cfg task.Config + if err = cfg.ParseFromFlags(cmd.Flags()); err != nil { return err } - backupMeta := &backup.BackupMeta{} - err = proto.Unmarshal(data, backupMeta) + _, _, backupMeta, err := task.ReadBackupMeta(ctx, &cfg) if err != nil { - log.Error("parse backupmeta failed", zap.Error(err)) + log.Error("read backupmeta failed", zap.Error(err)) return err } dbs, err := utils.LoadBackupTables(backupMeta) @@ -241,8 +221,7 @@ func newBackupMetaCommand() *cobra.Command { return nil }, } - command.Flags().String("path", "", "the path of backupmeta") - command.Flags().Uint64P("offset", "", 0, "the offset of table id alloctor") + command.Flags().Uint64("offset", 0, "the offset of table id alloctor") command.Hidden = true return command } @@ -254,24 +233,16 @@ func decodeBackupMetaCommand() *cobra.Command { RunE: 
func(cmd *cobra.Command, args []string) error { ctx, cancel := context.WithCancel(GetDefaultContext()) defer cancel() - u, err := storage.ParseBackendFromFlags(cmd.Flags(), FlagStorage) - if err != nil { - return errors.Trace(err) - } - s, err := storage.Create(ctx, u) - if err != nil { - return errors.Trace(err) + + var cfg task.Config + if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { + return err } - metaData, err := s.Read(ctx, utils.MetaFile) + _, s, backupMeta, err := task.ReadBackupMeta(ctx, &cfg) if err != nil { - return errors.Trace(err) + return err } - backupMeta := &backup.BackupMeta{} - err = proto.Unmarshal(metaData, backupMeta) - if err != nil { - return errors.Trace(err) - } backupMetaJSON, err := json.Marshal(backupMeta) if err != nil { return errors.Trace(err) @@ -309,14 +280,16 @@ func encodeBackupMetaCommand() *cobra.Command { RunE: func(cmd *cobra.Command, args []string) error { ctx, cancel := context.WithCancel(GetDefaultContext()) defer cancel() - u, err := storage.ParseBackendFromFlags(cmd.Flags(), FlagStorage) - if err != nil { - return errors.Trace(err) + + var cfg task.Config + if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { + return err } - s, err := storage.Create(ctx, u) + _, s, err := task.GetStorage(ctx, &cfg) if err != nil { - return errors.Trace(err) + return err } + metaData, err := s.Read(ctx, utils.MetaJSONFile) if err != nil { return errors.Trace(err) diff --git a/go.mod b/go.mod index 9951c2922..850750f09 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,6 @@ require ( github.com/cheggaaa/pb/v3 v3.0.1 github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect - github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8 // indirect github.com/fsouza/fake-gcs-server v1.15.0 github.com/go-sql-driver/mysql v1.4.1 github.com/gogo/protobuf v1.3.1 @@ -17,25 +16,23 @@ require ( github.com/google/uuid v1.1.1 github.com/onsi/ginkgo v1.10.3 // indirect github.com/onsi/gomega v1.7.1 // indirect - github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4 - github.com/pingcap/errors v0.11.4 - github.com/pingcap/kvproto v0.0.0-20191212110315-d6a9d626988c + github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 + github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 + github.com/pingcap/kvproto v0.0.0-20200108025604-a4dc183d2af5 github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 - github.com/pingcap/parser v0.0.0-20191210060830-bdf23a7ade01 - github.com/pingcap/pd v1.1.0-beta.0.20191212045800-234784c7a9c5 - github.com/pingcap/tidb v1.1.0-beta.0.20191213040028-9009da737834 - github.com/pingcap/tipb v0.0.0-20191209145133-44f75c9bef33 + github.com/pingcap/parser v0.0.0-20200109073933-a9496438d77d + github.com/pingcap/pd v1.1.0-beta.0.20191219054547-4d65bbefbc6d + github.com/pingcap/tidb v1.1.0-beta.0.20200110130413-8c3ee37c1938 + github.com/pingcap/tidb-tools v4.0.0-beta+incompatible + github.com/pingcap/tipb v0.0.0-20191227083941-3996eff010dc github.com/prometheus/client_golang v1.0.0 github.com/sirupsen/logrus v1.4.2 github.com/spf13/cobra v0.0.5 github.com/spf13/pflag v1.0.3 go.opencensus.io v0.22.2 // indirect - go.uber.org/atomic v1.5.1 // indirect go.uber.org/zap v1.13.0 - golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect golang.org/x/net v0.0.0-20191011234655-491137f69257 // indirect golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 - golang.org/x/tools v0.0.0-20191213032237-7093a17b0467 // indirect google.golang.org/api v0.14.0 
google.golang.org/grpc v1.25.1 ) diff --git a/go.sum b/go.sum index 085e00355..d5e9c891d 100644 --- a/go.sum +++ b/go.sum @@ -20,6 +20,8 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= @@ -36,6 +38,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d h1:rQlvB2AYWme2bIB18r/SipGiMEVJYE9U0z+MGoU/LtQ= github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cheggaaa/pb/v3 v3.0.1 h1:m0BngUk2LuSRYdx4fujDKNRXNDpbNCfptPfVT2m6OJY= github.com/cheggaaa/pb/v3 v3.0.1/go.mod h1:SqqeMF/pMOIu3xgGoxtPYhMNQP258xE4x/XRTYua+KU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -48,10 +52,12 @@ github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142 h1:3jFq2xL4ZajGK4aZY8jz+DAF0FHjI51BXjjSwCzS1Dk= github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -60,18 +66,23 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbp github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= 
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cznic/golex v0.0.0-20181122101858-9c343928389c/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= +github.com/cznic/parser v0.0.0-20160622100904-31edd927e5b1/go.mod h1:2B43mz36vGZNZEwkWi8ayRSSUXLfjL8OkbzwW4NcPMM= github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8 h1:LpMLYGyy67BoAFGda1NeOBQwqlv7nUXpm+rIVHGxZZ4= github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= +github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= +github.com/cznic/y v0.0.0-20170802143616-045f81c6662a/go.mod h1:1rk5VM7oSnA4vjp+hrLQ3HWHa+Y4yPCa3/CsJrcNnvs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/ristretto v0.0.1 h1:cJwdnj42uV8Jg4+KLrYovLiCgIfz9wtWm6E6KA+1tLs= +github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXhuXRyumBd6RE= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA= -github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -122,6 +133,7 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -169,7 +181,9 @@ github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpg github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= 
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -191,6 +205,7 @@ github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSg github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -219,7 +234,9 @@ github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vq github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -237,9 +254,11 @@ github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3 github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8= github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= @@ -248,45 +267,48 @@ 
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKw github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.3.0 h1:e5+lF2E4Y2WCIxBefVowBuB0iHrUH4HZ8q+6mGF7fJc= github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4 h1:iRtOAQ6FXkY/BGvst3CDfTva4nTqh6CL8WXvanLdbu0= github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= +github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 h1:rfD9v3+ppLPzoQBgZev0qYCpegrwyFx/BUpkApEiKdY= +github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 h1:KH4f4Si9XK6/IW50HtoaiLIFHGkapOM6w83za47UYik= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c h1:hvQd3aOLKLF7xvRV6DzvPkKY4QXzfVbjU1BhW0d9yL8= -github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= +github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 h1:58naV4XMEqm0hl9LcYo6cZoGBGiLtefMQMF/vo3XLgQ= +github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d h1:F8vp38kTAckN+v8Jlc98uMBvKIzr1a+UhnLyVYn8Q5Q= +github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d h1:rCmRK0lCRrHMUbS99BKFYhK9YxJDNw0xB033cQbYo0s= github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d/go.mod h1:fMRU1BA1y+r89AxUoaAar4JjrhUkVDt0o0Np6V8XbDQ= -github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8= -github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= -github.com/pingcap/kvproto v0.0.0-20191030021250-51b332bcb20b/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20191121022655-4c654046831d/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20191202044712-32be31591b03 h1:IyJl+qesVPf3UfFFmKtX69y1K5KC8uXlot3U0QgH7V4= -github.com/pingcap/kvproto v0.0.0-20191202044712-32be31591b03/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20191212110315-d6a9d626988c h1:CwVCq7XA/NvTQ6X9ZAhZlvcEvseUsHiPFQf2mL3LVl4= -github.com/pingcap/kvproto v0.0.0-20191212110315-d6a9d626988c/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= 
-github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd h1:hWDol43WY5PGhsh3+8794bFHY1bPrmu6bTalpssCrGg= -github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw= +github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E= +github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= +github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= +github.com/pingcap/kvproto v0.0.0-20191213111810-93cb7c623c8b/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= +github.com/pingcap/kvproto v0.0.0-20200108025604-a4dc183d2af5 h1:RUxQExD5yubAjWGnw8kcxfO9abbiVHIE1rbuCyQCWDE= +github.com/pingcap/kvproto v0.0.0-20200108025604-a4dc183d2af5/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20191210060830-bdf23a7ade01 h1:q1rGnV/296//bArDP7cDWWaSrhaeEKZY+gIo+Jb0Gyk= -github.com/pingcap/parser v0.0.0-20191210060830-bdf23a7ade01/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= -github.com/pingcap/pd v1.1.0-beta.0.20191210055626-676ddd3fbd2d/go.mod h1:Z/VMtXHpkOP+MnYnk4TL5VHc3ZwO1qHwc89zDuf5n8Q= -github.com/pingcap/pd v1.1.0-beta.0.20191212045800-234784c7a9c5 h1:sbpL1uNynq4yjGh0Xxb8MMePaOOXb9fdml3kB1NMQu4= -github.com/pingcap/pd v1.1.0-beta.0.20191212045800-234784c7a9c5/go.mod h1:NJYtcyKOqSWTJXoMF9CDdQc1xymxyBuQ8QSH6jJWqgc= -github.com/pingcap/sysutil v0.0.0-20191126040022-986c5b3ed9a3 h1:HCNif3lukL83gNC2EBAoh2Qbz36+2p0bm0LjgnNfl1s= -github.com/pingcap/sysutil v0.0.0-20191126040022-986c5b3ed9a3/go.mod h1:Futrrmuw98pEsbEmoPsjw8aKLCmixwHEmT2rF+AsXGw= -github.com/pingcap/tidb v1.1.0-beta.0.20191213040028-9009da737834 h1:eNf7bDY39moIzzcs5+PhLLW0BM2D2yrzFbjW/X42y0s= -github.com/pingcap/tidb v1.1.0-beta.0.20191213040028-9009da737834/go.mod h1:VWx47QOXISBHHtZeWrDQlBOdbvth9TE9gei6QpoqJ4g= +github.com/pingcap/parser v0.0.0-20200109073933-a9496438d77d h1:4QwSJRxmBjTB9ssJNWg2f2bDm5rqnHCUUjMh4N1QOOY= +github.com/pingcap/parser v0.0.0-20200109073933-a9496438d77d/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= +github.com/pingcap/pd v1.1.0-beta.0.20191219054547-4d65bbefbc6d h1:Ui80aiLTyd0EZD56o2tjFRYpHfhazBjtBdKeR8UoTFY= +github.com/pingcap/pd v1.1.0-beta.0.20191219054547-4d65bbefbc6d/go.mod h1:CML+b1JVjN+VbDijaIcUSmuPgpDjXEY7UiOx5yDP8eE= +github.com/pingcap/sysutil v0.0.0-20191216090214-5f9620d22b3b h1:EEyo/SCRswLGuSk+7SB86Ak1p8bS6HL1Mi4Dhyuv6zg= +github.com/pingcap/sysutil v0.0.0-20191216090214-5f9620d22b3b/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= +github.com/pingcap/tidb v1.1.0-beta.0.20200110130413-8c3ee37c1938 h1:Jt9ENNiS1ZNC9jV2Pd3wdegXQYFq3U6z1xFlzZNMNC8= +github.com/pingcap/tidb v1.1.0-beta.0.20200110130413-8c3ee37c1938/go.mod h1:DlMN+GGqC/WpREnzcH8xgxbXnntjybLhT84AbUSvMVM= github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible h1:H1jg0aDWz2SLRh3hNBo2HFtnuHtudIUvBumU7syRkic= github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= -github.com/pingcap/tipb v0.0.0-20191209145133-44f75c9bef33 h1:cTSaVv1hue17BCPqt+sURADTFSMpSD26ZuvKRyYIjJs= -github.com/pingcap/tipb v0.0.0-20191209145133-44f75c9bef33/go.mod 
h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= +github.com/pingcap/tidb-tools v4.0.0-beta+incompatible h1:+XJdcVLCM8GDgXiMS6lFV59N3XPVOqtNHeWNLVrb2pg= +github.com/pingcap/tidb-tools v4.0.0-beta+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= +github.com/pingcap/tipb v0.0.0-20190428032612-535e1abaa330/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= +github.com/pingcap/tipb v0.0.0-20191227083941-3996eff010dc h1:IOKsFObJ4GZwAgyuhdJKg3oKCzWcoBFfHhpq2TOn5H0= +github.com/pingcap/tipb v0.0.0-20191227083941-3996eff010dc/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -297,7 +319,6 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -310,14 +331,16 @@ github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7 h1:FUL3b97ZY2EPqg2NbXKuMHs5pXJB9hjj1fDHnF2vl28= -github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= +github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.19.10+incompatible h1:lA4Pi29JEVIQIgATSeftHSY0rMGI9CLrl2ZvDLiahto= github.com/shirou/gopsutil v2.19.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+DfgoHVedieIzIXE8uylPue0U= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 
h1:SWV2fHctRpRrp49VXJ6UZja7gU9QLHwRpIPBN89SKEo= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca h1:3fECS8atRjByijiI8yYiuwLwQ2ZxXobW7ua/8GRB3pI= @@ -331,11 +354,14 @@ github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= @@ -348,7 +374,6 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/struCoder/pidusage v0.1.2/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d h1:4J9HCZVpvDmj2tiKGSTUnb3Ok/9CEQb9oqu9LHKQQpc= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ= @@ -374,6 +399,7 @@ github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKn github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yookoala/realpath v1.0.0 h1:7OA9pj4FZd+oZDsyvXWQvjn5oBdcHRTV44PpdMSuImQ= github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= @@ -382,6 +408,7 @@ go.etcd.io/etcd v0.0.0-20190320044326-77d4b742cdbf/go.mod h1:KSGwdbiFchh5KIC9My2 go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 h1:lWF4f9Nypl1ZqSb4gLeh/DGvBYVaUYHuiB93teOmwgc= go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io 
v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -415,6 +442,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -442,6 +470,7 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -457,6 +486,7 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc= golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191011234655-491137f69257 h1:ry8e2D+cwaV6hk7lb3aRTjjZo24shrbK0e11QEOkTIg= golang.org/x/net v0.0.0-20191011234655-491137f69257/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -525,9 +555,10 @@ golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191213032237-7093a17b0467 h1:Jybbe55FT+YYZIJGWmJIA4ZGcglFuZOduakIW3+gHXY= -golang.org/x/tools v0.0.0-20191213032237-7093a17b0467/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191227053925-7b8e75db28f4 h1:Toz2IK7k8rbltAXwNAxKcn9OzqyNfMUhUNjz3sL0NMk= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -585,6 +616,7 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/pkg/backup/client.go b/pkg/backup/client.go index 5cba2d9bf..49e48638d 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -16,12 +16,14 @@ import ( "github.com/pingcap/log" "github.com/pingcap/parser/model" pd "github.com/pingcap/pd/client" + "github.com/pingcap/tidb-tools/pkg/filter" "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/ranger" "go.uber.org/zap" @@ -67,21 +69,17 @@ func NewBackupClient(ctx context.Context, mgr ClientMgr) (*Client, error) { } // GetTS returns the latest timestamp. 
-func (bc *Client) GetTS(ctx context.Context, timeAgo string) (uint64, error) { +func (bc *Client) GetTS(ctx context.Context, duration time.Duration) (uint64, error) { p, l, err := bc.mgr.GetPDClient().GetTS(ctx) if err != nil { return 0, errors.Trace(err) } backupTS := oracle.ComposeTS(p, l) - if timeAgo != "" { - duration, err := time.ParseDuration(timeAgo) - if err != nil { - return 0, errors.Trace(err) - } - if duration <= 0 { - return 0, errors.New("negative timeago is not allowed") - } + switch { + case duration < 0: + return 0, errors.New("negative timeago is not allowed") + case duration > 0: log.Info("backup time ago", zap.Duration("timeago", duration)) backupTime := oracle.GetTimeFromTS(backupTS) @@ -102,9 +100,9 @@ func (bc *Client) GetTS(ctx context.Context, timeAgo string) (uint64, error) { } // SetStorage set ExternalStorage for client -func (bc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend) error { +func (bc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend, sendCreds bool) error { var err error - bc.storage, err = storage.Create(ctx, backend) + bc.storage, err = storage.Create(ctx, backend, sendCreds) if err != nil { return err } @@ -173,63 +171,27 @@ func appendRanges(tbl *model.TableInfo, tblID int64) ([]kv.KeyRange, error) { func BuildBackupRangeAndSchema( dom *domain.Domain, storage kv.Storage, + tableFilter *filter.Filter, backupTS uint64, - dbName, tableName string, ) ([]Range, *Schemas, error) { - SystemDatabases := [3]string{ - "information_schema", - "performance_schema", - "mysql", - } - info, err := dom.GetSnapshotInfoSchema(backupTS) if err != nil { return nil, nil, errors.Trace(err) } - var dbInfos []*model.DBInfo - var cTableName model.CIStr - switch { - case len(dbName) == 0 && len(tableName) != 0: - return nil, nil, errors.New("no database is not specified") - case len(dbName) != 0 && len(tableName) == 0: - // backup database - cDBName := model.NewCIStr(dbName) - dbInfo, exist := info.SchemaByName(cDBName) - if !exist { - return nil, nil, errors.Errorf("schema %s not found", dbName) - } - dbInfos = append(dbInfos, dbInfo) - case len(dbName) != 0 && len(tableName) != 0: - // backup table - cTableName = model.NewCIStr(tableName) - cDBName := model.NewCIStr(dbName) - dbInfo, exist := info.SchemaByName(cDBName) - if !exist { - return nil, nil, errors.Errorf("schema %s not found", dbName) - } - dbInfos = append(dbInfos, dbInfo) - case len(dbName) == 0 && len(tableName) == 0: - // backup full - dbInfos = info.AllSchemas() - } ranges := make([]Range, 0) backupSchemas := newBackupSchemas() -LoadDb: - for _, dbInfo := range dbInfos { + for _, dbInfo := range info.AllSchemas() { // skip system databases - for _, sysDbName := range SystemDatabases { - if sysDbName == dbInfo.Name.L { - continue LoadDb - } - } - dbData, err := json.Marshal(dbInfo) - if err != nil { - return nil, nil, errors.Trace(err) + if util.IsMemOrSysDB(dbInfo.Name.L) { + continue } - idAlloc := autoid.NewAllocator(storage, dbInfo.ID, false) + + var dbData []byte + idAlloc := autoid.NewAllocator(storage, dbInfo.ID, false, autoid.RowIDAllocType) + for _, tableInfo := range dbInfo.Tables { - if len(cTableName.L) != 0 && cTableName.L != tableInfo.Name.L { + if !tableFilter.Match(&filter.Table{Schema: dbInfo.Name.L, Name: tableInfo.Name.L}) { // Skip tables other than the given table. 
continue } @@ -243,6 +205,12 @@ LoadDb: zap.Stringer("table", tableInfo.Name), zap.Int64("AutoIncID", globalAutoID)) + if dbData == nil { + dbData, err = json.Marshal(dbInfo) + if err != nil { + return nil, nil, errors.Trace(err) + } + } tableData, err := json.Marshal(tableInfo) if err != nil { return nil, nil, errors.Trace(err) @@ -267,11 +235,8 @@ LoadDb: } } - if len(cTableName.L) != 0 { - // Must find the given table. - if backupSchemas.Len() == 0 { - return nil, nil, errors.Errorf("table %s not found", cTableName) - } + if backupSchemas.Len() == 0 { + return nil, nil, errors.New("nothing to backup") } return ranges, backupSchemas, nil } diff --git a/pkg/backup/client_test.go b/pkg/backup/client_test.go index 44ca1ad5a..ddff45299 100644 --- a/pkg/backup/client_test.go +++ b/pkg/backup/client_test.go @@ -50,16 +50,10 @@ func (r *testBackup) TestGetTS(c *C) { deviation = 100 ) - // timeago not valid - timeAgo := "invalid" - _, err = r.backupClient.GetTS(r.ctx, timeAgo) - c.Assert(err, ErrorMatches, "time: invalid duration invalid") - // timeago not work - timeAgo = "" expectedDuration := 0 currentTs := time.Now().UnixNano() / int64(time.Millisecond) - ts, err := r.backupClient.GetTS(r.ctx, timeAgo) + ts, err := r.backupClient.GetTS(r.ctx, 0) c.Assert(err, IsNil) pdTs := oracle.ExtractPhysical(ts) duration := int(currentTs - pdTs) @@ -67,10 +61,9 @@ func (r *testBackup) TestGetTS(c *C) { c.Assert(duration, Less, expectedDuration+deviation) // timeago = "1.5m" - timeAgo = "1.5m" expectedDuration = 90000 currentTs = time.Now().UnixNano() / int64(time.Millisecond) - ts, err = r.backupClient.GetTS(r.ctx, timeAgo) + ts, err = r.backupClient.GetTS(r.ctx, 90*time.Second) c.Assert(err, IsNil) pdTs = oracle.ExtractPhysical(ts) duration = int(currentTs - pdTs) @@ -78,13 +71,11 @@ func (r *testBackup) TestGetTS(c *C) { c.Assert(duration, Less, expectedDuration+deviation) // timeago = "-1m" - timeAgo = "-1m" - _, err = r.backupClient.GetTS(r.ctx, timeAgo) + _, err = r.backupClient.GetTS(r.ctx, -time.Minute) c.Assert(err, ErrorMatches, "negative timeago is not allowed") // timeago = "1000000h" overflows - timeAgo = "1000000h" - _, err = r.backupClient.GetTS(r.ctx, timeAgo) + _, err = r.backupClient.GetTS(r.ctx, 1000000*time.Hour) c.Assert(err, ErrorMatches, "backup ts overflow.*") // timeago = "10h" exceed GCSafePoint @@ -93,8 +84,7 @@ func (r *testBackup) TestGetTS(c *C) { now := oracle.ComposeTS(p, l) _, err = r.backupClient.mgr.GetPDClient().UpdateGCSafePoint(r.ctx, now) c.Assert(err, IsNil) - timeAgo = "10h" - _, err = r.backupClient.GetTS(r.ctx, timeAgo) + _, err = r.backupClient.GetTS(r.ctx, 10*time.Hour) c.Assert(err, ErrorMatches, "GC safepoint [0-9]+ exceed TS [0-9]+") } diff --git a/pkg/backup/schema_test.go b/pkg/backup/schema_test.go index 3d10fd967..f657310bf 100644 --- a/pkg/backup/schema_test.go +++ b/pkg/backup/schema_test.go @@ -5,6 +5,7 @@ import ( "math" . "github.com/pingcap/check" + "github.com/pingcap/tidb-tools/pkg/filter" "github.com/pingcap/tidb/util/testkit" "github.com/pingcap/tidb/util/testleak" @@ -34,28 +35,32 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { tk := testkit.NewTestKit(c, s.mock.Storage) // Table t1 is not exist. 
+ testFilter, err := filter.New(false, &filter.Rules{ + DoTables: []*filter.Table{{Schema: "test", Name: "t1"}}, + }) + c.Assert(err, IsNil) _, backupSchemas, err := BuildBackupRangeAndSchema( - s.mock.Domain, s.mock.Storage, math.MaxUint64, "test", "t1") + s.mock.Domain, s.mock.Storage, testFilter, math.MaxUint64) c.Assert(err, NotNil) c.Assert(backupSchemas, IsNil) // Database does not exist. + fooFilter, err := filter.New(false, &filter.Rules{ + DoTables: []*filter.Table{{Schema: "foo", Name: "t1"}}, + }) + c.Assert(err, IsNil) _, backupSchemas, err = BuildBackupRangeAndSchema( - s.mock.Domain, s.mock.Storage, math.MaxUint64, "foo", "t1") + s.mock.Domain, s.mock.Storage, fooFilter, math.MaxUint64) c.Assert(err, NotNil) c.Assert(backupSchemas, IsNil) // Empty database. - _, backupSchemas, err = BuildBackupRangeAndSchema( - s.mock.Domain, s.mock.Storage, math.MaxUint64, "", "") - c.Assert(err, IsNil) - c.Assert(backupSchemas, NotNil) - c.Assert(backupSchemas.Len(), Equals, 0) - updateCh := make(chan struct{}, 2) - backupSchemas.Start(context.Background(), s.mock.Storage, math.MaxUint64, 1, updateCh) - schemas, err := backupSchemas.finishTableChecksum() + noFilter, err := filter.New(false, &filter.Rules{}) c.Assert(err, IsNil) - c.Assert(len(schemas), Equals, 0) + _, backupSchemas, err = BuildBackupRangeAndSchema( + s.mock.Domain, s.mock.Storage, noFilter, math.MaxUint64) + c.Assert(err, NotNil) + c.Assert(backupSchemas, IsNil) tk.MustExec("use test") tk.MustExec("drop table if exists t1;") @@ -63,11 +68,12 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { tk.MustExec("insert into t1 values (10);") _, backupSchemas, err = BuildBackupRangeAndSchema( - s.mock.Domain, s.mock.Storage, math.MaxUint64, "test", "t1") + s.mock.Domain, s.mock.Storage, testFilter, math.MaxUint64) c.Assert(err, IsNil) c.Assert(backupSchemas.Len(), Equals, 1) + updateCh := make(chan struct{}, 2) backupSchemas.Start(context.Background(), s.mock.Storage, math.MaxUint64, 1, updateCh) - schemas, err = backupSchemas.finishTableChecksum() + schemas, err := backupSchemas.finishTableChecksum() <-updateCh c.Assert(err, IsNil) c.Assert(len(schemas), Equals, 1) @@ -82,7 +88,7 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { tk.MustExec("insert into t2 values (11);") _, backupSchemas, err = BuildBackupRangeAndSchema( - s.mock.Domain, s.mock.Storage, math.MaxUint64, "", "") + s.mock.Domain, s.mock.Storage, noFilter, math.MaxUint64) c.Assert(err, IsNil) c.Assert(backupSchemas.Len(), Equals, 2) backupSchemas.Start(context.Background(), s.mock.Storage, math.MaxUint64, 2, updateCh) diff --git a/pkg/restore/db_test.go b/pkg/restore/db_test.go index 9583f7f8c..98341f510 100644 --- a/pkg/restore/db_test.go +++ b/pkg/restore/db_test.go @@ -64,7 +64,7 @@ func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) { Db: dbInfo, } // Get the next AutoIncID - idAlloc := autoid.NewAllocator(s.mock.Storage, dbInfo.ID, false) + idAlloc := autoid.NewAllocator(s.mock.Storage, dbInfo.ID, false, autoid.RowIDAllocType) globalAutoID, err := idAlloc.NextGlobalAutoID(table.Schema.ID) c.Assert(err, IsNil, Commentf("Error allocating next auto id")) c.Assert(autoIncID, Equals, uint64(globalAutoID)) diff --git a/pkg/restore/util.go b/pkg/restore/util.go index a2e9e3e38..ea8629470 100644 --- a/pkg/restore/util.go +++ b/pkg/restore/util.go @@ -13,6 +13,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "github.com/pingcap/parser/model" + "github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" @@ -50,7 +51,7 @@ func newIDAllocator(id int64) *idAllocator { return &idAllocator{id: id} } -func (alloc *idAllocator) Alloc(tableID int64, n uint64) (min int64, max int64, err error) { +func (alloc *idAllocator) Alloc(tableID int64, n uint64, increment, offset int64) (min int64, max int64, err error) { return alloc.id, alloc.id, nil } @@ -70,6 +71,10 @@ func (alloc *idAllocator) NextGlobalAutoID(tableID int64) (int64, error) { return alloc.id, nil } +func (alloc *idAllocator) GetType() autoid.AllocatorType { + return autoid.RowIDAllocType +} + // GetRewriteRules returns the rewrite rule of the new table and the old table. func GetRewriteRules( newTable *model.TableInfo, diff --git a/pkg/storage/flags.go b/pkg/storage/flags.go index 51fd98af1..2340467ba 100644 --- a/pkg/storage/flags.go +++ b/pkg/storage/flags.go @@ -1,55 +1,19 @@ package storage import ( - "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/backup" "github.com/spf13/pflag" ) -const ( - // flagSendCredentialOption specify whether to send credentials to tikv - flagSendCredentialOption = "send-credentials-to-tikv" -) - -var ( - sendCredential bool -) - // DefineFlags adds flags to the flag set corresponding to all backend options. func DefineFlags(flags *pflag.FlagSet) { - flags.BoolP(flagSendCredentialOption, "c", true, - "Whether send credentials to tikv") defineS3Flags(flags) defineGCSFlags(flags) } -// GetBackendOptionsFromFlags obtains the backend options from the flag set. -func GetBackendOptionsFromFlags(flags *pflag.FlagSet) (options BackendOptions, err error) { - sendCredential, err = flags.GetBool(flagSendCredentialOption) - if err != nil { - err = errors.Trace(err) - return - } - - if options.S3, err = getBackendOptionsFromS3Flags(flags); err != nil { - return - } - if options.GCS, err = getBackendOptionsFromGCSFlags(flags); err != nil { - return - } - return -} - -// ParseBackendFromFlags is a convenient function to consecutively call -// GetBackendOptionsFromFlags and ParseBackend. -func ParseBackendFromFlags(flags *pflag.FlagSet, storageFlag string) (*backup.StorageBackend, error) { - u, err := flags.GetString(storageFlag) - if err != nil { - return nil, errors.Trace(err) - } - opts, err := GetBackendOptionsFromFlags(flags) - if err != nil { - return nil, err +// ParseFromFlags obtains the backend options from the flag set. 
+func (options *BackendOptions) ParseFromFlags(flags *pflag.FlagSet) error { + if err := options.S3.parseFromFlags(flags); err != nil { + return err } - return ParseBackend(u, &opts) + return options.GCS.parseFromFlags(flags) } diff --git a/pkg/storage/gcs.go b/pkg/storage/gcs.go index a0df5b03e..2eb310c3a 100644 --- a/pkg/storage/gcs.go +++ b/pkg/storage/gcs.go @@ -70,31 +70,28 @@ https://console.cloud.google.com/apis/credentials.`) _ = flags.MarkHidden(gcsCredentialsFile) } -func getBackendOptionsFromGCSFlags(flags *pflag.FlagSet) (options GCSBackendOptions, err error) { +func (options *GCSBackendOptions) parseFromFlags(flags *pflag.FlagSet) error { + var err error options.Endpoint, err = flags.GetString(gcsEndpointOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.StorageClass, err = flags.GetString(gcsStorageClassOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.PredefinedACL, err = flags.GetString(gcsPredefinedACL) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.CredentialsFile, err = flags.GetString(gcsCredentialsFile) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } - return + return nil } type gcsStorage struct { @@ -142,11 +139,16 @@ func (s *gcsStorage) FileExists(ctx context.Context, name string) (bool, error) return true, nil } -func newGCSStorage(ctx context.Context, gcs *backup.GCS) (*gcsStorage, error) { - return newGCSStorageWithHTTPClient(ctx, gcs, nil) +func newGCSStorage(ctx context.Context, gcs *backup.GCS, sendCredential bool) (*gcsStorage, error) { + return newGCSStorageWithHTTPClient(ctx, gcs, nil, sendCredential) } -func newGCSStorageWithHTTPClient(ctx context.Context, gcs *backup.GCS, hclient *http.Client) (*gcsStorage, error) { +func newGCSStorageWithHTTPClient( // revive:disable-line:flag-parameter + ctx context.Context, + gcs *backup.GCS, + hclient *http.Client, + sendCredential bool, +) (*gcsStorage, error) { var clientOps []option.ClientOption if gcs.CredentialsBlob == "" { creds, err := google.FindDefaultCredentials(ctx, storage.ScopeReadWrite) diff --git a/pkg/storage/gcs_test.go b/pkg/storage/gcs_test.go index da990cfe7..10bb44371 100644 --- a/pkg/storage/gcs_test.go +++ b/pkg/storage/gcs_test.go @@ -28,7 +28,7 @@ func (r *testStorageSuite) TestGCS(c *C) { PredefinedAcl: "private", CredentialsBlob: "Fake Credentials", } - stg, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + stg, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), false) c.Assert(err, IsNil) err = stg.Write(ctx, "key", []byte("data")) @@ -66,7 +66,6 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { server.CreateBucket(bucketName) { - sendCredential = true gcs := &backup.GCS{ Bucket: bucketName, Prefix: "a/b/", @@ -74,13 +73,12 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { PredefinedAcl: "private", CredentialsBlob: "FakeCredentials", } - _, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + _, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), true) c.Assert(err, IsNil) c.Assert(gcs.CredentialsBlob, Equals, "FakeCredentials") } { - sendCredential = false gcs := &backup.GCS{ Bucket: bucketName, Prefix: "a/b/", @@ -88,7 +86,7 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { PredefinedAcl: "private", CredentialsBlob: "FakeCredentials", } - _, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + _, err := 
newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), false) c.Assert(err, IsNil) c.Assert(gcs.CredentialsBlob, Equals, "") } @@ -106,7 +104,6 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { defer os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS") c.Assert(err, IsNil) - sendCredential = true gcs := &backup.GCS{ Bucket: bucketName, Prefix: "a/b/", @@ -114,7 +111,7 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { PredefinedAcl: "private", CredentialsBlob: "", } - _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), true) c.Assert(err, IsNil) c.Assert(gcs.CredentialsBlob, Equals, `{"type": "service_account"}`) } @@ -132,7 +129,6 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { defer os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS") c.Assert(err, IsNil) - sendCredential = false gcs := &backup.GCS{ Bucket: bucketName, Prefix: "a/b/", @@ -140,13 +136,12 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { PredefinedAcl: "private", CredentialsBlob: "", } - _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), false) c.Assert(err, IsNil) c.Assert(gcs.CredentialsBlob, Equals, "") } { - sendCredential = true os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS") gcs := &backup.GCS{ Bucket: bucketName, @@ -155,7 +150,7 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { PredefinedAcl: "private", CredentialsBlob: "", } - _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), true) c.Assert(err, NotNil) } } diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go index 5db54556c..8e04769b5 100644 --- a/pkg/storage/s3.go +++ b/pkg/storage/s3.go @@ -117,44 +117,41 @@ func defineS3Flags(flags *pflag.FlagSet) { _ = flags.MarkHidden(s3ProviderOption) } -func getBackendOptionsFromS3Flags(flags *pflag.FlagSet) (options S3BackendOptions, err error) { +func (options *S3BackendOptions) parseFromFlags(flags *pflag.FlagSet) error { + var err error options.Endpoint, err = flags.GetString(s3EndpointOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.Region, err = flags.GetString(s3RegionOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.SSE, err = flags.GetString(s3SSEOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.ACL, err = flags.GetString(s3ACLOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.StorageClass, err = flags.GetString(s3StorageClassOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.ForcePathStyle = true options.Provider, err = flags.GetString(s3ProviderOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } - - return options, err + return nil } // newS3Storage initialize a new s3 storage for metadata -func newS3Storage(backend *backup.S3) (*S3Storage, error) { +func newS3Storage( // revive:disable-line:flag-parameter + backend *backup.S3, + sendCredential bool, +) (*S3Storage, error) { qs := *backend awsConfig := aws.NewConfig(). WithMaxRetries(maxRetries). 
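As a quick reference for callers of the refactored API: the send-credentials decision now travels as an explicit argument, threaded from storage.Create down to newS3Storage and newGCSStorage, instead of the removed package-level sendCredential variable. A minimal caller-side sketch under the signatures introduced above (openStorage is a hypothetical helper, not part of this patch):

package example

import (
	"context"

	"github.com/pingcap/br/pkg/storage"
)

// openStorage parses a backend URL and opens the external storage,
// passing the credential choice explicitly instead of setting a global.
func openStorage(ctx context.Context, url string, sendCreds bool) (storage.ExternalStorage, error) {
	// BackendOptions would normally be populated via ParseFromFlags.
	opts := &storage.BackendOptions{}
	backend, err := storage.ParseBackend(url, opts)
	if err != nil {
		return nil, err
	}
	// Create forwards sendCreds to the S3/GCS constructors shown above.
	return storage.Create(ctx, backend, sendCreds)
}

The tests above exercise exactly this seam, passing true or false per case rather than mutating a global.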
diff --git a/pkg/storage/s3_test.go b/pkg/storage/s3_test.go index 92a5a8737..3eaf1c206 100644 --- a/pkg/storage/s3_test.go +++ b/pkg/storage/s3_test.go @@ -236,7 +236,7 @@ func (r *testStorageSuite) TestS3Storage(c *C) { testFn := func(test *testcase, c *C) { c.Log(test.name) ctx := aws.BackgroundContext() - sendCredential = test.sendCredential + sendCredential := test.sendCredential if test.hackCheck { checkS3Bucket = func(svc *s3.S3, bucket string) error { return nil } } @@ -245,7 +245,7 @@ S3: test.s3, }, } - _, err := Create(ctx, s3) + _, err := Create(ctx, s3, sendCredential) if test.errReturn { c.Assert(err, NotNil) return diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 173638bdd..f9ae368ae 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -18,7 +18,7 @@ type ExternalStorage interface { } // Create creates ExternalStorage -func Create(ctx context.Context, backend *backup.StorageBackend) (ExternalStorage, error) { +func Create(ctx context.Context, backend *backup.StorageBackend, sendCreds bool) (ExternalStorage, error) { switch backend := backend.Backend.(type) { case *backup.StorageBackend_Local: return newLocalStorage(backend.Local.Path) @@ -26,14 +26,14 @@ func Create(ctx context.Context, backend *backup.StorageBackend) (ExternalStorag if backend.S3 == nil { return nil, errors.New("s3 config not found") } - return newS3Storage(backend.S3) + return newS3Storage(backend.S3, sendCreds) case *backup.StorageBackend_Noop: return newNoopStorage(), nil case *backup.StorageBackend_Gcs: if backend.Gcs == nil { return nil, errors.New("GCS config not found") } - return newGCSStorage(ctx, backend.Gcs) + return newGCSStorage(ctx, backend.Gcs, sendCreds) default: return nil, errors.Errorf("storage %T is not supported yet", backend) } diff --git a/pkg/task/backup.go b/pkg/task/backup.go new file mode 100644 index 000000000..b9613cd56 --- /dev/null +++ b/pkg/task/backup.go @@ -0,0 +1,157 @@ +package task + +import ( + "context" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/log" + "github.com/pingcap/tidb-tools/pkg/filter" + "github.com/spf13/pflag" + + "github.com/pingcap/br/pkg/backup" + "github.com/pingcap/br/pkg/storage" + "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/utils" +) + +const ( + flagBackupTimeago = "timeago" + flagLastBackupTS = "lastbackupts" +) + +// BackupConfig is the configuration specific to backup tasks. +type BackupConfig struct { + Config + + TimeAgo time.Duration `json:"time-ago" toml:"time-ago"` + LastBackupTS uint64 `json:"last-backup-ts" toml:"last-backup-ts"` +} + +// DefineBackupFlags defines common flags for the backup command. +func DefineBackupFlags(flags *pflag.FlagSet) { + flags.Duration( + flagBackupTimeago, 0, + "The history version of the backup task, e.g. 1m, 1h. Do not exceed GCSafePoint") + + flags.Uint64(flagLastBackupTS, 0, "the timestamp of the last backup, used for incremental backup") + _ = flags.MarkHidden(flagLastBackupTS) +} + +// ParseFromFlags parses the backup-related flags from the flag set.
+func (cfg *BackupConfig) ParseFromFlags(flags *pflag.FlagSet) error { + timeAgo, err := flags.GetDuration(flagBackupTimeago) + if err != nil { + return errors.Trace(err) + } + if timeAgo < 0 { + return errors.New("negative timeago is not allowed") + } + cfg.TimeAgo = timeAgo + cfg.LastBackupTS, err = flags.GetUint64(flagLastBackupTS) + if err != nil { + return errors.Trace(err) + } + if err = cfg.Config.ParseFromFlags(flags); err != nil { + return errors.Trace(err) + } + return nil +} + +// RunBackup starts a backup task inside the current goroutine. +func RunBackup(c context.Context, cmdName string, cfg *BackupConfig) error { + ctx, cancel := context.WithCancel(c) + defer cancel() + + u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions) + if err != nil { + return err + } + tableFilter, err := filter.New(cfg.CaseSensitive, &cfg.Filter) + if err != nil { + return err + } + mgr, err := newMgr(ctx, cfg.PD) + if err != nil { + return err + } + defer mgr.Close() + + client, err := backup.NewBackupClient(ctx, mgr) + if err != nil { + return err + } + if err = client.SetStorage(ctx, u, cfg.SendCreds); err != nil { + return err + } + + backupTS, err := client.GetTS(ctx, cfg.TimeAgo) + if err != nil { + return err + } + + defer summary.Summary(cmdName) + + ranges, backupSchemas, err := backup.BuildBackupRangeAndSchema( + mgr.GetDomain(), mgr.GetTiKV(), tableFilter, backupTS) + if err != nil { + return err + } + + // The number of regions that need to be backed up + approximateRegions := 0 + for _, r := range ranges { + var regionCount int + regionCount, err = mgr.GetRegionCount(ctx, r.StartKey, r.EndKey) + if err != nil { + return err + } + approximateRegions += regionCount + } + + summary.CollectInt("backup total regions", approximateRegions) + + // Backup + // Redirect to log if there is no log file to avoid unreadable output.
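+ // StartProgress returns a channel that is signalled once for each completed unit of work; + // it must be closed when the phase finishes (see the close(updateCh) calls below).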
+ updateCh := utils.StartProgress( + ctx, cmdName, int64(approximateRegions), !cfg.LogProgress) + err = client.BackupRanges( + ctx, ranges, cfg.LastBackupTS, backupTS, cfg.RateLimit, cfg.Concurrency, updateCh) + if err != nil { + return err + } + // Backup has finished + close(updateCh) + + // Checksum + backupSchemasConcurrency := backup.DefaultSchemaConcurrency + if backupSchemas.Len() < backupSchemasConcurrency { + backupSchemasConcurrency = backupSchemas.Len() + } + updateCh = utils.StartProgress( + ctx, "Checksum", int64(backupSchemas.Len()), !cfg.LogProgress) + backupSchemas.SetSkipChecksum(!cfg.Checksum) + backupSchemas.Start( + ctx, mgr.GetTiKV(), backupTS, uint(backupSchemasConcurrency), updateCh) + + err = client.CompleteMeta(backupSchemas) + if err != nil { + return err + } + + valid, err := client.FastChecksum() + if err != nil { + return err + } + if !valid { + log.Error("backup FastChecksum mismatch!") + } + // Checksum has finished + close(updateCh) + + err = client.SaveBackupMeta(ctx) + if err != nil { + return err + } + return nil +} diff --git a/pkg/task/common.go b/pkg/task/common.go new file mode 100644 index 000000000..2433d94b9 --- /dev/null +++ b/pkg/task/common.go @@ -0,0 +1,236 @@ +package task + +import ( + "context" + "fmt" + "regexp" + "strings" + + "github.com/gogo/protobuf/proto" + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/backup" + "github.com/pingcap/tidb-tools/pkg/filter" + "github.com/pingcap/tidb/store/tikv" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/storage" + "github.com/pingcap/br/pkg/utils" +) + +const ( + // flagSendCreds specifies whether to send credentials to tikv + flagSendCreds = "send-credentials-to-tikv" + // flagStorage is the name of storage flag. + flagStorage = "storage" + // flagPD is the name of PD url flag. + flagPD = "pd" + // flagCA is the name of TLS CA flag. + flagCA = "ca" + // flagCert is the name of TLS cert flag. + flagCert = "cert" + // flagKey is the name of TLS key flag. + flagKey = "key" + + flagDatabase = "db" + flagTable = "table" + + flagRateLimit = "ratelimit" + flagRateLimitUnit = "ratelimit-unit" + flagConcurrency = "concurrency" + flagChecksum = "checksum" +) + +// TLSConfig is the common configuration for TLS connection. +type TLSConfig struct { + CA string `json:"ca" toml:"ca"` + Cert string `json:"cert" toml:"cert"` + Key string `json:"key" toml:"key"` +} + +// Config is the common configuration for all BRIE tasks. +type Config struct { + storage.BackendOptions + + Storage string `json:"storage" toml:"storage"` + PD []string `json:"pd" toml:"pd"` + TLS TLSConfig `json:"tls" toml:"tls"` + RateLimit uint64 `json:"rate-limit" toml:"rate-limit"` + Concurrency uint32 `json:"concurrency" toml:"concurrency"` + Checksum bool `json:"checksum" toml:"checksum"` + SendCreds bool `json:"send-credentials-to-tikv" toml:"send-credentials-to-tikv"` + // If LogProgress is true, the progress bar is printed to the log instead of stdout. + LogProgress bool `json:"log-progress" toml:"log-progress"` + + CaseSensitive bool `json:"case-sensitive" toml:"case-sensitive"` + Filter filter.Rules `json:"black-white-list" toml:"black-white-list"` +} + +// DefineCommonFlags defines the flags common to all BRIE commands.
+func DefineCommonFlags(flags *pflag.FlagSet) { + flags.BoolP(flagSendCreds, "c", true, "Whether to send credentials to TiKV") + flags.StringP(flagStorage, "s", "", `specify the URL of the backup storage, e.g. "local:///path/to/save"`) + flags.StringSliceP(flagPD, "u", []string{"127.0.0.1:2379"}, "PD address") + flags.String(flagCA, "", "CA certificate path for TLS connection") + flags.String(flagCert, "", "Certificate path for TLS connection") + flags.String(flagKey, "", "Private key path for TLS connection") + + flags.Uint64(flagRateLimit, 0, "The rate limit of the task, MB/s per node") + flags.Uint32(flagConcurrency, 4, "The size of thread pool on each node that executes the task") + flags.Bool(flagChecksum, true, "Run checksum at end of task") + + flags.Uint64(flagRateLimitUnit, utils.MB, "The unit of rate limit") + _ = flags.MarkHidden(flagRateLimitUnit) + + storage.DefineFlags(flags) +} + +// DefineDatabaseFlags defines the required --db flag. +func DefineDatabaseFlags(command *cobra.Command) { + command.Flags().String(flagDatabase, "", "database name") + _ = command.MarkFlagRequired(flagDatabase) +} + +// DefineTableFlags defines the required --db and --table flags. +func DefineTableFlags(command *cobra.Command) { + DefineDatabaseFlags(command) + command.Flags().StringP(flagTable, "t", "", "table name") + _ = command.MarkFlagRequired(flagTable) +} + +// ParseFromFlags parses the TLS config from the flag set. +func (tls *TLSConfig) ParseFromFlags(flags *pflag.FlagSet) error { + var err error + tls.CA, err = flags.GetString(flagCA) + if err != nil { + return errors.Trace(err) + } + tls.Cert, err = flags.GetString(flagCert) + if err != nil { + return errors.Trace(err) + } + tls.Key, err = flags.GetString(flagKey) + if err != nil { + return errors.Trace(err) + } + return nil +} + +// ParseFromFlags parses the config from the flag set. +func (cfg *Config) ParseFromFlags(flags *pflag.FlagSet) error { + var err error + cfg.Storage, err = flags.GetString(flagStorage) + if err != nil { + return errors.Trace(err) + } + cfg.SendCreds, err = flags.GetBool(flagSendCreds) + if err != nil { + return errors.Trace(err) + } + cfg.PD, err = flags.GetStringSlice(flagPD) + if err != nil { + return errors.Trace(err) + } + if len(cfg.PD) == 0 { + return errors.New("must provide at least one PD server address") + } + cfg.Concurrency, err = flags.GetUint32(flagConcurrency) + if err != nil { + return errors.Trace(err) + } + cfg.Checksum, err = flags.GetBool(flagChecksum) + if err != nil { + return errors.Trace(err) + } + + var rateLimit, rateLimitUnit uint64 + rateLimit, err = flags.GetUint64(flagRateLimit) + if err != nil { + return errors.Trace(err) + } + rateLimitUnit, err = flags.GetUint64(flagRateLimitUnit) + if err != nil { + return errors.Trace(err) + } + cfg.RateLimit = rateLimit * rateLimitUnit + + if dbFlag := flags.Lookup(flagDatabase); dbFlag != nil { + db := escapeFilterName(dbFlag.Value.String()) + if len(db) == 0 { + return errors.New("empty database name is not allowed") + } + if tblFlag := flags.Lookup(flagTable); tblFlag != nil { + tbl := escapeFilterName(tblFlag.Value.String()) + if len(tbl) == 0 { + return errors.New("empty table name is not allowed") + } + cfg.Filter.DoTables = []*filter.Table{{Schema: db, Name: tbl}} + } else { + cfg.Filter.DoDBs = []string{db} + } + } + + if err := cfg.BackendOptions.ParseFromFlags(flags); err != nil { + return err + } + return cfg.TLS.ParseFromFlags(flags) +} + +// newMgr creates a new mgr at the given PD address.
+func newMgr(ctx context.Context, pds []string) (*conn.Mgr, error) { + pdAddress := strings.Join(pds, ",") + if len(pdAddress) == 0 { + return nil, errors.New("pd address cannot be empty") + } + + // Disable GC because TiDB enables GC already. + store, err := tikv.Driver{}.Open(fmt.Sprintf("tikv://%s?disableGC=true", pdAddress)) + if err != nil { + return nil, err + } + return conn.NewMgr(ctx, pdAddress, store.(tikv.Storage)) +} + +// GetStorage gets the storage backend from the config. +func GetStorage( + ctx context.Context, + cfg *Config, +) (*backup.StorageBackend, storage.ExternalStorage, error) { + u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions) + if err != nil { + return nil, nil, err + } + s, err := storage.Create(ctx, u, cfg.SendCreds) + if err != nil { + return nil, nil, errors.Annotate(err, "create storage failed") + } + return u, s, nil +} + +// ReadBackupMeta reads the backupmeta file from the storage. +func ReadBackupMeta( + ctx context.Context, + cfg *Config, +) (*backup.StorageBackend, storage.ExternalStorage, *backup.BackupMeta, error) { + u, s, err := GetStorage(ctx, cfg) + if err != nil { + return nil, nil, nil, err + } + metaData, err := s.Read(ctx, utils.MetaFile) + if err != nil { + return nil, nil, nil, errors.Annotate(err, "load backupmeta failed") + } + backupMeta := &backup.BackupMeta{} + if err = proto.Unmarshal(metaData, backupMeta); err != nil { + return nil, nil, nil, errors.Annotate(err, "parse backupmeta failed") + } + return u, s, backupMeta, nil +} + +func escapeFilterName(name string) string { + if !strings.HasPrefix(name, "~") { + return name + } + return "~^" + regexp.QuoteMeta(name) + "$" +} diff --git a/pkg/task/restore.go b/pkg/task/restore.go new file mode 100644 index 000000000..f2f3caf43 --- /dev/null +++ b/pkg/task/restore.go @@ -0,0 +1,254 @@ +package task + +import ( + "context" + + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/backup" + "github.com/pingcap/log" + "github.com/pingcap/tidb-tools/pkg/filter" + "github.com/spf13/pflag" + "go.uber.org/zap" + + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/restore" + "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/utils" +) + +const ( + flagOnline = "online" +) + +var schedulers = map[string]struct{}{ + "balance-leader-scheduler": {}, + "balance-hot-region-scheduler": {}, + "balance-region-scheduler": {}, + + "shuffle-leader-scheduler": {}, + "shuffle-region-scheduler": {}, + "shuffle-hot-region-scheduler": {}, +} + +// RestoreConfig is the configuration specific to restore tasks. +type RestoreConfig struct { + Config + + Online bool `json:"online" toml:"online"` +} + +// DefineRestoreFlags defines common flags for the restore command. +func DefineRestoreFlags(flags *pflag.FlagSet) { + flags.Bool(flagOnline, false, "Whether to restore online") + // TODO remove hidden flag if it's stable + _ = flags.MarkHidden(flagOnline) +} + +// ParseFromFlags parses the restore-related flags from the flag set. +func (cfg *RestoreConfig) ParseFromFlags(flags *pflag.FlagSet) error { + var err error + cfg.Online, err = flags.GetBool(flagOnline) + if err != nil { + return errors.Trace(err) + } + return cfg.Config.ParseFromFlags(flags) +} + +// RunRestore starts a restore task inside the current goroutine.
+func RunRestore(c context.Context, cmdName string, cfg *RestoreConfig) error { + ctx, cancel := context.WithCancel(c) + defer cancel() + + mgr, err := newMgr(ctx, cfg.PD) + if err != nil { + return err + } + defer mgr.Close() + + client, err := restore.NewRestoreClient(ctx, mgr.GetPDClient(), mgr.GetTiKV()) + if err != nil { + return err + } + defer client.Close() + + client.SetRateLimit(cfg.RateLimit) + client.SetConcurrency(uint(cfg.Concurrency)) + if cfg.Online { + client.EnableOnline() + } + + defer summary.Summary(cmdName) + + u, _, backupMeta, err := ReadBackupMeta(ctx, &cfg.Config) + if err != nil { + return err + } + if err = client.InitBackupMeta(backupMeta, u); err != nil { + return err + } + + files, tables, err := filterRestoreFiles(client, cfg) + if err != nil { + return err + } + if len(files) == 0 { + return errors.New("all files are filtered out from the backup archive, nothing to restore") + } + summary.CollectInt("restore files", len(files)) + + var newTS uint64 + if client.IsIncremental() { + newTS, err = client.GetTS(ctx) + if err != nil { + return err + } + } + rewriteRules, newTables, err := client.CreateTables(mgr.GetDomain(), tables, newTS) + if err != nil { + return err + } + + ranges, err := restore.ValidateFileRanges(files, rewriteRules) + if err != nil { + return err + } + summary.CollectInt("restore ranges", len(ranges)) + + // Redirect to log if there is no log file to avoid unreadable output. + updateCh := utils.StartProgress( + ctx, + cmdName, + // Split/Scatter + Download/Ingest + int64(len(ranges)+len(files)), + !cfg.LogProgress) + + err = restore.SplitRanges(ctx, client, ranges, rewriteRules, updateCh) + if err != nil { + log.Error("split regions failed", zap.Error(err)) + return err + } + + if !client.IsIncremental() { + if err = client.ResetTS(cfg.PD); err != nil { + log.Error("reset pd TS failed", zap.Error(err)) + return err + } + } + + removedSchedulers, err := restorePreWork(ctx, client, mgr) + if err != nil { + return err + } + err = client.RestoreAll(rewriteRules, updateCh) + // always run the post-work even on error, so we don't get stuck in import mode or leave the schedulers removed + postErr := restorePostWork(ctx, client, mgr, removedSchedulers) + + if err != nil { + return err + } + if postErr != nil { + return postErr + } + + // Restore has finished. + close(updateCh) + + // Checksum + updateCh = utils.StartProgress( + ctx, "Checksum", int64(len(newTables)), !cfg.LogProgress) + err = client.ValidateChecksum( + ctx, mgr.GetTiKV().GetClient(), tables, newTables, updateCh) + if err != nil { + return err + } + close(updateCh) + + return nil +} + +func filterRestoreFiles( + client *restore.Client, + cfg *RestoreConfig, +) (files []*backup.File, tables []*utils.Table, err error) { + tableFilter, err := filter.New(cfg.CaseSensitive, &cfg.Filter) + if err != nil { + return nil, nil, err + } + + for _, db := range client.GetDatabases() { + createdDatabase := false + for _, table := range db.Tables { + if !tableFilter.Match(&filter.Table{Schema: db.Schema.Name.O, Name: table.Schema.Name.O}) { + continue + } + + if !createdDatabase { + if err = client.CreateDatabase(db.Schema); err != nil { + return nil, nil, err + } + createdDatabase = true + } + + files = append(files, table.Files...)
+ tables = append(tables, table) + } + } + + return +} + +// restorePreWork executes some preparation work before restore +func restorePreWork(ctx context.Context, client *restore.Client, mgr *conn.Mgr) ([]string, error) { + if client.IsOnline() { + return nil, nil + } + + if err := client.SwitchToImportMode(ctx); err != nil { + return nil, err + } + + existSchedulers, err := mgr.ListSchedulers(ctx) + if err != nil { + return nil, errors.Trace(err) + } + needRemoveSchedulers := make([]string, 0, len(existSchedulers)) + for _, s := range existSchedulers { + if _, ok := schedulers[s]; ok { + needRemoveSchedulers = append(needRemoveSchedulers, s) + } + } + return removePDLeaderScheduler(ctx, mgr, needRemoveSchedulers) +} + +func removePDLeaderScheduler(ctx context.Context, mgr *conn.Mgr, existSchedulers []string) ([]string, error) { + removedSchedulers := make([]string, 0, len(existSchedulers)) + for _, scheduler := range existSchedulers { + err := mgr.RemoveScheduler(ctx, scheduler) + if err != nil { + return nil, err + } + removedSchedulers = append(removedSchedulers, scheduler) + } + return removedSchedulers, nil +} + +// restorePostWork executes some post-work after restore +func restorePostWork(ctx context.Context, client *restore.Client, mgr *conn.Mgr, removedSchedulers []string) error { + if client.IsOnline() { + return nil + } + if err := client.SwitchToNormalMode(ctx); err != nil { + return err + } + return addPDLeaderScheduler(ctx, mgr, removedSchedulers) +} + +func addPDLeaderScheduler(ctx context.Context, mgr *conn.Mgr, removedSchedulers []string) error { + for _, scheduler := range removedSchedulers { + err := mgr.AddScheduler(ctx, scheduler) + if err != nil { + return err + } + } + return nil +} From 8c97452f3c27c641f4e0be5b980cbe491740056b Mon Sep 17 00:00:00 2001 From: 3pointer Date: Thu, 13 Feb 2020 10:24:23 +0800 Subject: [PATCH 05/46] restore: fix restore summary log (#150) Co-authored-by: kennytm --- pkg/restore/client.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 3030ba857..5e8df4418 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -242,8 +242,6 @@ func (rc *Client) RestoreTable( key := fmt.Sprintf("%s.%s", table.Db.Name.String(), table.Schema.Name.String()) if err != nil { summary.CollectFailureUnit(key, err) - } else { - summary.CollectSuccessUnit(key, elapsed) } }() @@ -342,6 +340,7 @@ func (rc *Client) RestoreAll( defer func() { elapsed := time.Since(start) log.Info("Restore All", zap.Duration("take", elapsed)) + summary.CollectSuccessUnit("restore all", elapsed) }() errCh := make(chan error, len(rc.databases)) wg := new(sync.WaitGroup) From 008ec45182220d6d7cc7e8b13f940bfea1e24fd6 Mon Sep 17 00:00:00 2001 From: 5kbpers <20279863+5kbpers@users.noreply.github.com> Date: Thu, 13 Feb 2020 15:43:18 +0800 Subject: [PATCH 06/46] restore: enhance error handling (#152) * restore: enhance error handling Signed-off-by: 5kbpers * unit test Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * fix region epoch error Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * remove `Restore*` Signed-off-by: 5kbpers * address lint Signed-off-by: 5kbpers * add debug log Signed-off-by: 5kbpers * Apply suggestions from code review Co-Authored-By: kennytm * Update pkg/restore/import.go Co-Authored-By: kennytm * fix retry error Signed-off-by: 5kbpers * handle RegionNotFound error Signed-off-by: 5kbpers Co-authored-by: Neil Shen Co-authored-by: kennytm --- go.mod | 3 + go.sum |
11 +++ pkg/restore/backoff.go | 117 +++++++++++++++++++++++++ pkg/restore/backoff_test.go | 58 +++++++++++++ pkg/restore/client.go | 133 +++++----------------------- pkg/restore/import.go | 168 ++++++++++++++++++------------------ pkg/restore/split.go | 3 +- pkg/restore/util.go | 52 +---------- pkg/task/restore.go | 2 +- pkg/utils/retry.go | 40 +++++++++ tests/br_full_ddl/run.sh | 2 +- 11 files changed, 338 insertions(+), 251 deletions(-) create mode 100644 pkg/restore/backoff.go create mode 100644 pkg/restore/backoff_test.go create mode 100644 pkg/utils/retry.go diff --git a/go.mod b/go.mod index 850750f09..180c58f93 100644 --- a/go.mod +++ b/go.mod @@ -8,12 +8,14 @@ require ( github.com/cheggaaa/pb/v3 v3.0.1 github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect + github.com/fatih/color v1.9.0 // indirect github.com/fsouza/fake-gcs-server v1.15.0 github.com/go-sql-driver/mysql v1.4.1 github.com/gogo/protobuf v1.3.1 github.com/golang/snappy v0.0.1 // indirect github.com/google/btree v1.0.0 github.com/google/uuid v1.1.1 + github.com/mattn/go-runewidth v0.0.7 // indirect github.com/onsi/ginkgo v1.10.3 // indirect github.com/onsi/gomega v1.7.1 // indirect github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 @@ -33,6 +35,7 @@ require ( go.uber.org/zap v1.13.0 golang.org/x/net v0.0.0-20191011234655-491137f69257 // indirect golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 + golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042 // indirect google.golang.org/api v0.14.0 google.golang.org/grpc v1.25.1 ) diff --git a/go.sum b/go.sum index d5e9c891d..0fe4a3024 100644 --- a/go.sum +++ b/go.sum @@ -97,6 +97,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsouza/fake-gcs-server v1.15.0 h1:ss/ztlt10Y64A5qslmxZKsiqW/i28t5DkRtv6qSFaLQ= @@ -224,12 +226,18 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-runewidth v0.0.2/go.mod 
h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= @@ -518,6 +526,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190909082730-f460065e899a h1:mIzbOulag9/gXacgxKlFVwpCOWSfBT3/pDyyCwGA9as= golang.org/x/sys v0.0.0-20190909082730-f460065e899a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= @@ -555,6 +564,8 @@ golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042 h1:BKiPVwWbEdmAh+5CBwk13CYeVJQRDJpDnKgDyMOGz9M= +golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4 h1:Toz2IK7k8rbltAXwNAxKcn9OzqyNfMUhUNjz3sL0NMk= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/restore/backoff.go b/pkg/restore/backoff.go new file mode 100644 index 000000000..dae14e109 --- /dev/null +++ b/pkg/restore/backoff.go @@ -0,0 +1,117 @@ +package restore + +import ( + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/log" + "go.uber.org/zap" + + "github.com/pingcap/br/pkg/utils" +) + +var ( + errNotLeader = errors.NewNoStackError("not leader") + errEpochNotMatch = errors.NewNoStackError("epoch not match") + errKeyNotInRegion = errors.NewNoStackError("key not in region") + errRegionNotFound = errors.NewNoStackError("region not found") + errResp = errors.NewNoStackError("response error") + errRewriteRuleNotFound = errors.NewNoStackError("rewrite rule not found") + errRangeIsEmpty = errors.NewNoStackError("range is empty") + errGrpc = errors.NewNoStackError("gRPC error") + + // TODO: add `error` field to `DownloadResponse` for distinguish the errors of gRPC + // and the errors of request + errBadFormat = errors.NewNoStackError("bad format") + errWrongKeyPrefix = errors.NewNoStackError("wrong key prefix") + errFileCorrupted = 
errors.NewNoStackError("file corrupted") + errCannotRead = errors.NewNoStackError("cannot read externel storage") +) + +const ( + importSSTRetryTimes = 16 + importSSTWaitInterval = 10 * time.Millisecond + importSSTMaxWaitInterval = 1 * time.Second + + downloadSSTRetryTimes = 8 + downloadSSTWaitInterval = 10 * time.Millisecond + downloadSSTMaxWaitInterval = 1 * time.Second + + resetTsRetryTime = 16 + resetTSWaitInterval = 50 * time.Millisecond + resetTSMaxWaitInterval = 500 * time.Millisecond +) + +type importerBackoffer struct { + attempt int + delayTime time.Duration + maxDelayTime time.Duration +} + +func newImportSSTBackoffer() utils.Backoffer { + return &importerBackoffer{ + attempt: importSSTRetryTimes, + delayTime: importSSTWaitInterval, + maxDelayTime: importSSTMaxWaitInterval, + } +} + +func newDownloadSSTBackoffer() utils.Backoffer { + return &importerBackoffer{ + attempt: downloadSSTRetryTimes, + delayTime: downloadSSTWaitInterval, + maxDelayTime: downloadSSTMaxWaitInterval, + } +} + +func (bo *importerBackoffer) NextBackoff(err error) time.Duration { + switch errors.Cause(err) { + case errResp, errGrpc, errEpochNotMatch, errNotLeader: + bo.delayTime = 2 * bo.delayTime + bo.attempt-- + case errRangeIsEmpty, errRewriteRuleNotFound: + // Excepted error, finish the operation + bo.delayTime = 0 + bo.attempt = 0 + default: + // Unexcepted error + bo.delayTime = 0 + bo.attempt = 0 + log.Warn("unexcepted error, stop to retry", zap.Error(err)) + } + if bo.delayTime > bo.maxDelayTime { + return bo.maxDelayTime + } + return bo.delayTime +} + +func (bo *importerBackoffer) Attempt() int { + return bo.attempt +} + +type resetTSBackoffer struct { + attempt int + delayTime time.Duration + maxDelayTime time.Duration +} + +func newResetTSBackoffer() utils.Backoffer { + return &resetTSBackoffer{ + attempt: resetTsRetryTime, + delayTime: resetTSWaitInterval, + maxDelayTime: resetTSMaxWaitInterval, + } +} + +func (bo *resetTSBackoffer) NextBackoff(err error) time.Duration { + bo.delayTime = 2 * bo.delayTime + bo.attempt-- + if bo.delayTime > bo.maxDelayTime { + return bo.maxDelayTime + } + return bo.delayTime +} + +func (bo *resetTSBackoffer) Attempt() int { + return bo.attempt +} diff --git a/pkg/restore/backoff_test.go b/pkg/restore/backoff_test.go new file mode 100644 index 000000000..537f0980c --- /dev/null +++ b/pkg/restore/backoff_test.go @@ -0,0 +1,58 @@ +package restore + +import ( + "context" + "time" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" + + "github.com/pingcap/br/pkg/utils" +) + +var _ = Suite(&testBackofferSuite{}) + +type testBackofferSuite struct { + mock *utils.MockCluster +} + +func (s *testBackofferSuite) SetUpSuite(c *C) { + var err error + s.mock, err = utils.NewMockCluster() + c.Assert(err, IsNil) +} + +func (s *testBackofferSuite) TearDownSuite(c *C) { + testleak.AfterTest(c)() +} + +func (s *testBackofferSuite) TestImporterBackoffer(c *C) { + var counter int + err := utils.WithRetry(context.Background(), func() error { + defer func() { counter++ }() + switch counter { + case 0: + return errGrpc + case 1: + return errResp + case 2: + return errRangeIsEmpty + } + return nil + }, newImportSSTBackoffer()) + c.Assert(counter, Equals, 3) + c.Assert(err, Equals, errRangeIsEmpty) + + counter = 0 + backoffer := importerBackoffer{ + attempt: 10, + delayTime: time.Nanosecond, + maxDelayTime: time.Nanosecond, + } + err = utils.WithRetry(context.Background(), func() error { + defer func() { counter++ }() + return errResp + }, &backoffer) + c.Assert(counter, Equals, 10) + c.Assert(err, Equals, errResp) +} diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 5e8df4418..5402d78bc 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -2,7 +2,6 @@ package restore import ( "context" - "fmt" "math" "sync" "time" @@ -26,15 +25,9 @@ import ( "github.com/pingcap/br/pkg/utils" ) -const ( - resetTsRetryTime = 16 - resetTSWaitInterval = 50 * time.Millisecond - resetTSMaxWaitInterval = 500 * time.Millisecond - - // defaultChecksumConcurrency is the default number of the concurrent - // checksum tasks. - defaultChecksumConcurrency = 64 -) +// defaultChecksumConcurrency is the default number of the concurrent +// checksum tasks. +const defaultChecksumConcurrency = 64 // Client sends requests to restore files type Client struct { @@ -138,13 +131,10 @@ func (rc *Client) ResetTS(pdAddrs []string) error { restoreTS := rc.backupMeta.GetEndVersion() log.Info("reset pd timestamp", zap.Uint64("ts", restoreTS)) i := 0 - return withRetry(func() error { + return utils.WithRetry(rc.ctx, func() error { idx := i % len(pdAddrs) return utils.ResetTS(pdAddrs[idx], restoreTS) - }, func(e error) bool { - i++ - return true - }, resetTsRetryTime, resetTSWaitInterval, resetTSMaxWaitInterval) + }, newResetTSBackoffer()) } // GetDatabases returns all databases. @@ -228,29 +218,28 @@ func (rc *Client) setSpeedLimit() error { return nil } -// RestoreTable tries to restore the data of a table. -func (rc *Client) RestoreTable( - table *utils.Table, +// RestoreFiles tries to restore the files. 
+func (rc *Client) RestoreFiles( + files []*backup.File, rewriteRules *RewriteRules, updateCh chan<- struct{}, ) (err error) { start := time.Now() defer func() { elapsed := time.Since(start) - log.Info("restore table", - zap.Stringer("table", table.Schema.Name), zap.Duration("take", elapsed)) - key := fmt.Sprintf("%s.%s", table.Db.Name.String(), table.Schema.Name.String()) - if err != nil { - summary.CollectFailureUnit(key, err) + if err == nil { + log.Info("Restore Files", + zap.Int("files", len(files)), zap.Duration("take", elapsed)) + summary.CollectSuccessUnit("files", elapsed) + } else { + summary.CollectFailureUnit("files", err) } }() - log.Debug("start to restore table", - zap.Stringer("table", table.Schema.Name), - zap.Stringer("db", table.Db.Name), - zap.Array("files", files(table.Files)), + log.Debug("start to restore files", + zap.Int("files", len(files)), ) - errCh := make(chan error, len(table.Files)) + errCh := make(chan error, len(files)) wg := new(sync.WaitGroup) defer close(errCh) err = rc.setSpeedLimit() @@ -258,7 +247,7 @@ func (rc *Client) RestoreTable( return err } - for _, file := range table.Files { + for _, file := range files { wg.Add(1) fileReplica := file rc.workerPool.Apply( @@ -272,100 +261,18 @@ func (rc *Client) RestoreTable( } }) } - for range table.Files { + for range files { err := <-errCh if err != nil { rc.cancel() wg.Wait() log.Error( - "restore table failed", - zap.Stringer("table", table.Schema.Name), - zap.Stringer("db", table.Db.Name), + "restore files failed", zap.Error(err), ) return err } } - log.Info( - "finish to restore table", - zap.Stringer("table", table.Schema.Name), - zap.Stringer("db", table.Db.Name), - ) - return nil -} - -// RestoreDatabase tries to restore the data of a database -func (rc *Client) RestoreDatabase( - db *utils.Database, - rewriteRules *RewriteRules, - updateCh chan<- struct{}, -) (err error) { - start := time.Now() - defer func() { - elapsed := time.Since(start) - log.Info("Restore Database", zap.Stringer("db", db.Schema.Name), zap.Duration("take", elapsed)) - }() - errCh := make(chan error, len(db.Tables)) - wg := new(sync.WaitGroup) - defer close(errCh) - for _, table := range db.Tables { - wg.Add(1) - tblReplica := table - rc.tableWorkerPool.Apply(func() { - defer wg.Done() - select { - case <-rc.ctx.Done(): - errCh <- nil - case errCh <- rc.RestoreTable( - tblReplica, rewriteRules, updateCh): - } - }) - } - for range db.Tables { - err = <-errCh - if err != nil { - wg.Wait() - return err - } - } - return nil -} - -// RestoreAll tries to restore all the data of backup files. 
-func (rc *Client) RestoreAll( - rewriteRules *RewriteRules, - updateCh chan<- struct{}, -) (err error) { - start := time.Now() - defer func() { - elapsed := time.Since(start) - log.Info("Restore All", zap.Duration("take", elapsed)) - summary.CollectSuccessUnit("restore all", elapsed) - }() - errCh := make(chan error, len(rc.databases)) - wg := new(sync.WaitGroup) - defer close(errCh) - for _, db := range rc.databases { - wg.Add(1) - dbReplica := db - rc.tableWorkerPool.Apply(func() { - defer wg.Done() - select { - case <-rc.ctx.Done(): - errCh <- nil - case errCh <- rc.RestoreDatabase( - dbReplica, rewriteRules, updateCh): - } - }) - } - - for range rc.databases { - err = <-errCh - if err != nil { - wg.Wait() - return err - } - } return nil } diff --git a/pkg/restore/import.go b/pkg/restore/import.go index 77273ebab..01f8456ef 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -2,6 +2,7 @@ package restore import ( "context" + "strings" "sync" "time" @@ -16,25 +17,10 @@ import ( "google.golang.org/grpc" "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/utils" ) -var ( - errNotLeader = errors.New("not leader") - errEpochNotMatch = errors.New("epoch not match") - errRewriteRuleNotFound = errors.New("rewrite rule not found") - errRangeIsEmpty = errors.New("range is empty") -) - -const ( - importScanResgionTime = 10 * time.Second - importFileRetryTimes = 16 - importFileWaitInterval = 10 * time.Millisecond - importFileMaxWaitInterval = 1 * time.Second - - downloadSSTRetryTimes = 8 - downloadSSTWaitInterval = 10 * time.Millisecond - downloadSSTMaxWaitInterval = 1 * time.Second -) +const importScanRegionTime = 10 * time.Second // ImporterClient is used to import a file to TiKV type ImporterClient interface { @@ -172,10 +158,9 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul log.Debug("rewrite file keys", zap.Stringer("file", file), zap.Binary("startKey", startKey), - zap.Binary("endKey", endKey), - ) - err = withRetry(func() error { - ctx, cancel := context.WithTimeout(importer.ctx, importScanResgionTime) + zap.Binary("endKey", endKey)) + err = utils.WithRetry(importer.ctx, func() error { + ctx, cancel := context.WithTimeout(importer.ctx, importScanRegionTime) defer cancel() // Scan regions covered by the file range regionInfos, err1 := importer.metaClient.ScanRegions(ctx, startKey, endKey, 0) @@ -185,63 +170,56 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul log.Debug("scan regions", zap.Stringer("file", file), zap.Int("count", len(regionInfos))) // Try to download and ingest the file in every region for _, regionInfo := range regionInfos { - var downloadMeta *import_sstpb.SSTMeta info := regionInfo // Try to download file. 
- err = withRetry(func() error { - var err2 error - var isEmpty bool - downloadMeta, isEmpty, err2 = importer.downloadSST(info, file, rewriteRules) - if err2 != nil { - if err != errRewriteRuleNotFound { - log.Warn("download file failed", - zap.Stringer("file", file), - zap.Stringer("region", info.Region), - zap.Binary("startKey", startKey), - zap.Binary("endKey", endKey), - zap.Error(err2), - ) - } - return err2 - } - if isEmpty { - log.Info( - "file don't have any key in this region, skip it", - zap.Stringer("file", file), - zap.Stringer("region", info.Region), - ) - return errRangeIsEmpty - } - return nil - }, func(e error) bool { - // Scan regions may return some regions which cannot match any rewrite rule, - // like [t{tableID}, t{tableID}_r), those regions should be skipped - return e != errRewriteRuleNotFound && e != errRangeIsEmpty - }, downloadSSTRetryTimes, downloadSSTWaitInterval, downloadSSTMaxWaitInterval) - if err != nil { - if err == errRewriteRuleNotFound || err == errRangeIsEmpty { + var downloadMeta *import_sstpb.SSTMeta + err1 = utils.WithRetry(importer.ctx, func() error { + var e error + downloadMeta, e = importer.downloadSST(info, file, rewriteRules) + return e + }, newDownloadSSTBackoffer()) + if err1 != nil { + if err1 == errRewriteRuleNotFound || err1 == errRangeIsEmpty { // Skip this region continue } - return err + log.Error("download file failed", + zap.Stringer("file", file), + zap.Stringer("region", info.Region), + zap.Binary("startKey", startKey), + zap.Binary("endKey", endKey), + zap.Error(err1)) + return err1 } - err = importer.ingestSST(downloadMeta, info) - if err != nil { - log.Warn("ingest file failed", + err1 = importer.ingestSST(downloadMeta, info) + // If error is `NotLeader`, update the region info and retry + for errors.Cause(err1) == errNotLeader { + log.Debug("ingest sst returns not leader error, retry it", + zap.Stringer("region", info.Region)) + var newInfo *RegionInfo + newInfo, err1 = importer.metaClient.GetRegion(importer.ctx, info.Region.GetStartKey()) + if err1 != nil { + break + } + if !checkRegionEpoch(newInfo, info) { + err1 = errEpochNotMatch + break + } + err1 = importer.ingestSST(downloadMeta, newInfo) + } + if err1 != nil { + log.Error("ingest file failed", zap.Stringer("file", file), zap.Stringer("range", downloadMeta.GetRange()), zap.Stringer("region", info.Region), - zap.Error(err), - ) - return err + zap.Error(err1)) + return err1 } summary.CollectSuccessUnit(summary.TotalKV, file.TotalKvs) summary.CollectSuccessUnit(summary.TotalBytes, file.TotalBytes) } return nil - }, func(e error) bool { - return true - }, importFileRetryTimes, importFileWaitInterval, importFileMaxWaitInterval) + }, newImportSSTBackoffer()) return err } @@ -257,33 +235,25 @@ func (importer *FileImporter) downloadSST( regionInfo *RegionInfo, file *backup.File, rewriteRules *RewriteRules, -) (*import_sstpb.SSTMeta, bool, error) { +) (*import_sstpb.SSTMeta, error) { id, err := uuid.New().MarshalBinary() if err != nil { - return nil, true, errors.Trace(err) + return nil, errors.Trace(err) } // Assume one region reflects to one rewrite rule _, key, err := codec.DecodeBytes(regionInfo.Region.GetStartKey()) if err != nil { - return nil, true, err + return nil, err } regionRule := matchNewPrefix(key, rewriteRules) if regionRule == nil { - log.Debug("cannot find rewrite rule, skip region", - zap.Stringer("region", regionInfo.Region), - zap.Array("tableRule", rules(rewriteRules.Table)), - zap.Array("dataRule", rules(rewriteRules.Data)), - zap.Binary("key", key), - ) - 
return nil, true, errRewriteRuleNotFound + return nil, errors.Trace(errRewriteRuleNotFound) } rule := import_sstpb.RewriteRule{ OldKeyPrefix: encodeKeyPrefix(regionRule.GetOldKeyPrefix()), NewKeyPrefix: encodeKeyPrefix(regionRule.GetNewKeyPrefix()), } sstMeta := getSSTMetaFromFile(id, file, regionInfo.Region, &rule) - sstMeta.RegionId = regionInfo.Region.GetId() - sstMeta.RegionEpoch = regionInfo.Region.GetRegionEpoch() req := &import_sstpb.DownloadRequest{ Sst: sstMeta, StorageBackend: importer.backend, @@ -298,15 +268,15 @@ func (importer *FileImporter) downloadSST( for _, peer := range regionInfo.Region.GetPeers() { resp, err = importer.importClient.DownloadSST(importer.ctx, peer.GetStoreId(), req) if err != nil { - return nil, true, err + return nil, extractDownloadSSTError(err) } if resp.GetIsEmpty() { - return &sstMeta, true, nil + return nil, errors.Trace(errRangeIsEmpty) } } sstMeta.Range.Start = truncateTS(resp.Range.GetStart()) sstMeta.Range.End = truncateTS(resp.Range.GetEnd()) - return &sstMeta, false, nil + return &sstMeta, nil } func (importer *FileImporter) ingestSST( @@ -329,17 +299,45 @@ func (importer *FileImporter) ingestSST( log.Debug("download SST", zap.Stringer("sstMeta", sstMeta)) resp, err := importer.importClient.IngestSST(importer.ctx, leader.GetStoreId(), req) if err != nil { - return err + if strings.Contains(err.Error(), "RegionNotFound") { + return errors.Trace(errRegionNotFound) + } + return errors.Trace(err) } respErr := resp.GetError() if respErr != nil { - if respErr.EpochNotMatch != nil { - return errEpochNotMatch + log.Debug("ingest sst resp error", zap.Stringer("error", respErr)) + if respErr.GetKeyNotInRegion() != nil { + return errors.Trace(errKeyNotInRegion) } - if respErr.NotLeader != nil { - return errNotLeader + if respErr.GetNotLeader() != nil { + return errors.Trace(errNotLeader) } - return errors.Errorf("ingest failed: %v", respErr) + return errors.Wrap(errResp, respErr.String()) } return nil } + +func checkRegionEpoch(new, old *RegionInfo) bool { + if new.Region.GetId() == old.Region.GetId() && + new.Region.GetRegionEpoch().GetVersion() == old.Region.GetRegionEpoch().GetVersion() && + new.Region.GetRegionEpoch().GetConfVer() == old.Region.GetRegionEpoch().GetConfVer() { + return true + } + return false +} + +func extractDownloadSSTError(e error) error { + err := errGrpc + switch { + case strings.Contains(e.Error(), "bad format"): + err = errBadFormat + case strings.Contains(e.Error(), "wrong prefix"): + err = errWrongKeyPrefix + case strings.Contains(e.Error(), "corrupted"): + err = errFileCorrupted + case strings.Contains(e.Error(), "Cannot read"): + err = errCannotRead + } + return errors.Trace(err) +} diff --git a/pkg/restore/split.go b/pkg/restore/split.go index 31b23a60f..3248fdd0d 100644 --- a/pkg/restore/split.go +++ b/pkg/restore/split.go @@ -111,7 +111,7 @@ SplitRegions: } time.Sleep(interval) if i > 3 { - log.Warn("splitting regions failed, retry it", zap.Error(err)) + log.Warn("splitting regions failed, retry it", zap.Error(err), zap.ByteStrings("keys", keys)) } continue SplitRegions } @@ -259,6 +259,7 @@ func getSplitKeys(rewriteRules *RewriteRules, ranges []Range, regions []*RegionI splitKeys = make([][]byte, 0, 1) } splitKeyMap[region.Region.GetId()] = append(splitKeys, key) + log.Debug("get key for split region", zap.Binary("key", key), zap.Stringer("region", region.Region)) } } return splitKeyMap diff --git a/pkg/restore/util.go b/pkg/restore/util.go index ea8629470..63ee92969 100644 --- a/pkg/restore/util.go +++ 
b/pkg/restore/util.go
@@ -17,31 +17,12 @@ import (
 	"github.com/pingcap/tidb/tablecodec"
 	"github.com/pingcap/tidb/util/codec"
 	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
 
 	"github.com/pingcap/br/pkg/summary"
 )
 
 var recordPrefixSep = []byte("_r")
 
-type files []*backup.File
-
-func (fs files) MarshalLogArray(arr zapcore.ArrayEncoder) error {
-	for i := range fs {
-		arr.AppendString(fs[i].String())
-	}
-	return nil
-}
-
-type rules []*import_sstpb.RewriteRule
-
-func (rs rules) MarshalLogArray(arr zapcore.ArrayEncoder) error {
-	for i := range rs {
-		arr.AppendString(rs[i].String())
-	}
-	return nil
-}
-
 // idAllocator always returns a specified ID
 type idAllocator struct {
 	id int64
@@ -163,40 +144,11 @@ func getSSTMetaFromFile(
 			Start: rangeStart,
 			End:   rangeEnd,
 		},
+		RegionId:    region.GetId(),
+		RegionEpoch: region.GetRegionEpoch(),
 	}
 }
 
-type retryableFunc func() error
-type continueFunc func(error) bool
-
-func withRetry(
-	retryableFunc retryableFunc,
-	continueFunc continueFunc,
-	attempts uint,
-	delayTime time.Duration,
-	maxDelayTime time.Duration,
-) error {
-	var lastErr error
-	for i := uint(0); i < attempts; i++ {
-		err := retryableFunc()
-		if err != nil {
-			lastErr = err
-			// If this is the last attempt, do not wait
-			if !continueFunc(err) || i == attempts-1 {
-				break
-			}
-			delayTime = 2 * delayTime
-			if delayTime > maxDelayTime {
-				delayTime = maxDelayTime
-			}
-			time.Sleep(delayTime)
-		} else {
-			return nil
-		}
-	}
-	return lastErr
-}
-
 // ValidateFileRanges checks and returns the ranges of the files.
 func ValidateFileRanges(
 	files []*backup.File,
diff --git a/pkg/task/restore.go b/pkg/task/restore.go
index f2f3caf43..a56a1d6da 100644
--- a/pkg/task/restore.go
+++ b/pkg/task/restore.go
@@ -139,7 +139,7 @@ func RunRestore(c context.Context, cmdName string, cfg *RestoreConfig) error {
 	if err != nil {
 		return err
 	}
-	err = client.RestoreAll(rewriteRules, updateCh)
+	err = client.RestoreFiles(files, rewriteRules, updateCh)
 	// always run the post-work even on error, so we don't stuck in the import mode or emptied schedulers
 	postErr := restorePostWork(ctx, client, mgr, removedSchedulers)
 
diff --git a/pkg/utils/retry.go b/pkg/utils/retry.go
new file mode 100644
index 000000000..a8f446764
--- /dev/null
+++ b/pkg/utils/retry.go
@@ -0,0 +1,40 @@
+package utils
+
+import (
+	"context"
+	"time"
+)
+
+// RetryableFunc represents a retryable operation
+type RetryableFunc func() error
+
+// Backoffer implements a backoff policy for retrying operations
+type Backoffer interface {
+	// NextBackoff returns a duration to wait before retrying again
+	NextBackoff(err error) time.Duration
+	// Attempt returns the remaining number of attempts
+	Attempt() int
+}
+
+// WithRetry retries a given operation with a backoff policy
+func WithRetry(
+	ctx context.Context,
+	retryableFunc RetryableFunc,
+	backoffer Backoffer,
+) error {
+	var lastErr error
+	for backoffer.Attempt() > 0 {
+		err := retryableFunc()
+		if err != nil {
+			lastErr = err
+			select {
+			case <-ctx.Done():
+				return lastErr
+			case <-time.After(backoffer.NextBackoff(err)):
+			}
+		} else {
+			return nil
+		}
+	}
+	return lastErr
+}
diff --git a/tests/br_full_ddl/run.sh b/tests/br_full_ddl/run.sh
index 3db1ecd60..1e40415d7 100755
--- a/tests/br_full_ddl/run.sh
+++ b/tests/br_full_ddl/run.sh
@@ -28,7 +28,7 @@ for i in $(seq $DDL_COUNT); do
     run_sql "USE $DB; ALTER TABLE $TABLE ADD INDEX (FIELD$i);"
 done
 
-for i in $(sql $DDL_COUNT); do
+for i in $(seq $DDL_COUNT); do
     if (( RANDOM % 2 )); then
         run_sql "USE $DB; ALTER TABLE $TABLE DROP INDEX FIELD$i;"
     fi
 
From 
1e1fc97bc5771f93bb7ded23bec73830802f4eef Mon Sep 17 00:00:00 2001 From: 5kbpers <20279863+5kbpers@users.noreply.github.com> Date: Thu, 20 Feb 2020 20:04:45 +0800 Subject: [PATCH 07/46] Incremental BR: support DDL (#155) * support backup&restore ddl Signed-off-by: 5kbpers * integration tests Signed-off-by: 5kbpers * update kvproto Signed-off-by: 5kbpers * fix integration tests Signed-off-by: 5kbpers * reduce cyclomatic complexity of `runRestore` Signed-off-by: 5kbpers * fix test Signed-off-by: 5kbpers * add unit test Signed-off-by: 5kbpers * fix tests Signed-off-by: 5kbpers * disable fast checksum in incremental br Signed-off-by: 5kbpers * fix no valid key error Signed-off-by: 5kbpers * address lint Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers --- cmd/validate.go | 10 +-- go.mod | 2 +- go.sum | 11 ++++ pkg/backup/client.go | 55 +++++++++++++++- pkg/checksum/executor.go | 8 +-- pkg/checksum/executor_test.go | 2 +- pkg/restore/client.go | 41 +++++++++++- pkg/restore/client_test.go | 2 +- pkg/restore/db.go | 103 +++++++++++++++++++++++++++--- pkg/restore/db_test.go | 50 +++++++++++++-- pkg/restore/split.go | 37 ++++++++++- pkg/restore/split_test.go | 25 +++++++- pkg/task/backup.go | 37 +++++++++-- pkg/task/restore.go | 12 +++- pkg/utils/schema.go | 10 +-- tests/br_full_ddl/run.sh | 4 +- tests/br_incremental/run.sh | 57 ++++++----------- tests/br_incremental_ddl/run.sh | 74 +++++++++++++++++++++ tests/br_incremental_index/run.sh | 74 +++++++++++++++++++++ tests/config/tikv.toml | 1 + tests/run.sh | 2 + 21 files changed, 527 insertions(+), 90 deletions(-) create mode 100755 tests/br_incremental_ddl/run.sh create mode 100755 tests/br_incremental_index/run.sh diff --git a/cmd/validate.go b/cmd/validate.go index 559cb9983..8bca7e553 100644 --- a/cmd/validate.go +++ b/cmd/validate.go @@ -196,19 +196,19 @@ func newBackupMetaCommand() *cobra.Command { newTable := new(model.TableInfo) tableID, _ := tableIDAllocator.Alloc() newTable.ID = int64(tableID) - newTable.Name = table.Schema.Name - newTable.Indices = make([]*model.IndexInfo, len(table.Schema.Indices)) - for i, indexInfo := range table.Schema.Indices { + newTable.Name = table.Info.Name + newTable.Indices = make([]*model.IndexInfo, len(table.Info.Indices)) + for i, indexInfo := range table.Info.Indices { indexID, _ := indexIDAllocator.Alloc() newTable.Indices[i] = &model.IndexInfo{ ID: int64(indexID), Name: indexInfo.Name, } } - rules := restore.GetRewriteRules(newTable, table.Schema, 0) + rules := restore.GetRewriteRules(newTable, table.Info, 0) rewriteRules.Table = append(rewriteRules.Table, rules.Table...) rewriteRules.Data = append(rewriteRules.Data, rules.Data...) 
- tableIDMap[table.Schema.ID] = int64(tableID) + tableIDMap[table.Info.ID] = int64(tableID) } // Validate rewrite rules for _, file := range files { diff --git a/go.mod b/go.mod index 180c58f93..7d9e6b77e 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/onsi/gomega v1.7.1 // indirect github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 - github.com/pingcap/kvproto v0.0.0-20200108025604-a4dc183d2af5 + github.com/pingcap/kvproto v0.0.0-20200210234432-a965739f8162 github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 github.com/pingcap/parser v0.0.0-20200109073933-a9496438d77d github.com/pingcap/pd v1.1.0-beta.0.20191219054547-4d65bbefbc6d diff --git a/go.sum b/go.sum index 0fe4a3024..24f73f5c9 100644 --- a/go.sum +++ b/go.sum @@ -28,6 +28,7 @@ github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdc github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go v1.26.1 h1:JGQggXhOiNJIqsmbYUl3cYtJZUffeOWlHtxfzGK7WPI= github.com/aws/aws-sdk-go v1.26.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= @@ -182,6 +183,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -300,6 +303,8 @@ github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO github.com/pingcap/kvproto v0.0.0-20191213111810-93cb7c623c8b/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20200108025604-a4dc183d2af5 h1:RUxQExD5yubAjWGnw8kcxfO9abbiVHIE1rbuCyQCWDE= github.com/pingcap/kvproto v0.0.0-20200108025604-a4dc183d2af5/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= +github.com/pingcap/kvproto v0.0.0-20200210234432-a965739f8162 h1:lsoIoCoXMpcHvW6jHcqP/prA4I6duAp1DVyG2ULz4bM= +github.com/pingcap/kvproto v0.0.0-20200210234432-a965739f8162/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/parser v0.0.0-20200109073933-a9496438d77d h1:4QwSJRxmBjTB9ssJNWg2f2bDm5rqnHCUUjMh4N1QOOY= @@ -342,6 
+347,7 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -496,6 +502,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc= golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191011234655-491137f69257 h1:ry8e2D+cwaV6hk7lb3aRTjjZo24shrbK0e11QEOkTIg= golang.org/x/net v0.0.0-20191011234655-491137f69257/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -563,6 +570,7 @@ golang.org/x/tools v0.0.0-20191107010934-f79515f33823 h1:akkRBeitX2EZP59KdtKw310 golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f h1:kDxGY2VmgABOe55qheT/TFqUMtcTHnomIPS1iv3G4Ms= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042 h1:BKiPVwWbEdmAh+5CBwk13CYeVJQRDJpDnKgDyMOGz9M= golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -596,6 +604,7 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs= google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9 h1:6XzpBoANz1NqMNfDXzc2QmHmbb1vyMsvRfoP5rM+K1I= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -605,6 +614,7 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq 
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
@@ -629,6 +639,7 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/pkg/backup/client.go b/pkg/backup/client.go
index 49e48638d..6d6eff033 100644
--- a/pkg/backup/client.go
+++ b/pkg/backup/client.go
@@ -20,6 +20,7 @@ import (
 	"github.com/pingcap/tidb/distsql"
 	"github.com/pingcap/tidb/domain"
 	"github.com/pingcap/tidb/kv"
+	"github.com/pingcap/tidb/meta"
 	"github.com/pingcap/tidb/meta/autoid"
 	"github.com/pingcap/tidb/store/tikv"
 	"github.com/pingcap/tidb/store/tikv/oracle"
@@ -119,7 +120,12 @@ func (bc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend
 }
 
 // SaveBackupMeta saves the current backup meta at the given path.
-func (bc *Client) SaveBackupMeta(ctx context.Context) error {
+func (bc *Client) SaveBackupMeta(ctx context.Context, ddlJobs []*model.Job) error {
+	ddlJobsData, err := json.Marshal(ddlJobs)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	bc.backupMeta.Ddls = ddlJobsData
 	backupMetaData, err := proto.Marshal(&bc.backupMeta)
 	if err != nil {
 		return errors.Trace(err)
@@ -127,7 +133,7 @@ func (bc *Client) SaveBackupMeta(ctx context.Context) error {
 	log.Debug("backup meta", zap.Reflect("meta", bc.backupMeta))
 	backendURL := storage.FormatBackendURL(bc.backend)
-	log.Info("save backup meta", zap.Stringer("path", &backendURL))
+	log.Info("save backup meta", zap.Stringer("path", &backendURL), zap.Int("jobs", len(ddlJobs)))
 	return bc.storage.Write(ctx, utils.MetaFile, backupMetaData)
 }
 
@@ -241,6 +247,51 @@ func BuildBackupRangeAndSchema(
 	return ranges, backupSchemas, nil
 }
 
+// GetBackupDDLJobs returns the ddl jobs that are done in (lastBackupTS, backupTS]
+func GetBackupDDLJobs(dom *domain.Domain, lastBackupTS, backupTS uint64) ([]*model.Job, error) {
+	snapMeta, err := dom.GetSnapshotMeta(backupTS)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	lastSnapMeta, err := dom.GetSnapshotMeta(lastBackupTS)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	lastSchemaVersion, err := lastSnapMeta.GetSchemaVersion()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	allJobs := make([]*model.Job, 0)
+	defaultJobs, err := snapMeta.GetAllDDLJobsInQueue(meta.DefaultJobListKey)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	log.Debug("get default jobs", zap.Int("jobs", len(defaultJobs)))
+	allJobs = append(allJobs, defaultJobs...)
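+	// DDL jobs live in two queues (default and add-index) plus the history
+	// list, so all three sources must be scanned to see every job.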
+ addIndexJobs, err := snapMeta.GetAllDDLJobsInQueue(meta.AddIndexJobListKey) + if err != nil { + return nil, errors.Trace(err) + } + log.Debug("get add index jobs", zap.Int("jobs", len(addIndexJobs))) + allJobs = append(allJobs, addIndexJobs...) + historyJobs, err := snapMeta.GetAllHistoryDDLJobs() + if err != nil { + return nil, errors.Trace(err) + } + log.Debug("get history jobs", zap.Int("jobs", len(historyJobs))) + allJobs = append(allJobs, historyJobs...) + + completedJobs := make([]*model.Job, 0) + for _, job := range allJobs { + if (job.State == model.JobStateDone || job.State == model.JobStateSynced) && + (job.BinlogInfo != nil && job.BinlogInfo.SchemaVersion > lastSchemaVersion) { + completedJobs = append(completedJobs, job) + } + } + log.Debug("get completed jobs", zap.Int("jobs", len(completedJobs))) + return completedJobs, nil +} + // BackupRanges make a backup of the given key ranges. func (bc *Client) BackupRanges( ctx context.Context, diff --git a/pkg/checksum/executor.go b/pkg/checksum/executor.go index 2ca5cf66d..30e8f11c8 100644 --- a/pkg/checksum/executor.go +++ b/pkg/checksum/executor.go @@ -61,7 +61,7 @@ func buildChecksumRequest( reqs := make([]*kv.Request, 0, (len(newTable.Indices)+1)*(len(partDefs)+1)) var oldTableID int64 if oldTable != nil { - oldTableID = oldTable.Schema.ID + oldTableID = oldTable.Info.ID } rs, err := buildRequest(newTable, newTable.ID, oldTable, oldTableID, startTS) if err != nil { @@ -72,7 +72,7 @@ func buildChecksumRequest( for _, partDef := range partDefs { var oldPartID int64 if oldTable != nil { - for _, oldPartDef := range oldTable.Schema.Partition.Definitions { + for _, oldPartDef := range oldTable.Info.Partition.Definitions { if oldPartDef.Name == partDef.Name { oldPartID = oldPartDef.ID } @@ -108,7 +108,7 @@ func buildRequest( } var oldIndexInfo *model.IndexInfo if oldTable != nil { - for _, oldIndex := range oldTable.Schema.Indices { + for _, oldIndex := range oldTable.Info.Indices { if oldIndex.Name == indexInfo.Name { oldIndexInfo = oldIndex break @@ -117,7 +117,7 @@ func buildRequest( if oldIndexInfo == nil { log.Panic("index not found", zap.Reflect("table", tableInfo), - zap.Reflect("oldTable", oldTable.Schema), + zap.Reflect("oldTable", oldTable.Info), zap.Stringer("index", indexInfo.Name)) } } diff --git a/pkg/checksum/executor_test.go b/pkg/checksum/executor_test.go index ca68628e2..3e6d8078c 100644 --- a/pkg/checksum/executor_test.go +++ b/pkg/checksum/executor_test.go @@ -83,7 +83,7 @@ func (s *testChecksumSuite) TestChecksum(c *C) { // Test rewrite rules tk.MustExec("alter table t1 add index i2(a);") tableInfo1 = s.getTableInfo(c, "test", "t1") - oldTable := utils.Table{Schema: tableInfo1} + oldTable := utils.Table{Info: tableInfo1} exe2, err = NewExecutorBuilder(tableInfo2, math.MaxUint64). SetOldTable(&oldTable).Build() c.Assert(err, IsNil) diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 5402d78bc..f45b3d510 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -2,7 +2,9 @@ package restore import ( "context" + "encoding/json" "math" + "sort" "sync" "time" @@ -40,6 +42,7 @@ type Client struct { tableWorkerPool *utils.WorkerPool databases map[string]*utils.Database + ddlJobs []*model.Job backupMeta *backup.BackupMeta db *DB rateLimit uint64 @@ -97,8 +100,15 @@ func (rc *Client) InitBackupMeta(backupMeta *backup.BackupMeta, backend *backup. 
if err != nil { return errors.Trace(err) } + var ddlJobs []*model.Job + err = json.Unmarshal(backupMeta.GetDdls(), &ddlJobs) + if err != nil { + return errors.Trace(err) + } rc.databases = databases + rc.ddlJobs = ddlJobs rc.backupMeta = backupMeta + log.Info("load backupmeta", zap.Int("databases", len(rc.databases)), zap.Int("jobs", len(rc.ddlJobs))) metaClient := NewSplitClient(rc.pdClient) importClient := NewImportClient(metaClient) @@ -151,6 +161,11 @@ func (rc *Client) GetDatabase(name string) *utils.Database { return rc.databases[name] } +// GetDDLJobs returns ddl jobs +func (rc *Client) GetDDLJobs() []*model.Job { + return rc.ddlJobs +} + // GetTableSchema returns the schema of a table from TiDB. func (rc *Client) GetTableSchema( dom *domain.Domain, @@ -189,11 +204,11 @@ func (rc *Client) CreateTables( if err != nil { return nil, nil, err } - newTableInfo, err := rc.GetTableSchema(dom, table.Db.Name, table.Schema.Name) + newTableInfo, err := rc.GetTableSchema(dom, table.Db.Name, table.Info.Name) if err != nil { return nil, nil, err } - rules := GetRewriteRules(newTableInfo, table.Schema, newTS) + rules := GetRewriteRules(newTableInfo, table.Info, newTS) rewriteRules.Table = append(rewriteRules.Table, rules.Table...) rewriteRules.Data = append(rewriteRules.Data, rules.Data...) newTables = append(newTables, newTableInfo) @@ -201,6 +216,26 @@ func (rc *Client) CreateTables( return rewriteRules, newTables, nil } +// ExecDDLs executes the queries of the ddl jobs. +func (rc *Client) ExecDDLs(ddlJobs []*model.Job) error { + // Sort the ddl jobs by schema version in ascending order. + sort.Slice(ddlJobs, func(i, j int) bool { + return ddlJobs[i].BinlogInfo.SchemaVersion < ddlJobs[j].BinlogInfo.SchemaVersion + }) + + for _, job := range ddlJobs { + err := rc.db.ExecDDL(rc.ctx, job) + if err != nil { + return errors.Trace(err) + } + log.Info("execute ddl query", + zap.String("db", job.SchemaName), + zap.String("query", job.Query), + zap.Int64("historySchemaVersion", job.BinlogInfo.SchemaVersion)) + } + return nil +} + func (rc *Client) setSpeedLimit() error { if !rc.hasSpeedLimited && rc.rateLimit != 0 { stores, err := rc.pdClient.GetAllStores(rc.ctx, pd.WithExcludeTombstone()) @@ -380,7 +415,7 @@ func (rc *Client) ValidateChecksum( checksumResp.TotalBytes != table.TotalBytes { log.Error("failed in validate checksum", zap.String("database", table.Db.Name.L), - zap.String("table", table.Schema.Name.L), + zap.String("table", table.Info.Name.L), zap.Uint64("origin tidb crc64", table.Crc64Xor), zap.Uint64("calculated crc64", checksumResp.Checksum), zap.Uint64("origin tidb total kvs", table.TotalKvs), diff --git a/pkg/restore/client_test.go b/pkg/restore/client_test.go index 5007f1281..3d608b3b9 100644 --- a/pkg/restore/client_test.go +++ b/pkg/restore/client_test.go @@ -52,7 +52,7 @@ func (s *testRestoreClientSuite) TestCreateTables(c *C) { for i := len(tables) - 1; i >= 0; i-- { tables[i] = &utils.Table{ Db: dbSchema, - Schema: &model.TableInfo{ + Info: &model.TableInfo{ ID: int64(i), Name: model.NewCIStr("test" + strconv.Itoa(i)), Columns: []*model.ColumnInfo{{ diff --git a/pkg/restore/db.go b/pkg/restore/db.go index b114b7629..8c09af16f 100644 --- a/pkg/restore/db.go +++ b/pkg/restore/db.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "sort" "strings" "github.com/pingcap/errors" @@ -38,6 +39,31 @@ func NewDB(store kv.Storage) (*DB, error) { }, nil } +// ExecDDL executes the query of a ddl job. 
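+// When the job carries a table, the session first switches to the job's
+// schema so that the recorded query resolves against the right database.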
+func (db *DB) ExecDDL(ctx context.Context, ddlJob *model.Job) error {
+	var err error
+	if ddlJob.BinlogInfo.TableInfo != nil {
+		switchDbSQL := fmt.Sprintf("use %s;", ddlJob.SchemaName)
+		_, err = db.se.Execute(ctx, switchDbSQL)
+		if err != nil {
+			log.Error("switch db failed",
+				zap.String("query", switchDbSQL),
+				zap.String("db", ddlJob.SchemaName),
+				zap.Error(err))
+			return errors.Trace(err)
+		}
+	}
+	_, err = db.se.Execute(ctx, ddlJob.Query)
+	if err != nil {
+		log.Error("execute ddl query failed",
+			zap.String("query", ddlJob.Query),
+			zap.String("db", ddlJob.SchemaName),
+			zap.Int64("historySchemaVersion", ddlJob.BinlogInfo.SchemaVersion),
+			zap.Error(err))
+	}
+	return errors.Trace(err)
+}
+
 // CreateDatabase executes a CREATE DATABASE SQL.
 func (db *DB) CreateDatabase(ctx context.Context, schema *model.DBInfo) error {
 	var buf bytes.Buffer
@@ -49,16 +75,15 @@ func (db *DB) CreateDatabase(ctx context.Context, schema *model.DBInfo) error {
 	createSQL := buf.String()
 	_, err = db.se.Execute(ctx, createSQL)
 	if err != nil {
-		log.Error("create database failed", zap.String("SQL", createSQL), zap.Error(err))
-		return errors.Trace(err)
+		log.Error("create database failed", zap.String("query", createSQL), zap.Error(err))
 	}
-	return nil
+	return errors.Trace(err)
 }
 
 // CreateTable executes a CREATE TABLE SQL.
 func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error {
 	var buf bytes.Buffer
-	schema := table.Schema
+	schema := table.Info
 	err := executor.ConstructResultOfShowCreateTable(db.se, schema, newIDAllocator(schema.AutoIncID), &buf)
 	if err != nil {
 		log.Error(
@@ -88,7 +113,7 @@ func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error {
 		log.Error("create table failed",
 			zap.String("SQL", createSQL),
 			zap.Stringer("db", table.Db.Name),
-			zap.Stringer("table", table.Schema.Name),
+			zap.Stringer("table", table.Info.Name),
 			zap.Error(err))
 		return errors.Trace(err)
 	}
@@ -99,16 +124,76 @@ func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error {
 	_, err = db.se.Execute(ctx, alterAutoIncIDSQL)
 	if err != nil {
 		log.Error("alter AutoIncID failed",
-			zap.String("SQL", alterAutoIncIDSQL),
+			zap.String("query", alterAutoIncIDSQL),
 			zap.Stringer("db", table.Db.Name),
-			zap.Stringer("table", table.Schema.Name),
+			zap.Stringer("table", table.Info.Name),
 			zap.Error(err))
-		return errors.Trace(err)
 	}
-	return nil
+	return errors.Trace(err)
 }
 
 // Close closes the connection
 func (db *DB) Close() {
 	db.se.Close()
 }
+
+// FilterDDLJobs filters the ddl jobs related to the given tables
+func FilterDDLJobs(allDDLJobs []*model.Job, tables []*utils.Table) (ddlJobs []*model.Job) {
+	// Sort the ddl jobs by schema version in descending order.
+	sort.Slice(allDDLJobs, func(i, j int) bool {
+		return allDDLJobs[i].BinlogInfo.SchemaVersion > allDDLJobs[j].BinlogInfo.SchemaVersion
+	})
+	dbs := getDatabases(tables)
+	for _, db := range dbs {
+		// These maps are for solving corner cases.
+		// e.g. let "t=2" indicate that the id of database "t" is 2, if the ddl execution sequence is:
+		// rename "a" to "b"(a=1) -> drop "b"(b=1) -> create "b"(b=2) -> rename "b" to "a"(a=2)
+		// then we cannot find the "create" DDL by name and id directly.
+		// To cover this case, we must find all names and ids the database/table ever had.
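+		// A job matches if it touches any id or name seen so far; each match
+		// records the job's identities so that older jobs can match them too.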
+		dbIDs := make(map[int64]bool)
+		dbIDs[db.ID] = true
+		dbNames := make(map[string]bool)
+		dbNames[db.Name.String()] = true
+		for _, job := range allDDLJobs {
+			if job.BinlogInfo.DBInfo != nil {
+				if dbIDs[job.SchemaID] || dbNames[job.BinlogInfo.DBInfo.Name.String()] {
+					ddlJobs = append(ddlJobs, job)
+					// For the jobs executed with the old id, like step 2 in the example above.
+					dbIDs[job.SchemaID] = true
+					// For the jobs executed after rename, like step 3 in the example above.
+					dbNames[job.BinlogInfo.DBInfo.Name.String()] = true
+				}
+			}
+		}
+	}
+
+	for _, table := range tables {
+		tableIDs := make(map[int64]bool)
+		tableIDs[table.Info.ID] = true
+		tableNames := make(map[string]bool)
+		tableNames[table.Info.Name.String()] = true
+		for _, job := range allDDLJobs {
+			if job.BinlogInfo.TableInfo != nil {
+				if tableIDs[job.TableID] || tableNames[job.BinlogInfo.TableInfo.Name.String()] {
+					ddlJobs = append(ddlJobs, job)
+					tableIDs[job.TableID] = true
+					// For truncate table, the id may be changed
+					tableIDs[job.BinlogInfo.TableInfo.ID] = true
+					tableNames[job.BinlogInfo.TableInfo.Name.String()] = true
+				}
+			}
+		}
+	}
+	return ddlJobs
+}
+
+func getDatabases(tables []*utils.Table) (dbs []*model.DBInfo) {
+	dbIDs := make(map[int64]bool)
+	for _, table := range tables {
+		if !dbIDs[table.Db.ID] {
+			dbs = append(dbs, table.Db)
+			dbIDs[table.Db.ID] = true
+		}
+	}
+	return
+}
diff --git a/pkg/restore/db_test.go b/pkg/restore/db_test.go
index 98341f510..0151b4da6 100644
--- a/pkg/restore/db_test.go
+++ b/pkg/restore/db_test.go
@@ -12,6 +12,7 @@ import (
 	"github.com/pingcap/tidb/util/testkit"
 	"github.com/pingcap/tidb/util/testleak"
 
+	"github.com/pingcap/br/pkg/backup"
 	"github.com/pingcap/br/pkg/utils"
 )
 
@@ -25,19 +26,18 @@ func (s *testRestoreSchemaSuite) SetUpSuite(c *C) {
 	var err error
 	s.mock, err = utils.NewMockCluster()
 	c.Assert(err, IsNil)
+	c.Assert(s.mock.Start(), IsNil)
 }
 
 func TestT(t *testing.T) {
 	TestingT(t)
 }
 
 func (s *testRestoreSchemaSuite) TearDownSuite(c *C) {
+	s.mock.Stop()
 	testleak.AfterTest(c)()
 }
 
 func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) {
-	c.Assert(s.mock.Start(), IsNil)
-	defer s.mock.Stop()
-
 	tk := testkit.NewTestKit(c, s.mock.Storage)
 	tk.MustExec("use test")
 	tk.MustExec("set @@sql_mode=''")
@@ -60,16 +60,16 @@ func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) {
 	tableInfo, err := info.TableByName(model.NewCIStr("test"), model.NewCIStr("\"t\""))
 	c.Assert(err, IsNil, Commentf("Error get table info: %s", err))
 	table := utils.Table{
-		Schema: tableInfo.Meta(),
-		Db:     dbInfo,
+		Info: tableInfo.Meta(),
+		Db:   dbInfo,
 	}
 	// Get the next AutoIncID
 	idAlloc := autoid.NewAllocator(s.mock.Storage, dbInfo.ID, false, autoid.RowIDAllocType)
-	globalAutoID, err := idAlloc.NextGlobalAutoID(table.Schema.ID)
+	globalAutoID, err := idAlloc.NextGlobalAutoID(table.Info.ID)
 	c.Assert(err, IsNil, Commentf("Error allocate next auto id"))
 	c.Assert(autoIncID, Equals, uint64(globalAutoID))
 	// Alter AutoIncID to the next AutoIncID + 100
-	table.Schema.AutoIncID = globalAutoID + 100
+	table.Info.AutoIncID = globalAutoID + 100
 	db, err := NewDB(s.mock.Storage)
 	c.Assert(err, IsNil, Commentf("Error create DB"))
 	tk.MustExec("drop database if exists test;")
@@ -92,3 +92,39 @@ func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) {
 	c.Assert(err, IsNil, Commentf("Error query auto inc id: %s", err))
 	c.Assert(autoIncID, Equals, uint64(globalAutoID+100))
 }
+
+func (s *testRestoreSchemaSuite) TestFilterDDLJobs(c *C) {
+	tk := testkit.NewTestKit(c, s.mock.Storage)
+
tk.MustExec("CREATE DATABASE IF NOT EXISTS test_db;") + tk.MustExec("CREATE TABLE IF NOT EXISTS test_db.test_table (c1 INT);") + lastTs, err := s.mock.GetOracle().GetTimestamp(context.Background()) + c.Assert(err, IsNil, Commentf("Error get last ts: %s", err)) + tk.MustExec("RENAME TABLE test_db.test_table to test_db.test_table1;") + tk.MustExec("DROP TABLE test_db.test_table1;") + tk.MustExec("DROP DATABASE test_db;") + tk.MustExec("CREATE DATABASE test_db;") + tk.MustExec("USE test_db;") + tk.MustExec("CREATE TABLE test_table1 (c2 CHAR(255));") + tk.MustExec("RENAME TABLE test_table1 to test_table;") + tk.MustExec("TRUNCATE TABLE test_table;") + + ts, err := s.mock.GetOracle().GetTimestamp(context.Background()) + c.Assert(err, IsNil, Commentf("Error get ts: %s", err)) + allDDLJobs, err := backup.GetBackupDDLJobs(s.mock.Domain, lastTs, ts) + c.Assert(err, IsNil, Commentf("Error get ddl jobs: %s", err)) + infoSchema, err := s.mock.Domain.GetSnapshotInfoSchema(ts) + c.Assert(err, IsNil, Commentf("Error get snapshot info schema: %s", err)) + dbInfo, ok := infoSchema.SchemaByName(model.NewCIStr("test_db")) + c.Assert(ok, IsTrue, Commentf("DB info not exist")) + tableInfo, err := infoSchema.TableByName(model.NewCIStr("test_db"), model.NewCIStr("test_table")) + c.Assert(err, IsNil, Commentf("Error get table info: %s", err)) + tables := []*utils.Table{{ + Db: dbInfo, + Info: tableInfo.Meta(), + }} + ddlJobs := FilterDDLJobs(allDDLJobs, tables) + for _, job := range ddlJobs { + c.Logf("get ddl job: %s", job.Query) + } + c.Assert(len(ddlJobs), Equals, 7) +} diff --git a/pkg/restore/split.go b/pkg/restore/split.go index 3248fdd0d..378e256c6 100644 --- a/pkg/restore/split.go +++ b/pkg/restore/split.go @@ -3,12 +3,14 @@ package restore import ( "bytes" "context" + "strings" "time" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/log" + "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" ) @@ -103,8 +105,18 @@ SplitRegions: } for regionID, keys := range splitKeyMap { var newRegions []*RegionInfo - newRegions, err = rs.splitAndScatterRegions(ctx, regionMap[regionID], keys) + region := regionMap[regionID] + newRegions, err = rs.splitAndScatterRegions(ctx, region, keys) if err != nil { + if strings.Contains(err.Error(), "no valid key") { + for _, key := range keys { + log.Error("no valid key", + zap.Binary("startKey", region.Region.StartKey), + zap.Binary("endKey", region.Region.EndKey), + zap.Binary("key", codec.EncodeBytes([]byte{}, key))) + } + return errors.Trace(err) + } interval = 2 * interval if interval > SplitMaxRetryInterval { interval = SplitMaxRetryInterval @@ -115,6 +127,7 @@ SplitRegions: } continue SplitRegions } + log.Debug("split regions", zap.Stringer("region", region.Region), zap.ByteStrings("keys", keys)) scatterRegions = append(scatterRegions, newRegions...) 
onSplit(keys) } @@ -250,7 +263,7 @@ func getSplitKeys(rewriteRules *RewriteRules, ranges []Range, regions []*RegionI checkKeys = append(checkKeys, rule.GetNewKeyPrefix()) } for _, rg := range ranges { - checkKeys = append(checkKeys, rg.EndKey) + checkKeys = append(checkKeys, truncateRowKey(rg.EndKey)) } for _, key := range checkKeys { if region := needSplit(key, regions); region != nil { @@ -259,7 +272,10 @@ func getSplitKeys(rewriteRules *RewriteRules, ranges []Range, regions []*RegionI splitKeys = make([][]byte, 0, 1) } splitKeyMap[region.Region.GetId()] = append(splitKeys, key) - log.Debug("get key for split region", zap.Binary("key", key), zap.Stringer("region", region.Region)) + log.Debug("get key for split region", + zap.Binary("key", key), + zap.Binary("startKey", region.Region.StartKey), + zap.Binary("endKey", region.Region.EndKey)) } } return splitKeyMap @@ -285,6 +301,21 @@ func needSplit(splitKey []byte, regions []*RegionInfo) *RegionInfo { return nil } +var ( + tablePrefix = []byte{'t'} + idLen = 8 + recordPrefix = []byte("_r") +) + +func truncateRowKey(key []byte) []byte { + if bytes.HasPrefix(key, tablePrefix) && + len(key) > tablecodec.RecordRowKeyLen && + bytes.HasPrefix(key[len(tablePrefix)+idLen:], recordPrefix) { + return key[:tablecodec.RecordRowKeyLen] + } + return key +} + func beforeEnd(key []byte, end []byte) bool { return bytes.Compare(key, end) < 0 || len(end) == 0 } diff --git a/pkg/restore/split_test.go b/pkg/restore/split_test.go index 509c4cfa0..3ace5b8c8 100644 --- a/pkg/restore/split_test.go +++ b/pkg/restore/split_test.go @@ -280,7 +280,7 @@ func validateRegions(regions map[uint64]*RegionInfo) bool { return false } FindRegion: - for i := 1; i < 12; i++ { + for i := 1; i < len(keys); i++ { for _, region := range regions { startKey := []byte(keys[i-1]) if len(startKey) != 0 { @@ -299,3 +299,26 @@ FindRegion: } return true } + +func (s *testRestoreUtilSuite) TestNeedSplit(c *C) { + regions := []*RegionInfo{ + { + Region: &metapb.Region{ + StartKey: codec.EncodeBytes([]byte{}, []byte("b")), + EndKey: codec.EncodeBytes([]byte{}, []byte("d")), + }, + }, + } + // Out of region + c.Assert(needSplit([]byte("a"), regions), IsNil) + // Region start key + c.Assert(needSplit([]byte("b"), regions), IsNil) + // In region + region := needSplit([]byte("c"), regions) + c.Assert(bytes.Compare(region.Region.GetStartKey(), codec.EncodeBytes([]byte{}, []byte("b"))), Equals, 0) + c.Assert(bytes.Compare(region.Region.GetEndKey(), codec.EncodeBytes([]byte{}, []byte("d"))), Equals, 0) + // Region end key + c.Assert(needSplit([]byte("d"), regions), IsNil) + // Out of region + c.Assert(needSplit([]byte("e"), regions), IsNil) +} diff --git a/pkg/task/backup.go b/pkg/task/backup.go index b9613cd56..240754517 100644 --- a/pkg/task/backup.go +++ b/pkg/task/backup.go @@ -6,8 +6,10 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" + "github.com/pingcap/parser/model" "github.com/pingcap/tidb-tools/pkg/filter" "github.com/spf13/pflag" + "go.uber.org/zap" "github.com/pingcap/br/pkg/backup" "github.com/pingcap/br/pkg/storage" @@ -98,6 +100,19 @@ func RunBackup(c context.Context, cmdName string, cfg *BackupConfig) error { return err } + ddlJobs := make([]*model.Job, 0) + if cfg.LastBackupTS > 0 { + err = backup.CheckGCSafepoint(ctx, mgr.GetPDClient(), cfg.LastBackupTS) + if err != nil { + log.Error("Check gc safepoint for last backup ts failed", zap.Error(err)) + return err + } + ddlJobs, err = backup.GetBackupDDLJobs(mgr.GetDomain(), cfg.LastBackupTS, backupTS) + if err != nil { 
+ return err + } + } + // The number of regions need to backup approximateRegions := 0 for _, r := range ranges { @@ -139,17 +154,25 @@ func RunBackup(c context.Context, cmdName string, cfg *BackupConfig) error { return err } - valid, err := client.FastChecksum() - if err != nil { - return err - } - if !valid { - log.Error("backup FastChecksum mismatch!") + if cfg.LastBackupTS == 0 { + var valid bool + valid, err = client.FastChecksum() + if err != nil { + return err + } + if !valid { + log.Error("backup FastChecksum mismatch!") + return errors.Errorf("mismatched checksum") + } + + } else { + // Since we don't support checksum for incremental data, fast checksum should be skipped. + log.Info("Skip fast checksum in incremental backup") } // Checksum has finished close(updateCh) - err = client.SaveBackupMeta(ctx) + err = client.SaveBackupMeta(ctx, ddlJobs) if err != nil { return err } diff --git a/pkg/task/restore.go b/pkg/task/restore.go index a56a1d6da..599dcb478 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -103,6 +103,14 @@ func RunRestore(c context.Context, cmdName string, cfg *RestoreConfig) error { return err } } + ddlJobs := restore.FilterDDLJobs(client.GetDDLJobs(), tables) + if err != nil { + return err + } + err = client.ExecDDLs(ddlJobs) + if err != nil { + return errors.Trace(err) + } rewriteRules, newTables, err := client.CreateTables(mgr.GetDomain(), tables, newTS) if err != nil { return err @@ -178,12 +186,12 @@ func filterRestoreFiles( for _, db := range client.GetDatabases() { createdDatabase := false for _, table := range db.Tables { - if !tableFilter.Match(&filter.Table{Schema: db.Schema.Name.O, Name: table.Schema.Name.O}) { + if !tableFilter.Match(&filter.Table{Schema: db.Info.Name.O, Name: table.Info.Name.O}) { continue } if !createdDatabase { - if err = client.CreateDatabase(db.Schema); err != nil { + if err = client.CreateDatabase(db.Info); err != nil { return nil, nil, err } createdDatabase = true diff --git a/pkg/utils/schema.go b/pkg/utils/schema.go index 67d28132f..0afe98e5b 100644 --- a/pkg/utils/schema.go +++ b/pkg/utils/schema.go @@ -24,7 +24,7 @@ const ( // Table wraps the schema and files of a table. type Table struct { Db *model.DBInfo - Schema *model.TableInfo + Info *model.TableInfo Crc64Xor uint64 TotalKvs uint64 TotalBytes uint64 @@ -33,14 +33,14 @@ type Table struct { // Database wraps the schema and tables of a database. type Database struct { - Schema *model.DBInfo + Info *model.DBInfo Tables []*Table } // GetTable returns a table of the database by name. func (db *Database) GetTable(name string) *Table { for _, table := range db.Tables { - if table.Schema.Name.String() == name { + if table.Info.Name.String() == name { return table } } @@ -61,7 +61,7 @@ func LoadBackupTables(meta *backup.BackupMeta) (map[string]*Database, error) { db, ok := databases[dbInfo.Name.String()] if !ok { db = &Database{ - Schema: dbInfo, + Info: dbInfo, Tables: make([]*Table, 0), } databases[dbInfo.Name.String()] = db @@ -94,7 +94,7 @@ func LoadBackupTables(meta *backup.BackupMeta) (map[string]*Database, error) { } table := &Table{ Db: dbInfo, - Schema: tableInfo, + Info: tableInfo, Crc64Xor: schema.Crc64Xor, TotalKvs: schema.TotalKvs, TotalBytes: schema.TotalBytes, diff --git a/tests/br_full_ddl/run.sh b/tests/br_full_ddl/run.sh index 1e40415d7..e50ef1ecf 100755 --- a/tests/br_full_ddl/run.sh +++ b/tests/br_full_ddl/run.sh @@ -36,7 +36,7 @@ done # backup full echo "backup start..." 
-br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --log-file $LOG +run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --log-file $LOG checksum_count=$(cat $LOG | grep "fast checksum success" | wc -l | xargs) @@ -50,7 +50,7 @@ run_sql "DROP DATABASE $DB;" # restore full echo "restore start..." -br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR +run_br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') diff --git a/tests/br_incremental/run.sh b/tests/br_incremental/run.sh index bb6a42efb..b6a6061de 100755 --- a/tests/br_incremental/run.sh +++ b/tests/br_incremental/run.sh @@ -20,55 +20,38 @@ TABLE="usertable" run_sql "CREATE DATABASE $DB;" go-ycsb load mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB - -row_count_ori=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +row_count_ori_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') # full backup echo "full backup start..." -run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 - -run_sql "DROP TABLE $DB.$TABLE;" - -# full restore -echo "full restore start..." -run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB" --pd $PD_ADDR - -row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') - -if [ "$row_count_ori" -ne "$row_count_new" ];then - echo "TEST: [$TEST_NAME] full br failed!" - exit 1 -fi +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/full" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 go-ycsb run mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB -row_count_ori=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') -last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB" | tail -n1) - -# clean up data -rm -rf $TEST_DIR/$DB - # incremental backup echo "incremental backup start..." -run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts - -start_ts=$(br validate decode --field="start-version" -s "local://$TEST_DIR/$DB" | tail -n1) -end_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB" | tail -n1) +last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1) +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts +row_count_ori_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') -echo "start version: $start_ts, end version: $end_ts" +run_sql "DROP DATABASE $DB;" +# full restore +echo "full restore start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR +row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_full}" != "${row_count_ori_full}" ];then + echo "TEST: [$TEST_NAME] full restore fail on database $DB" + exit 1 +fi # incremental restore echo "incremental restore start..." 
-run_br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR
-
-row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')
-
-echo "[original] row count: $row_count_ori, [after br] row count: $row_count_new"
-
-if [ "$row_count_ori" -eq "$row_count_new" ];then
-    echo "TEST: [$TEST_NAME] successed!"
-else
-    echo "TEST: [$TEST_NAME] failed!"
+run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR
+row_count_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')
+# check incremental restore
+if [ "${row_count_inc}" != "${row_count_ori_inc}" ];then
+    echo "TEST: [$TEST_NAME] incremental restore fail on database $DB"
     exit 1
 fi
diff --git a/tests/br_incremental_ddl/run.sh b/tests/br_incremental_ddl/run.sh
new file mode 100755
index 000000000..d9a88709b
--- /dev/null
+++ b/tests/br_incremental_ddl/run.sh
@@ -0,0 +1,74 @@
+#!/bin/sh
+#
+# Copyright 2019 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu
+DB="$TEST_NAME"
+TABLE="usertable"
+ROW_COUNT=100
+PATH="tests/$TEST_NAME:bin:$PATH"
+
+echo "load data..."
+# create database
+run_sql "CREATE DATABASE IF NOT EXISTS $DB;"
+# create table
+run_sql "CREATE TABLE IF NOT EXISTS ${DB}.${TABLE} (c1 INT);"
+# insert records
+for i in $(seq $ROW_COUNT); do
+    run_sql "INSERT INTO ${DB}.${TABLE}(c1) VALUES ($i);"
+done
+
+# full backup
+echo "full backup start..."
+run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/full" --db $DB -t $TABLE --ratelimit 5 --concurrency 4
+# run ddls
+echo "run ddls..."
+run_sql "RENAME TABLE ${DB}.${TABLE} to ${DB}.${TABLE}1;"
+run_sql "DROP TABLE ${DB}.${TABLE}1;"
+run_sql "DROP DATABASE ${DB};"
+run_sql "CREATE DATABASE ${DB};"
+run_sql "CREATE TABLE ${DB}.${TABLE}1 (c2 CHAR(255));"
+run_sql "RENAME TABLE ${DB}.${TABLE}1 to ${DB}.${TABLE};"
+run_sql "TRUNCATE TABLE ${DB}.${TABLE};"
+# insert records
+for i in $(seq $ROW_COUNT); do
+    run_sql "INSERT INTO ${DB}.${TABLE}(c2) VALUES ('$i');"
+done
+# incremental backup
+echo "incremental backup start..."
+last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1)
+run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts
+
+run_sql "DROP DATABASE $DB;"
+# full restore
+echo "full restore start..."
+run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR
+row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}')
+# check full restore
+if [ "${row_count_full}" != "${ROW_COUNT}" ];then
+    echo "TEST: [$TEST_NAME] full restore fail on database $DB"
+    exit 1
+fi
+# incremental restore
+echo "incremental restore start..."
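+# restoring the incremental archive replays the backed-up DDL jobs before ingesting the table data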
+run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR +row_count_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check incremental restore +if [ "${row_count_inc}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] incremental restore fail on database $DB" + exit 1 +fi +run_sql "INSERT INTO ${DB}.${TABLE}(c2) VALUES ('1');" + +run_sql "DROP DATABASE $DB;" diff --git a/tests/br_incremental_index/run.sh b/tests/br_incremental_index/run.sh new file mode 100755 index 000000000..f4b4b9de7 --- /dev/null +++ b/tests/br_incremental_index/run.sh @@ -0,0 +1,74 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +DB="$TEST_NAME" +TABLE="usertable" +ROW_COUNT=100 +PATH="tests/$TEST_NAME:bin:$PATH" + +echo "load data..." +# create database +run_sql "CREATE DATABASE IF NOT EXISTS $DB;" +# create table +run_sql "CREATE TABLE IF NOT EXISTS ${DB}.${TABLE} (c1 INT);" +# insert records +for i in $(seq $ROW_COUNT); do + run_sql "INSERT INTO ${DB}.${TABLE} VALUES ($i);" +done + +# full backup +echo "backup full start..." +run_sql "CREATE INDEX idx_c1 ON ${DB}.${TABLE}(c1)" & +run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/full" --ratelimit 5 --concurrency 4 +wait +# run ddls +echo "run ddls..." +run_sql "ALTER TABLE ${DB}.${TABLE} ADD COLUMN c2 INT NOT NULL;"; +run_sql "ALTER TABLE ${DB}.${TABLE} ADD COLUMN c3 INT NOT NULL;"; +run_sql "ALTER TABLE ${DB}.${TABLE} DROP COLUMN c3;"; +# incremental backup +echo "incremental backup start..." +last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1) +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts + +run_sql "DROP DATABASE $DB;" +# full restore +echo "full restore start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR +row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_full}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] full restore fail on database $DB" + exit 1 +fi +# incremental restore +echo "incremental restore start..."
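+# the incremental backup above was taken after the ADD/DROP COLUMN ddls, so
+# the restored table should expose columns (c1, c2); the INSERT of (1, 1)
+# below then verifies that the restored schema accepts new writes.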
+run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR +row_count_inc=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check incremental restore +if [ "${row_count_inc}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] incremental restore fail on database $DB" + exit 1 +fi +run_sql "INSERT INTO ${DB}.${TABLE} VALUES (1, 1);" +row_count_insert=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check insert count +if [ "${row_count_insert}" != "$(expr $row_count_inc + 1)" ];then + echo "TEST: [$TEST_NAME] insert record fail on database $DB" + exit 1 +fi + +run_sql "DROP DATABASE $DB;" diff --git a/tests/config/tikv.toml b/tests/config/tikv.toml index e93a16597..73323d878 100644 --- a/tests/config/tikv.toml +++ b/tests/config/tikv.toml @@ -11,3 +11,4 @@ max-open-files = 4096 [raftstore] # true (default value) for high reliability, this can prevent data loss when power failure. sync-log = false +capacity = "10GB" \ No newline at end of file diff --git a/tests/run.sh b/tests/run.sh index 3cedc7093..a4edb762a 100755 --- a/tests/run.sh +++ b/tests/run.sh @@ -24,6 +24,7 @@ TIDB_ADDR="127.0.0.1:4000" TIDB_STATUS_ADDR="127.0.0.1:10080" # actual tikv_addr are TIKV_ADDR${i} TIKV_ADDR="127.0.0.1:2016" +TIKV_STATUS_ADDR="127.0.0.1:2018" TIKV_COUNT=4 stop_services() { @@ -55,6 +56,7 @@ start_services() { bin/tikv-server \ --pd "$PD_ADDR" \ -A "$TIKV_ADDR$i" \ + --status-addr "$TIKV_STATUS_ADDR$i" \ --log-file "$TEST_DIR/tikv${i}.log" \ -C "tests/config/tikv.toml" \ -s "$TEST_DIR/tikv${i}" & From 4492b8fc8a60bb9b1e275bc95b906c85a477e0d4 Mon Sep 17 00:00:00 2001 From: kennytm Date: Sat, 22 Feb 2020 17:46:38 +0800 Subject: [PATCH 08/46] Reduce TiDB dependencies (#158) * utils: exclude mock_cluster outside of unit test * utils: remove unused ResultSetToStringSlice() * *: abstract away dependencies of tidb/session into a Glue interface * *: fix hound lint * util,mock: move utils.MockCluster to mock.Cluster * restore: fix test build failure Co-authored-by: 3pointer --- cmd/backup.go | 2 +- cmd/cmd.go | 2 + cmd/restore.go | 2 +- pkg/backup/safe_point_test.go | 6 +-- pkg/backup/schema_test.go | 10 ++--- pkg/checksum/executor_test.go | 7 ++-- pkg/conn/conn.go | 7 ++-- pkg/glue/glue.go | 24 +++++++++++ pkg/gluetidb/glue.go | 65 +++++++++++++++++++++++++++++ pkg/{utils => mock}/mock_cluster.go | 16 +++---- pkg/mock/mock_cluster_test.go | 27 ++++++++++++ pkg/restore/backoff_test.go | 5 ++- pkg/restore/client.go | 4 +- pkg/restore/client_test.go | 10 +++-- pkg/restore/db.go | 36 +++++++--------- pkg/restore/db_test.go | 8 ++-- pkg/restore/util.go | 9 ---- pkg/task/backup.go | 5 ++- pkg/task/common.go | 5 ++- pkg/task/restore.go | 7 ++-- pkg/utils/mock_cluster_test.go | 27 ------------ pkg/utils/schema.go | 33 --------------- 22 files changed, 186 insertions(+), 131 deletions(-) create mode 100644 pkg/glue/glue.go create mode 100644 pkg/gluetidb/glue.go rename pkg/{utils => mock}/mock_cluster.go (93%) create mode 100644 pkg/mock/mock_cluster_test.go delete mode 100644 pkg/utils/mock_cluster_test.go diff --git a/cmd/backup.go b/cmd/backup.go index 39aa4fd28..8ae45270c 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -15,7 +15,7 @@ func runBackupCommand(command *cobra.Command, cmdName string) error { if err := cfg.ParseFromFlags(command.Flags()); err != nil { return err } - return task.RunBackup(GetDefaultContext(), tidbGlue, cmdName, &cfg) } // NewBackupCommand returns a full
backup subcommand. diff --git a/cmd/cmd.go b/cmd/cmd.go index fdadaa6f8..83355e5dd 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -13,6 +13,7 @@ import ( "github.com/spf13/cobra" "go.uber.org/zap" + "github.com/pingcap/br/pkg/gluetidb" "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) @@ -21,6 +22,7 @@ var ( initOnce = sync.Once{} defaultContext context.Context hasLogFile uint64 + tidbGlue = gluetidb.Glue{} ) const ( diff --git a/cmd/restore.go b/cmd/restore.go index 2dfec9846..9f7c47bdb 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -14,7 +14,7 @@ func runRestoreCommand(command *cobra.Command, cmdName string) error { if err := cfg.ParseFromFlags(command.Flags()); err != nil { return err } - return task.RunRestore(GetDefaultContext(), cmdName, &cfg) + return task.RunRestore(GetDefaultContext(), tidbGlue, cmdName, &cfg) } // NewRestoreCommand returns a restore subcommand diff --git a/pkg/backup/safe_point_test.go b/pkg/backup/safe_point_test.go index 1bea9e210..5a4939191 100644 --- a/pkg/backup/safe_point_test.go +++ b/pkg/backup/safe_point_test.go @@ -8,18 +8,18 @@ import ( pd "github.com/pingcap/pd/client" "github.com/pingcap/tidb/util/testleak" - "github.com/pingcap/br/pkg/utils" + "github.com/pingcap/br/pkg/mock" ) var _ = Suite(&testSaftPointSuite{}) type testSaftPointSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testSaftPointSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } diff --git a/pkg/backup/schema_test.go b/pkg/backup/schema_test.go index f657310bf..a1514ba4a 100644 --- a/pkg/backup/schema_test.go +++ b/pkg/backup/schema_test.go @@ -9,18 +9,18 @@ import ( "github.com/pingcap/tidb/util/testkit" "github.com/pingcap/tidb/util/testleak" - "github.com/pingcap/br/pkg/utils" + "github.com/pingcap/br/pkg/mock" ) var _ = Suite(&testBackupSchemaSuite{}) type testBackupSchemaSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testBackupSchemaSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } @@ -77,7 +77,7 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { <-updateCh c.Assert(err, IsNil) c.Assert(len(schemas), Equals, 1) - // MockCluster returns a dummy checksum (all fields are 1). + // Cluster returns a dummy checksum (all fields are 1). c.Assert(schemas[0].Crc64Xor, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalKvs, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalBytes, Not(Equals), 0, Commentf("%v", schemas[0])) @@ -97,7 +97,7 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { <-updateCh c.Assert(err, IsNil) c.Assert(len(schemas), Equals, 2) - // MockCluster returns a dummy checksum (all fields are 1). + // Cluster returns a dummy checksum (all fields are 1). 
c.Assert(schemas[0].Crc64Xor, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalKvs, Not(Equals), 0, Commentf("%v", schemas[0])) c.Assert(schemas[0].TotalBytes, Not(Equals), 0, Commentf("%v", schemas[0])) diff --git a/pkg/checksum/executor_test.go b/pkg/checksum/executor_test.go index 3e6d8078c..e9db6267b 100644 --- a/pkg/checksum/executor_test.go +++ b/pkg/checksum/executor_test.go @@ -12,6 +12,7 @@ import ( "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/tipb/go-tipb" + "github.com/pingcap/br/pkg/mock" "github.com/pingcap/br/pkg/utils" ) @@ -22,12 +23,12 @@ func TestT(t *testing.T) { var _ = Suite(&testChecksumSuite{}) type testChecksumSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testChecksumSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } @@ -61,7 +62,7 @@ func (s *testChecksumSuite) TestChecksum(c *C) { c.Assert(len(exe1.reqs), Equals, 1) resp, err := exe1.Execute(context.TODO(), s.mock.Storage.GetClient(), func() {}) c.Assert(err, IsNil) - // MockCluster returns a dummy checksum (all fields are 1). + // Cluster returns a dummy checksum (all fields are 1). c.Assert(resp.Checksum, Equals, uint64(1), Commentf("%v", resp)) c.Assert(resp.TotalKvs, Equals, uint64(1), Commentf("%v", resp)) c.Assert(resp.TotalBytes, Equals, uint64(1), Commentf("%v", resp)) diff --git a/pkg/conn/conn.go b/pkg/conn/conn.go index 3695a2a0c..6ef1e87eb 100644 --- a/pkg/conn/conn.go +++ b/pkg/conn/conn.go @@ -20,13 +20,14 @@ import ( "github.com/pingcap/log" pd "github.com/pingcap/pd/client" "github.com/pingcap/tidb/domain" - "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/backoff" "google.golang.org/grpc/keepalive" + + "github.com/pingcap/br/pkg/glue" ) const ( @@ -87,7 +88,7 @@ func pdRequest( } // NewMgr creates a new Mgr. -func NewMgr(ctx context.Context, pdAddrs string, storage tikv.Storage) (*Mgr, error) { +func NewMgr(ctx context.Context, g glue.Glue, pdAddrs string, storage tikv.Storage) (*Mgr, error) { addrs := strings.Split(pdAddrs, ",") failure := errors.Errorf("pd address (%s) has wrong format", pdAddrs) @@ -130,7 +131,7 @@ func NewMgr(ctx context.Context, pdAddrs string, storage tikv.Storage) (*Mgr, er return nil, errors.Errorf("tikv cluster not health %+v", stores) } - dom, err := session.BootstrapSession(storage) + dom, err := g.BootstrapSession(storage) if err != nil { return nil, errors.Trace(err) } diff --git a/pkg/glue/glue.go b/pkg/glue/glue.go new file mode 100644 index 000000000..bc6fb214e --- /dev/null +++ b/pkg/glue/glue.go @@ -0,0 +1,24 @@ +package glue + +import ( + "context" + + "github.com/pingcap/parser/model" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta/autoid" +) + +// Glue is an abstraction of TiDB function calls used in BR. +type Glue interface { + BootstrapSession(store kv.Storage) (*domain.Domain, error) + CreateSession(store kv.Storage) (Session, error) +} + +// Session is an abstraction of the session.Session interface. 
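+// A minimal caller-side sketch (mirroring how restore.NewDB below uses it;
+// g, store, and ctx are whatever the caller already holds):
+//
+//	se, err := g.CreateSession(store)
+//	if err != nil {
+//		return errors.Trace(err)
+//	}
+//	defer se.Close()
+//	if err := se.Execute(ctx, "set @@sql_mode=''"); err != nil {
+//		return errors.Trace(err)
+//	}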
+type Session interface { + Execute(ctx context.Context, sql string) error + ShowCreateDatabase(schema *model.DBInfo) (string, error) + ShowCreateTable(table *model.TableInfo, allocator autoid.Allocator) (string, error) + Close() +} diff --git a/pkg/gluetidb/glue.go b/pkg/gluetidb/glue.go new file mode 100644 index 000000000..6b9f2f667 --- /dev/null +++ b/pkg/gluetidb/glue.go @@ -0,0 +1,65 @@ +package gluetidb + +import ( + "bytes" + "context" + + "github.com/pingcap/parser/model" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/session" + + "github.com/pingcap/br/pkg/glue" +) + +// Glue is an implementation of glue.Glue using a new TiDB session. +type Glue struct{} + +type tidbSession struct { + se session.Session +} + +// BootstrapSession implements glue.Glue +func (Glue) BootstrapSession(store kv.Storage) (*domain.Domain, error) { + return session.BootstrapSession(store) +} + +// CreateSession implements glue.Glue +func (Glue) CreateSession(store kv.Storage) (glue.Session, error) { + se, err := session.CreateSession(store) + if err != nil { + return nil, err + } + return &tidbSession{se: se}, nil +} + +// Execute implements glue.Session +func (gs *tidbSession) Execute(ctx context.Context, sql string) error { + _, err := gs.se.Execute(ctx, sql) + return err +} + +// ShowCreateDatabase implements glue.Session +func (gs *tidbSession) ShowCreateDatabase(schema *model.DBInfo) (string, error) { + var buf bytes.Buffer + if err := executor.ConstructResultOfShowCreateDatabase(gs.se, schema, true, &buf); err != nil { + return "", err + } + return buf.String(), nil +} + +// ShowCreateTable implements glue.Session +func (gs *tidbSession) ShowCreateTable(table *model.TableInfo, allocator autoid.Allocator) (string, error) { + var buf bytes.Buffer + if err := executor.ConstructResultOfShowCreateTable(gs.se, table, allocator, &buf); err != nil { + return "", err + } + return buf.String(), nil +} + +// Close implements glue.Session +func (gs *tidbSession) Close() { + gs.se.Close() +} diff --git a/pkg/utils/mock_cluster.go b/pkg/mock/mock_cluster.go similarity index 93% rename from pkg/utils/mock_cluster.go rename to pkg/mock/mock_cluster.go index dc7b87c3c..aee9666ed 100644 --- a/pkg/utils/mock_cluster.go +++ b/pkg/mock/mock_cluster.go @@ -1,4 +1,4 @@ -package utils +package mock import ( "database/sql" @@ -28,8 +28,8 @@ import ( var pprofOnce sync.Once -// MockCluster is mock tidb cluster, includes tikv and pd. -type MockCluster struct { +// Cluster is a mock tidb cluster, including tikv and pd. +type Cluster struct { *server.Server *mocktikv.Cluster mocktikv.MVCCStore @@ -40,8 +40,8 @@ type MockCluster struct { PDClient pd.Client } -// NewMockCluster create a new mock cluster. -func NewMockCluster() (*MockCluster, error) { +// NewCluster creates a new mock cluster. +func NewCluster() (*Cluster, error) { pprofOnce.Do(func() { go func() { // Make sure pprof is registered.
@@ -72,7 +72,7 @@ func NewMockCluster() (*MockCluster, error) { if err != nil { return nil, err } - return &MockCluster{ + return &Cluster{ Cluster: cluster, MVCCStore: mvccStore, Storage: storage, @@ -82,7 +82,7 @@ func NewMockCluster() (*MockCluster, error) { } // Start runs a mock cluster -func (mock *MockCluster) Start() error { +func (mock *Cluster) Start() error { statusURL, err := url.Parse(tempurl.Alloc()) if err != nil { return err @@ -124,7 +124,7 @@ func (mock *MockCluster) Start() error { } // Stop stops a mock cluster -func (mock *MockCluster) Stop() { +func (mock *Cluster) Stop() { if mock.Domain != nil { mock.Domain.Close() } diff --git a/pkg/mock/mock_cluster_test.go b/pkg/mock/mock_cluster_test.go new file mode 100644 index 000000000..e7ffc6e85 --- /dev/null +++ b/pkg/mock/mock_cluster_test.go @@ -0,0 +1,27 @@ +package mock + +import ( + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" +) + +var _ = Suite(&testClusterSuite{}) + +type testClusterSuite struct { + mock *Cluster +} + +func (s *testClusterSuite) SetUpSuite(c *C) { + var err error + s.mock, err = NewCluster() + c.Assert(err, IsNil) +} + +func (s *testClusterSuite) TearDownSuite(c *C) { + testleak.AfterTest(c)() +} + +func (s *testClusterSuite) TestSmoke(c *C) { + c.Assert(s.mock.Start(), IsNil) + s.mock.Stop() +} diff --git a/pkg/restore/backoff_test.go b/pkg/restore/backoff_test.go index 537f0980c..73161a9f6 100644 --- a/pkg/restore/backoff_test.go +++ b/pkg/restore/backoff_test.go @@ -7,18 +7,19 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/br/pkg/mock" "github.com/pingcap/br/pkg/utils" ) var _ = Suite(&testBackofferSuite{}) type testBackofferSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testBackofferSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } diff --git a/pkg/restore/client.go b/pkg/restore/client.go index f45b3d510..a06617084 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -23,6 +23,7 @@ import ( "google.golang.org/grpc/keepalive" "github.com/pingcap/br/pkg/checksum" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" ) @@ -53,11 +54,12 @@ type Client struct { // NewRestoreClient returns a new RestoreClient func NewRestoreClient( ctx context.Context, + g glue.Glue, pdClient pd.Client, store kv.Storage, ) (*Client, error) { ctx, cancel := context.WithCancel(ctx) - db, err := NewDB(store) + db, err := NewDB(g, store) if err != nil { cancel() return nil, errors.Trace(err) diff --git a/pkg/restore/client_test.go b/pkg/restore/client_test.go index 3d608b3b9..b67bbcfd7 100644 --- a/pkg/restore/client_test.go +++ b/pkg/restore/client_test.go @@ -12,18 +12,20 @@ import ( "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/br/pkg/gluetidb" + "github.com/pingcap/br/pkg/mock" "github.com/pingcap/br/pkg/utils" ) var _ = Suite(&testRestoreClientSuite{}) type testRestoreClientSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testRestoreClientSuite) SetUpTest(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) } @@ -36,7 +38,7 @@ func (s *testRestoreClientSuite) TestCreateTables(c *C) { defer s.mock.Stop() client := Client{} - db, err := NewDB(s.mock.Storage) + db, err := NewDB(gluetidb.Glue{}, s.mock.Storage) c.Assert(err, 
IsNil) client.db = db client.ctx = context.Background() @@ -93,7 +95,7 @@ func (s *testRestoreClientSuite) TestIsOnline(c *C) { defer s.mock.Stop() client := Client{} - db, err := NewDB(s.mock.Storage) + db, err := NewDB(gluetidb.Glue{}, s.mock.Storage) c.Assert(err, IsNil) client.db = db client.ctx = context.Background() diff --git a/pkg/restore/db.go b/pkg/restore/db.go index 8c09af16f..22a1a4794 100644 --- a/pkg/restore/db.go +++ b/pkg/restore/db.go @@ -1,7 +1,6 @@ package restore import ( - "bytes" "context" "fmt" "sort" @@ -10,27 +9,26 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/pingcap/parser/model" - "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/session" "go.uber.org/zap" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/utils" ) // DB is a TiDB instance, not thread-safe. type DB struct { - se session.Session + se glue.Session } // NewDB returns a new DB -func NewDB(store kv.Storage) (*DB, error) { - se, err := session.CreateSession(store) +func NewDB(g glue.Glue, store kv.Storage) (*DB, error) { + se, err := g.CreateSession(store) if err != nil { return nil, errors.Trace(err) } // Set SQL mode to None for avoiding SQL compatibility problem - _, err = se.Execute(context.Background(), "set @@sql_mode=''") + err = se.Execute(context.Background(), "set @@sql_mode=''") if err != nil { return nil, errors.Trace(err) } @@ -44,7 +42,7 @@ func (db *DB) ExecDDL(ctx context.Context, ddlJob *model.Job) error { var err error if ddlJob.BinlogInfo.TableInfo != nil { switchDbSQL := fmt.Sprintf("use %s;", ddlJob.SchemaName) - _, err = db.se.Execute(ctx, switchDbSQL) + err = db.se.Execute(ctx, switchDbSQL) if err != nil { log.Error("switch db failed", zap.String("query", switchDbSQL), @@ -53,7 +51,7 @@ func (db *DB) ExecDDL(ctx context.Context, ddlJob *model.Job) error { return errors.Trace(err) } } - _, err = db.se.Execute(ctx, ddlJob.Query) + err = db.se.Execute(ctx, ddlJob.Query) if err != nil { log.Error("execute ddl query failed", zap.String("query", ddlJob.Query), @@ -66,14 +64,12 @@ func (db *DB) ExecDDL(ctx context.Context, ddlJob *model.Job) error { // CreateDatabase executes a CREATE DATABASE SQL. func (db *DB) CreateDatabase(ctx context.Context, schema *model.DBInfo) error { - var buf bytes.Buffer - err := executor.ConstructResultOfShowCreateDatabase(db.se, schema, true, &buf) + createSQL, err := db.se.ShowCreateDatabase(schema) if err != nil { log.Error("build create database SQL failed", zap.Stringer("db", schema.Name), zap.Error(err)) return errors.Trace(err) } - createSQL := buf.String() - _, err = db.se.Execute(ctx, createSQL) + err = db.se.Execute(ctx, createSQL) if err != nil { log.Error("create database failed", zap.String("query", createSQL), zap.Error(err)) } @@ -82,9 +78,8 @@ func (db *DB) CreateDatabase(ctx context.Context, schema *model.DBInfo) error { // CreateTable executes a CREATE TABLE SQL. 
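// Concretely, the rewritten method below drives the session with roughly
// this sequence (identifiers illustrative):
//
//	use `db`;
//	CREATE TABLE IF NOT EXISTS ... ;
//	alter table `tbl` auto_increment = <table.Info.AutoIncID>;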
func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error { - var buf bytes.Buffer schema := table.Info - err := executor.ConstructResultOfShowCreateTable(db.se, schema, newIDAllocator(schema.AutoIncID), &buf) + createSQL, err := db.se.ShowCreateTable(schema, newIDAllocator(schema.AutoIncID)) if err != nil { log.Error( "build create table SQL failed", @@ -93,8 +88,8 @@ func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error { zap.Error(err)) return errors.Trace(err) } - switchDbSQL := fmt.Sprintf("use %s;", table.Db.Name) - _, err = db.se.Execute(ctx, switchDbSQL) + switchDbSQL := fmt.Sprintf("use %s;", utils.EncloseName(table.Db.Name.O)) + err = db.se.Execute(ctx, switchDbSQL) if err != nil { log.Error("switch db failed", zap.String("SQL", switchDbSQL), @@ -102,13 +97,12 @@ func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error { zap.Error(err)) return errors.Trace(err) } - createSQL := buf.String() // Insert `IF NOT EXISTS` statement to skip the created tables words := strings.SplitN(createSQL, " ", 3) if len(words) > 2 && strings.ToUpper(words[0]) == "CREATE" && strings.ToUpper(words[1]) == "TABLE" { createSQL = "CREATE TABLE IF NOT EXISTS " + words[2] } - _, err = db.se.Execute(ctx, createSQL) + err = db.se.Execute(ctx, createSQL) if err != nil { log.Error("create table failed", zap.String("SQL", createSQL), @@ -119,9 +113,9 @@ func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error { } alterAutoIncIDSQL := fmt.Sprintf( "alter table %s auto_increment = %d", - escapeTableName(schema.Name), + utils.EncloseName(schema.Name.O), schema.AutoIncID) - _, err = db.se.Execute(ctx, alterAutoIncIDSQL) + err = db.se.Execute(ctx, alterAutoIncIDSQL) if err != nil { log.Error("alter AutoIncID failed", zap.String("query", alterAutoIncIDSQL), diff --git a/pkg/restore/db_test.go b/pkg/restore/db_test.go index 0151b4da6..b1e9e947c 100644 --- a/pkg/restore/db_test.go +++ b/pkg/restore/db_test.go @@ -13,18 +13,20 @@ import ( "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/br/pkg/backup" + "github.com/pingcap/br/pkg/gluetidb" + "github.com/pingcap/br/pkg/mock" "github.com/pingcap/br/pkg/utils" ) var _ = Suite(&testRestoreSchemaSuite{}) type testRestoreSchemaSuite struct { - mock *utils.MockCluster + mock *mock.Cluster } func (s *testRestoreSchemaSuite) SetUpSuite(c *C) { var err error - s.mock, err = utils.NewMockCluster() + s.mock, err = mock.NewCluster() c.Assert(err, IsNil) c.Assert(s.mock.Start(), IsNil) } @@ -70,7 +72,7 @@ func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) { c.Assert(autoIncID, Equals, uint64(globalAutoID)) // Alter AutoIncID to the next AutoIncID + 100 table.Info.AutoIncID = globalAutoID + 100 - db, err := NewDB(s.mock.Storage) + db, err := NewDB(gluetidb.Glue{}, s.mock.Storage) c.Assert(err, IsNil, Commentf("Error create DB")) tk.MustExec("drop database if exists test;") // Test empty collate value diff --git a/pkg/restore/util.go b/pkg/restore/util.go index 63ee92969..64ccfab19 100644 --- a/pkg/restore/util.go +++ b/pkg/restore/util.go @@ -324,12 +324,3 @@ func encodeKeyPrefix(key []byte) []byte { encodedPrefix = append(encodedPrefix, codec.EncodeBytes([]byte{}, key[:len(key)-ungroupedLen])...) return append(encodedPrefix[:len(encodedPrefix)-9], key[len(key)-ungroupedLen:]...) } - -// escape the identifier for pretty-printing. -// For instance, the identifier "foo `bar`" will become "`foo ``bar```". 
-// The sqlMode controls whether to escape with backquotes (`) or double quotes -// (`"`) depending on whether mysql.ModeANSIQuotes is enabled. -func escapeTableName(cis model.CIStr) string { - quote := "`" - return quote + strings.Replace(cis.O, quote, quote+quote, -1) + quote -} diff --git a/pkg/task/backup.go b/pkg/task/backup.go index 240754517..ead4c2351 100644 --- a/pkg/task/backup.go +++ b/pkg/task/backup.go @@ -12,6 +12,7 @@ import ( "go.uber.org/zap" "github.com/pingcap/br/pkg/backup" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" @@ -61,7 +62,7 @@ func (cfg *BackupConfig) ParseFromFlags(flags *pflag.FlagSet) error { } // RunBackup starts a backup task inside the current goroutine. -func RunBackup(c context.Context, cmdName string, cfg *BackupConfig) error { +func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig) error { ctx, cancel := context.WithCancel(c) defer cancel() @@ -73,7 +74,7 @@ func RunBackup(c context.Context, cmdName string, cfg *BackupConfig) error { if err != nil { return err } - mgr, err := newMgr(ctx, cfg.PD) + mgr, err := newMgr(ctx, g, cfg.PD) if err != nil { return err } diff --git a/pkg/task/common.go b/pkg/task/common.go index 2433d94b9..c3f866492 100644 --- a/pkg/task/common.go +++ b/pkg/task/common.go @@ -15,6 +15,7 @@ import ( "github.com/spf13/pflag" "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/utils" ) @@ -178,7 +179,7 @@ func (cfg *Config) ParseFromFlags(flags *pflag.FlagSet) error { } // newMgr creates a new mgr at the given PD address. -func newMgr(ctx context.Context, pds []string) (*conn.Mgr, error) { +func newMgr(ctx context.Context, g glue.Glue, pds []string) (*conn.Mgr, error) { pdAddress := strings.Join(pds, ",") if len(pdAddress) == 0 { return nil, errors.New("pd address can not be empty") @@ -189,7 +190,7 @@ func newMgr(ctx context.Context, pds []string) (*conn.Mgr, error) { if err != nil { return nil, err } - return conn.NewMgr(ctx, pdAddress, store.(tikv.Storage)) + return conn.NewMgr(ctx, g, pdAddress, store.(tikv.Storage)) } // GetStorage gets the storage backend from the config. diff --git a/pkg/task/restore.go b/pkg/task/restore.go index 599dcb478..bb00d189d 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -11,6 +11,7 @@ import ( "go.uber.org/zap" "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/restore" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" @@ -55,17 +56,17 @@ func (cfg *RestoreConfig) ParseFromFlags(flags *pflag.FlagSet) error { } // RunRestore starts a restore task inside the current goroutine. 
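// Callers now thread a Glue through; cmd/restore.go (earlier in this patch)
// calls task.RunRestore(GetDefaultContext(), tidbGlue, cmdName, &cfg).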
-func RunRestore(c context.Context, cmdName string, cfg *RestoreConfig) error { +func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConfig) error { ctx, cancel := context.WithCancel(c) defer cancel() - mgr, err := newMgr(ctx, cfg.PD) + mgr, err := newMgr(ctx, g, cfg.PD) if err != nil { return err } defer mgr.Close() - client, err := restore.NewRestoreClient(ctx, mgr.GetPDClient(), mgr.GetTiKV()) + client, err := restore.NewRestoreClient(ctx, g, mgr.GetPDClient(), mgr.GetTiKV()) if err != nil { return err } diff --git a/pkg/utils/mock_cluster_test.go b/pkg/utils/mock_cluster_test.go deleted file mode 100644 index 42cacae9c..000000000 --- a/pkg/utils/mock_cluster_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package utils - -import ( - . "github.com/pingcap/check" - "github.com/pingcap/tidb/util/testleak" -) - -var _ = Suite(&testMockClusterSuite{}) - -type testMockClusterSuite struct { - mock *MockCluster -} - -func (s *testMockClusterSuite) SetUpSuite(c *C) { - var err error - s.mock, err = NewMockCluster() - c.Assert(err, IsNil) -} - -func (s *testMockClusterSuite) TearDownSuite(c *C) { - testleak.AfterTest(c)() -} - -func (s *testMockClusterSuite) TestSmoke(c *C) { - c.Assert(s.mock.Start(), IsNil) - s.mock.Stop() -} diff --git a/pkg/utils/schema.go b/pkg/utils/schema.go index 0afe98e5b..e1aec1225 100644 --- a/pkg/utils/schema.go +++ b/pkg/utils/schema.go @@ -2,16 +2,13 @@ package utils import ( "bytes" - "context" "encoding/json" "strings" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/parser/model" - "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/tablecodec" - "github.com/pingcap/tidb/util/sqlexec" ) const ( @@ -106,36 +103,6 @@ func LoadBackupTables(meta *backup.BackupMeta) (map[string]*Database, error) { return databases, nil } -// ResultSetToStringSlice changes the RecordSet to [][]string. 
port from tidb -func ResultSetToStringSlice(ctx context.Context, s session.Session, rs sqlexec.RecordSet) ([][]string, error) { - rows, err := session.GetRows4Test(ctx, s, rs) - if err != nil { - return nil, err - } - err = rs.Close() - if err != nil { - return nil, err - } - sRows := make([][]string, len(rows)) - for i := range rows { - row := rows[i] - iRow := make([]string, row.Len()) - for j := 0; j < row.Len(); j++ { - if row.IsNull(j) { - iRow[j] = "" - } else { - d := row.GetDatum(j, &rs.Fields()[j].Column.FieldType) - iRow[j], err = d.ToString() - if err != nil { - return nil, err - } - } - } - sRows[i] = iRow - } - return sRows, nil -} - // EncloseName formats name in sql func EncloseName(name string) string { return "`" + strings.ReplaceAll(name, "`", "``") + "`" From 3c9d42fdab5f07fb65360ab545613f90531b7eb0 Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Sun, 23 Feb 2020 17:07:58 +0800 Subject: [PATCH 09/46] go.mod: update tidb (#168) Signed-off-by: Neil Shen --- go.mod | 15 ++++++------ go.sum | 77 ++++++++++++++++++++++------------------------------------ 2 files changed, 36 insertions(+), 56 deletions(-) diff --git a/go.mod b/go.mod index 7d9e6b77e..02aaa51ef 100644 --- a/go.mod +++ b/go.mod @@ -18,15 +18,15 @@ require ( github.com/mattn/go-runewidth v0.0.7 // indirect github.com/onsi/ginkgo v1.10.3 // indirect github.com/onsi/gomega v1.7.1 // indirect - github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 + github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 - github.com/pingcap/kvproto v0.0.0-20200210234432-a965739f8162 - github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 - github.com/pingcap/parser v0.0.0-20200109073933-a9496438d77d - github.com/pingcap/pd v1.1.0-beta.0.20191219054547-4d65bbefbc6d - github.com/pingcap/tidb v1.1.0-beta.0.20200110130413-8c3ee37c1938 + github.com/pingcap/kvproto v0.0.0-20200217103621-528e82bf7248 + github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd + github.com/pingcap/parser v0.0.0-20200218113622-517beb2e39c2 + github.com/pingcap/pd v1.1.0-beta.0.20200106144140-f5a7aa985497 + github.com/pingcap/tidb v1.1.0-beta.0.20200223044457-aedea3ec5e1e github.com/pingcap/tidb-tools v4.0.0-beta+incompatible - github.com/pingcap/tipb v0.0.0-20191227083941-3996eff010dc + github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60 github.com/prometheus/client_golang v1.0.0 github.com/sirupsen/logrus v1.4.2 github.com/spf13/cobra v0.0.5 @@ -35,7 +35,6 @@ require ( go.uber.org/zap v1.13.0 golang.org/x/net v0.0.0-20191011234655-491137f69257 // indirect golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 - golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042 // indirect google.golang.org/api v0.14.0 google.golang.org/grpc v1.25.1 ) diff --git a/go.sum b/go.sum index 24f73f5c9..a58f8bf55 100644 --- a/go.sum +++ b/go.sum @@ -117,7 +117,6 @@ github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZp github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= 
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= @@ -136,11 +135,12 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -180,7 +180,6 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= @@ -221,7 +220,6 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -242,7 +240,6 @@ github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= 
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -280,12 +277,14 @@ github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.3.0 h1:e5+lF2E4Y2WCIxBefVowBuB0iHrUH4HZ8q+6mGF7fJc= github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d h1:U+PMnTlV2tu7RuMK5etusZG3Cf+rpow5hqQByeCzJ2g= +github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d/go.mod h1:lXfE4PvvTW5xOjO6Mba8zDPyw8M93B6AQ7frTGnMlA8= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4 h1:iRtOAQ6FXkY/BGvst3CDfTva4nTqh6CL8WXvanLdbu0= github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= -github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 h1:rfD9v3+ppLPzoQBgZev0qYCpegrwyFx/BUpkApEiKdY= -github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= +github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 h1:R8gStypOBmpnHEx1qi//SaqxJVI4inOqljg/Aj5/390= +github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 h1:KH4f4Si9XK6/IW50HtoaiLIFHGkapOM6w83za47UYik= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= @@ -295,52 +294,52 @@ github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 h1:58naV4XMEqm0h github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d h1:F8vp38kTAckN+v8Jlc98uMBvKIzr1a+UhnLyVYn8Q5Q= github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= +github.com/pingcap/failpoint v0.0.0-20200210140405-f8f9fb234798 h1:6DMbRqPI1qzQ8N1xc3+nKY8IxSACd9VqQKkRVvbyoIg= +github.com/pingcap/failpoint v0.0.0-20200210140405-f8f9fb234798/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d h1:rCmRK0lCRrHMUbS99BKFYhK9YxJDNw0xB033cQbYo0s= github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d/go.mod h1:fMRU1BA1y+r89AxUoaAar4JjrhUkVDt0o0Np6V8XbDQ= github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E= github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20191213111810-93cb7c623c8b/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20200108025604-a4dc183d2af5 h1:RUxQExD5yubAjWGnw8kcxfO9abbiVHIE1rbuCyQCWDE= -github.com/pingcap/kvproto v0.0.0-20200108025604-a4dc183d2af5/go.mod 
h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20200210234432-a965739f8162 h1:lsoIoCoXMpcHvW6jHcqP/prA4I6duAp1DVyG2ULz4bM= -github.com/pingcap/kvproto v0.0.0-20200210234432-a965739f8162/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20200217103621-528e82bf7248 h1:DhGKu4ACa5v0Z70J1NWrc9ti+OqihhxmyzsK7YDTpVQ= +github.com/pingcap/kvproto v0.0.0-20200217103621-528e82bf7248/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20200109073933-a9496438d77d h1:4QwSJRxmBjTB9ssJNWg2f2bDm5rqnHCUUjMh4N1QOOY= -github.com/pingcap/parser v0.0.0-20200109073933-a9496438d77d/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= -github.com/pingcap/pd v1.1.0-beta.0.20191219054547-4d65bbefbc6d h1:Ui80aiLTyd0EZD56o2tjFRYpHfhazBjtBdKeR8UoTFY= -github.com/pingcap/pd v1.1.0-beta.0.20191219054547-4d65bbefbc6d/go.mod h1:CML+b1JVjN+VbDijaIcUSmuPgpDjXEY7UiOx5yDP8eE= +github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd h1:CV3VsP3Z02MVtdpTMfEgRJ4T9NGgGTxdHpJerent7rM= +github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= +github.com/pingcap/parser v0.0.0-20200218113622-517beb2e39c2 h1:DsymejjOFdljM1q0BJ8yBZUYQ7718+7JTO046Rqd/3k= +github.com/pingcap/parser v0.0.0-20200218113622-517beb2e39c2/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= +github.com/pingcap/pd v1.1.0-beta.0.20200106144140-f5a7aa985497 h1:FzLErYtcXnSxtC469OuVDlgBbh0trJZzNxw0mNKzyls= +github.com/pingcap/pd v1.1.0-beta.0.20200106144140-f5a7aa985497/go.mod h1:cfT/xu4Zz+Tkq95QrLgEBZ9ikRcgzy4alHqqoaTftqI= github.com/pingcap/sysutil v0.0.0-20191216090214-5f9620d22b3b h1:EEyo/SCRswLGuSk+7SB86Ak1p8bS6HL1Mi4Dhyuv6zg= github.com/pingcap/sysutil v0.0.0-20191216090214-5f9620d22b3b/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= -github.com/pingcap/tidb v1.1.0-beta.0.20200110130413-8c3ee37c1938 h1:Jt9ENNiS1ZNC9jV2Pd3wdegXQYFq3U6z1xFlzZNMNC8= -github.com/pingcap/tidb v1.1.0-beta.0.20200110130413-8c3ee37c1938/go.mod h1:DlMN+GGqC/WpREnzcH8xgxbXnntjybLhT84AbUSvMVM= +github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd h1:k7CIHMFVKjHsda3PKkiN4zv++NEnexlUwiJEhryWpG0= +github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= +github.com/pingcap/tidb v1.1.0-beta.0.20200223044457-aedea3ec5e1e h1:HPSJdnkI6mt0qEIbSkJzVsq99929Ki5VblkJMmlqhI0= +github.com/pingcap/tidb v1.1.0-beta.0.20200223044457-aedea3ec5e1e/go.mod h1:zzO/kysmwHMkr0caH2NmuSAKLdsySXKDQGTCYrb7Gx8= github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible h1:H1jg0aDWz2SLRh3hNBo2HFtnuHtudIUvBumU7syRkic= github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= github.com/pingcap/tidb-tools v4.0.0-beta+incompatible h1:+XJdcVLCM8GDgXiMS6lFV59N3XPVOqtNHeWNLVrb2pg= github.com/pingcap/tidb-tools v4.0.0-beta+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= github.com/pingcap/tipb v0.0.0-20190428032612-535e1abaa330/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= -github.com/pingcap/tipb v0.0.0-20191227083941-3996eff010dc h1:IOKsFObJ4GZwAgyuhdJKg3oKCzWcoBFfHhpq2TOn5H0= -github.com/pingcap/tipb 
v0.0.0-20191227083941-3996eff010dc/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= +github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60 h1:aJPXrT1u4VfUSGFA2oQVwl4pOXzqe+YI6wed01cjDH4= +github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -359,7 +358,6 @@ github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 h1:SWV2fHctRpRrp49 github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca h1:3fECS8atRjByijiI8yYiuwLwQ2ZxXobW7ua/8GRB3pI= github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= @@ -401,9 +399,7 @@ github.com/uber/jaeger-client-go v2.15.0+incompatible h1:NP3qsSqNxh8VYr956ur1N/1 github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod 
h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v1.5.0 h1:OHbgr8l656Ub3Fw5k9SWnBfIEwvoHQ+W2y+Aa9D1Uyo= github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA= github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4QUw6lhHsVymzwSDJOZcE0s2X8S20= github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= @@ -415,10 +411,8 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yookoala/realpath v1.0.0 h1:7OA9pj4FZd+oZDsyvXWQvjn5oBdcHRTV44PpdMSuImQ= github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20190320044326-77d4b742cdbf/go.mod h1:KSGwdbiFchh5KIC9My2+ZVl5/3ANcwohw50dpPwa2cw= go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 h1:lWF4f9Nypl1ZqSb4gLeh/DGvBYVaUYHuiB93teOmwgc= go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -449,13 +443,11 @@ go.uber.org/zap v1.12.0 h1:dySoUQPFBGj6xwjmBzageVL8jGi8uxc6bEmJQjA06bw= go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -477,13 +469,14 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= 
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -500,8 +493,6 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190909003024-a7b16738d86b h1:XfVGCX+0T4WOStkaOsJRllbsiImhB2jgVBGc9L0lPGc= -golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191011234655-491137f69257 h1:ry8e2D+cwaV6hk7lb3aRTjjZo24shrbK0e11QEOkTIg= golang.org/x/net v0.0.0-20191011234655-491137f69257/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -531,8 +522,6 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190909082730-f460065e899a h1:mIzbOulag9/gXacgxKlFVwpCOWSfBT3/pDyyCwGA9as= -golang.org/x/sys v0.0.0-20190909082730-f460065e899a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -570,13 +559,11 @@ golang.org/x/tools v0.0.0-20191107010934-f79515f33823 
h1:akkRBeitX2EZP59KdtKw310 golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f h1:kDxGY2VmgABOe55qheT/TFqUMtcTHnomIPS1iv3G4Ms= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042 h1:BKiPVwWbEdmAh+5CBwk13CYeVJQRDJpDnKgDyMOGz9M= -golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4 h1:Toz2IK7k8rbltAXwNAxKcn9OzqyNfMUhUNjz3sL0NMk= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200206050830-dd0d5d485177 h1:E2vxBajJgSA3TcJhDGTh/kP3VnsvXKl9jSijv+h7svQ= +golang.org/x/tools v0.0.0-20200206050830-dd0d5d485177/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -591,7 +578,6 @@ google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -601,14 +587,11 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514 h1:oFSK4421fpCKRrpzIpybyBVWyht05NegY9+L/3TLAZs= -google.golang.org/genproto v0.0.0-20190905072037-92dd089d5514/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= 
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9 h1:6XzpBoANz1NqMNfDXzc2QmHmbb1vyMsvRfoP5rM+K1I= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -617,7 +600,6 @@ google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA= @@ -629,7 +611,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= From 028963d18cfc46c480dd748891e9b3dc4c66ca66 Mon Sep 17 00:00:00 2001 From: 3pointer Date: Thu, 27 Feb 2020 13:22:14 +0800 Subject: [PATCH 10/46] BR support TLS (#161) * *: support tls * move tikv.driver to glue * fix comments --- go.mod | 4 +- go.sum | 6 + pkg/conn/conn.go | 42 +++++- pkg/glue/glue.go | 2 + pkg/gluetidb/glue.go | 15 ++ pkg/restore/client.go | 19 ++- pkg/restore/import.go | 12 +- pkg/restore/split_client.go | 12 +- pkg/restore/util.go | 2 +- pkg/task/backup.go | 2 +- pkg/task/common.go | 43 +++++- pkg/task/restore.go | 4 +- pkg/utils/tso.go | 17 ++- tests/_utils/run_services | 174 +++++++++++++++++++++++ tests/br_tls/certificates/ca.pem | 21 +++ tests/br_tls/certificates/client-key.pem | 27 ++++ tests/br_tls/certificates/client.pem | 21 +++ tests/br_tls/certificates/server-key.pem | 27 ++++ tests/br_tls/certificates/server.pem | 22 +++ tests/br_tls/config/pd.toml | 9 ++ tests/br_tls/config/tidb.toml | 14 ++ tests/br_tls/config/tikv.toml | 19 +++ tests/br_tls/run.sh | 67 +++++++++ tests/run.sh | 82 +---------- 24 files changed, 560 insertions(+), 103 deletions(-) create mode 100644 tests/_utils/run_services create mode 100644 tests/br_tls/certificates/ca.pem create mode 100644 tests/br_tls/certificates/client-key.pem create mode 100644 tests/br_tls/certificates/client.pem create mode 100644 tests/br_tls/certificates/server-key.pem create mode 100644 
tests/br_tls/certificates/server.pem create mode 100644 tests/br_tls/config/pd.toml create mode 100644 tests/br_tls/config/tidb.toml create mode 100644 tests/br_tls/config/tikv.toml create mode 100755 tests/br_tls/run.sh diff --git a/go.mod b/go.mod index 02aaa51ef..70ca858a0 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,6 @@ require ( github.com/fsouza/fake-gcs-server v1.15.0 github.com/go-sql-driver/mysql v1.4.1 github.com/gogo/protobuf v1.3.1 - github.com/golang/snappy v0.0.1 // indirect github.com/google/btree v1.0.0 github.com/google/uuid v1.1.1 github.com/mattn/go-runewidth v0.0.7 // indirect @@ -31,6 +30,9 @@ require ( github.com/sirupsen/logrus v1.4.2 github.com/spf13/cobra v0.0.5 github.com/spf13/pflag v1.0.3 + github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285 // indirect + github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect + go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 go.opencensus.io v0.22.2 // indirect go.uber.org/zap v1.13.0 golang.org/x/net v0.0.0-20191011234655-491137f69257 // indirect diff --git a/go.sum b/go.sum index a58f8bf55..7c8ae23e9 100644 --- a/go.sum +++ b/go.sum @@ -264,10 +264,12 @@ github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2 github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo= @@ -388,11 +390,15 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d h1:4J9HCZVpvDmj2tiKGSTUnb3Ok/9CEQb9oqu9LHKQQpc= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= +github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285 h1:uSDYjYejelKyceA6DiCsngFof9jAyeaSyX9XC5a1a7Q= +github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6 h1:lYIiVDtZnyTWlNwiAxLj0bbpTcx1BWCFhXjfsvmPdNc= github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/uber-go/atomic v1.3.2 h1:Azu9lPBWRNKzYXSIwRfgRuDuS0YKsK4NFhiQv98gkxo= github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= github.com/uber/jaeger-client-go v2.15.0+incompatible h1:NP3qsSqNxh8VYr956ur1N/1C1PjvOJnJykCzcD5QHbk= diff --git a/pkg/conn/conn.go b/pkg/conn/conn.go index 6ef1e87eb..d1f7858f6 100644 --- a/pkg/conn/conn.go +++ b/pkg/conn/conn.go @@ -3,6 +3,7 @@ package conn import ( "bytes" "context" + "crypto/tls" "encoding/json" "fmt" "io" @@ -25,6 +26,7 @@ import ( "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "github.com/pingcap/br/pkg/glue" @@ -44,6 +46,7 @@ type Mgr struct { addrs []string cli *http.Client } + tlsConf *tls.Config dom *domain.Domain storage tikv.Storage grpcClis struct { @@ -58,9 +61,6 @@ func pdRequest( ctx context.Context, addr string, prefix string, cli *http.Client, method string, body io.Reader) ([]byte, error) { - if addr != "" && !strings.HasPrefix("http", addr) { - addr = "http://" + addr - } u, err := url.Parse(addr) if err != nil { return nil, errors.Trace(err) @@ -88,12 +88,33 @@ func pdRequest( } // NewMgr creates a new Mgr. -func NewMgr(ctx context.Context, g glue.Glue, pdAddrs string, storage tikv.Storage) (*Mgr, error) { +func NewMgr( + ctx context.Context, + g glue.Glue, + pdAddrs string, + storage tikv.Storage, + tlsConf *tls.Config, + securityOption pd.SecurityOption) (*Mgr, error) { addrs := strings.Split(pdAddrs, ",") failure := errors.Errorf("pd address (%s) has wrong format", pdAddrs) cli := &http.Client{Timeout: 30 * time.Second} + if tlsConf != nil { + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = tlsConf + cli.Transport = transport + } + + processedAddrs := make([]string, 0, len(addrs)) for _, addr := range addrs { + if addr != "" && !strings.HasPrefix("http", addr) { + if tlsConf != nil { + addr = "https://" + addr + } else { + addr = "http://" + addr + } + } + processedAddrs = append(processedAddrs, addr) _, failure = pdRequest(ctx, addr, clusterVersionPrefix, cli, http.MethodGet, nil) // TODO need check cluster version >= 3.1 when br release if failure == nil { @@ -104,7 +125,7 @@ func NewMgr(ctx context.Context, g glue.Glue, pdAddrs string, storage tikv.Stora return nil, errors.Annotatef(failure, "pd address (%s) not available, please check network", pdAddrs) } - pdClient, err := pd.NewClient(addrs, pd.SecurityOption{}) + pdClient, err := pd.NewClient(addrs, securityOption) if err != nil { log.Error("fail to create pd client", zap.Error(err)) return nil, err @@ -140,8 +161,9 @@ func NewMgr(ctx context.Context, g glue.Glue, pdAddrs string, storage tikv.Stora pdClient: pdClient, storage: storage, dom: dom, + tlsConf: tlsConf, } - mgr.pdHTTP.addrs = addrs + mgr.pdHTTP.addrs = processedAddrs mgr.pdHTTP.cli = cli mgr.grpcClis.clis = make(map[uint64]*grpc.ClientConn) return mgr, nil @@ -217,6 +239,9 @@ func (mgr *Mgr) getGrpcConnLocked(ctx context.Context, storeID uint64) (*grpc.Cl return nil, errors.Trace(err) } opt := grpc.WithInsecure() + if mgr.tlsConf != nil { + opt = grpc.WithTransportCredentials(credentials.NewTLS(mgr.tlsConf)) + } ctx, cancel := 
context.WithTimeout(ctx, dialTimeout) keepAlive := 10 keepAliveTimeout := 3 @@ -269,6 +294,11 @@ func (mgr *Mgr) GetTiKV() tikv.Storage { return mgr.storage } +// GetTLSConfig returns the tls config +func (mgr *Mgr) GetTLSConfig() *tls.Config { + return mgr.tlsConf +} + // GetLockResolver gets the LockResolver. func (mgr *Mgr) GetLockResolver() *tikv.LockResolver { return mgr.storage.GetLockResolver() diff --git a/pkg/glue/glue.go b/pkg/glue/glue.go index bc6fb214e..b680370aa 100644 --- a/pkg/glue/glue.go +++ b/pkg/glue/glue.go @@ -4,6 +4,7 @@ import ( "context" "github.com/pingcap/parser/model" + pd "github.com/pingcap/pd/client" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta/autoid" @@ -13,6 +14,7 @@ import ( type Glue interface { BootstrapSession(store kv.Storage) (*domain.Domain, error) CreateSession(store kv.Storage) (Session, error) + Open(path string, option pd.SecurityOption) (kv.Storage, error) } // Session is an abstraction of the session.Session interface. diff --git a/pkg/gluetidb/glue.go b/pkg/gluetidb/glue.go index 6b9f2f667..27ae01c37 100644 --- a/pkg/gluetidb/glue.go +++ b/pkg/gluetidb/glue.go @@ -5,11 +5,14 @@ import ( "context" "github.com/pingcap/parser/model" + pd "github.com/pingcap/pd/client" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/br/pkg/glue" ) @@ -35,6 +38,18 @@ func (Glue) CreateSession(store kv.Storage) (glue.Session, error) { return &tidbSession{se: se}, nil } +// Open implements glue.Glue +func (Glue) Open(path string, option pd.SecurityOption) (kv.Storage, error) { + if option.CAPath != "" { + conf := config.GetGlobalConfig() + conf.Security.ClusterSSLCA = option.CAPath + conf.Security.ClusterSSLCert = option.CertPath + conf.Security.ClusterSSLKey = option.KeyPath + config.StoreGlobalConfig(conf) + } + return tikv.Driver{}.Open(path) +} + // Execute implements glue.Session func (gs *tidbSession) Execute(ctx context.Context, sql string) error { _, err := gs.se.Execute(ctx, sql) diff --git a/pkg/restore/client.go b/pkg/restore/client.go index a06617084..a7e5c4d08 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -2,6 +2,7 @@ package restore import ( "context" + "crypto/tls" "encoding/json" "math" "sort" @@ -20,6 +21,7 @@ import ( "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "github.com/pingcap/br/pkg/checksum" @@ -41,6 +43,7 @@ type Client struct { fileImporter FileImporter workerPool *utils.WorkerPool tableWorkerPool *utils.WorkerPool + tlsConf *tls.Config databases map[string]*utils.Database ddlJobs []*model.Job @@ -57,6 +60,7 @@ func NewRestoreClient( g glue.Glue, pdClient pd.Client, store kv.Storage, + tlsConf *tls.Config, ) (*Client, error) { ctx, cancel := context.WithCancel(ctx) db, err := NewDB(g, store) @@ -71,6 +75,7 @@ func NewRestoreClient( pdClient: pdClient, tableWorkerPool: utils.NewWorkerPool(128, "table"), db: db, + tlsConf: tlsConf, }, nil } @@ -112,8 +117,8 @@ func (rc *Client) InitBackupMeta(backupMeta *backup.BackupMeta, backend *backup. 
rc.backupMeta = backupMeta log.Info("load backupmeta", zap.Int("databases", len(rc.databases)), zap.Int("jobs", len(rc.ddlJobs))) - metaClient := NewSplitClient(rc.pdClient) - importClient := NewImportClient(metaClient) + metaClient := NewSplitClient(rc.pdClient, rc.tlsConf) + importClient := NewImportClient(metaClient, rc.tlsConf) rc.fileImporter = NewFileImporter(rc.ctx, metaClient, importClient, backend, rc.rateLimit) return nil } @@ -128,6 +133,11 @@ func (rc *Client) EnableOnline() { rc.isOnline = true } +// GetTLSConfig returns the tls config +func (rc *Client) GetTLSConfig() *tls.Config { + return rc.tlsConf +} + // GetTS gets a new timestamp from PD func (rc *Client) GetTS(ctx context.Context) (uint64, error) { p, l, err := rc.pdClient.GetTS(ctx) @@ -145,7 +155,7 @@ func (rc *Client) ResetTS(pdAddrs []string) error { i := 0 return utils.WithRetry(rc.ctx, func() error { idx := i % len(pdAddrs) - return utils.ResetTS(pdAddrs[idx], restoreTS) + return utils.ResetTS(pdAddrs[idx], restoreTS, rc.tlsConf) }, newResetTSBackoffer()) } @@ -332,6 +342,9 @@ func (rc *Client) switchTiKVMode(ctx context.Context, mode import_sstpb.SwitchMo bfConf.MaxDelay = time.Second * 3 for _, store := range stores { opt := grpc.WithInsecure() + if rc.tlsConf != nil { + opt = grpc.WithTransportCredentials(credentials.NewTLS(rc.tlsConf)) + } gctx, cancel := context.WithTimeout(ctx, time.Second*5) keepAlive := 10 keepAliveTimeout := 3 diff --git a/pkg/restore/import.go b/pkg/restore/import.go index 01f8456ef..887ee3e88 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -2,6 +2,7 @@ package restore import ( "context" + "crypto/tls" "strings" "sync" "time" @@ -15,6 +16,7 @@ import ( "github.com/pingcap/pd/pkg/codec" "go.uber.org/zap" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" @@ -47,13 +49,15 @@ type importClient struct { mu sync.Mutex metaClient SplitClient clients map[uint64]import_sstpb.ImportSSTClient + tlsConf *tls.Config } // NewImportClient returns a new ImporterClient -func NewImportClient(metaClient SplitClient) ImporterClient { +func NewImportClient(metaClient SplitClient, tlsConf *tls.Config) ImporterClient { return &importClient{ metaClient: metaClient, clients: make(map[uint64]import_sstpb.ImportSSTClient), + tlsConf: tlsConf, } } @@ -107,7 +111,11 @@ func (ic *importClient) getImportClient( if err != nil { return nil, err } - conn, err := grpc.Dial(store.GetAddress(), grpc.WithInsecure()) + opt := grpc.WithInsecure() + if ic.tlsConf != nil { + opt = grpc.WithTransportCredentials(credentials.NewTLS(ic.tlsConf)) + } + conn, err := grpc.Dial(store.GetAddress(), opt) if err != nil { return nil, err } diff --git a/pkg/restore/split_client.go b/pkg/restore/split_client.go index 8a618a191..9ab3ed7f1 100644 --- a/pkg/restore/split_client.go +++ b/pkg/restore/split_client.go @@ -3,6 +3,7 @@ package restore import ( "bytes" "context" + "crypto/tls" "encoding/json" "fmt" "io/ioutil" @@ -20,6 +21,7 @@ import ( pd "github.com/pingcap/pd/client" "github.com/pingcap/pd/server/schedule/placement" "google.golang.org/grpc" + "google.golang.org/grpc/credentials" ) // SplitClient is an external client used by RegionSplitter. @@ -58,13 +60,15 @@ type SplitClient interface { type pdClient struct { mu sync.Mutex client pd.Client + tlsConf *tls.Config storeCache map[uint64]*metapb.Store } // NewSplitClient returns a client used by RegionSplitter. 
-func NewSplitClient(client pd.Client) SplitClient {
+func NewSplitClient(client pd.Client, tlsConf *tls.Config) SplitClient {
 	return &pdClient{
 		client:     client,
+		tlsConf:    tlsConf,
 		storeCache: make(map[uint64]*metapb.Store),
 	}
 }
@@ -199,7 +203,11 @@ func (c *pdClient) BatchSplitRegions(
 	if err != nil {
 		return nil, err
 	}
-	conn, err := grpc.Dial(store.GetAddress(), grpc.WithInsecure())
+	opt := grpc.WithInsecure()
+	if c.tlsConf != nil {
+		opt = grpc.WithTransportCredentials(credentials.NewTLS(c.tlsConf))
+	}
+	conn, err := grpc.Dial(store.GetAddress(), opt)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/restore/util.go b/pkg/restore/util.go
index 64ccfab19..7b70c9806 100644
--- a/pkg/restore/util.go
+++ b/pkg/restore/util.go
@@ -284,7 +284,7 @@ func SplitRanges(
 		elapsed := time.Since(start)
 		summary.CollectDuration("split region", elapsed)
 	}()
-	splitter := NewRegionSplitter(NewSplitClient(client.GetPDClient()))
+	splitter := NewRegionSplitter(NewSplitClient(client.GetPDClient(), client.GetTLSConfig()))
 	return splitter.Split(ctx, ranges, rewriteRules, func(keys [][]byte) {
 		for range keys {
 			updateCh <- struct{}{}
diff --git a/pkg/task/backup.go b/pkg/task/backup.go
index ead4c2351..31594a08f 100644
--- a/pkg/task/backup.go
+++ b/pkg/task/backup.go
@@ -74,7 +74,7 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig
 	if err != nil {
 		return err
 	}
-	mgr, err := newMgr(ctx, g, cfg.PD)
+	mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS)
 	if err != nil {
 		return err
 	}
diff --git a/pkg/task/common.go b/pkg/task/common.go
index c3f866492..2de01b326 100644
--- a/pkg/task/common.go
+++ b/pkg/task/common.go
@@ -2,6 +2,7 @@ package task
 import (
 	"context"
+	"crypto/tls"
 	"fmt"
 	"regexp"
 	"strings"
@@ -9,10 +10,12 @@ import (
 	"github.com/gogo/protobuf/proto"
 	"github.com/pingcap/errors"
 	"github.com/pingcap/kvproto/pkg/backup"
+	pd "github.com/pingcap/pd/client"
 	"github.com/pingcap/tidb-tools/pkg/filter"
 	"github.com/pingcap/tidb/store/tikv"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
+	"go.etcd.io/etcd/pkg/transport"
 	"github.com/pingcap/br/pkg/conn"
 	"github.com/pingcap/br/pkg/glue"
@@ -50,6 +53,25 @@ type TLSConfig struct {
 	Key  string `json:"key" toml:"key"`
 }
+// IsEnabled checks whether TLS is enabled.
+func (tls *TLSConfig) IsEnabled() bool {
+	return tls.CA != ""
+}
+
+// ToTLSConfig generates a tls.Config from the configured CA, certificate and key paths.
+func (tls *TLSConfig) ToTLSConfig() (*tls.Config, error) {
+	tlsInfo := transport.TLSInfo{
+		CertFile:      tls.Cert,
+		KeyFile:       tls.Key,
+		TrustedCAFile: tls.CA,
+	}
+	tlsConfig, err := tlsInfo.ClientConfig()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return tlsConfig, nil
+}
+
 // Config is the common configuration for all BRIE tasks.
 type Config struct {
 	storage.BackendOptions
@@ -179,18 +201,33 @@ func (cfg *Config) ParseFromFlags(flags *pflag.FlagSet) error {
 }
 // newMgr creates a new mgr at the given PD address.
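The optional-TLS dial logic above is the same pattern this patch applies in conn.go, import.go and split_client.go. Factored out as a minimal standalone sketch (dialStore and addr are illustrative names, not code from this patch):

import (
	"crypto/tls"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// dialStore opens a plaintext gRPC connection when tlsConf is nil and a
// TLS-protected one otherwise, mirroring getGrpcConnLocked,
// getImportClient and BatchSplitRegions above.
func dialStore(addr string, tlsConf *tls.Config) (*grpc.ClientConn, error) {
	opt := grpc.WithInsecure()
	if tlsConf != nil {
		opt = grpc.WithTransportCredentials(credentials.NewTLS(tlsConf))
	}
	return grpc.Dial(addr, opt)
}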
-func newMgr(ctx context.Context, g glue.Glue, pds []string) (*conn.Mgr, error) { +func newMgr(ctx context.Context, g glue.Glue, pds []string, tlsConfig TLSConfig) (*conn.Mgr, error) { + var ( + tlsConf *tls.Config + err error + ) pdAddress := strings.Join(pds, ",") if len(pdAddress) == 0 { return nil, errors.New("pd address can not be empty") } + securityOption := pd.SecurityOption{} + if tlsConfig.IsEnabled() { + securityOption.CAPath = tlsConfig.CA + securityOption.CertPath = tlsConfig.Cert + securityOption.KeyPath = tlsConfig.Key + tlsConf, err = tlsConfig.ToTLSConfig() + if err != nil { + return nil, err + } + } + // Disable GC because TiDB enables GC already. - store, err := tikv.Driver{}.Open(fmt.Sprintf("tikv://%s?disableGC=true", pdAddress)) + store, err := g.Open(fmt.Sprintf("tikv://%s?disableGC=true", pdAddress), securityOption) if err != nil { return nil, err } - return conn.NewMgr(ctx, g, pdAddress, store.(tikv.Storage)) + return conn.NewMgr(ctx, g, pdAddress, store.(tikv.Storage), tlsConf, securityOption) } // GetStorage gets the storage backend from the config. diff --git a/pkg/task/restore.go b/pkg/task/restore.go index bb00d189d..ef1ac861f 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -60,13 +60,13 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf ctx, cancel := context.WithCancel(c) defer cancel() - mgr, err := newMgr(ctx, g, cfg.PD) + mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS) if err != nil { return err } defer mgr.Close() - client, err := restore.NewRestoreClient(ctx, g, mgr.GetPDClient(), mgr.GetTiKV()) + client, err := restore.NewRestoreClient(ctx, g, mgr.GetPDClient(), mgr.GetTiKV(), mgr.GetTLSConfig()) if err != nil { return err } diff --git a/pkg/utils/tso.go b/pkg/utils/tso.go index a4ca5f5b5..ec084b20d 100644 --- a/pkg/utils/tso.go +++ b/pkg/utils/tso.go @@ -2,10 +2,12 @@ package utils import ( "bytes" + "crypto/tls" "encoding/json" "fmt" "net/http" "strings" + "time" "github.com/pingcap/errors" ) @@ -15,16 +17,23 @@ const ( ) // ResetTS resets the timestamp of PD to a bigger value -func ResetTS(pdAddr string, ts uint64) error { +func ResetTS(pdAddr string, ts uint64, tlsConf *tls.Config) error { req, err := json.Marshal(struct { TSO string `json:"tso,omitempty"` }{TSO: fmt.Sprintf("%d", ts)}) if err != nil { return err } - // TODO: Support TLS - reqURL := "http://" + pdAddr + resetTSURL - resp, err := http.Post(reqURL, "application/json", strings.NewReader(string(req))) + cli := &http.Client{Timeout: 30 * time.Second} + prefix := "http://" + if tlsConf != nil { + prefix = "https://" + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = tlsConf + cli.Transport = transport + } + reqURL := prefix + pdAddr + resetTSURL + resp, err := cli.Post(reqURL, "application/json", strings.NewReader(string(req))) if err != nil { return errors.Trace(err) } diff --git a/tests/_utils/run_services b/tests/_utils/run_services new file mode 100644 index 000000000..9ae0999bd --- /dev/null +++ b/tests/_utils/run_services @@ -0,0 +1,174 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu
+
+TEST_DIR=/tmp/backup_restore_test
+
+PD_ADDR="127.0.0.1:2379"
+TIDB_IP="127.0.0.1"
+TIDB_PORT="4000"
+TIDB_ADDR="127.0.0.1:4000"
+TIDB_STATUS_ADDR="127.0.0.1:10080"
+# actual tikv_addr are TIKV_ADDR${i}
+TIKV_ADDR="127.0.0.1:2016"
+TIKV_STATUS_ADDR="127.0.0.1:2018"
+TIKV_COUNT=4
+
+stop_services() {
+    killall -9 tikv-server || true
+    killall -9 pd-server || true
+    killall -9 tidb-server || true
+
+    find "$TEST_DIR" -maxdepth 1 -not -path "$TEST_DIR" -not -name "*.log" | xargs rm -r || true
+}
+
+start_services() {
+    stop_services
+
+    echo "Starting PD..."
+    bin/pd-server \
+        --client-urls "http://$PD_ADDR" \
+        --log-file "$TEST_DIR/pd.log" \
+        --data-dir "$TEST_DIR/pd" &
+    # wait until PD is online...
+    i=0
+    while ! curl -o /dev/null -sf "http://$PD_ADDR/pd/api/v1/version"; do
+        i=$((i+1))
+        if [ "$i" -gt 10 ]; then
+            echo 'Failed to start PD'
+            exit 1
+        fi
+        sleep 3
+    done
+
+    echo "Starting TiKV..."
+    for i in $(seq $TIKV_COUNT); do
+        bin/tikv-server \
+            --pd "$PD_ADDR" \
+            -A "$TIKV_ADDR$i" \
+            --status-addr "$TIKV_STATUS_ADDR$i" \
+            --log-file "$TEST_DIR/tikv${i}.log" \
+            -C "tests/config/tikv.toml" \
+            -s "$TEST_DIR/tikv${i}" &
+    done
+    sleep 1
+
+    echo "Starting TiDB..."
+    bin/tidb-server \
+        -P 4000 \
+        --status 10080 \
+        --store tikv \
+        --path "$PD_ADDR" \
+        --config "tests/config/tidb.toml" \
+        --log-file "$TEST_DIR/tidb.log" &
+
+    echo "Verifying TiDB is started..."
+    i=0
+    while ! curl -o /dev/null -sf "http://$TIDB_IP:10080/status"; do
+        i=$((i+1))
+        if [ "$i" -gt 10 ]; then
+            echo 'Failed to start TiDB'
+            exit 1
+        fi
+        sleep 3
+    done
+
+    i=0
+    while ! curl "http://$PD_ADDR/pd/api/v1/cluster/status" -sf | grep -q "\"is_initialized\": true"; do
+        i=$((i+1))
+        if [ "$i" -gt 10 ]; then
+            echo 'Failed to bootstrap cluster'
+            exit 1
+        fi
+        sleep 3
+    done
+}
+
+start_services_withTLS() {
+    stop_services
+
+    PD_CONFIG="$1/config/pd.toml"
+    TIDB_CONFIG="$1/config/tidb.toml"
+    TIKV_CONFIG="$1/config/tikv.toml"
+
+    echo $PD_CONFIG
+    echo $TIDB_CONFIG
+    echo $TIKV_CONFIG
+
+    echo "Starting PD..."
+    bin/pd-server \
+        --client-urls "https://$PD_ADDR" \
+        --log-file "$TEST_DIR/pd.log" \
+        --config "$PD_CONFIG" \
+        --data-dir "$TEST_DIR/pd" &
+    # wait until PD is online...
+    i=0
+    while ! curl -k --cert $1/certificates/client.pem \
+        --key $1/certificates/client-key.pem \
+        -o /dev/null -sf "https://$PD_ADDR/pd/api/v1/version"; do
+        i=$((i+1))
+        if [ "$i" -gt 10 ]; then
+            echo 'Failed to start PD'
+            exit 1
+        fi
+        sleep 3
+    done
+
+    echo "Starting TiKV..."
+    for i in $(seq $TIKV_COUNT); do
+        bin/tikv-server \
+            --pd "$PD_ADDR" \
+            -A "$TIKV_ADDR$i" \
+            --log-file "$TEST_DIR/tikv${i}.log" \
+            -C "$TIKV_CONFIG" \
+            -s "$TEST_DIR/tikv${i}" &
+    done
+    sleep 1
+
+    echo "Starting TiDB..."
+    bin/tidb-server \
+        -P 4000 \
+        --status 10080 \
+        --store tikv \
+        --config "$TIDB_CONFIG" \
+        --path "$PD_ADDR" \
+        --log-file "$TEST_DIR/tidb.log" &
+
+    echo "Verifying TiDB is started..."
+    i=0
+    while ! curl -k --cert $1/certificates/client.pem \
+        --key $1/certificates/client-key.pem \
+        -o /dev/null -sf "https://$TIDB_IP:10080/status"; do
+        i=$((i+1))
+        if [ "$i" -gt 10 ]; then
+            echo 'Failed to start TiDB'
+            exit 1
+        fi
+        sleep 3
+    done
+
+    i=0
+    while !
curl -k --cert $1/certificates/client.pem \ + --key $1/certificates/client-key.pem \ + "https://$PD_ADDR/pd/api/v1/cluster/status" -sf | grep -q "\"is_initialized\": true"; do + i=$((i+1)) + if [ "$i" -gt 10 ]; then + echo 'Failed to bootstrap cluster' + exit 1 + fi + sleep 3 + done +} \ No newline at end of file diff --git a/tests/br_tls/certificates/ca.pem b/tests/br_tls/certificates/ca.pem new file mode 100644 index 000000000..49098d653 --- /dev/null +++ b/tests/br_tls/certificates/ca.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDgDCCAmigAwIBAgIUHWvlRJydvYTR0ot3b8f6IlSHcGUwDQYJKoZIhvcNAQEL +BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl +aWppbmcxEDAOBgNVBAoTB1BpbmdDQVAxEjAQBgNVBAMTCU15IG93biBDQTAgFw0y +MDAyMTgwNzQxMDBaGA8yMTIwMDEyNTA3NDEwMFowVzELMAkGA1UEBhMCQ04xEDAO +BgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0JlaWppbmcxEDAOBgNVBAoTB1BpbmdD +QVAxEjAQBgNVBAMTCU15IG93biBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOAdNtjanFhPaKJHQjr7+h/Cpps5bLc6S1vmgi/EIi9PKv3eyDgtlW1r +As2sjXRMHjcuZp2hHJ9r9FrMQD1rQQq5vJzQqM+eyWLc2tyZWXNWkZVvpjU4Hy5k +jZFLXoyHgAvps/LGu81F5Lk5CvLHswWTyGQUCFi1l/cYcQg6AExh2pO/WJu4hQhe +1mBBIKsJhZ5b5tWruLeI+YIjD1oo1ADMHYLK1BHON2fUmUHRGbrYKu4yCuyip3wn +rbVlpabn7l1JBMatCUJLHR6VWQ2MNjrOXAEUYm4xGEN+tUYyUOGl5mHFguLl3OIn +wj+1dT3WOr/NraPYlwVOnAd9GNbPJj0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG +MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ0CEqxLwEpI6J2gYJRg15oWZrj/ +MA0GCSqGSIb3DQEBCwUAA4IBAQCf8xRf7q1xAaGrc9HCPvN4OFkxDwz1CifrvrLR +ZgIWGUdCHDW2D1IiWKZQWeJKC1otA5x0hrS5kEGfkLFhADEU4txwp70DQaBArPti +pSgheIEbaT0H3BUTYSgS3VL2HjxN5OVMN6jNG3rWyxnJpNOCsJhhJXPK50CRZ7fk +Dcodj6FfEM2bfp2bGkxyVtUch7eepfUVbslXa7jE7Y8M3cr9NoLUcSP6D1RJWkNd +dBQoUsb6Ckq27ozEKOgwuBVv4BrrbFN//+7WHP8Vy6sSMyd+dJLBi6wehJjQhIz6 +vqLWE81rSJuxZqjLpCkFdeEF+9SRjWegU0ZDM4V+YeX53BPC +-----END CERTIFICATE----- diff --git a/tests/br_tls/certificates/client-key.pem b/tests/br_tls/certificates/client-key.pem new file mode 100644 index 000000000..43b021796 --- /dev/null +++ b/tests/br_tls/certificates/client-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA06qb7HABWHrU4CvBUO/2hXGgobi/UlTqTrYGZoJqSrvhKCP6 +HOivZjyWaSMDIfrguN+0C+bd80/5XGMmwjDt8PuZ+Ef3jcJLuB1e+Kms0s5tiTng +6Z028PkSpGvKXjPebWu7zoxVpDcTGM6MmlZQqpOuIgwi+7WX/bIgIu9wooCvJEGq +hScG2wpvK3txnpsVA4eXXdFoH0R5mtqbxVFfhwKMDqga4pRwJStLTDiMrtUz+OKc +rMIrkH4ndhvm2UYTVvhHlkZ3ooeDYsu40NnvedwaE+Ii7EnmcSDF9PaCVrXSK9F/ +KNRXX/x67PMWCVqcNyGtRsCuDe7FnDfGpudVXwIDAQABAoIBAHAzW/v1U4FHe1hp +WUxCJ3eNSAzyFdja0mlu6+2i7B05gpz4lTiFz5RuQXzx5lM43a6iRpqYgsbbed+T +X5RIw5iehnuqCnvGpsSuLQ27Q7VrX30ChUrQ37LVFSC7Usak0B9IoIFYun0WBLV9 +p+KYJqKFLiU2McUj+bGtnoNmUVqRzXQosoQue/pS9OknZ3NU7FxiyI3o4ME8rDvv +9x4vc1zcqbGXTQB224kOT0xoYt8RTmIbHvkR6/yszAtHDBcdzhINVuf38lv9FvaN +FxymwsY4IKPumQZlOEzHvSnpHTrwBMFdXjqpX1VxQb3yznEK+01MHf/tYsiU57IS +WVQMTeECgYEA7Fk0w66LGgdeeWrNTSTBCBPTofDVmR7Tro6k++5XTRt552ZoVz2l +8Lfi/Px5hIyve9gnM7slWrQ72JIQ5xVYZHtic3iwVFuRAD/QVfWU/SNsRsSB/93M +3BEumwJA6vN/qvkZueos3LOxN8kExk6GD0wIl6HjTeJPbbPHqmk6Pr0CgYEA5UQI +skaj8QGpjG8Hc10FeJpYsZiZA7gJaNu4RPqBj+1RHu/eYrL2mouooZdJfIJTmlTz +4NJcfb+Dl6qwbHUQ3mddhauFu1/YRwmaR5QKjwaBdeZbly9ljsRISFpjtosc7IBA +/Bl83xtbCboMdm7cH49X2CgRQ1uVFWraye0MBEsCgYEA43vtHFdYjaIAHa9dkV3J +6aNjtF/gxzNznXSwecfrAU1r5PydezLcEDh94vCDacAbe4EOIm2Dw6zsWUQlvrW9 +0WEs3mWQmnFTvECvnrz0PT2mDus/EO4EKuDi0dG2eC4MeJywVVB/A6J09XOnA9Q6 +lmihcIkiBinIN5etm2kS5aUCgYBCdcRnmZ6woKC7uvvX72FEosmPQgMpVtIzeW4j +YNLqHAtmAnbe+a4PAukxXp/I3ibKGFJSG+j/8uJ8tthJuG3ZavFrbFtqA9C4VwpI +MZwV9fbVbJ+kZfL0veWOQ9Wf9xe9Xzh3XBQcwNtVKH+wXVamN3FpkcPfWM8Q1Fb0 
+LilLnQKBgQCq7+YlSnQX0rbmPTXVVb+B12rbqLDnqA0EuaVGrdu9zPPT04e5fpHU +SD33ibaEyeOF+zLg8T53whDbLJ0tURhUk+BlLTjdd99NXtyGMlfDnIsCnAeJhY8f +Iki6LYbbP2uWV4/5IDy9XW7J42Pfl9QyEVXq+PfTyPPjXC9/J4GOuw== +-----END RSA PRIVATE KEY----- diff --git a/tests/br_tls/certificates/client.pem b/tests/br_tls/certificates/client.pem new file mode 100644 index 000000000..7dace2f9d --- /dev/null +++ b/tests/br_tls/certificates/client.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIUaupI14PPUSshx7FmD7lsVPFahwAwDQYJKoZIhvcNAQEL +BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl +aWppbmcxEDAOBgNVBAoTB1BpbmdDQVAxEjAQBgNVBAMTCU15IG93biBDQTAgFw0y +MDAyMTgwNzQ4MDBaGA8yMTIwMDEyNTA3NDgwMFowETEPMA0GA1UEAxMGY2xpZW50 +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA06qb7HABWHrU4CvBUO/2 +hXGgobi/UlTqTrYGZoJqSrvhKCP6HOivZjyWaSMDIfrguN+0C+bd80/5XGMmwjDt +8PuZ+Ef3jcJLuB1e+Kms0s5tiTng6Z028PkSpGvKXjPebWu7zoxVpDcTGM6MmlZQ +qpOuIgwi+7WX/bIgIu9wooCvJEGqhScG2wpvK3txnpsVA4eXXdFoH0R5mtqbxVFf +hwKMDqga4pRwJStLTDiMrtUz+OKcrMIrkH4ndhvm2UYTVvhHlkZ3ooeDYsu40Nnv +edwaE+Ii7EnmcSDF9PaCVrXSK9F/KNRXX/x67PMWCVqcNyGtRsCuDe7FnDfGpudV +XwIDAQABo4GDMIGAMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcD +AjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBRqlq/slflqw/cdlE+xNcnmmxZwlTAf +BgNVHSMEGDAWgBSdAhKsS8BKSOidoGCUYNeaFma4/zALBgNVHREEBDACggAwDQYJ +KoZIhvcNAQELBQADggEBAMGC48O77wZHZRRxXIpTQDMUSpGTKks76l+s1N7sMLrG +DCQi/XFVfV8e/Z1qs224IyU1IGXXcdwK0Zfa9owUmVmiHE8lznv7m9m7j4BGOshc +pvnJaeuUluKR/QHzwpMsUKudoEyRjn09e0Jl0/IcsKh13rzgd458XR0ShCjxybo4 +nQ1aZb1wOPLG6tpUYsV+x2Coc6TgnJWJYlDbRfpIuj6y16T1kKuWzpm6VU3kbiJ9 +/nzDgauuJHIlXEWL9dBZcpzUibFswIQyGsK7c4AJrtY1OGx0/2nZIIjtGY3gtWyX +XGV9c4kM695gl5rJndB4IPl5GQeJBCNyIaVybh7Va70= +-----END CERTIFICATE----- diff --git a/tests/br_tls/certificates/server-key.pem b/tests/br_tls/certificates/server-key.pem new file mode 100644 index 000000000..2779d6ec6 --- /dev/null +++ b/tests/br_tls/certificates/server-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAq9mcQG/nSLREM2r7s2tCKCE/1KJQvV0xmkIglFD2VDDfYW+C +mBME5LNWbYR6L0yCVHU8B7aVnw1FsbiF4TpUY3w/r4mOGl7QbGivMYvRe6Nh2xUO +TvctwFyv2FvrtBX1rZ5/8QLbz1IFHOtTV7QUzLzWq3fSAiF1vhVsS3BUmh6QvWU8 +q9dylpmUQ22otSRXmirwEzFt9K+w3VK9Z6aac7e2XRurVPxbqgQUq2bblUhii8Fc +dCUA8NenlWp+H64mN2TzVaGb5Csr7SNS7AWDEPKfoo7W3H7bzKlmRVcPeRdftwti +SI1jfboxprya/nbTyBPE/yfLU/SYn/b89epziwIDAQABAoIBACPlI08OULgN90Tq +LsLuP3ZUY5nNgaHcKnU3JMj2FE3Hm5ElkpijOF1w3Dep+T+R8pMjnbNavuvnAMy7 +ZzOBVIknNcI7sDPv5AcQ4q8trkbt/I2fW0rBNIw+j/hYUuZdw+BNABpeZ31pe2nr ++Y+TLNkLBKfyMiqBxK88mE81mmZKblyvXCawW0A/iDDJ7fPNqoGF+y9ylTYaNRPk +aJGnaEZobJ4Lm5tSqW4gRX2ft6Hm67RkvVaopPFnlkvfusXUTFUqEVQCURRUqXbf +1ah2chUHxj22UdY9540H5yVNgEP3oR+uS/hbZqxKcJUTznUW5th3CyQPIKMlGlcB +p+zWlTECgYEAxlY4zGJw4QQwGYMKLyWCSHUgKYrKu2Ub2JKJFMTdsoj9H7DI+WHf +lQaO9NCOo2lt0ofYM1MzEpI5Cl/aMrPw+mwquBbxWdMHXK2eSsUQOVo9HtUjgK2t +J2AYFCfsYndo+hCj3ApMHgiY3sghTCXeycvT52bm11VeNVcs3pKxIYMCgYEA3dAJ +PwIfAB8t+6JCP2yYH4ExNjoMNYMdXqhz4vt3UGwgskRqTW6qdd9JvrRQ/JPvGpDy +T375h/+lLw0E4ljsnOPGSzbXNf4bYRHTwPOL+LqVM4Bg90hjclqphElHChxep1di +WcdArB0oae/l4M96z3GjfnXIUVOp8K6BUQCab1kCgYAFFAQUR5j4SfEpVg+WsXEq +hcUzCxixv5785pOX8opynctNWmtq5zSgTjCu2AAu8u4a69t/ROwT16aaO2YM0kqj +Ps3BNOUtFZgkqVVaOL13mnXiKjbkfo3majFzoqoMw13uuSpY4fKc+j9fxOQFXRrd +M9jTHfFfJhJpbzf44uyiHQKBgFIPwzvyVvG+l05/Ky83x9fv/frn4thxV45LmAQj +sHKqbjZFpWZcSOgu4aOSJlwrhsw3T84lVcAAzmXn1STAbVll01jEQz6QciSpacP6 +1pAAx240UqtptpD6BbkROxz8ffA/Hf3E/6Itb2QyAsP3PqI8kpYYkTG1WCvZA7Kq +HHiRAoGAXbUZ25LcrmyuxKWpbty8fck1tjKPvclQB35rOx6vgnfW6pcKMeebYvgq 
+nJka/QunEReOH/kGxAd/+ymvUBuFQCfFg3Aus+DtAuh9AkBr+cIyPjJqynnIT87J
+MbkOw4uEhDJAtGUR9o1j83N1f05bnEwssXiXR0LZPylb9Qzc4tg=
+-----END RSA PRIVATE KEY-----
diff --git a/tests/br_tls/certificates/server.pem b/tests/br_tls/certificates/server.pem
new file mode 100644
index 000000000..ea5ef2d5f
--- /dev/null
+++ b/tests/br_tls/certificates/server.pem
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDjzCCAnegAwIBAgIUWBTDQm4xOYDxZBTkpCQouREtT8QwDQYJKoZIhvcNAQEL
+BQAwVzELMAkGA1UEBhMCQ04xEDAOBgNVBAgTB0JlaWppbmcxEDAOBgNVBAcTB0Jl
+aWppbmcxEDAOBgNVBAoTB1BpbmdDQVAxEjAQBgNVBAMTCU15IG93biBDQTAgFw0y
+MDAyMTgwOTExMDBaGA8yMTIwMDEyNTA5MTEwMFowFjEUMBIGA1UEAxMLdGlkYi1z
+ZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCr2ZxAb+dItEQz
+avuza0IoIT/UolC9XTGaQiCUUPZUMN9hb4KYEwTks1ZthHovTIJUdTwHtpWfDUWx
+uIXhOlRjfD+viY4aXtBsaK8xi9F7o2HbFQ5O9y3AXK/YW+u0FfWtnn/xAtvPUgUc
+61NXtBTMvNard9ICIXW+FWxLcFSaHpC9ZTyr13KWmZRDbai1JFeaKvATMW30r7Dd
+Ur1npppzt7ZdG6tU/FuqBBSrZtuVSGKLwVx0JQDw16eVan4friY3ZPNVoZvkKyvt
+I1LsBYMQ8p+ijtbcftvMqWZFVw95F1+3C2JIjWN9ujGmvJr+dtPIE8T/J8tT9Jif
+9vz16nOLAgMBAAGjgZEwgY4wDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsG
+AQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBRVB/Bvdzvh
+6WQRWpc9SzcbXLz77zAfBgNVHSMEGDAWgBSdAhKsS8BKSOidoGCUYNeaFma4/zAP
+BgNVHREECDAGhwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQAAqg5pgGQqORKRSdlY
+wzVvzKaulpvjZfVMM6YiOUtmlU0CGWq7E3gLFzkvebpU0KsFlbyZ92h/2Fw5Ay2b
+kxkCy18mJ4lGkvF0cU4UD3XheFMvD2QWWRX4WPpAhStofrWOXeyq3Div2+fQjMJd
+kyeWUzPU7T467IWUHOWNsFAjfVHNsmG45qLGt+XQckHTvASX5IvN+5tkRUCW30vO
+b3BdDQUFglGTUFU2epaZGTti0SYiRiY+9R3zFWX4uBcEBYhk9e/0BU8FqdWW5GjI
+pFpH9t64CjKIdRQXpIn4cogK/GwyuRuDPV/RkMjrIqOi7pGejXwyDe9avHFVR6re
+oowA
+-----END CERTIFICATE-----
diff --git a/tests/br_tls/config/pd.toml b/tests/br_tls/config/pd.toml
new file mode 100644
index 000000000..69cb94b6f
--- /dev/null
+++ b/tests/br_tls/config/pd.toml
@@ -0,0 +1,9 @@
+# config of pd
+
+[security]
+# Path of the file that contains the list of trusted SSL CAs. If set, the following settings must not be empty.
+cacert-path = "tests/br_tls/certificates/ca.pem"
+# Path of file that contains X509 certificate in PEM format.
+cert-path = "tests/br_tls/certificates/server.pem"
+# Path of file that contains X509 key in PEM format.
+key-path = "tests/br_tls/certificates/server-key.pem"
diff --git a/tests/br_tls/config/tidb.toml b/tests/br_tls/config/tidb.toml
new file mode 100644
index 000000000..48a783332
--- /dev/null
+++ b/tests/br_tls/config/tidb.toml
@@ -0,0 +1,14 @@
+# config of tidb
+
+# Schema lease duration
+# There are a lot of DDL statements in the tests; setting this
+# to 360s tests whether BR shuts down gracefully.
+lease = "360s"
+
+[security]
+# Path of file that contains list of trusted SSL CAs for connection with cluster components.
+cluster-ssl-ca = "tests/br_tls/certificates/ca.pem"
+# Path of file that contains X509 certificate in PEM format for connection with cluster components.
+cluster-ssl-cert = "tests/br_tls/certificates/server.pem"
+# Path of file that contains X509 key in PEM format for connection with cluster components.
+cluster-ssl-key = "tests/br_tls/certificates/server-key.pem"
\ No newline at end of file
diff --git a/tests/br_tls/config/tikv.toml b/tests/br_tls/config/tikv.toml
new file mode 100644
index 000000000..b4859a731
--- /dev/null
+++ b/tests/br_tls/config/tikv.toml
@@ -0,0 +1,19 @@
+# config of tikv
+
+[coprocessor]
+region-max-keys = 20
+region-split-keys = 12
+
+[rocksdb]
+max-open-files = 4096
+[raftdb]
+max-open-files = 4096
+[raftstore]
+# true (default value) for high reliability; this can prevent data loss in case of power failure.
+sync-log = false
+
+[security]
+# set the path for certificates. Empty string means disabling secure connections.
+ca-path = "tests/br_tls/certificates/ca.pem"
+cert-path = "tests/br_tls/certificates/server.pem"
+key-path = "tests/br_tls/certificates/server-key.pem"
diff --git a/tests/br_tls/run.sh b/tests/br_tls/run.sh
new file mode 100755
index 000000000..9c494b700
--- /dev/null
+++ b/tests/br_tls/run.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+#
+# Copyright 2019 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu
+
+cur=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+source $cur/../_utils/run_services
+
+DB="$TEST_NAME"
+TABLE="usertable1"
+TABLE2="usertable2"
+
+echo "Restart cluster with tls"
+start_services_withTLS "$cur"
+
+run_sql "DROP DATABASE IF EXISTS $DB;"
+run_sql "CREATE DATABASE $DB;"
+
+run_sql "CREATE TABLE $DB.$TABLE( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.$TABLE VALUES (\"a\", \"b\");"
+run_sql "INSERT INTO $DB.$TABLE VALUES (\"aa\", \"b\");"
+
+run_sql "CREATE TABLE $DB.$TABLE2( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.$TABLE2 VALUES (\"c\", \"d\");"
+
+# backup db
+echo "backup start..."
+run_br --pd $PD_ADDR backup db --db "$DB" -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --ca $cur/certificates/ca.pem --cert $cur/certificates/client.pem --key $cur/certificates/client-key.pem
+
+run_sql "DROP DATABASE $DB;"
+
+# restore db
+echo "restore start..."
+run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR --ca $cur/certificates/ca.pem --cert $cur/certificates/client.pem --key $cur/certificates/client-key.pem
+
+table_count=$(run_sql "use $DB; show tables;" | grep "Tables_in" | wc -l)
+if [ "$table_count" -ne "2" ];then
+    echo "TEST: [$TEST_NAME] failed!"
+    exit 1
+fi
+
+run_sql "DROP DATABASE $DB;"
+
+echo "Restart service without tls"
+start_services
diff --git a/tests/run.sh b/tests/run.sh
index a4edb762a..053013352 100755
--- a/tests/run.sh
+++ b/tests/run.sh
@@ -14,85 +14,11 @@
 # limitations under the License.
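For manually checking the TLS setup exercised above, the PD endpoint can be probed with the same client certificates; a minimal sketch, assuming the default PD_ADDR used by these scripts (--cacert verifies the server certificate, where run_services passes -k instead):

curl --cacert tests/br_tls/certificates/ca.pem \
    --cert tests/br_tls/certificates/client.pem \
    --key tests/br_tls/certificates/client-key.pem \
    "https://127.0.0.1:2379/pd/api/v1/version"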
set -eu +cur=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +source $cur/_utils/run_services -TEST_DIR=/tmp/backup_restore_test - -PD_ADDR="127.0.0.1:2379" -TIDB_IP="127.0.0.1" -TIDB_PORT="4000" -TIDB_ADDR="127.0.0.1:4000" -TIDB_STATUS_ADDR="127.0.0.1:10080" -# actaul tikv_addr are TIKV_ADDR${i} -TIKV_ADDR="127.0.0.1:2016" -TIKV_STATUS_ADDR="127.0.0.1:2018" -TIKV_COUNT=4 - -stop_services() { - killall -9 tikv-server || true - killall -9 pd-server || true - killall -9 tidb-server || true - - find "$TEST_DIR" -maxdepth 1 -not -path "$TEST_DIR" -not -name "*.log" | xargs rm -r || true -} - -start_services() { - stop_services - - mkdir -p "$TEST_DIR" - rm -f "$TEST_DIR"/*.log - - echo "Starting PD..." - bin/pd-server \ - --client-urls "http://$PD_ADDR" \ - --log-file "$TEST_DIR/pd.log" \ - --data-dir "$TEST_DIR/pd" & - # wait until PD is online... - while ! curl -o /dev/null -sf "http://$PD_ADDR/pd/api/v1/version"; do - sleep 1 - done - - echo "Starting TiKV..." - for i in $(seq $TIKV_COUNT); do - bin/tikv-server \ - --pd "$PD_ADDR" \ - -A "$TIKV_ADDR$i" \ - --status-addr "$TIKV_STATUS_ADDR$i" \ - --log-file "$TEST_DIR/tikv${i}.log" \ - -C "tests/config/tikv.toml" \ - -s "$TEST_DIR/tikv${i}" & - done - sleep 1 - - echo "Starting TiDB..." - bin/tidb-server \ - -P 4000 \ - --status 10080 \ - --store tikv \ - --path "$PD_ADDR" \ - --config "tests/config/tidb.toml" \ - --log-file "$TEST_DIR/tidb.log" & - - echo "Verifying TiDB is started..." - i=0 - while ! curl -o /dev/null -sf "http://$TIDB_IP:10080/status"; do - i=$((i+1)) - if [ "$i" -gt 10 ]; then - echo 'Failed to start TiDB' - exit 1 - fi - sleep 3 - done - - i=0 - while ! curl "http://$PD_ADDR/pd/api/v1/cluster/status" -sf | grep -q "\"is_initialized\": true"; do - i=$((i+1)) - if [ "$i" -gt 10 ]; then - echo 'Failed to bootstrap cluster' - exit 1 - fi - sleep 3 - done -} +mkdir -p "$TEST_DIR" +rm -f "$TEST_DIR"/*.log trap stop_services EXIT start_services From e462f80577a2bf8b5adebab2c20cc26a17db19f8 Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Fri, 28 Feb 2020 14:18:15 +0800 Subject: [PATCH 11/46] upgrade golangci and prepare for go 1.14 (#171) Signed-off-by: Neil Shen --- Makefile | 6 ++++++ go.mod | 9 ++++++--- go.sum | 17 +++++++++++++---- pkg/utils/tso.go | 1 + tools.json | 2 +- 5 files changed, 27 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 839a27b9e..a62c2db96 100644 --- a/Makefile +++ b/Makefile @@ -71,6 +71,12 @@ static: --disable interfacer \ --disable goimports \ --disable gofmt \ + --disable wsl \ + --disable funlen \ + --disable whitespace \ + --disable gocognit \ + --disable godox \ + --disable gomnd \ $$($(PACKAGE_DIRECTORIES)) lint: diff --git a/go.mod b/go.mod index 70ca858a0..6720e3602 100644 --- a/go.mod +++ b/go.mod @@ -14,9 +14,10 @@ require ( github.com/gogo/protobuf v1.3.1 github.com/google/btree v1.0.0 github.com/google/uuid v1.1.1 + github.com/klauspost/cpuid v1.2.0 // indirect github.com/mattn/go-runewidth v0.0.7 // indirect - github.com/onsi/ginkgo v1.10.3 // indirect - github.com/onsi/gomega v1.7.1 // indirect + github.com/onsi/ginkgo v1.11.0 // indirect + github.com/onsi/gomega v1.8.1 // indirect github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 github.com/pingcap/kvproto v0.0.0-20200217103621-528e82bf7248 @@ -29,7 +30,7 @@ require ( github.com/prometheus/client_golang v1.0.0 github.com/sirupsen/logrus v1.4.2 github.com/spf13/cobra v0.0.5 - github.com/spf13/pflag v1.0.3 + github.com/spf13/pflag v1.0.5 
github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 @@ -37,6 +38,8 @@ require ( go.uber.org/zap v1.13.0 golang.org/x/net v0.0.0-20191011234655-491137f69257 // indirect golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 + golang.org/x/tools v0.0.0-20200226224502-204d844ad48d // indirect google.golang.org/api v0.14.0 google.golang.org/grpc v1.25.1 + gopkg.in/yaml.v2 v2.2.7 // indirect ) diff --git a/go.sum b/go.sum index 7c8ae23e9..ca889933a 100644 --- a/go.sum +++ b/go.sum @@ -129,6 +129,7 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -213,6 +214,8 @@ github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= @@ -265,13 +268,13 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34= +github.com/onsi/gomega v1.8.1/go.mod 
h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= @@ -380,6 +383,8 @@ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb6 github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -568,6 +573,8 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200206050830-dd0d5d485177 h1:E2vxBajJgSA3TcJhDGTh/kP3VnsvXKl9jSijv+h7svQ= golang.org/x/tools v0.0.0-20200206050830-dd0d5d485177/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200226224502-204d844ad48d h1:loGv/4fxITSrCD4t2P8ZF4oUC4RlRFDAsczcoUS2g6c= +golang.org/x/tools v0.0.0-20200226224502-204d844ad48d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -629,6 +636,8 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/utils/tso.go b/pkg/utils/tso.go index ec084b20d..6f9ca1aa4 100644 --- a/pkg/utils/tso.go +++ b/pkg/utils/tso.go @@ -37,6 +37,7 @@ func ResetTS(pdAddr string, ts uint64, tlsConf *tls.Config) error { if err != nil { return errors.Trace(err) } + defer resp.Body.Close() if resp.StatusCode != 200 && resp.StatusCode != 403 { buf := new(bytes.Buffer) _, err := buf.ReadFrom(resp.Body) diff --git a/tools.json b/tools.json index e3dd19414..2b41d4fce 100644 --- a/tools.json +++ b/tools.json @@ -18,7 +18,7 @@ }, { "Repository": "github.com/golangci/golangci-lint/cmd/golangci-lint", - "Commit": "901cf25e20f86b7e9dc6f73eaba5afbd0cbdc257" + "Commit": 
"b9eef79121fff235d0d794c176ffa2b3d9bd422f" } ], "RetoolVersion": "1.3.7" From 4657932d492d611cbd957399df16320afda0736e Mon Sep 17 00:00:00 2001 From: 3pointer Date: Wed, 4 Mar 2020 11:21:44 +0800 Subject: [PATCH 12/46] backup: add raw backup command (#101) * backup: add raw backup command --- Makefile | 2 + cmd/backup.go | 23 +++ go.mod | 1 + go.sum | 3 + pkg/backup/client.go | 99 ++++++------ pkg/conn/conn.go | 4 +- pkg/task/backup.go | 10 +- pkg/task/backup_raw.go | 142 +++++++++++++++++ pkg/utils/key.go | 70 +++++++++ pkg/utils/key_test.go | 32 ++++ tests/br_rawkv/client.go | 325 +++++++++++++++++++++++++++++++++++++++ tests/br_rawkv/run.sh | 52 +++++++ 12 files changed, 711 insertions(+), 52 deletions(-) create mode 100644 pkg/task/backup_raw.go create mode 100644 pkg/utils/key.go create mode 100644 pkg/utils/key_test.go create mode 100644 tests/br_rawkv/client.go create mode 100644 tests/br_rawkv/run.sh diff --git a/Makefile b/Makefile index a62c2db96..eea680b74 100644 --- a/Makefile +++ b/Makefile @@ -28,6 +28,8 @@ build_for_integration_test: GO111MODULE=on go build -race -o bin/locker tests/br_key_locked/*.go # build gc GO111MODULE=on go build -race -o bin/gc tests/br_z_gc_safepoint/*.go + # build rawkv client + GO111MODULE=on go build -race -o bin/rawkv tests/br_rawkv/*.go test: GO111MODULE=on go test -race -tags leak ./... diff --git a/cmd/backup.go b/cmd/backup.go index 8ae45270c..a0a6bcecb 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -18,6 +18,14 @@ func runBackupCommand(command *cobra.Command, cmdName string) error { return task.RunBackup(GetDefaultContext(), tidbGlue, cmdName, &cfg) } +func runBackupRawCommand(command *cobra.Command, cmdName string) error { + cfg := task.BackupRawConfig{Config: task.Config{LogProgress: HasLogFile()}} + if err := cfg.ParseFromFlags(command.Flags()); err != nil { + return err + } + return task.RunBackupRaw(GetDefaultContext(), tidbGlue, cmdName, &cfg) +} + // NewBackupCommand return a full backup subcommand. func NewBackupCommand() *cobra.Command { command := &cobra.Command{ @@ -43,6 +51,7 @@ func NewBackupCommand() *cobra.Command { newFullBackupCommand(), newDbBackupCommand(), newTableBackupCommand(), + newRawBackupCommand(), ) task.DefineBackupFlags(command.PersistentFlags()) @@ -87,3 +96,17 @@ func newTableBackupCommand() *cobra.Command { task.DefineTableFlags(command) return command } + +// newRawBackupCommand return a raw kv range backup subcommand. 
+func newRawBackupCommand() *cobra.Command { + command := &cobra.Command{ + Use: "raw", + Short: "backup a raw kv range from TiKV cluster", + RunE: func(command *cobra.Command, _ []string) error { + return runBackupRawCommand(command, "Raw backup") + }, + } + + task.DefineRawBackupFlags(command) + return command +} diff --git a/go.mod b/go.mod index 6720e3602..ebad44174 100644 --- a/go.mod +++ b/go.mod @@ -28,6 +28,7 @@ require ( github.com/pingcap/tidb-tools v4.0.0-beta+incompatible github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60 github.com/prometheus/client_golang v1.0.0 + github.com/prometheus/common v0.4.1 github.com/sirupsen/logrus v1.4.2 github.com/spf13/cobra v0.0.5 github.com/spf13/pflag v1.0.5 diff --git a/go.sum b/go.sum index ca889933a..26526840e 100644 --- a/go.sum +++ b/go.sum @@ -26,7 +26,9 @@ github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUW github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= @@ -614,6 +616,7 @@ google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRn google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/backup/client.go b/pkg/backup/client.go index 6d6eff033..fb2960962 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -11,7 +11,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/google/btree" "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/backup" + kvproto "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "github.com/pingcap/parser/model" @@ -36,7 +36,7 @@ import ( // ClientMgr manages connections needed by backup. 
type ClientMgr interface {
-	GetBackupClient(ctx context.Context, storeID uint64) (backup.BackupClient, error)
+	GetBackupClient(ctx context.Context, storeID uint64) (kvproto.BackupClient, error)
 	GetPDClient() pd.Client
 	GetTiKV() tikv.Storage
 	GetLockResolver() *tikv.LockResolver
@@ -53,9 +53,9 @@ type Client struct {
 	mgr       ClientMgr
 	clusterID uint64
-	backupMeta backup.BackupMeta
+	backupMeta kvproto.BackupMeta
 	storage    storage.ExternalStorage
-	backend    *backup.StorageBackend
+	backend    *kvproto.StorageBackend
 }
 // NewBackupClient returns a new backup client
@@ -101,7 +101,7 @@ func (bc *Client) GetTS(ctx context.Context, duration time.Duration) (uint64, er
 }
 // SetStorage set ExternalStorage for client
-func (bc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend, sendCreds bool) error {
+func (bc *Client) SetStorage(ctx context.Context, backend *kvproto.StorageBackend, sendCreds bool) error {
 	var err error
 	bc.storage, err = storage.Create(ctx, backend, sendCreds)
 	if err != nil {
@@ -222,7 +222,7 @@ func BuildBackupRangeAndSchema(
 		return nil, nil, errors.Trace(err)
 	}
-	schema := backup.Schema{
+	schema := kvproto.Schema{
 		Db:    dbData,
 		Table: tableData,
 	}
@@ -296,10 +296,7 @@ func GetBackupDDLJobs(dom *domain.Domain, lastBackupTS, backupTS uint64) ([]*mod
 func (bc *Client) BackupRanges(
 	ctx context.Context,
 	ranges []Range,
-	lastBackupTS uint64,
-	backupTS uint64,
-	rateLimit uint64,
-	concurrency uint32,
+	req kvproto.BackupRequest,
 	updateCh chan<- struct{},
 ) error {
 	start := time.Now()
@@ -313,8 +310,8 @@ func (bc *Client) BackupRanges(
 	defer cancel()
 	go func() {
 		for _, r := range ranges {
-			err := bc.backupRange(
-				ctx, r.StartKey, r.EndKey, lastBackupTS, backupTS, rateLimit, concurrency, updateCh)
+			err := bc.BackupRange(
+				ctx, r.StartKey, r.EndKey, req, updateCh)
 			if err != nil {
 				errCh <- err
 				return
@@ -329,7 +326,7 @@ func (bc *Client) BackupRanges(
 	finished := false
 	for {
-		err := CheckGCSafepoint(ctx, bc.mgr.GetPDClient(), backupTS)
+		err := CheckGCSafepoint(ctx, bc.mgr.GetPDClient(), req.EndVersion)
 		if err != nil {
 			log.Error("check GC safepoint failed", zap.Error(err))
 			return err
@@ -353,14 +350,11 @@ func (bc *Client) BackupRanges(
 	}
 }
-// backupRange make a backup of the given key range.
-func (bc *Client) backupRange(
+// BackupRange makes a backup of the given key range.
+func (bc *Client) BackupRange(
 	ctx context.Context,
 	startKey, endKey []byte,
-	lastBackupTS uint64,
-	backupTS uint64,
-	rateLimit uint64,
-	concurrency uint32,
+	req kvproto.BackupRequest,
 	updateCh chan<- struct{},
 ) (err error) {
 	start := time.Now()
@@ -377,8 +371,8 @@ func (bc *Client) backupRange(
 	log.Info("backup started",
 		zap.Binary("StartKey", startKey),
 		zap.Binary("EndKey", endKey),
-		zap.Uint64("RateLimit", rateLimit),
-		zap.Uint32("Concurrency", concurrency))
+		zap.Uint64("RateLimit", req.RateLimit),
+		zap.Uint32("Concurrency", req.Concurrency))
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
@@ -388,16 +382,11 @@ func (bc *Client) backupRange(
 		return errors.Trace(err)
 	}
-	req := backup.BackupRequest{
-		ClusterId:      bc.clusterID,
-		StartKey:       startKey,
-		EndKey:         endKey,
-		StartVersion:   lastBackupTS,
-		EndVersion:     backupTS,
-		StorageBackend: bc.backend,
-		RateLimit:      rateLimit,
-		Concurrency:    concurrency,
-	}
+	req.ClusterId = bc.clusterID
+	req.StartKey = startKey
+	req.EndKey = endKey
+	req.StorageBackend = bc.backend
+
 	push := newPushDown(ctx, bc.mgr, len(allStores))
 	var results RangeTree
@@ -410,17 +399,27 @@ func (bc *Client) backupRange(
 	// Find and backup remaining ranges.
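With this signature change, callers assemble a single kvproto.BackupRequest up front and BackupRange fills in the cluster ID, key range and storage backend itself. A minimal sketch of a call site (mirroring the pkg/task/backup.go hunk later in this patch; the variable names are illustrative):

req := kvproto.BackupRequest{
	StartVersion: lastBackupTS, // 0 for a non-incremental backup
	EndVersion:   backupTS,
	RateLimit:    rateLimit,
	Concurrency:  concurrency,
}
err = client.BackupRanges(ctx, ranges, req, updateCh)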
// TODO: test fine grained backup. err = bc.fineGrainedBackup( - ctx, startKey, endKey, lastBackupTS, - backupTS, rateLimit, concurrency, results, updateCh) + ctx, startKey, endKey, req.StartVersion, + req.EndVersion, req.RateLimit, req.Concurrency, results, updateCh) if err != nil { return err } - bc.backupMeta.StartVersion = lastBackupTS - bc.backupMeta.EndVersion = backupTS - log.Info("backup time range", - zap.Reflect("StartVersion", lastBackupTS), - zap.Reflect("EndVersion", backupTS)) + bc.backupMeta.StartVersion = req.StartVersion + bc.backupMeta.EndVersion = req.EndVersion + bc.backupMeta.IsRawKv = req.IsRawKv + if req.IsRawKv { + bc.backupMeta.RawRanges = append(bc.backupMeta.RawRanges, + &kvproto.RawRange{StartKey: startKey, EndKey: endKey, Cf: req.Cf}) + log.Info("backup raw ranges", + zap.ByteString("startKey", startKey), + zap.ByteString("endKey", endKey), + zap.String("cf", req.Cf)) + } else { + log.Info("backup time range", + zap.Reflect("StartVersion", req.StartVersion), + zap.Reflect("EndVersion", req.EndVersion)) + } results.tree.Ascend(func(i btree.Item) bool { r := i.(*Range) @@ -479,7 +478,7 @@ func (bc *Client) fineGrainedBackup( } log.Info("start fine grained backup", zap.Int("incomplete", len(incomplete))) // Step2, retry backup on incomplete range - respCh := make(chan *backup.BackupResponse, 4) + respCh := make(chan *kvproto.BackupResponse, 4) errCh := make(chan error, 4) retry := make(chan Range, 4) @@ -566,15 +565,15 @@ func onBackupResponse( bo *tikv.Backoffer, backupTS uint64, lockResolver *tikv.LockResolver, - resp *backup.BackupResponse, -) (*backup.BackupResponse, int, error) { + resp *kvproto.BackupResponse, +) (*kvproto.BackupResponse, int, error) { log.Debug("onBackupResponse", zap.Reflect("resp", resp)) if resp.Error == nil { return resp, 0, nil } backoffMs := 0 switch v := resp.Error.Detail.(type) { - case *backup.Error_KvError: + case *kvproto.Error_KvError: if lockErr := v.KvError.Locked; lockErr != nil { // Try to resolve lock. log.Warn("backup occur kv error", zap.Reflect("error", v)) @@ -592,7 +591,7 @@ func onBackupResponse( log.Error("unexpect kv error", zap.Reflect("KvError", v.KvError)) return nil, backoffMs, errors.Errorf("onBackupResponse error %v", v) - case *backup.Error_RegionError: + case *kvproto.Error_RegionError: regionErr := v.RegionError // Ignore following errors. if !(regionErr.EpochNotMatch != nil || @@ -610,7 +609,7 @@ func onBackupResponse( // TODO: a better backoff. backoffMs = 1000 /* 1s */ return nil, backoffMs, nil - case *backup.Error_ClusterIdError: + case *kvproto.Error_ClusterIdError: log.Error("backup occur cluster ID error", zap.Reflect("error", v)) err := errors.Errorf("%v", resp.Error) @@ -631,7 +630,7 @@ func (bc *Client) handleFineGrained( backupTS uint64, rateLimit uint64, concurrency uint32, - respCh chan<- *backup.BackupResponse, + respCh chan<- *kvproto.BackupResponse, ) (int, error) { leader, pderr := bc.findRegionLeader(ctx, rg.StartKey) if pderr != nil { @@ -640,7 +639,7 @@ func (bc *Client) handleFineGrained( storeID := leader.GetStoreId() max := 0 - req := backup.BackupRequest{ + req := kvproto.BackupRequest{ ClusterId: bc.clusterID, StartKey: rg.StartKey, // TODO: the range may cross region. EndKey: rg.EndKey, @@ -659,7 +658,7 @@ func (bc *Client) handleFineGrained( err = SendBackup( ctx, storeID, client, req, // Handle responses with the same backoffer. 
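A few hunks up, BackupRange now records a raw backup in the meta as an explicit key range plus column family instead of a StartVersion/EndVersion pair. A standalone sketch of that bookkeeping (the keys and cf below are example values; the field names are the kvproto ones used in this patch):

    package main

    import (
        "fmt"

        kvproto "github.com/pingcap/kvproto/pkg/backup"
    )

    func main() {
        meta := kvproto.BackupMeta{IsRawKv: true}
        // Raw backups have no meaningful time range, so the backed-up key
        // range and column family are tracked explicitly in RawRanges.
        meta.RawRanges = append(meta.RawRanges, &kvproto.RawRange{
            StartKey: []byte("1"),
            EndKey:   []byte("10000000"),
            Cf:       "default",
        })
        fmt.Printf("raw=%v ranges=%d cf=%s\n",
            meta.IsRawKv, len(meta.RawRanges), meta.RawRanges[0].Cf)
    }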
-		func(resp *backup.BackupResponse) error {
+		func(resp *kvproto.BackupResponse) error {
 			response, backoffMs, err1 :=
 				onBackupResponse(bo, backupTS, lockResolver, resp)
 			if err1 != nil {
@@ -684,9 +683,9 @@ func (bc *Client) handleFineGrained(
 func SendBackup(
 	ctx context.Context,
 	storeID uint64,
-	client backup.BackupClient,
-	req backup.BackupRequest,
-	respFn func(*backup.BackupResponse) error,
+	client kvproto.BackupClient,
+	req kvproto.BackupRequest,
+	respFn func(*kvproto.BackupResponse) error,
 ) error {
 	log.Info("try backup", zap.Any("backup request", req))
 	ctx, cancel := context.WithCancel(ctx)
diff --git a/pkg/conn/conn.go b/pkg/conn/conn.go
index d1f7858f6..6869c1199 100644
--- a/pkg/conn/conn.go
+++ b/pkg/conn/conn.go
@@ -379,7 +379,9 @@ func (mgr *Mgr) Close() {
 	// Gracefully shutdown domain so it does not affect other TiDB DDL.
 	// Must close domain before closing storage, otherwise it gets stuck forever.
-	mgr.dom.Close()
+	if mgr.dom != nil {
+		mgr.dom.Close()
+	}
 
 	atomic.StoreUint32(&tikv.ShuttingDown, 1)
 	mgr.storage.Close()
diff --git a/pkg/task/backup.go b/pkg/task/backup.go
index 31594a08f..2d9468394 100644
--- a/pkg/task/backup.go
+++ b/pkg/task/backup.go
@@ -5,6 +5,7 @@ import (
 	"time"
 
 	"github.com/pingcap/errors"
+	kvproto "github.com/pingcap/kvproto/pkg/backup"
 	"github.com/pingcap/log"
 	"github.com/pingcap/parser/model"
 	"github.com/pingcap/tidb-tools/pkg/filter"
@@ -131,8 +132,15 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig
 	// Redirect to log if there is no log file to avoid unreadable output.
 	updateCh := utils.StartProgress(
 		ctx, cmdName, int64(approximateRegions), !cfg.LogProgress)
+
+	req := kvproto.BackupRequest{
+		StartVersion: cfg.LastBackupTS,
+		EndVersion:   backupTS,
+		RateLimit:    cfg.RateLimit,
+		Concurrency:  cfg.Concurrency,
+	}
 	err = client.BackupRanges(
-		ctx, ranges, cfg.LastBackupTS, backupTS, cfg.RateLimit, cfg.Concurrency, updateCh)
+		ctx, ranges, req, updateCh)
 	if err != nil {
 		return err
 	}
diff --git a/pkg/task/backup_raw.go b/pkg/task/backup_raw.go
new file mode 100644
index 000000000..51d5267a5
--- /dev/null
+++ b/pkg/task/backup_raw.go
@@ -0,0 +1,142 @@
+package task
+
+import (
+	"bytes"
+	"context"
+
+	"github.com/pingcap/errors"
+	kvproto "github.com/pingcap/kvproto/pkg/backup"
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+
+	"github.com/pingcap/br/pkg/backup"
+	"github.com/pingcap/br/pkg/glue"
+	"github.com/pingcap/br/pkg/storage"
+	"github.com/pingcap/br/pkg/summary"
+	"github.com/pingcap/br/pkg/utils"
+)
+
+const (
+	flagKeyFormat        = "format"
+	flagTiKVColumnFamily = "cf"
+	flagStartKey         = "start"
+	flagEndKey           = "end"
+)
+
+// BackupRawConfig is the configuration specific for raw backup tasks.
+type BackupRawConfig struct {
+	Config
+
+	StartKey []byte `json:"start-key" toml:"start-key"`
+	EndKey   []byte `json:"end-key" toml:"end-key"`
+	CF       string `json:"cf" toml:"cf"`
+}
+
+// DefineRawBackupFlags defines common flags for the raw backup command.
+func DefineRawBackupFlags(command *cobra.Command) {
+	command.Flags().StringP(flagKeyFormat, "", "hex", "start/end key format, supports raw|escaped|hex")
+	command.Flags().StringP(flagTiKVColumnFamily, "", "default", "backup the specified cf, corresponding to a TiKV cf")
+	command.Flags().StringP(flagStartKey, "", "", "backup raw kv start key, key is inclusive")
+	command.Flags().StringP(flagEndKey, "", "", "backup raw kv end key, key is exclusive")
+}
+
+// ParseFromFlags parses the backup-related flags from the flag set.
+func (cfg *BackupRawConfig) ParseFromFlags(flags *pflag.FlagSet) error { + format, err := flags.GetString(flagKeyFormat) + if err != nil { + return err + } + start, err := flags.GetString(flagStartKey) + if err != nil { + return err + } + cfg.StartKey, err = utils.ParseKey(format, start) + if err != nil { + return err + } + end, err := flags.GetString(flagEndKey) + if err != nil { + return err + } + cfg.EndKey, err = utils.ParseKey(format, end) + if err != nil { + return err + } + + if bytes.Compare(cfg.StartKey, cfg.EndKey) >= 0 { + return errors.New("endKey must be greater than startKey") + } + + cfg.CF, err = flags.GetString(flagTiKVColumnFamily) + if err != nil { + return err + } + if err = cfg.Config.ParseFromFlags(flags); err != nil { + return errors.Trace(err) + } + return nil +} + +// RunBackupRaw starts a backup task inside the current goroutine. +func RunBackupRaw(c context.Context, g glue.Glue, cmdName string, cfg *BackupRawConfig) error { + ctx, cancel := context.WithCancel(c) + defer cancel() + + u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions) + if err != nil { + return err + } + mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS) + if err != nil { + return err + } + defer mgr.Close() + + client, err := backup.NewBackupClient(ctx, mgr) + if err != nil { + return err + } + if err = client.SetStorage(ctx, u, cfg.SendCreds); err != nil { + return err + } + + defer summary.Summary(cmdName) + + backupRange := backup.Range{StartKey: cfg.StartKey, EndKey: cfg.EndKey} + + // The number of regions need to backup + approximateRegions, err := mgr.GetRegionCount(ctx, backupRange.StartKey, backupRange.EndKey) + if err != nil { + return err + } + + summary.CollectInt("backup total regions", approximateRegions) + + // Backup + // Redirect to log if there is no log file to avoid unreadable output. 
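ParseFromFlags treats the start key as inclusive and the end key as exclusive, which is why it rejects start >= end. With the default hex format, the same one the br_rawkv test below drives with --start 31 --end 3130303030303030, the decode and bound check look like this standalone sketch (stdlib only):

    package main

    import (
        "bytes"
        "encoding/hex"
        "fmt"
        "log"
    )

    func main() {
        // The default --format is hex: "31" decodes to "1" and
        // "3130303030303030" decodes to "10000000".
        start, err := hex.DecodeString("31")
        if err != nil {
            log.Fatal(err)
        }
        end, err := hex.DecodeString("3130303030303030")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("start=%q end=%q\n", start, end)

        // Start is inclusive and end is exclusive, so start must sort
        // strictly before end, exactly as ParseFromFlags checks.
        if bytes.Compare(start, end) >= 0 {
            log.Fatal("endKey must be greater than startKey")
        }
    }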
+	updateCh := utils.StartProgress(
+		ctx, cmdName, int64(approximateRegions), !cfg.LogProgress)
+
+	req := kvproto.BackupRequest{
+		StartVersion: 0,
+		EndVersion:   0,
+		RateLimit:    cfg.RateLimit,
+		Concurrency:  cfg.Concurrency,
+		IsRawKv:      true,
+		Cf:           cfg.CF,
+	}
+
+	err = client.BackupRange(ctx, backupRange.StartKey, backupRange.EndKey, req, updateCh)
+	if err != nil {
+		return err
+	}
+	// Backup has finished
+	close(updateCh)
+
+	// Save backup meta
+	err = client.SaveBackupMeta(ctx, nil)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/pkg/utils/key.go b/pkg/utils/key.go
new file mode 100644
index 000000000..8ed1109b0
--- /dev/null
+++ b/pkg/utils/key.go
@@ -0,0 +1,70 @@
+package utils
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/pingcap/errors"
+)
+
+// ParseKey parses a key in the given format.
+func ParseKey(format, key string) ([]byte, error) {
+	switch format {
+	case "raw":
+		return []byte(key), nil
+	case "escaped":
+		return unescapedKey(key)
+	case "hex":
+		key, err := hex.DecodeString(key)
+		if err != nil {
+			return nil, errors.WithStack(err)
+		}
+		return key, nil
+	}
+	return nil, errors.New("unknown format")
+}
+
+// Ref PD: https://github.com/pingcap/pd/blob/master/tools/pd-ctl/pdctl/command/region_command.go#L334
+func unescapedKey(text string) ([]byte, error) {
+	var buf []byte
+	r := bytes.NewBuffer([]byte(text))
+	for {
+		c, err := r.ReadByte()
+		if err != nil {
+			if err != io.EOF {
+				return nil, errors.WithStack(err)
+			}
+			break
+		}
+		if c != '\\' {
+			buf = append(buf, c)
+			continue
+		}
+		n := r.Next(1)
+		if len(n) == 0 {
+			return nil, io.EOF
+		}
+		// See: https://golang.org/ref/spec#Rune_literals
+		if idx := strings.IndexByte(`abfnrtv\'"`, n[0]); idx != -1 {
+			buf = append(buf, []byte("\a\b\f\n\r\t\v\\'\"")[idx])
+			continue
+		}
+
+		switch n[0] {
+		case 'x':
+			fmt.Sscanf(string(r.Next(2)), "%02x", &c)
+			buf = append(buf, c)
+		default:
+			n = append(n, r.Next(2)...)
+			_, err := fmt.Sscanf(string(n), "%03o", &c)
+			if err != nil {
+				return nil, errors.WithStack(err)
+			}
+			buf = append(buf, c)
+		}
+	}
+	return buf, nil
+}
diff --git a/pkg/utils/key_test.go b/pkg/utils/key_test.go
new file mode 100644
index 000000000..092962135
--- /dev/null
+++ b/pkg/utils/key_test.go
@@ -0,0 +1,32 @@
+package utils
+
+import (
+	"encoding/hex"
+
+	. 
"github.com/pingcap/check" +) + +type testKeySuite struct{} + +var _ = Suite(&testKeySuite{}) + +func (r *testKeySuite) TestParseKey(c *C) { + rawKey := "1234" + parsedKey, err := ParseKey("raw", rawKey) + c.Assert(err, IsNil) + c.Assert(parsedKey, BytesEquals, []byte(rawKey)) + + escapedKey := "\\a\\x1" + parsedKey, err = ParseKey("escaped", escapedKey) + c.Assert(err, IsNil) + c.Assert(parsedKey, BytesEquals, []byte("\a\x01")) + + hexKey := hex.EncodeToString([]byte("1234")) + parsedKey, err = ParseKey("hex", hexKey) + c.Assert(err, IsNil) + c.Assert(parsedKey, BytesEquals, []byte("1234")) + + _, err = ParseKey("notSupport", rawKey) + c.Assert(err, ErrorMatches, "*unknown format*") + +} diff --git a/tests/br_rawkv/client.go b/tests/br_rawkv/client.go new file mode 100644 index 000000000..bd13839f6 --- /dev/null +++ b/tests/br_rawkv/client.go @@ -0,0 +1,325 @@ +package main + +import ( + "bytes" + "encoding/hex" + "flag" + "fmt" + "hash/crc64" + "math/rand" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/store/tikv" + "github.com/prometheus/common/log" +) + +var ( + pdAddr = flag.String("pd", "127.0.0.1:2379", "Address of PD") + runMode = flag.String("mode", "", "Mode. One of 'rand-gen', 'checksum', 'scan' and 'diff'") + startKeyStr = flag.String("start-key", "", "Start key in hex") + endKeyStr = flag.String("end-key", "", "End key in hex") + keyMaxLen = flag.Int("key-max-len", 32, "Max length of keys for rand-gen mode") + concurrency = flag.Int("concurrency", 32, "Concurrency to run rand-gen") + duration = flag.Int("duration", 10, "duration(second) of rand-gen") +) + +func createClient(addr string) (*tikv.RawKVClient, error) { + cli, err := tikv.NewRawKVClient([]string{addr}, config.Security{}) + return cli, err +} + +func main() { + flag.Parse() + + startKey, err := hex.DecodeString(*startKeyStr) + if err != nil { + log.Fatalf("Invalid startKey: %v, err: %+v", startKeyStr, err) + } + endKey, err := hex.DecodeString(*endKeyStr) + if err != nil { + log.Fatalf("Invalid endKey: %v, err: %+v", endKeyStr, err) + } + if len(endKey) == 0 { + log.Fatal("Empty endKey is not supported yet") + } + + if *runMode == "test-rand-key" { + testRandKey(startKey, endKey, *keyMaxLen) + return + } + + client, err := createClient(*pdAddr) + if err != nil { + log.Fatalf("Failed to create client to %v, err: %+v", *pdAddr, err) + } + + switch *runMode { + case "rand-gen": + err = randGenWithDuration(client, startKey, endKey, *keyMaxLen, *concurrency, *duration) + case "checksum": + err = checksum(client, startKey, endKey) + case "scan": + err = scan(client, startKey, endKey) + case "delete": + err = deleteRange(client, startKey, endKey) + } + + if err != nil { + log.Fatalf("Error: %+v", err) + } +} + +func randGenWithDuration(client *tikv.RawKVClient, startKey, endKey []byte, + maxLen int, concurrency int, duration int) error { + var err error + ok := make(chan struct{}) + go func() { + err = randGen(client, startKey, endKey, maxLen, concurrency) + ok <- struct{}{} + }() + select { + case <-time.After(time.Second * time.Duration(duration)): + case <-ok: + } + return err +} + +func randGen(client *tikv.RawKVClient, startKey, endKey []byte, maxLen int, concurrency int) error { + log.Infof("Start rand-gen from %v to %v, maxLen %v", hex.EncodeToString(startKey), hex.EncodeToString(endKey), maxLen) + log.Infof("Rand-gen will keep running. 
Press Ctrl+C to stop it manually.")
+
+	// Cannot generate shorter key than commonPrefix
+	commonPrefixLen := 0
+	for ; commonPrefixLen < len(startKey) && commonPrefixLen < len(endKey) &&
+		startKey[commonPrefixLen] == endKey[commonPrefixLen]; commonPrefixLen++ {
+		continue
+	}
+
+	if maxLen < commonPrefixLen {
+		return errors.Errorf("maxLen (%v) < commonPrefixLen (%v)", maxLen, commonPrefixLen)
+	}
+
+	const batchSize = 32
+
+	errCh := make(chan error, concurrency)
+	for i := 0; i < concurrency; i++ {
+		go func() {
+			for {
+				keys := make([][]byte, 0, batchSize)
+				values := make([][]byte, 0, batchSize)
+
+				for i := 0; i < batchSize; i++ {
+					key := randKey(startKey, endKey, maxLen)
+					keys = append(keys, key)
+					value := randValue()
+					values = append(values, value)
+				}
+
+				err := client.BatchPut(keys, values)
+				if err != nil {
+					errCh <- errors.Trace(err)
+				}
+			}
+		}()
+	}
+
+	err := <-errCh
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	return nil
+}
+
+func testRandKey(startKey, endKey []byte, maxLen int) {
+	for {
+		k := randKey(startKey, endKey, maxLen)
+		if bytes.Compare(k, startKey) < 0 || bytes.Compare(k, endKey) >= 0 {
+			panic(hex.EncodeToString(k))
+		}
+	}
+}
+
+func randKey(startKey, endKey []byte, maxLen int) []byte {
+Retry:
+	for { // Regenerate on fail
+		result := make([]byte, 0, maxLen)
+
+		upperUnbounded := false
+		lowerUnbounded := false
+
+		for i := 0; i < maxLen; i++ {
+			upperBound := 256
+			if !upperUnbounded {
+				if i >= len(endKey) {
+					// The generated key is the same as endKey which is invalid. Regenerate it.
+					continue Retry
+				}
+				upperBound = int(endKey[i]) + 1
+			}
+
+			lowerBound := 0
+			if !lowerUnbounded {
+				if i >= len(startKey) {
+					lowerUnbounded = true
+				} else {
+					lowerBound = int(startKey[i])
+				}
+			}
+
+			if lowerUnbounded {
+				if rand.Intn(257) == 0 {
+					return result
+				}
+			}
+
+			value := rand.Intn(upperBound - lowerBound)
+			value += lowerBound
+
+			if value < upperBound-1 {
+				upperUnbounded = true
+			}
+			if value > lowerBound {
+				lowerUnbounded = true
+			}
+
+			result = append(result, uint8(value))
+		}
+
+		return result
+	}
+}
+
+func randValue() []byte {
+	result := make([]byte, 0, 512)
+	for i := 0; i < 512; i++ {
+		value := rand.Intn(257)
+		if value == 256 {
+			if i > 0 {
+				return result
+			}
+			value--
+		}
+		result = append(result, uint8(value))
+	}
+	return result
+}
+
+func checksum(client *tikv.RawKVClient, startKey, endKey []byte) error {
+	log.Infof("Start checksum on range %v to %v", hex.EncodeToString(startKey), hex.EncodeToString(endKey))
+
+	scanner := newRawKVScanner(client, startKey, endKey)
+	digest := crc64.New(crc64.MakeTable(crc64.ECMA))
+
+	var res uint64
+
+	for {
+		k, v, err := scanner.Next()
+		if err != nil {
+			return errors.Trace(err)
+		}
+		if len(k) == 0 {
+			break
+		}
+		_, _ = digest.Write(k)
+		_, _ = digest.Write(v)
+		res ^= digest.Sum64()
+	}
+
+	fmt.Printf("Checksum result: %016x\n", res)
+	return nil
+}
+
+func deleteRange(client *tikv.RawKVClient, startKey, endKey []byte) error {
+	log.Infof("Start deleting data in range %v to %v", hex.EncodeToString(startKey), hex.EncodeToString(endKey))
+	return client.DeleteRange(startKey, endKey)
+}
+
+func scan(client *tikv.RawKVClient, startKey, endKey []byte) error {
+	log.Infof("Start scanning data in range %v to %v", hex.EncodeToString(startKey), hex.EncodeToString(endKey))
+
+	scanner := newRawKVScanner(client, startKey, endKey)
+
+	var key []byte
+	for {
+		k, v, err := scanner.Next()
+		if err != nil {
+			return errors.Trace(err)
+		}
+		if len(k) == 0 {
+			break
+		}
+		fmt.Printf("key: %v, value: %v\n", hex.EncodeToString(k), hex.EncodeToString(v))
+		if bytes.Compare(key, k) >= 0 {
+			log.Errorf("Scan result is not in order. "+
+				"Previous key: %v, Current key: %v",
+				hex.EncodeToString(key), hex.EncodeToString(k))
+		}
+		key = k // remember the previous key for the next-iteration order check
+	}
+
+	log.Infof("Finished scanning.")
+	return nil
+}
+
+const defaultScanBatchSize = 128
+
+type rawKVScanner struct {
+	client    *tikv.RawKVClient
+	batchSize int
+
+	currentKey []byte
+	endKey     []byte
+
+	bufferKeys   [][]byte
+	bufferValues [][]byte
+	bufferCursor int
+	noMore       bool
+}
+
+func newRawKVScanner(client *tikv.RawKVClient, startKey, endKey []byte) *rawKVScanner {
+	return &rawKVScanner{
+		client:    client,
+		batchSize: defaultScanBatchSize,
+
+		currentKey: startKey,
+		endKey:     endKey,
+
+		noMore: false,
+	}
+}
+
+func (s *rawKVScanner) Next() ([]byte, []byte, error) {
+	if s.bufferCursor >= len(s.bufferKeys) {
+		if s.noMore {
+			return nil, nil, nil
+		}
+
+		s.bufferCursor = 0
+
+		batchSize := s.batchSize
+		var err error
+		s.bufferKeys, s.bufferValues, err = s.client.Scan(s.currentKey, s.endKey, batchSize)
+		if err != nil {
+			return nil, nil, errors.Trace(err)
+		}
+
+		if len(s.bufferKeys) < batchSize {
+			s.noMore = true
+		}
+
+		if len(s.bufferKeys) == 0 {
+			return nil, nil, nil
+		}
+
+		bufferKey := s.bufferKeys[len(s.bufferKeys)-1]
+		bufferKey = append(bufferKey, 0)
+		s.currentKey = bufferKey
+	}
+
+	key := s.bufferKeys[s.bufferCursor]
+	value := s.bufferValues[s.bufferCursor]
+	s.bufferCursor++
+	return key, value, nil
+}
diff --git a/tests/br_rawkv/run.sh b/tests/br_rawkv/run.sh
new file mode 100644
index 000000000..a3f62311f
--- /dev/null
+++ b/tests/br_rawkv/run.sh
@@ -0,0 +1,52 @@
+#!/bin/sh
+#
+# Copyright 2019 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu
+
+BACKUP_DIR="raw_backup"
+
+# generate raw kv randomly in range[start-key, end-key) in 10s
+bin/rawkv --pd $PD_ADDR --mode rand-gen --start-key 31 --end-key 3130303030303030 --duration 10
+
+# output checksum
+bin/rawkv --pd $PD_ADDR --mode checksum --start-key 31 --end-key 3130303030303030 > /$TEST_DIR/checksum.out
+
+checksum_ori=$(cat /$TEST_DIR/checksum.out | grep result | awk '{print $3}')
+
+# backup rawkv
+echo "backup start..."
+run_br --pd $PD_ADDR backup raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 31 --end 3130303030303030 --format hex --concurrency 4
+
+# delete data in range[start-key, end-key)
+bin/rawkv --pd $PD_ADDR --mode delete --start-key 31 --end-key 3130303030303030
+
+# TODO: Finish check after restore ready
+# restore rawkv
+# echo "restore start..."
+# run_br --pd $PD_ADDR restore raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 31 --end 3130303030303030 --format hex --concurrency 4
+
+# output checksum after restore
+# bin/rawkv --pd $PD_ADDR --mode checksum --start-key 31 --end-key 3130303030303030 > /$TEST_DIR/checksum.out
+
+checksum_new=$(cat /$TEST_DIR/checksum.out | grep result | awk '{print $3}')
+
+if [ "$checksum_ori" == "$checksum_new" ];then
+    echo "TEST: [$TEST_NAME] succeeded!"
+else
+    echo "TEST: [$TEST_NAME] failed!"
+ exit 1 +fi + + From 0a1a044764b7f9acfd2a32363388836415f21d36 Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Thu, 5 Mar 2020 11:50:28 +0800 Subject: [PATCH 13/46] restore: speed up retry on not leader (#179) * tests: stable cluster start up Signed-off-by: Neil Shen * tests: fix unbound var Signed-off-by: Neil Shen * restore: speed retry on not leader Signed-off-by: Neil Shen * address comments Signed-off-by: Neil Shen * tests: add --cacert flag Signed-off-by: Neil Shen * make codecov green Signed-off-by: Neil Shen * address comments Signed-off-by: Neil Shen --- .codecov.yml | 10 +++ pkg/restore/backoff.go | 3 +- pkg/restore/import.go | 108 +++++++++++++++++++------------- tests/_utils/run_services | 36 +++++++++-- tests/br_table_partition/run.sh | 12 ++-- 5 files changed, 112 insertions(+), 57 deletions(-) create mode 100644 .codecov.yml diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 000000000..674895cd1 --- /dev/null +++ b/.codecov.yml @@ -0,0 +1,10 @@ +codecov: + require_ci_to_pass: yes + +coverage: + status: + project: + default: + # Allow the coverage to drop by 3% + threshold: 3% + patch: off diff --git a/pkg/restore/backoff.go b/pkg/restore/backoff.go index dae14e109..44a493138 100644 --- a/pkg/restore/backoff.go +++ b/pkg/restore/backoff.go @@ -11,7 +11,6 @@ import ( ) var ( - errNotLeader = errors.NewNoStackError("not leader") errEpochNotMatch = errors.NewNoStackError("epoch not match") errKeyNotInRegion = errors.NewNoStackError("key not in region") errRegionNotFound = errors.NewNoStackError("region not found") @@ -66,7 +65,7 @@ func newDownloadSSTBackoffer() utils.Backoffer { func (bo *importerBackoffer) NextBackoff(err error) time.Duration { switch errors.Cause(err) { - case errResp, errGrpc, errEpochNotMatch, errNotLeader: + case errResp, errGrpc, errEpochNotMatch: bo.delayTime = 2 * bo.delayTime bo.attempt-- case errRangeIsEmpty, errRewriteRuleNotFound: diff --git a/pkg/restore/import.go b/pkg/restore/import.go index 887ee3e88..c1e1b5dd8 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -171,9 +171,9 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul ctx, cancel := context.WithTimeout(importer.ctx, importScanRegionTime) defer cancel() // Scan regions covered by the file range - regionInfos, err1 := importer.metaClient.ScanRegions(ctx, startKey, endKey, 0) - if err1 != nil { - return errors.Trace(err1) + regionInfos, errScanRegion := importer.metaClient.ScanRegions(ctx, startKey, endKey, 0) + if errScanRegion != nil { + return errors.Trace(errScanRegion) } log.Debug("scan regions", zap.Stringer("file", file), zap.Int("count", len(regionInfos))) // Try to download and ingest the file in every region @@ -181,13 +181,13 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul info := regionInfo // Try to download file. 
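The rewritten loop below drops errNotLeader from the shared backoffer: ingestSST now surfaces the raw IngestResponse, and Import inspects the response error itself, retrying a NotLeader immediately with the leader carried inside the error and falling back to a PD region lookup only when that hint is absent. A condensed standalone sketch of that retry shape (the types and the ingest helper here are stand-ins, not the BR API):

    package main

    import "fmt"

    // notLeader is a stand-in for the NotLeader detail carried by an ingest
    // response; a zero store ID models a hint-less error.
    type notLeader struct{ newLeaderStore uint64 }

    // tryIngest simulates one ingest attempt against a store; in this
    // sketch, store 3 is the current leader.
    func tryIngest(store uint64) *notLeader {
        if store != 3 {
            return &notLeader{newLeaderStore: 3}
        }
        return nil
    }

    // ingestWithRetry mirrors the shape of the ingestRetry loop: retry at
    // once on the embedded leader hint, ask PD only when there is no hint.
    func ingestWithRetry(store uint64, leaderFromPD func() uint64) {
        for {
            errNotLeader := tryIngest(store)
            if errNotLeader == nil {
                fmt.Println("ingest succeeded on store", store)
                return
            }
            if errNotLeader.newLeaderStore != 0 {
                store = errNotLeader.newLeaderStore // fast path: use the hint
                continue
            }
            store = leaderFromPD() // slow path: refresh the leader from PD
        }
    }

    func main() {
        ingestWithRetry(1, func() uint64 { return 3 })
    }

Retrying on the hint skips a full backoff-and-PD round trip, which is where the "speed up retry on not leader" in the subject comes from.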
var downloadMeta *import_sstpb.SSTMeta - err1 = utils.WithRetry(importer.ctx, func() error { + errDownload := utils.WithRetry(importer.ctx, func() error { var e error downloadMeta, e = importer.downloadSST(info, file, rewriteRules) return e }, newDownloadSSTBackoffer()) - if err1 != nil { - if err1 == errRewriteRuleNotFound || err1 == errRangeIsEmpty { + if errDownload != nil { + if errDownload == errRewriteRuleNotFound || errDownload == errRangeIsEmpty { // Skip this region continue } @@ -196,32 +196,68 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul zap.Stringer("region", info.Region), zap.Binary("startKey", startKey), zap.Binary("endKey", endKey), - zap.Error(err1)) - return err1 + zap.Error(errDownload)) + return errDownload } - err1 = importer.ingestSST(downloadMeta, info) - // If error is `NotLeader`, update the region info and retry - for errors.Cause(err1) == errNotLeader { - log.Debug("ingest sst returns not leader error, retry it", - zap.Stringer("region", info.Region)) - var newInfo *RegionInfo - newInfo, err1 = importer.metaClient.GetRegion(importer.ctx, info.Region.GetStartKey()) - if err1 != nil { - break + ingestResp, errIngest := importer.ingestSST(downloadMeta, info) + ingestRetry: + for errIngest == nil { + errPb := ingestResp.GetError() + if errPb == nil { + // Ingest success + break ingestRetry } - if !checkRegionEpoch(newInfo, info) { - err1 = errEpochNotMatch - break + switch { + case errPb.NotLeader != nil: + // If error is `NotLeader`, update the region info and retry + var newInfo *RegionInfo + if newLeader := errPb.GetNotLeader().GetLeader(); newLeader != nil { + newInfo = &RegionInfo{ + Leader: newLeader, + Region: info.Region, + } + } else { + // Slow path, get region from PD + newInfo, errIngest = importer.metaClient.GetRegion( + importer.ctx, info.Region.GetStartKey()) + if errIngest != nil { + break ingestRetry + } + } + log.Debug("ingest sst returns not leader error, retry it", + zap.Stringer("region", info.Region), + zap.Stringer("newLeader", newInfo.Leader)) + + if !checkRegionEpoch(newInfo, info) { + errIngest = errors.AddStack(errEpochNotMatch) + break ingestRetry + } + ingestResp, errIngest = importer.ingestSST(downloadMeta, newInfo) + case errPb.EpochNotMatch != nil: + // TODO handle epoch not match error + // 1. retry download if needed + // 2. 
retry ingest + errIngest = errors.AddStack(errEpochNotMatch) + break ingestRetry + case errPb.RegionNotFound != nil: + errIngest = errors.AddStack(errRegionNotFound) + break ingestRetry + case errPb.KeyNotInRegion != nil: + errIngest = errors.AddStack(errKeyNotInRegion) + break ingestRetry + default: + errIngest = errors.Errorf("ingest error %s", errPb) + break ingestRetry } - err1 = importer.ingestSST(downloadMeta, newInfo) } - if err1 != nil { + + if errIngest != nil { log.Error("ingest file failed", zap.Stringer("file", file), zap.Stringer("range", downloadMeta.GetRange()), zap.Stringer("region", info.Region), - zap.Error(err1)) - return err1 + zap.Error(errIngest)) + return errIngest } summary.CollectSuccessUnit(summary.TotalKV, file.TotalKvs) summary.CollectSuccessUnit(summary.TotalBytes, file.TotalBytes) @@ -290,7 +326,7 @@ func (importer *FileImporter) downloadSST( func (importer *FileImporter) ingestSST( sstMeta *import_sstpb.SSTMeta, regionInfo *RegionInfo, -) error { +) (*import_sstpb.IngestResponse, error) { leader := regionInfo.Leader if leader == nil { leader = regionInfo.Region.GetPeers()[0] @@ -304,26 +340,12 @@ func (importer *FileImporter) ingestSST( Context: reqCtx, Sst: sstMeta, } - log.Debug("download SST", zap.Stringer("sstMeta", sstMeta)) + log.Debug("ingest SST", zap.Stringer("sstMeta", sstMeta), zap.Reflect("leader", leader)) resp, err := importer.importClient.IngestSST(importer.ctx, leader.GetStoreId(), req) if err != nil { - if strings.Contains(err.Error(), "RegionNotFound") { - return errors.Trace(errRegionNotFound) - } - return errors.Trace(err) - } - respErr := resp.GetError() - if respErr != nil { - log.Debug("ingest sst resp error", zap.Stringer("error", respErr)) - if respErr.GetKeyNotInRegion() != nil { - return errors.Trace(errKeyNotInRegion) - } - if respErr.GetNotLeader() != nil { - return errors.Trace(errNotLeader) - } - return errors.Wrap(errResp, respErr.String()) + return nil, errors.Trace(err) } - return nil + return resp, nil } func checkRegionEpoch(new, old *RegionInfo) bool { @@ -347,5 +369,5 @@ func extractDownloadSSTError(e error) error { case strings.Contains(e.Error(), "Cannot read"): err = errCannotRead } - return errors.Trace(err) + return errors.Annotatef(err, "%s", e) } diff --git a/tests/_utils/run_services b/tests/_utils/run_services index 9ae0999bd..1118d7ccc 100644 --- a/tests/_utils/run_services +++ b/tests/_utils/run_services @@ -39,6 +39,7 @@ start_services() { stop_services echo "Starting PD..." + mkdir -p "$TEST_DIR/pd" bin/pd-server \ --client-urls "http://$PD_ADDR" \ --log-file "$TEST_DIR/pd.log" \ @@ -56,6 +57,7 @@ start_services() { echo "Starting TiKV..." for i in $(seq $TIKV_COUNT); do + mkdir -p "$TEST_DIR/tikv${i}" bin/tikv-server \ --pd "$PD_ADDR" \ -A "$TIKV_ADDR$i" \ @@ -64,7 +66,16 @@ start_services() { -C "tests/config/tikv.toml" \ -s "$TEST_DIR/tikv${i}" & done - sleep 1 + + echo "Waiting initializing TiKV..." + while ! curl -sf "http://$PD_ADDR/pd/api/v1/cluster/status" | grep '"is_initialized": true'; do + i=$((i+1)) + if [ "$i" -gt 10 ]; then + echo 'Failed to initialize TiKV cluster' + exit 1 + fi + sleep 3 + done echo "Starting TiDB..." bin/tidb-server \ @@ -116,7 +127,8 @@ start_services_withTLS() { --data-dir "$TEST_DIR/pd" & # wait until PD is online... i=0 - while ! curl -k --cert $1/certificates/client.pem \ + while ! 
curl --cacert $1/certificates/ca.pem \ + --cert $1/certificates/client.pem \ --key $1/certificates/client-key.pem \ -o /dev/null -sf "https://$PD_ADDR/pd/api/v1/version"; do i=$((i+1)) @@ -136,7 +148,19 @@ start_services_withTLS() { -C "$TIKV_CONFIG" \ -s "$TEST_DIR/tikv${i}" & done - sleep 1 + + echo "Waiting initializing TiKV..." + while ! curl --cacert $1/certificates/ca.pem \ + --cert $1/certificates/client.pem \ + --key $1/certificates/client-key.pem \ + -sf "https://$PD_ADDR/pd/api/v1/cluster/status" | grep '"is_initialized": true'; do + i=$((i+1)) + if [ "$i" -gt 10 ]; then + echo 'Failed to initialize TiKV cluster' + exit 1 + fi + sleep 3 + done echo "Starting TiDB..." bin/tidb-server \ @@ -149,7 +173,8 @@ start_services_withTLS() { echo "Verifying TiDB is started..." i=0 - while ! curl -k --cert $1/certificates/client.pem \ + while ! curl --cacert $1/certificates/ca.pem \ + --cert $1/certificates/client.pem \ --key $1/certificates/client-key.pem \ -o /dev/null -sf "https://$TIDB_IP:10080/status"; do i=$((i+1)) @@ -161,7 +186,8 @@ start_services_withTLS() { done i=0 - while ! curl -k --cert $1/certificates/client.pem \ + while ! curl --cacert $1/certificates/ca.pem \ + --cert $1/certificates/client.pem \ --key $1/certificates/client-key.pem \ "https://$PD_ADDR/pd/api/v1/cluster/status" -sf | grep -q "\"is_initialized\": true"; do i=$((i+1)) diff --git a/tests/br_table_partition/run.sh b/tests/br_table_partition/run.sh index fe0ce874b..ce7fe1df1 100755 --- a/tests/br_table_partition/run.sh +++ b/tests/br_table_partition/run.sh @@ -30,25 +30,23 @@ done echo "backup start..." run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 -for i in $(seq $DB_COUNT); do - run_sql "DROP DATABASE $DB${i};" -done +run_sql "DROP DATABASE $DB;" # restore full echo "restore start..." 
run_br restore full -s "local://$TEST_DIR/$DB" --pd $PD_ADDR -for i in $(seq $DB_COUNT); do +for i in $(seq $TABLE_COUNT); do row_count_new[${i}]=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE${i};" | awk '/COUNT/{print $2}') done fail=false -for i in $(seq $DB_COUNT); do +for i in $(seq $TABLE_COUNT); do if [ "${row_count_ori[i]}" != "${row_count_new[i]}" ];then fail=true - echo "TEST: [$TEST_NAME] fail on database $DB${i}" + echo "TEST: [$TEST_NAME] fail on table $DB.$TABLE${i}" fi - echo "database $DB${i} [original] row count: ${row_count_ori[i]}, [after br] row count: ${row_count_new[i]}" + echo "table $DB.$TABLE${i} [original] row count: ${row_count_ori[i]}, [after br] row count: ${row_count_new[i]}" done if $fail; then From 09fb715871d26cce9c8e995675461c90add8da47 Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Thu, 5 Mar 2020 14:47:36 +0800 Subject: [PATCH 14/46] conn, restore: paginate scan regions (#165) * conn, restore: paginate scan regions Signed-off-by: Neil Shen * tests: large timeout Signed-off-by: Neil Shen --- pkg/conn/conn.go | 9 +++- pkg/restore/import.go | 5 +- pkg/restore/split.go | 7 ++- pkg/restore/split_test.go | 24 +++++---- pkg/restore/util.go | 33 ++++++++++++ pkg/restore/util_test.go | 106 ++++++++++++++++++++++++++++++++++++++ tests/_utils/run_services | 16 +++--- 7 files changed, 176 insertions(+), 24 deletions(-) diff --git a/pkg/conn/conn.go b/pkg/conn/conn.go index 6869c1199..2ab0a0232 100644 --- a/pkg/conn/conn.go +++ b/pkg/conn/conn.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/keepalive" "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/utils" ) const ( @@ -37,6 +38,7 @@ const ( clusterVersionPrefix = "pd/api/v1/config/cluster-version" regionCountPrefix = "pd/api/v1/stats/region" schdulerPrefix = "pd/api/v1/schedulers" + maxMsgSize = int(128 * utils.MB) // pd.ScanRegion may return a large response ) // Mgr manages connections to a TiDB cluster. 
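This commit attacks oversized ScanRegions responses from both ends: the maxMsgSize constant above raises the gRPC message cap (wired into pd.NewClient just below), and paginateScanRegion, later in this patch, fetches regions in fixed-size pages, resuming each page from the previous page's last end key. A standalone toy version of that pagination loop, with string keys and an in-memory region list standing in for the PD client:

    package main

    import "fmt"

    // region stands in for the *RegionInfo values a PD client returns.
    type region struct{ startKey, endKey string }

    // scanRegions simulates one paged ScanRegions call over a sorted list.
    func scanRegions(all []region, startKey string, limit int) []region {
        page := []region{}
        for _, r := range all {
            if r.startKey >= startKey && len(page) < limit {
                page = append(page, r)
            }
        }
        return page
    }

    func main() {
        all := []region{
            {"a", "b"}, {"b", "c"}, {"c", "d"}, {"d", "e"}, {"e", ""}, // "" = unbounded
        }
        const limit = 2
        startKey := ""
        collected := []region{}
        for {
            batch := scanRegions(all, startKey, limit)
            collected = append(collected, batch...)
            if len(batch) < limit {
                break // a short page means there are no more regions
            }
            // Resume the next page from the last region's end key.
            startKey = batch[len(batch)-1].endKey
            if startKey == "" {
                break // reached the unbounded tail: whole key space scanned
            }
        }
        fmt.Println("regions:", len(collected)) // regions: 5
    }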
@@ -125,7 +127,12 @@ func NewMgr( return nil, errors.Annotatef(failure, "pd address (%s) not available, please check network", pdAddrs) } - pdClient, err := pd.NewClient(addrs, securityOption) + maxCallMsgSize := []grpc.DialOption{ + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)), + grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(maxMsgSize)), + } + pdClient, err := pd.NewClient( + addrs, securityOption, pd.WithGRPCDialOptions(maxCallMsgSize...)) if err != nil { log.Error("fail to create pd client", zap.Error(err)) return nil, err diff --git a/pkg/restore/import.go b/pkg/restore/import.go index c1e1b5dd8..b8928418d 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -23,6 +23,7 @@ import ( ) const importScanRegionTime = 10 * time.Second +const scanRegionPaginationLimit = int(128) // ImporterClient is used to import a file to TiKV type ImporterClient interface { @@ -171,7 +172,8 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul ctx, cancel := context.WithTimeout(importer.ctx, importScanRegionTime) defer cancel() // Scan regions covered by the file range - regionInfos, errScanRegion := importer.metaClient.ScanRegions(ctx, startKey, endKey, 0) + regionInfos, errScanRegion := paginateScanRegion( + ctx, importer.metaClient, startKey, endKey, scanRegionPaginationLimit) if errScanRegion != nil { return errors.Trace(errScanRegion) } @@ -199,6 +201,7 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul zap.Error(errDownload)) return errDownload } + ingestResp, errIngest := importer.ingestSST(downloadMeta, info) ingestRetry: for errIngest == nil { diff --git a/pkg/restore/split.go b/pkg/restore/split.go index 378e256c6..64bf83e8c 100644 --- a/pkg/restore/split.go +++ b/pkg/restore/split.go @@ -89,10 +89,9 @@ func (rs *RegionSplitter) Split( scatterRegions := make([]*RegionInfo, 0) SplitRegions: for i := 0; i < SplitRetryTimes; i++ { - var regions []*RegionInfo - regions, err = rs.client.ScanRegions(ctx, minKey, maxKey, 0) - if err != nil { - return errors.Trace(err) + regions, err1 := paginateScanRegion(ctx, rs.client, minKey, maxKey, scanRegionPaginationLimit) + if err1 != nil { + return errors.Trace(err1) } if len(regions) == 0 { log.Warn("cannot scan any region") diff --git a/pkg/restore/split_test.go b/pkg/restore/split_test.go index 3ace5b8c8..a0dbc3678 100644 --- a/pkg/restore/split_test.go +++ b/pkg/restore/split_test.go @@ -10,6 +10,7 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule/placement" "github.com/pingcap/tidb/util/codec" ) @@ -18,13 +19,19 @@ type testClient struct { mu sync.RWMutex stores map[uint64]*metapb.Store regions map[uint64]*RegionInfo + regionsInfo *core.RegionsInfo // For now it's only used in ScanRegions nextRegionID uint64 } func newTestClient(stores map[uint64]*metapb.Store, regions map[uint64]*RegionInfo, nextRegionID uint64) *testClient { + regionsInfo := core.NewRegionsInfo() + for _, regionInfo := range regions { + regionsInfo.AddRegion(core.NewRegionInfo(regionInfo.Region, regionInfo.Leader)) + } return &testClient{ stores: stores, regions: regions, + regionsInfo: regionsInfo, nextRegionID: nextRegionID, } } @@ -142,16 +149,13 @@ func (c *testClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.Ge } func (c *testClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) 
([]*RegionInfo, error) { - regions := make([]*RegionInfo, 0) - for _, region := range c.regions { - if limit > 0 && len(regions) >= limit { - break - } - if (len(region.Region.GetEndKey()) != 0 && bytes.Compare(region.Region.GetEndKey(), key) <= 0) || - bytes.Compare(region.Region.GetStartKey(), endKey) > 0 { - continue - } - regions = append(regions, region) + infos := c.regionsInfo.ScanRange(key, endKey, limit) + regions := make([]*RegionInfo, 0, len(infos)) + for _, info := range infos { + regions = append(regions, &RegionInfo{ + Region: info.GetMeta(), + Leader: info.GetLeader(), + }) } return regions, nil } diff --git a/pkg/restore/util.go b/pkg/restore/util.go index 7b70c9806..0936c1085 100644 --- a/pkg/restore/util.go +++ b/pkg/restore/util.go @@ -3,6 +3,7 @@ package restore import ( "bytes" "context" + "encoding/hex" "strings" "time" @@ -324,3 +325,35 @@ func encodeKeyPrefix(key []byte) []byte { encodedPrefix = append(encodedPrefix, codec.EncodeBytes([]byte{}, key[:len(key)-ungroupedLen])...) return append(encodedPrefix[:len(encodedPrefix)-9], key[len(key)-ungroupedLen:]...) } + +// paginateScanRegion scan regions with a limit pagination and +// return all regions at once. +// It reduces max gRPC message size. +func paginateScanRegion( + ctx context.Context, client SplitClient, startKey, endKey []byte, limit int, +) ([]*RegionInfo, error) { + if len(endKey) != 0 && bytes.Compare(startKey, endKey) >= 0 { + return nil, errors.Errorf("startKey >= endKey, startKey %s, endkey %s", + hex.EncodeToString(startKey), hex.EncodeToString(endKey)) + } + + regions := []*RegionInfo{} + for { + batch, err := client.ScanRegions(ctx, startKey, endKey, limit) + if err != nil { + return nil, errors.Trace(err) + } + regions = append(regions, batch...) + if len(batch) < limit { + // No more region + break + } + startKey = batch[len(batch)-1].Region.GetEndKey() + if len(startKey) == 0 || + (len(endKey) > 0 && bytes.Compare(startKey, endKey) >= 0) { + // All key space have scanned + break + } + } + return regions, nil +} diff --git a/pkg/restore/util_test.go b/pkg/restore/util_test.go index bc4da9168..1b5e86b96 100644 --- a/pkg/restore/util_test.go +++ b/pkg/restore/util_test.go @@ -1,11 +1,15 @@ package restore import ( + "context" + "encoding/binary" + . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/util/codec" ) var _ = Suite(&testRestoreUtilSuite{}) @@ -103,3 +107,105 @@ func (s *testRestoreUtilSuite) TestValidateFileRanges(c *C) { ) c.Assert(err, ErrorMatches, "unexpected rewrite rules") } + +func (s *testRestoreUtilSuite) TestPaginateScanRegion(c *C) { + peers := make([]*metapb.Peer, 1) + peers[0] = &metapb.Peer{ + Id: 1, + StoreId: 1, + } + stores := make(map[uint64]*metapb.Store) + stores[1] = &metapb.Store{ + Id: 1, + } + + makeRegions := func(num uint64) (map[uint64]*RegionInfo, []*RegionInfo) { + regionsMap := make(map[uint64]*RegionInfo, num) + regions := make([]*RegionInfo, 0, num) + endKey := make([]byte, 8) + for i := uint64(0); i < num-1; i++ { + ri := &RegionInfo{ + Region: &metapb.Region{ + Id: i + 1, + Peers: peers, + }, + } + + if i != 0 { + startKey := make([]byte, 8) + binary.BigEndian.PutUint64(startKey, i) + ri.Region.StartKey = codec.EncodeBytes([]byte{}, startKey) + } + endKey = make([]byte, 8) + binary.BigEndian.PutUint64(endKey, i+1) + ri.Region.EndKey = codec.EncodeBytes([]byte{}, endKey) + + regionsMap[i] = ri + regions = append(regions, ri) + } + + if num == 1 { + endKey = []byte{} + } else { + endKey = codec.EncodeBytes([]byte{}, endKey) + } + ri := &RegionInfo{ + Region: &metapb.Region{ + Id: num, + Peers: peers, + StartKey: endKey, + EndKey: []byte{}, + }, + } + regionsMap[num] = ri + regions = append(regions, ri) + + return regionsMap, regions + } + + ctx := context.Background() + regionMap := make(map[uint64]*RegionInfo) + regions := []*RegionInfo{} + batch, err := paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions) + + regionMap, regions = makeRegions(1) + batch, err = paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions) + + regionMap, regions = makeRegions(2) + batch, err = paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions) + + regionMap, regions = makeRegions(3) + batch, err = paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions) + + regionMap, regions = makeRegions(8) + batch, err = paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{}, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions) + + regionMap, regions = makeRegions(8) + batch, err = paginateScanRegion( + ctx, newTestClient(stores, regionMap, 0), regions[1].Region.StartKey, []byte{}, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions[1:]) + + batch, err = paginateScanRegion( + ctx, newTestClient(stores, regionMap, 0), []byte{}, regions[6].Region.EndKey, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions[:7]) + + batch, err = paginateScanRegion( + ctx, newTestClient(stores, regionMap, 0), regions[1].Region.StartKey, regions[1].Region.EndKey, 3) + c.Assert(err, IsNil) + c.Assert(batch, DeepEquals, regions[1:2]) + + _, err = paginateScanRegion(ctx, newTestClient(stores, regionMap, 0), []byte{2}, []byte{1}, 3) + c.Assert(err, ErrorMatches, "startKey >= endKey.*") +} diff --git a/tests/_utils/run_services b/tests/_utils/run_services index 
1118d7ccc..769b9b22a 100644 --- a/tests/_utils/run_services +++ b/tests/_utils/run_services @@ -48,7 +48,7 @@ start_services() { i=0 while ! curl -o /dev/null -sf "http://$PD_ADDR/pd/api/v1/version"; do i=$((i+1)) - if [ "$i" -gt 10 ]; then + if [ "$i" -gt 20 ]; then echo 'Failed to start PD' exit 1 fi @@ -70,7 +70,7 @@ start_services() { echo "Waiting initializing TiKV..." while ! curl -sf "http://$PD_ADDR/pd/api/v1/cluster/status" | grep '"is_initialized": true'; do i=$((i+1)) - if [ "$i" -gt 10 ]; then + if [ "$i" -gt 20 ]; then echo 'Failed to initialize TiKV cluster' exit 1 fi @@ -90,7 +90,7 @@ start_services() { i=0 while ! curl -o /dev/null -sf "http://$TIDB_IP:10080/status"; do i=$((i+1)) - if [ "$i" -gt 10 ]; then + if [ "$i" -gt 20 ]; then echo 'Failed to start TiDB' exit 1 fi @@ -100,7 +100,7 @@ start_services() { i=0 while ! curl "http://$PD_ADDR/pd/api/v1/cluster/status" -sf | grep -q "\"is_initialized\": true"; do i=$((i+1)) - if [ "$i" -gt 10 ]; then + if [ "$i" -gt 20 ]; then echo 'Failed to bootstrap cluster' exit 1 fi @@ -132,7 +132,7 @@ start_services_withTLS() { --key $1/certificates/client-key.pem \ -o /dev/null -sf "https://$PD_ADDR/pd/api/v1/version"; do i=$((i+1)) - if [ "$i" -gt 10 ]; then + if [ "$i" -gt 20 ]; then echo 'Failed to start PD' exit 1 fi @@ -155,7 +155,7 @@ start_services_withTLS() { --key $1/certificates/client-key.pem \ -sf "https://$PD_ADDR/pd/api/v1/cluster/status" | grep '"is_initialized": true'; do i=$((i+1)) - if [ "$i" -gt 10 ]; then + if [ "$i" -gt 20 ]; then echo 'Failed to initialize TiKV cluster' exit 1 fi @@ -178,7 +178,7 @@ start_services_withTLS() { --key $1/certificates/client-key.pem \ -o /dev/null -sf "https://$TIDB_IP:10080/status"; do i=$((i+1)) - if [ "$i" -gt 10 ]; then + if [ "$i" -gt 20 ]; then echo 'Failed to start TiDB' exit 1 fi @@ -191,7 +191,7 @@ start_services_withTLS() { --key $1/certificates/client-key.pem \ "https://$PD_ADDR/pd/api/v1/cluster/status" -sf | grep -q "\"is_initialized\": true"; do i=$((i+1)) - if [ "$i" -gt 10 ]; then + if [ "$i" -gt 20 ]; then echo 'Failed to bootstrap cluster' exit 1 fi From 82b50160a92ac3f9eab3c3fac23c0940029dcdde Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Fri, 6 Mar 2020 11:02:08 +0800 Subject: [PATCH 15/46] Batch restore (#167) * *: unify Range and RangeTree Signed-off-by: Neil Shen * restore: split restore files into small batch Signed-off-by: Neil Shen * task: set default restore concurrency to 128 Signed-off-by: Neil Shen * restore: unused table worker pool Signed-off-by: Neil Shen * summary: sum up repeated duration and int Signed-off-by: Neil Shen * rtree: move rtree from utils to pkg Signed-off-by: Neil Shen --- cmd/validate.go | 7 +- pkg/backup/client.go | 29 ++-- pkg/backup/push.go | 8 +- pkg/restore/client.go | 20 ++- pkg/restore/range.go | 90 +---------- pkg/restore/range_test.go | 34 ++-- pkg/restore/split.go | 6 +- pkg/restore/split_test.go | 14 +- pkg/restore/util.go | 46 +++++- pkg/rtree/check.go | 31 ++++ pkg/{backup/range_tree.go => rtree/rtree.go} | 127 ++++++++------- .../rtree_test.go} | 146 +++++++++--------- pkg/summary/collector.go | 34 ++-- pkg/summary/collector_test.go | 46 ++++++ pkg/task/backup.go | 5 + pkg/task/backup_raw.go | 3 +- pkg/task/common.go | 7 +- pkg/task/restore.go | 67 ++++++-- 18 files changed, 426 insertions(+), 294 deletions(-) create mode 100644 pkg/rtree/check.go rename pkg/{backup/range_tree.go => rtree/rtree.go} (56%) rename pkg/{backup/range_tree_test.go => rtree/rtree_test.go} (72%) create mode 100644 pkg/summary/collector_test.go 
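The new pkg/rtree package unifies the two range-tree implementations that backup and restore had each grown. The ordering that makes it work is visible in the Less method being moved out of pkg/restore below: a range is "less" than another only when it ends at or before the other's start, so any two overlapping ranges compare equal, and btree's ReplaceOrInsert hands back the item it displaced, which is exactly how InsertRange reports an overlap. A standalone sketch of that trick, assuming only github.com/google/btree:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/google/btree"
    )

    // keyRange mimics rtree.Range's ordering: Less holds only if this range
    // ends at or before the other's start.
    type keyRange struct{ start, end []byte }

    func (r *keyRange) Less(than btree.Item) bool {
        t := than.(*keyRange)
        return len(r.end) != 0 && bytes.Compare(r.end, t.start) <= 0
    }

    func main() {
        tree := btree.New(32)
        // First insert: nothing is displaced, ReplaceOrInsert returns nil.
        if old := tree.ReplaceOrInsert(&keyRange{[]byte("a"), []byte("c")}); old != nil {
            fmt.Println("unexpected overlap")
        }
        // [b, d) overlaps [a, c): neither compares Less than the other, so
        // the btree treats them as equal and returns the displaced range.
        if old := tree.ReplaceOrInsert(&keyRange{[]byte("b"), []byte("d")}); old != nil {
            fmt.Printf("overlapped with [%s, %s)\n",
                old.(*keyRange).start, old.(*keyRange).end)
        }
    }

That nil/non-nil return is what cmd/validate.go checks when scanning a backupmeta for overlapping file ranges.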
diff --git a/cmd/validate.go b/cmd/validate.go index 8bca7e553..d358995a3 100644 --- a/cmd/validate.go +++ b/cmd/validate.go @@ -19,6 +19,7 @@ import ( "go.uber.org/zap" "github.com/pingcap/br/pkg/restore" + "github.com/pingcap/br/pkg/rtree" "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) @@ -166,15 +167,15 @@ func newBackupMetaCommand() *cobra.Command { tables = append(tables, db.Tables...) } // Check if the ranges of files overlapped - rangeTree := restore.NewRangeTree() + rangeTree := rtree.NewRangeTree() for _, file := range files { - if out := rangeTree.InsertRange(restore.Range{ + if out := rangeTree.InsertRange(rtree.Range{ StartKey: file.GetStartKey(), EndKey: file.GetEndKey(), }); out != nil { log.Error( "file ranges overlapped", - zap.Stringer("out", out.(*restore.Range)), + zap.Stringer("out", out), zap.Stringer("file", file), ) } diff --git a/pkg/backup/client.go b/pkg/backup/client.go index fb2960962..1b5b6b645 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/tidb/util/ranger" "go.uber.org/zap" + "github.com/pingcap/br/pkg/rtree" "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" @@ -179,13 +180,13 @@ func BuildBackupRangeAndSchema( storage kv.Storage, tableFilter *filter.Filter, backupTS uint64, -) ([]Range, *Schemas, error) { +) ([]rtree.Range, *Schemas, error) { info, err := dom.GetSnapshotInfoSchema(backupTS) if err != nil { return nil, nil, errors.Trace(err) } - ranges := make([]Range, 0) + ranges := make([]rtree.Range, 0) backupSchemas := newBackupSchemas() for _, dbInfo := range info.AllSchemas() { // skip system databases @@ -233,7 +234,7 @@ func BuildBackupRangeAndSchema( return nil, nil, err } for _, r := range tableRanges { - ranges = append(ranges, Range{ + ranges = append(ranges, rtree.Range{ StartKey: r.StartKey, EndKey: r.EndKey, }) @@ -295,7 +296,7 @@ func GetBackupDDLJobs(dom *domain.Domain, lastBackupTS, backupTS uint64) ([]*mod // BackupRanges make a backup of the given key ranges. func (bc *Client) BackupRanges( ctx context.Context, - ranges []Range, + ranges []rtree.Range, req kvproto.BackupRequest, updateCh chan<- struct{}, ) error { @@ -389,12 +390,12 @@ func (bc *Client) BackupRange( push := newPushDown(ctx, bc.mgr, len(allStores)) - var results RangeTree + var results rtree.RangeTree results, err = push.pushBackup(req, allStores, updateCh) if err != nil { return err } - log.Info("finish backup push down", zap.Int("Ok", results.len())) + log.Info("finish backup push down", zap.Int("Ok", results.Len())) // Find and backup remaining ranges. // TODO: test fine grained backup. @@ -421,14 +422,14 @@ func (bc *Client) BackupRange( zap.Reflect("EndVersion", req.EndVersion)) } - results.tree.Ascend(func(i btree.Item) bool { - r := i.(*Range) + results.Ascend(func(i btree.Item) bool { + r := i.(*rtree.Range) bc.backupMeta.Files = append(bc.backupMeta.Files, r.Files...) return true }) // Check if there are duplicated files. 
- results.checkDupFiles() + rtree.CheckDupFiles(&results) return nil } @@ -466,13 +467,13 @@ func (bc *Client) fineGrainedBackup( backupTS uint64, rateLimit uint64, concurrency uint32, - rangeTree RangeTree, + rangeTree rtree.RangeTree, updateCh chan<- struct{}, ) error { bo := tikv.NewBackoffer(ctx, backupFineGrainedMaxBackoff) for { // Step1, check whether there is any incomplete range - incomplete := rangeTree.getIncompleteRange(startKey, endKey) + incomplete := rangeTree.GetIncompleteRange(startKey, endKey) if len(incomplete) == 0 { return nil } @@ -480,7 +481,7 @@ func (bc *Client) fineGrainedBackup( // Step2, retry backup on incomplete range respCh := make(chan *kvproto.BackupResponse, 4) errCh := make(chan error, 4) - retry := make(chan Range, 4) + retry := make(chan rtree.Range, 4) max := &struct { ms int @@ -539,7 +540,7 @@ func (bc *Client) fineGrainedBackup( zap.Binary("StartKey", resp.StartKey), zap.Binary("EndKey", resp.EndKey), ) - rangeTree.put(resp.StartKey, resp.EndKey, resp.Files) + rangeTree.Put(resp.StartKey, resp.EndKey, resp.Files) // Update progress updateCh <- struct{}{} @@ -625,7 +626,7 @@ func onBackupResponse( func (bc *Client) handleFineGrained( ctx context.Context, bo *tikv.Backoffer, - rg Range, + rg rtree.Range, lastBackupTS uint64, backupTS uint64, rateLimit uint64, diff --git a/pkg/backup/push.go b/pkg/backup/push.go index 23c4f01d4..803a8ec92 100644 --- a/pkg/backup/push.go +++ b/pkg/backup/push.go @@ -9,6 +9,8 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "go.uber.org/zap" + + "github.com/pingcap/br/pkg/rtree" ) // pushDown warps a backup task. @@ -35,9 +37,9 @@ func (push *pushDown) pushBackup( req backup.BackupRequest, stores []*metapb.Store, updateCh chan<- struct{}, -) (RangeTree, error) { +) (rtree.RangeTree, error) { // Push down backup tasks to all tikv instances. - res := newRangeTree() + res := rtree.NewRangeTree() wg := new(sync.WaitGroup) for _, s := range stores { storeID := s.GetId() @@ -82,7 +84,7 @@ func (push *pushDown) pushBackup( } if resp.GetError() == nil { // None error means range has been backuped successfully. - res.put( + res.Put( resp.GetStartKey(), resp.GetEndKey(), resp.GetFiles()) // Update progress diff --git a/pkg/restore/client.go b/pkg/restore/client.go index a7e5c4d08..46cdcaa24 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -39,11 +39,10 @@ type Client struct { ctx context.Context cancel context.CancelFunc - pdClient pd.Client - fileImporter FileImporter - workerPool *utils.WorkerPool - tableWorkerPool *utils.WorkerPool - tlsConf *tls.Config + pdClient pd.Client + fileImporter FileImporter + workerPool *utils.WorkerPool + tlsConf *tls.Config databases map[string]*utils.Database ddlJobs []*model.Job @@ -70,12 +69,11 @@ func NewRestoreClient( } return &Client{ - ctx: ctx, - cancel: cancel, - pdClient: pdClient, - tableWorkerPool: utils.NewWorkerPool(128, "table"), - db: db, - tlsConf: tlsConf, + ctx: ctx, + cancel: cancel, + pdClient: pdClient, + db: db, + tlsConf: tlsConf, }, nil } diff --git a/pkg/restore/range.go b/pkg/restore/range.go index f3914539e..97e2469dc 100644 --- a/pkg/restore/range.go +++ b/pkg/restore/range.go @@ -1,45 +1,19 @@ package restore import ( - "bytes" - "fmt" - - "github.com/google/btree" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "github.com/pingcap/tidb/tablecodec" "go.uber.org/zap" -) - -// Range represents a range of keys. 
-type Range struct { - StartKey []byte - EndKey []byte -} - -// String formats a range to a string -func (r *Range) String() string { - return fmt.Sprintf("[%x %x]", r.StartKey, r.EndKey) -} -// Less compares a range with a btree.Item -func (r *Range) Less(than btree.Item) bool { - t := than.(*Range) - return len(r.EndKey) != 0 && bytes.Compare(r.EndKey, t.StartKey) <= 0 -} - -// contains returns if a key is included in the range. -func (r *Range) contains(key []byte) bool { - start, end := r.StartKey, r.EndKey - return bytes.Compare(key, start) >= 0 && - (len(end) == 0 || bytes.Compare(key, end) < 0) -} + "github.com/pingcap/br/pkg/rtree" +) // sortRanges checks if the range overlapped and sort them -func sortRanges(ranges []Range, rewriteRules *RewriteRules) ([]Range, error) { - rangeTree := NewRangeTree() +func sortRanges(ranges []rtree.Range, rewriteRules *RewriteRules) ([]rtree.Range, error) { + rangeTree := rtree.NewRangeTree() for _, rg := range ranges { if rewriteRules != nil { startID := tablecodec.DecodeTableID(rg.StartKey) @@ -77,64 +51,10 @@ func sortRanges(ranges []Range, rewriteRules *RewriteRules) ([]Range, error) { return nil, errors.Errorf("ranges overlapped: %s, %s", out, rg) } } - sortedRanges := make([]Range, 0, len(ranges)) - rangeTree.Ascend(func(rg *Range) bool { - if rg == nil { - return false - } - sortedRanges = append(sortedRanges, *rg) - return true - }) + sortedRanges := rangeTree.GetSortedRanges() return sortedRanges, nil } -// RangeTree stores the ranges in an orderly manner. -// All the ranges it stored do not overlap. -type RangeTree struct { - tree *btree.BTree -} - -// NewRangeTree returns a new RangeTree. -func NewRangeTree() *RangeTree { - return &RangeTree{tree: btree.New(32)} -} - -// Find returns nil or a range in the range tree -func (rt *RangeTree) Find(key []byte) *Range { - var ret *Range - r := &Range{ - StartKey: key, - } - rt.tree.DescendLessOrEqual(r, func(i btree.Item) bool { - ret = i.(*Range) - return false - }) - if ret == nil || !ret.contains(key) { - return nil - } - return ret -} - -// InsertRange inserts ranges into the range tree. -// it returns true if all ranges inserted successfully. -// it returns false if there are some overlapped ranges. -func (rt *RangeTree) InsertRange(rg Range) btree.Item { - return rt.tree.ReplaceOrInsert(&rg) -} - -// RangeIterator allows callers of Ascend to iterate in-order over portions of -// the tree. When this function returns false, iteration will stop and the -// associated Ascend function will immediately return. -type RangeIterator func(rg *Range) bool - -// Ascend calls the iterator for every value in the tree within [first, last], -// until the iterator returns false. -func (rt *RangeTree) Ascend(iterator RangeIterator) { - rt.tree.Ascend(func(i btree.Item) bool { - return iterator(i.(*Range)) - }) -} - // RegionInfo includes a region and the leader of the region. type RegionInfo struct { Region *metapb.Region diff --git a/pkg/restore/range_test.go b/pkg/restore/range_test.go index a9edc5b82..371e79ebb 100644 --- a/pkg/restore/range_test.go +++ b/pkg/restore/range_test.go @@ -6,6 +6,8 @@ import ( . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/tidb/tablecodec" + + "github.com/pingcap/br/pkg/rtree" ) type testRangeSuite struct{} @@ -21,8 +23,8 @@ var RangeEquals Checker = &rangeEquals{ } func (checker *rangeEquals) Check(params []interface{}, names []string) (result bool, error string) { - obtained := params[0].([]Range) - expected := params[1].([]Range) + obtained := params[0].([]rtree.Range) + expected := params[1].([]rtree.Range) if len(obtained) != len(expected) { return false, "" } @@ -44,20 +46,20 @@ func (s *testRangeSuite) TestSortRange(c *C) { Table: make([]*import_sstpb.RewriteRule, 0), Data: dataRules, } - ranges1 := []Range{ - {append(tablecodec.GenTableRecordPrefix(1), []byte("aaa")...), - append(tablecodec.GenTableRecordPrefix(1), []byte("bbb")...)}, + ranges1 := []rtree.Range{ + {StartKey: append(tablecodec.GenTableRecordPrefix(1), []byte("aaa")...), + EndKey: append(tablecodec.GenTableRecordPrefix(1), []byte("bbb")...), Files: nil}, } rs1, err := sortRanges(ranges1, rewriteRules) c.Assert(err, IsNil, Commentf("sort range1 failed: %v", err)) - c.Assert(rs1, RangeEquals, []Range{ - {append(tablecodec.GenTableRecordPrefix(4), []byte("aaa")...), - append(tablecodec.GenTableRecordPrefix(4), []byte("bbb")...)}, + c.Assert(rs1, RangeEquals, []rtree.Range{ + {StartKey: append(tablecodec.GenTableRecordPrefix(4), []byte("aaa")...), + EndKey: append(tablecodec.GenTableRecordPrefix(4), []byte("bbb")...), Files: nil}, }) - ranges2 := []Range{ - {append(tablecodec.GenTableRecordPrefix(1), []byte("aaa")...), - append(tablecodec.GenTableRecordPrefix(2), []byte("bbb")...)}, + ranges2 := []rtree.Range{ + {StartKey: append(tablecodec.GenTableRecordPrefix(1), []byte("aaa")...), + EndKey: append(tablecodec.GenTableRecordPrefix(2), []byte("bbb")...), Files: nil}, } _, err = sortRanges(ranges2, rewriteRules) c.Assert(err, ErrorMatches, ".*table id does not match.*") @@ -66,10 +68,10 @@ func (s *testRangeSuite) TestSortRange(c *C) { rewriteRules1 := initRewriteRules() rs3, err := sortRanges(ranges3, rewriteRules1) c.Assert(err, IsNil, Commentf("sort range1 failed: %v", err)) - c.Assert(rs3, RangeEquals, []Range{ - {[]byte("bbd"), []byte("bbf")}, - {[]byte("bbf"), []byte("bbj")}, - {[]byte("xxa"), []byte("xxe")}, - {[]byte("xxe"), []byte("xxz")}, + c.Assert(rs3, RangeEquals, []rtree.Range{ + {StartKey: []byte("bbd"), EndKey: []byte("bbf"), Files: nil}, + {StartKey: []byte("bbf"), EndKey: []byte("bbj"), Files: nil}, + {StartKey: []byte("xxa"), EndKey: []byte("xxe"), Files: nil}, + {StartKey: []byte("xxe"), EndKey: []byte("xxz"), Files: nil}, }) } diff --git a/pkg/restore/split.go b/pkg/restore/split.go index 64bf83e8c..4642ab853 100644 --- a/pkg/restore/split.go +++ b/pkg/restore/split.go @@ -13,6 +13,8 @@ import ( "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" + + "github.com/pingcap/br/pkg/rtree" ) // Constants for split retry machinery. @@ -54,7 +56,7 @@ type OnSplitFunc func(key [][]byte) // note: all ranges and rewrite rules must have raw key. 
func (rs *RegionSplitter) Split( ctx context.Context, - ranges []Range, + ranges []rtree.Range, rewriteRules *RewriteRules, onSplit OnSplitFunc, ) error { @@ -252,7 +254,7 @@ func (rs *RegionSplitter) splitAndScatterRegions( // getSplitKeys checks if the regions should be split by the new prefix of the rewrites rule and the end key of // the ranges, groups the split keys by region id -func getSplitKeys(rewriteRules *RewriteRules, ranges []Range, regions []*RegionInfo) map[uint64][][]byte { +func getSplitKeys(rewriteRules *RewriteRules, ranges []rtree.Range, regions []*RegionInfo) map[uint64][][]byte { splitKeyMap := make(map[uint64][][]byte) checkKeys := make([][]byte, 0) for _, rule := range rewriteRules.Table { diff --git a/pkg/restore/split_test.go b/pkg/restore/split_test.go index a0dbc3678..61896b114 100644 --- a/pkg/restore/split_test.go +++ b/pkg/restore/split_test.go @@ -13,6 +13,8 @@ import ( "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule/placement" "github.com/pingcap/tidb/util/codec" + + "github.com/pingcap/br/pkg/rtree" ) type testClient struct { @@ -238,21 +240,21 @@ func initTestClient() *testClient { } // range: [aaa, aae), [aae, aaz), [ccd, ccf), [ccf, ccj) -func initRanges() []Range { - var ranges [4]Range - ranges[0] = Range{ +func initRanges() []rtree.Range { + var ranges [4]rtree.Range + ranges[0] = rtree.Range{ StartKey: []byte("aaa"), EndKey: []byte("aae"), } - ranges[1] = Range{ + ranges[1] = rtree.Range{ StartKey: []byte("aae"), EndKey: []byte("aaz"), } - ranges[2] = Range{ + ranges[2] = rtree.Range{ StartKey: []byte("ccd"), EndKey: []byte("ccf"), } - ranges[3] = Range{ + ranges[3] = rtree.Range{ StartKey: []byte("ccf"), EndKey: []byte("ccj"), } diff --git a/pkg/restore/util.go b/pkg/restore/util.go index 0936c1085..eb59e625f 100644 --- a/pkg/restore/util.go +++ b/pkg/restore/util.go @@ -19,6 +19,7 @@ import ( "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" + "github.com/pingcap/br/pkg/rtree" "github.com/pingcap/br/pkg/summary" ) @@ -154,8 +155,8 @@ func getSSTMetaFromFile( func ValidateFileRanges( files []*backup.File, rewriteRules *RewriteRules, -) ([]Range, error) { - ranges := make([]Range, 0, len(files)) +) ([]rtree.Range, error) { + ranges := make([]rtree.Range, 0, len(files)) fileAppended := make(map[string]bool) for _, file := range files { @@ -174,7 +175,7 @@ func ValidateFileRanges( zap.Stringer("file", file)) return nil, errors.New("table ids dont match") } - ranges = append(ranges, Range{ + ranges = append(ranges, rtree.Range{ StartKey: file.GetStartKey(), EndKey: file.GetEndKey(), }) @@ -184,6 +185,39 @@ func ValidateFileRanges( return ranges, nil } +// AttachFilesToRanges attach files to ranges. +// Panic if range is overlapped or no range for files. 
+func AttachFilesToRanges( + files []*backup.File, + ranges []rtree.Range, +) []rtree.Range { + rangeTree := rtree.NewRangeTree() + for _, rg := range ranges { + rangeTree.Update(rg) + } + for _, f := range files { + + rg := rangeTree.Find(&rtree.Range{ + StartKey: f.GetStartKey(), + EndKey: f.GetEndKey(), + }) + if rg == nil { + log.Fatal("range not found", + zap.Binary("startKey", f.GetStartKey()), + zap.Binary("endKey", f.GetEndKey())) + } + file := *f + rg.Files = append(rg.Files, &file) + } + if rangeTree.Len() != len(ranges) { + log.Fatal("ranges overlapped", + zap.Int("ranges length", len(ranges)), + zap.Int("tree length", rangeTree.Len())) + } + sortedRanges := rangeTree.GetSortedRanges() + return sortedRanges +} + // ValidateFileRewriteRule uses rewrite rules to validate the ranges of a file func ValidateFileRewriteRule(file *backup.File, rewriteRules *RewriteRules) error { // Check if the start key has a matched rewrite key @@ -276,7 +310,7 @@ func truncateTS(key []byte) []byte { func SplitRanges( ctx context.Context, client *Client, - ranges []Range, + ranges []rtree.Range, rewriteRules *RewriteRules, updateCh chan<- struct{}, ) error { @@ -300,6 +334,10 @@ func rewriteFileKeys(file *backup.File, rewriteRules *RewriteRules) (startKey, e if startID == endID { startKey, rule = rewriteRawKey(file.GetStartKey(), rewriteRules) if rewriteRules != nil && rule == nil { + log.Error("cannot find rewrite rule", + zap.Binary("startKey", file.GetStartKey()), + zap.Reflect("rewrite table", rewriteRules.Table), + zap.Reflect("rewrite data", rewriteRules.Data)) err = errors.New("cannot find rewrite rule for start key") return } diff --git a/pkg/rtree/check.go b/pkg/rtree/check.go new file mode 100644 index 000000000..08c98d2f4 --- /dev/null +++ b/pkg/rtree/check.go @@ -0,0 +1,31 @@ +package rtree + +import ( + "encoding/hex" + + "github.com/google/btree" + "github.com/pingcap/log" + "go.uber.org/zap" +) + +// CheckDupFiles checks if there are any files are duplicated. +func CheckDupFiles(rangeTree *RangeTree) { + // Name -> SHA256 + files := make(map[string][]byte) + rangeTree.Ascend(func(i btree.Item) bool { + rg := i.(*Range) + for _, f := range rg.Files { + old, ok := files[f.Name] + if ok { + log.Error("dup file", + zap.String("Name", f.Name), + zap.String("SHA256_1", hex.EncodeToString(old)), + zap.String("SHA256_2", hex.EncodeToString(f.Sha256)), + ) + } else { + files[f.Name] = f.Sha256 + } + } + return true + }) +} diff --git a/pkg/backup/range_tree.go b/pkg/rtree/rtree.go similarity index 56% rename from pkg/backup/range_tree.go rename to pkg/rtree/rtree.go index 4d4b3c695..e3c136803 100644 --- a/pkg/backup/range_tree.go +++ b/pkg/rtree/rtree.go @@ -1,8 +1,21 @@ -package backup +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
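
The `AttachFilesToRanges` helper above leans on the rtree API that the rest of this patch extracts into `pkg/rtree`. A minimal sketch (illustrative only, not part of the patch) of the same find-and-attach flow:

```go
// Sketch: match each backup file to the range containing its start key,
// mirroring what AttachFilesToRanges does above.
package main

import (
	"fmt"

	"github.com/pingcap/kvproto/pkg/backup"

	"github.com/pingcap/br/pkg/rtree"
)

func main() {
	tree := rtree.NewRangeTree()
	tree.Update(rtree.Range{StartKey: []byte("a"), EndKey: []byte("c")})
	tree.Update(rtree.Range{StartKey: []byte("c"), EndKey: []byte("e")})

	f := &backup.File{Name: "1.sst", StartKey: []byte("b"), EndKey: []byte("c")}
	// Find returns a pointer into the tree, so appending to rg.Files
	// mutates the stored range in place, exactly as the helper relies on.
	if rg := tree.Find(&rtree.Range{StartKey: f.StartKey, EndKey: f.EndKey}); rg != nil {
		rg.Files = append(rg.Files, f)
		fmt.Println("attached", f.Name, "to", rg)
	}
}
```
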
+ +package rtree import ( "bytes" - "encoding/hex" + "fmt" "github.com/google/btree" "github.com/pingcap/kvproto/pkg/backup" @@ -15,10 +28,15 @@ type Range struct { StartKey []byte EndKey []byte Files []*backup.File - Error *backup.Error } -func (rg *Range) intersect( +// String formats a range to a string +func (rg *Range) String() string { + return fmt.Sprintf("[%x %x]", rg.StartKey, rg.EndKey) +} + +// Intersect returns +func (rg *Range) Intersect( start, end []byte, ) (subStart, subEnd []byte, isIntersect bool) { // empty mean the max end key @@ -49,8 +67,8 @@ func (rg *Range) intersect( return } -// contains check if the range contains the given key, [start, end) -func (rg *Range) contains(key []byte) bool { +// Contains check if the range contains the given key, [start, end) +func (rg *Range) Contains(key []byte) bool { start, end := rg.StartKey, rg.EndKey return bytes.Compare(key, start) >= 0 && (len(end) == 0 || bytes.Compare(key, end) < 0) @@ -65,31 +83,29 @@ func (rg *Range) Less(than btree.Item) bool { var _ btree.Item = &Range{} -// RangeTree is the result of a backup task +// RangeTree is sorted tree for Ranges. +// All the ranges it stored do not overlap. type RangeTree struct { - tree *btree.BTree + *btree.BTree } -func newRangeTree() RangeTree { +// NewRangeTree returns an empty range tree. +func NewRangeTree() RangeTree { return RangeTree{ - tree: btree.New(32), + BTree: btree.New(32), } } -func (rangeTree *RangeTree) len() int { - return rangeTree.tree.Len() -} - -// find is a helper function to find an item that contains the range start +// Find is a helper function to find an item that contains the range start // key. -func (rangeTree *RangeTree) find(rg *Range) *Range { +func (rangeTree *RangeTree) Find(rg *Range) *Range { var ret *Range - rangeTree.tree.DescendLessOrEqual(rg, func(i btree.Item) bool { + rangeTree.DescendLessOrEqual(rg, func(i btree.Item) bool { ret = i.(*Range) return false }) - if ret == nil || !ret.contains(rg.StartKey) { + if ret == nil || !ret.Contains(rg.StartKey) { return nil } @@ -104,13 +120,13 @@ func (rangeTree *RangeTree) getOverlaps(rg *Range) []*Range { // find() will return Range of range_a // and both startKey of range_a and range_b are less than endKey of range_d, // thus they are regarded as overlapped ranges. - found := rangeTree.find(rg) + found := rangeTree.Find(rg) if found == nil { found = rg } var overlaps []*Range - rangeTree.tree.AscendGreaterOrEqual(found, func(i btree.Item) bool { + rangeTree.AscendGreaterOrEqual(found, func(i btree.Item) bool { over := i.(*Range) if len(rg.EndKey) > 0 && bytes.Compare(rg.EndKey, over.StartKey) <= 0 { return false @@ -121,31 +137,57 @@ func (rangeTree *RangeTree) getOverlaps(rg *Range) []*Range { return overlaps } -func (rangeTree *RangeTree) update(rg *Range) { - overlaps := rangeTree.getOverlaps(rg) +// Update inserts range into tree and delete overlapping ranges. +func (rangeTree *RangeTree) Update(rg Range) { + overlaps := rangeTree.getOverlaps(&rg) // Range has backuped, overwrite overlapping range. for _, item := range overlaps { log.Info("delete overlapping range", zap.Binary("StartKey", item.StartKey), zap.Binary("EndKey", item.EndKey), ) - rangeTree.tree.Delete(item) + rangeTree.Delete(item) } - rangeTree.tree.ReplaceOrInsert(rg) + rangeTree.ReplaceOrInsert(&rg) } -func (rangeTree *RangeTree) put( +// Put forms a range and inserts it into tree. 
+func (rangeTree *RangeTree) Put( startKey, endKey []byte, files []*backup.File, ) { - rg := &Range{ + rg := Range{ StartKey: startKey, EndKey: endKey, Files: files, } - rangeTree.update(rg) + rangeTree.Update(rg) } -func (rangeTree *RangeTree) getIncompleteRange( +// InsertRange inserts ranges into the range tree. +// It returns a non-nil range if there are soe overlapped ranges. +func (rangeTree *RangeTree) InsertRange(rg Range) *Range { + out := rangeTree.ReplaceOrInsert(&rg) + if out == nil { + return nil + } + return out.(*Range) +} + +// GetSortedRanges collects and returns sorted ranges. +func (rangeTree *RangeTree) GetSortedRanges() []Range { + sortedRanges := make([]Range, 0, rangeTree.Len()) + rangeTree.Ascend(func(rg btree.Item) bool { + if rg == nil { + return false + } + sortedRanges = append(sortedRanges, *rg.(*Range)) + return true + }) + return sortedRanges +} + +// GetIncompleteRange returns missing range covered by startKey and endKey. +func (rangeTree *RangeTree) GetIncompleteRange( startKey, endKey []byte, ) []Range { if len(startKey) != 0 && bytes.Equal(startKey, endKey) { @@ -155,14 +197,14 @@ func (rangeTree *RangeTree) getIncompleteRange( requsetRange := Range{StartKey: startKey, EndKey: endKey} lastEndKey := startKey pviot := &Range{StartKey: startKey} - if first := rangeTree.find(pviot); first != nil { + if first := rangeTree.Find(pviot); first != nil { pviot.StartKey = first.StartKey } - rangeTree.tree.AscendGreaterOrEqual(pviot, func(i btree.Item) bool { + rangeTree.AscendGreaterOrEqual(pviot, func(i btree.Item) bool { rg := i.(*Range) if bytes.Compare(lastEndKey, rg.StartKey) < 0 { start, end, isIntersect := - requsetRange.intersect(lastEndKey, rg.StartKey) + requsetRange.Intersect(lastEndKey, rg.StartKey) if isIntersect { // There is a gap between the last item and the current item. incomplete = @@ -176,7 +218,7 @@ func (rangeTree *RangeTree) getIncompleteRange( // Check whether we need append the last range if !bytes.Equal(lastEndKey, endKey) && len(lastEndKey) != 0 && (len(endKey) == 0 || bytes.Compare(lastEndKey, endKey) < 0) { - start, end, isIntersect := requsetRange.intersect(lastEndKey, endKey) + start, end, isIntersect := requsetRange.Intersect(lastEndKey, endKey) if isIntersect { incomplete = append(incomplete, Range{StartKey: start, EndKey: end}) @@ -184,24 +226,3 @@ func (rangeTree *RangeTree) getIncompleteRange( } return incomplete } - -func (rangeTree *RangeTree) checkDupFiles() { - // Name -> SHA256 - files := make(map[string][]byte) - rangeTree.tree.Ascend(func(i btree.Item) bool { - rg := i.(*Range) - for _, f := range rg.Files { - old, ok := files[f.Name] - if ok { - log.Error("dup file", - zap.String("Name", f.Name), - zap.String("SHA256_1", hex.EncodeToString(old)), - zap.String("SHA256_2", hex.EncodeToString(f.Sha256)), - ) - } else { - files[f.Name] = f.Sha256 - } - } - return true - }) -} diff --git a/pkg/backup/range_tree_test.go b/pkg/rtree/rtree_test.go similarity index 72% rename from pkg/backup/range_tree_test.go rename to pkg/rtree/rtree_test.go index a7c2d1cd1..f4ec4f201 100644 --- a/pkg/backup/range_tree_test.go +++ b/pkg/rtree/rtree_test.go @@ -1,4 +1,4 @@ -// Copyright 2016 PingCAP, Inc. +// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
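
Since `Update` and `InsertRange` resolve overlaps differently, a short runnable sketch (assumed usage, not from the patch) may help before reading the tests below:

```go
// Sketch of rtree semantics: Update deletes overlapping ranges and wins,
// GetIncompleteRange reports uncovered gaps, InsertRange surfaces conflicts.
package main

import (
	"fmt"

	"github.com/pingcap/br/pkg/rtree"
)

func main() {
	tree := rtree.NewRangeTree()
	tree.Update(rtree.Range{StartKey: []byte("a"), EndKey: []byte("b")})
	tree.Update(rtree.Range{StartKey: []byte("c"), EndKey: []byte("d")})

	// Overlaps [a, b): the old range is deleted, the new one replaces it.
	tree.Update(rtree.Range{StartKey: []byte("a"), EndKey: []byte("c")})
	fmt.Println(tree.Len()) // 2: [a, c) and [c, d)

	// [d, e) is not covered by any stored range, so it comes back as incomplete.
	fmt.Println(tree.GetIncompleteRange([]byte("a"), []byte("e")))

	// InsertRange reports an overlap by returning the displaced range;
	// sortRanges turns any non-nil result into a "ranges overlapped" error.
	if out := tree.InsertRange(rtree.Range{StartKey: []byte("b"), EndKey: []byte("d")}); out != nil {
		fmt.Println("overlapped with:", out)
	}
}
```
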
-package backup +package rtree import ( "fmt" @@ -31,63 +31,19 @@ func newRange(start, end []byte) *Range { } } -func (s *testRangeTreeSuite) TestRangeIntersect(c *C) { - rg := newRange([]byte("a"), []byte("c")) - - start, end, isIntersect := rg.intersect([]byte(""), []byte("")) - c.Assert(isIntersect, Equals, true) - c.Assert(start, DeepEquals, []byte("a")) - c.Assert(end, DeepEquals, []byte("c")) - - start, end, isIntersect = rg.intersect([]byte(""), []byte("a")) - c.Assert(isIntersect, Equals, false) - c.Assert(start, DeepEquals, []byte(nil)) - c.Assert(end, DeepEquals, []byte(nil)) - - start, end, isIntersect = rg.intersect([]byte(""), []byte("b")) - c.Assert(isIntersect, Equals, true) - c.Assert(start, DeepEquals, []byte("a")) - c.Assert(end, DeepEquals, []byte("b")) - - start, end, isIntersect = rg.intersect([]byte("a"), []byte("b")) - c.Assert(isIntersect, Equals, true) - c.Assert(start, DeepEquals, []byte("a")) - c.Assert(end, DeepEquals, []byte("b")) - - start, end, isIntersect = rg.intersect([]byte("aa"), []byte("b")) - c.Assert(isIntersect, Equals, true) - c.Assert(start, DeepEquals, []byte("aa")) - c.Assert(end, DeepEquals, []byte("b")) - - start, end, isIntersect = rg.intersect([]byte("b"), []byte("c")) - c.Assert(isIntersect, Equals, true) - c.Assert(start, DeepEquals, []byte("b")) - c.Assert(end, DeepEquals, []byte("c")) - - start, end, isIntersect = rg.intersect([]byte(""), []byte{1}) - c.Assert(isIntersect, Equals, false) - c.Assert(start, DeepEquals, []byte(nil)) - c.Assert(end, DeepEquals, []byte(nil)) - - start, end, isIntersect = rg.intersect([]byte("c"), []byte("")) - c.Assert(isIntersect, Equals, false) - c.Assert(start, DeepEquals, []byte(nil)) - c.Assert(end, DeepEquals, []byte(nil)) -} - func (s *testRangeTreeSuite) TestRangeTree(c *C) { - rangeTree := newRangeTree() - c.Assert(rangeTree.tree.Get(newRange([]byte(""), []byte(""))), IsNil) + rangeTree := NewRangeTree() + c.Assert(rangeTree.Get(newRange([]byte(""), []byte(""))), IsNil) search := func(key []byte) *Range { - rg := rangeTree.tree.Get(newRange(key, []byte(""))) + rg := rangeTree.Get(newRange(key, []byte(""))) if rg == nil { return nil } return rg.(*Range) } assertIncomplete := func(startKey, endKey []byte, ranges []Range) { - incomplete := rangeTree.getIncompleteRange(startKey, endKey) + incomplete := rangeTree.GetIncompleteRange(startKey, endKey) c.Logf("%#v %#v\n%#v\n%#v\n", startKey, endKey, incomplete, ranges) c.Assert(len(incomplete), Equals, len(ranges)) for idx, rg := range incomplete { @@ -111,8 +67,8 @@ func (s *testRangeTreeSuite) TestRangeTree(c *C) { rangeC := newRange([]byte("c"), []byte("d")) rangeD := newRange([]byte("d"), []byte("")) - rangeTree.update(rangeA) - c.Assert(rangeTree.len(), Equals, 1) + rangeTree.Update(*rangeA) + c.Assert(rangeTree.Len(), Equals, 1) assertIncomplete([]byte("a"), []byte("b"), []Range{}) assertIncomplete([]byte(""), []byte(""), []Range{ @@ -120,8 +76,8 @@ func (s *testRangeTreeSuite) TestRangeTree(c *C) { {StartKey: []byte("b"), EndKey: []byte("")}, }) - rangeTree.update(rangeC) - c.Assert(rangeTree.len(), Equals, 2) + rangeTree.Update(*rangeC) + c.Assert(rangeTree.Len(), Equals, 2) assertIncomplete([]byte("a"), []byte("c"), []Range{ {StartKey: []byte("b"), EndKey: []byte("c")}, }) @@ -136,55 +92,99 @@ func (s *testRangeTreeSuite) TestRangeTree(c *C) { }) c.Assert(search([]byte{}), IsNil) - c.Assert(search([]byte("a")), Equals, rangeA) + c.Assert(search([]byte("a")), DeepEquals, rangeA) c.Assert(search([]byte("b")), IsNil) - c.Assert(search([]byte("c")), 
Equals, rangeC) + c.Assert(search([]byte("c")), DeepEquals, rangeC) c.Assert(search([]byte("d")), IsNil) - rangeTree.update(rangeB) - c.Assert(rangeTree.len(), Equals, 3) - c.Assert(search([]byte("b")), Equals, rangeB) + rangeTree.Update(*rangeB) + c.Assert(rangeTree.Len(), Equals, 3) + c.Assert(search([]byte("b")), DeepEquals, rangeB) assertIncomplete([]byte(""), []byte(""), []Range{ {StartKey: []byte(""), EndKey: []byte("a")}, {StartKey: []byte("d"), EndKey: []byte("")}, }) - rangeTree.update(rangeD) - c.Assert(rangeTree.len(), Equals, 4) - c.Assert(search([]byte("d")), Equals, rangeD) + rangeTree.Update(*rangeD) + c.Assert(rangeTree.Len(), Equals, 4) + c.Assert(search([]byte("d")), DeepEquals, rangeD) assertIncomplete([]byte(""), []byte(""), []Range{ {StartKey: []byte(""), EndKey: []byte("a")}, }) // None incomplete for any range after insert range 0 - rangeTree.update(range0) - c.Assert(rangeTree.len(), Equals, 5) + rangeTree.Update(*range0) + c.Assert(rangeTree.Len(), Equals, 5) // Overwrite range B and C. rangeBD := newRange([]byte("b"), []byte("d")) - rangeTree.update(rangeBD) - c.Assert(rangeTree.len(), Equals, 4) + rangeTree.Update(*rangeBD) + c.Assert(rangeTree.Len(), Equals, 4) assertAllComplete() // Overwrite range BD, c-d should be empty - rangeTree.update(rangeB) - c.Assert(rangeTree.len(), Equals, 4) + rangeTree.Update(*rangeB) + c.Assert(rangeTree.Len(), Equals, 4) assertIncomplete([]byte(""), []byte(""), []Range{ {StartKey: []byte("c"), EndKey: []byte("d")}, }) - rangeTree.update(rangeC) - c.Assert(rangeTree.len(), Equals, 5) + rangeTree.Update(*rangeC) + c.Assert(rangeTree.Len(), Equals, 5) assertAllComplete() } +func (s *testRangeTreeSuite) TestRangeIntersect(c *C) { + rg := newRange([]byte("a"), []byte("c")) + + start, end, isIntersect := rg.Intersect([]byte(""), []byte("")) + c.Assert(isIntersect, Equals, true) + c.Assert(start, DeepEquals, []byte("a")) + c.Assert(end, DeepEquals, []byte("c")) + + start, end, isIntersect = rg.Intersect([]byte(""), []byte("a")) + c.Assert(isIntersect, Equals, false) + c.Assert(start, DeepEquals, []byte(nil)) + c.Assert(end, DeepEquals, []byte(nil)) + + start, end, isIntersect = rg.Intersect([]byte(""), []byte("b")) + c.Assert(isIntersect, Equals, true) + c.Assert(start, DeepEquals, []byte("a")) + c.Assert(end, DeepEquals, []byte("b")) + + start, end, isIntersect = rg.Intersect([]byte("a"), []byte("b")) + c.Assert(isIntersect, Equals, true) + c.Assert(start, DeepEquals, []byte("a")) + c.Assert(end, DeepEquals, []byte("b")) + + start, end, isIntersect = rg.Intersect([]byte("aa"), []byte("b")) + c.Assert(isIntersect, Equals, true) + c.Assert(start, DeepEquals, []byte("aa")) + c.Assert(end, DeepEquals, []byte("b")) + + start, end, isIntersect = rg.Intersect([]byte("b"), []byte("c")) + c.Assert(isIntersect, Equals, true) + c.Assert(start, DeepEquals, []byte("b")) + c.Assert(end, DeepEquals, []byte("c")) + + start, end, isIntersect = rg.Intersect([]byte(""), []byte{1}) + c.Assert(isIntersect, Equals, false) + c.Assert(start, DeepEquals, []byte(nil)) + c.Assert(end, DeepEquals, []byte(nil)) + + start, end, isIntersect = rg.Intersect([]byte("c"), []byte("")) + c.Assert(isIntersect, Equals, false) + c.Assert(start, DeepEquals, []byte(nil)) + c.Assert(end, DeepEquals, []byte(nil)) +} + func BenchmarkRangeTreeUpdate(b *testing.B) { - rangeTree := newRangeTree() + rangeTree := NewRangeTree() for i := 0; i < b.N; i++ { - item := &Range{ + item := Range{ StartKey: []byte(fmt.Sprintf("%20d", i)), EndKey: []byte(fmt.Sprintf("%20d", i+1))} - 
rangeTree.update(item) + rangeTree.Update(item) } } diff --git a/pkg/summary/collector.go b/pkg/summary/collector.go index cd5aac6c6..0fb1dfcf9 100644 --- a/pkg/summary/collector.go +++ b/pkg/summary/collector.go @@ -36,7 +36,9 @@ type LogCollector interface { Summary(name string) } -var collector = newLogCollector() +type logFunc func(msg string, fields ...zap.Field) + +var collector = newLogCollector(log.Info) type logCollector struct { mu sync.Mutex @@ -45,16 +47,21 @@ type logCollector struct { successCosts map[string]time.Duration successData map[string]uint64 failureReasons map[string]error - fields []zap.Field + durations map[string]time.Duration + ints map[string]int + + log logFunc } -func newLogCollector() LogCollector { +func newLogCollector(log logFunc) LogCollector { return &logCollector{ unitCount: 0, - fields: make([]zap.Field, 0), successCosts: make(map[string]time.Duration), successData: make(map[string]uint64), failureReasons: make(map[string]error), + durations: make(map[string]time.Duration), + ints: make(map[string]int), + log: log, } } @@ -97,19 +104,20 @@ func (tc *logCollector) CollectFailureUnit(name string, reason error) { func (tc *logCollector) CollectDuration(name string, t time.Duration) { tc.mu.Lock() defer tc.mu.Unlock() - tc.fields = append(tc.fields, zap.Duration(name, t)) + tc.durations[name] += t } func (tc *logCollector) CollectInt(name string, t int) { tc.mu.Lock() defer tc.mu.Unlock() - tc.fields = append(tc.fields, zap.Int(name, t)) + tc.ints[name] += t } func (tc *logCollector) Summary(name string) { tc.mu.Lock() defer func() { - tc.fields = tc.fields[:0] + tc.durations = make(map[string]time.Duration) + tc.ints = make(map[string]int) tc.successCosts = make(map[string]time.Duration) tc.failureReasons = make(map[string]error) tc.mu.Unlock() @@ -131,11 +139,17 @@ func (tc *logCollector) Summary(name string) { } } - logFields := tc.fields + logFields := make([]zap.Field, 0, len(tc.durations)+len(tc.ints)) + for key, val := range tc.durations { + logFields = append(logFields, zap.Duration(key, val)) + } + for key, val := range tc.ints { + logFields = append(logFields, zap.Int(key, val)) + } + if len(tc.failureReasons) != 0 { names := make([]string, 0, len(tc.failureReasons)) for name := range tc.failureReasons { - // logFields = append(logFields, zap.NamedError(name, reason)) names = append(names, name) } logFields = append(logFields, zap.Strings(msg, names)) @@ -162,7 +176,7 @@ func (tc *logCollector) Summary(name string) { msg += fmt.Sprintf(", %s: %d", name, data) } - log.Info(name+" summary: "+msg, logFields...) + tc.log(name+" summary: "+msg, logFields...) } // SetLogCollector allow pass LogCollector outside diff --git a/pkg/summary/collector_test.go b/pkg/summary/collector_test.go new file mode 100644 index 000000000..6a8704db2 --- /dev/null +++ b/pkg/summary/collector_test.go @@ -0,0 +1,46 @@ +package summary + +import ( + "testing" + "time" + + . "github.com/pingcap/check" + "go.uber.org/zap" +) + +func TestT(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testCollectorSuite{}) + +type testCollectorSuite struct { +} + +func (suit *testCollectorSuite) TestSumDurationInt(c *C) { + fields := []zap.Field{} + logger := func(msg string, fs ...zap.Field) { + fields = append(fields, fs...) 
+ } + col := newLogCollector(logger) + col.CollectDuration("a", time.Second) + col.CollectDuration("b", time.Second) + col.CollectDuration("b", time.Second) + col.CollectInt("c", 2) + col.CollectInt("c", 2) + col.Summary("foo") + + c.Assert(len(fields), Equals, 3) + assertContains := func(field zap.Field) { + for _, f := range fields { + if f.Key == field.Key { + c.Assert(f, DeepEquals, field) + return + } + } + c.Error(fields, "do not contain", field) + } + assertContains(zap.Duration("a", time.Second)) + assertContains(zap.Duration("b", 2*time.Second)) + assertContains(zap.Int("c", 4)) +} diff --git a/pkg/task/backup.go b/pkg/task/backup.go index 2d9468394..4a2b6da12 100644 --- a/pkg/task/backup.go +++ b/pkg/task/backup.go @@ -22,6 +22,8 @@ import ( const ( flagBackupTimeago = "timeago" flagLastBackupTS = "lastbackupts" + + defaultBackupConcurrency = 4 ) // BackupConfig is the configuration specific for backup tasks. @@ -59,6 +61,9 @@ func (cfg *BackupConfig) ParseFromFlags(flags *pflag.FlagSet) error { if err = cfg.Config.ParseFromFlags(flags); err != nil { return errors.Trace(err) } + if cfg.Config.Concurrency == 0 { + cfg.Config.Concurrency = defaultBackupConcurrency + } return nil } diff --git a/pkg/task/backup_raw.go b/pkg/task/backup_raw.go index 51d5267a5..721902afb 100644 --- a/pkg/task/backup_raw.go +++ b/pkg/task/backup_raw.go @@ -11,6 +11,7 @@ import ( "github.com/pingcap/br/pkg/backup" "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/rtree" "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" @@ -102,7 +103,7 @@ func RunBackupRaw(c context.Context, g glue.Glue, cmdName string, cfg *BackupRaw defer summary.Summary(cmdName) - backupRange := backup.Range{StartKey: cfg.StartKey, EndKey: cfg.EndKey} + backupRange := rtree.Range{StartKey: cfg.StartKey, EndKey: cfg.EndKey} // The number of regions need to backup approximateRegions, err := mgr.GetRegionCount(ctx, backupRange.StartKey, backupRange.EndKey) diff --git a/pkg/task/common.go b/pkg/task/common.go index 2de01b326..1e03177cb 100644 --- a/pkg/task/common.go +++ b/pkg/task/common.go @@ -100,9 +100,14 @@ func DefineCommonFlags(flags *pflag.FlagSet) { flags.String(flagKey, "", "Private key path for TLS connection") flags.Uint64(flagRateLimit, 0, "The rate limit of the task, MB/s per node") - flags.Uint32(flagConcurrency, 4, "The size of thread pool on each node that executes the task") flags.Bool(flagChecksum, true, "Run checksum at end of task") + // Default concurrency is different for backup and restore. + // Leave it 0 and let them adjust the value. + flags.Uint32(flagConcurrency, 0, "The size of thread pool on each node that executes the task") + // It may confuse users , so just hide it. + _ = flags.MarkHidden(flagConcurrency) + flags.Uint64(flagRateLimitUnit, utils.MB, "The unit of rate limit") _ = flags.MarkHidden(flagRateLimitUnit) diff --git a/pkg/task/restore.go b/pkg/task/restore.go index ef1ac861f..f5020403a 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -13,6 +13,7 @@ import ( "github.com/pingcap/br/pkg/conn" "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/restore" + "github.com/pingcap/br/pkg/rtree" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" ) @@ -31,6 +32,11 @@ var schedulers = map[string]struct{}{ "shuffle-hot-region-scheduler": {}, } +const ( + defaultRestoreConcurrency = 128 + maxRestoreBatchSizeLimit = 256 +) + // RestoreConfig is the configuration specific for restore tasks. 
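
The hidden `--concurrency` flag now defaults to 0 so that each command can pick its own value afterwards: 4 for backup and 128 for restore, per the constants introduced in this patch. A condensed, runnable sketch of that defaulting; `effectiveConcurrency` is a name invented here, not a function in the patch:

```go
// Hypothetical helper illustrating the per-command defaulting pattern;
// the constants match the patch, the function name is invented.
package main

import "fmt"

const (
	defaultBackupConcurrency  = 4
	defaultRestoreConcurrency = 128
)

func effectiveConcurrency(flagValue uint32, isRestore bool) uint32 {
	if flagValue != 0 {
		return flagValue // an explicit user setting always wins
	}
	if isRestore {
		return defaultRestoreConcurrency
	}
	return defaultBackupConcurrency
}

func main() {
	fmt.Println(effectiveConcurrency(0, false)) // 4
	fmt.Println(effectiveConcurrency(0, true))  // 128
	fmt.Println(effectiveConcurrency(16, true)) // 16
}
```
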
type RestoreConfig struct { Config @@ -52,7 +58,14 @@ func (cfg *RestoreConfig) ParseFromFlags(flags *pflag.FlagSet) error { if err != nil { return errors.Trace(err) } - return cfg.Config.ParseFromFlags(flags) + err = cfg.Config.ParseFromFlags(flags) + if err != nil { + return errors.Trace(err) + } + if cfg.Config.Concurrency == 0 { + cfg.Config.Concurrency = defaultRestoreConcurrency + } + return nil } // RunRestore starts a restore task inside the current goroutine. @@ -123,6 +136,8 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf } summary.CollectInt("restore ranges", len(ranges)) + ranges = restore.AttachFilesToRanges(files, ranges) + // Redirect to log if there is no log file to avoid unreadable output. updateCh := utils.StartProgress( ctx, @@ -131,12 +146,13 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf int64(len(ranges)+len(files)), !cfg.LogProgress) - err = restore.SplitRanges(ctx, client, ranges, rewriteRules, updateCh) + clusterCfg, err := restorePreWork(ctx, client, mgr) if err != nil { - log.Error("split regions failed", zap.Error(err)) return err } + // Do not reset timestamp if we are doing incremental restore, because + // we are not allowed to decrease timestamp. if !client.IsIncremental() { if err = client.ResetTS(cfg.PD); err != nil { log.Error("reset pd TS failed", zap.Error(err)) @@ -144,20 +160,47 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf } } - removedSchedulers, err := restorePreWork(ctx, client, mgr) - if err != nil { - return err + // Restore sst files in batch. + batchSize := int(cfg.Concurrency) + if batchSize > maxRestoreBatchSizeLimit { + batchSize = maxRestoreBatchSizeLimit // 256 + } + for { + if len(ranges) == 0 { + break + } + if batchSize > len(ranges) { + batchSize = len(ranges) + } + var rangeBatch []rtree.Range + ranges, rangeBatch = ranges[batchSize:], ranges[0:batchSize:batchSize] + + // Split regions by the given rangeBatch. + err = restore.SplitRanges(ctx, client, rangeBatch, rewriteRules, updateCh) + if err != nil { + log.Error("split regions failed", zap.Error(err)) + return err + } + + // Collect related files in the given rangeBatch. + fileBatch := make([]*backup.File, 0, 2*len(rangeBatch)) + for _, rg := range rangeBatch { + fileBatch = append(fileBatch, rg.Files...) + } + + // After split, we can restore backup files. + err = client.RestoreFiles(fileBatch, rewriteRules, updateCh) + if err != nil { + break + } } - err = client.RestoreFiles(files, rewriteRules, updateCh) - // always run the post-work even on error, so we don't stuck in the import mode or emptied schedulers - postErr := restorePostWork(ctx, client, mgr, removedSchedulers) + // Always run the post-work even on error, so we don't stuck in the import + // mode or emptied schedulers + err = restorePostWork(ctx, client, mgr, clusterCfg) if err != nil { return err } - if postErr != nil { - return postErr - } // Restore has finished. 
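
The batching loop above uses a full slice expression, `ranges[0:batchSize:batchSize]`, to cap the batch's capacity; that way appends to other slices can never overwrite the not-yet-restored tail of `ranges`. A standalone sketch of the idiom (toy data, not the restore code):

```go
// Demonstrates draining a slice in fixed-size batches with pinned capacity.
package main

import "fmt"

func main() {
	ranges := []string{"r0", "r1", "r2", "r3", "r4"}
	batchSize := 2
	for len(ranges) > 0 {
		if batchSize > len(ranges) {
			batchSize = len(ranges)
		}
		var batch []string
		// The three-index slice fixes cap(batch) == batchSize, so growing
		// batch later forces a copy instead of clobbering ranges.
		ranges, batch = ranges[batchSize:], ranges[0:batchSize:batchSize]
		fmt.Println("restore batch:", batch)
	}
}
```
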
close(updateCh) From 3fc9ee25d1c243ab22e2f289a72e66961f3efe0a Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Sat, 7 Mar 2020 18:33:55 +0800 Subject: [PATCH 16/46] README, docker: add quick start (#181) * README, docker: add quick start Signed-off-by: Neil Shen * cmd: disable some TiDB log Signed-off-by: Neil Shen * docker: build go-ycsb automatically Signed-off-by: Neil Shen * cmd: add TODO about TiDB logs Signed-off-by: Neil Shen --- .dockerignore | 1 + .gitignore | 2 + README.md | 35 ++++++++ cmd/cmd.go | 16 ++-- docker-compose.yaml | 194 ++++++++++++++++++++++++++++++++++++++++ docker/Dockerfile | 24 +++++ docker/config/pd.toml | 18 ++++ docker/config/tidb.toml | 9 ++ docker/config/tikv.toml | 22 +++++ 9 files changed, 315 insertions(+), 6 deletions(-) create mode 120000 .dockerignore create mode 100644 docker-compose.yaml create mode 100644 docker/Dockerfile create mode 100644 docker/config/pd.toml create mode 100644 docker/config/tidb.toml create mode 100644 docker/config/tikv.toml diff --git a/.dockerignore b/.dockerignore new file mode 120000 index 000000000..3e4e48b0b --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +.gitignore \ No newline at end of file diff --git a/.gitignore b/.gitignore index e104ab6e8..e61a56bde 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,5 @@ backupmeta *.ngo *.coverprofile coverage.txt +docker/data/ +docker/logs/ diff --git a/README.md b/README.md index 55444fdec..6207d98eb 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,41 @@ Notice BR supports building with Go version `Go >= 1.13` When BR is built successfully, you can find binary in the `bin` directory. +## Quick start + +```sh +# Start TiDB cluster +docker-compose -f docker-compose.yaml rm -s -v && \ +docker-compose -f docker-compose.yaml build && \ +docker-compose -f docker-compose.yaml up --remove-orphans + +# Attach to control container to run BR +docker exec -it br_control_1 bash + +# Load testing data to TiDB +go-ycsb load mysql -p workload=core \ + -p mysql.host=tidb -p mysql.port=4000 -p mysql.user=root \ + -p recordcount=100000 -p threadcount=100 + +# How many rows do we get? 100000 rows. +mysql -uroot -htidb -P4000 -E -e "SELECT COUNT(*) FROM test.usertable" + +# Build BR and backup! +make release && \ +bin/br backup full --pd pd0:2379 --storage "local:///data/backup/full" \ + --log-file "/logs/br_backup.log" + +# Let's drop database. +mysql -uroot -htidb -P4000 -E -e "DROP DATABASE test; SHOW DATABASES;" + +# Restore! +bin/br restore full --pd pd0:2379 --storage "local:///data/backup/full" \ + --log-file "/logs/br_restore.log" + +# How many rows do we get again? Expected to be 100000 rows. +mysql -uroot -htidb -P4000 -E -e "SELECT COUNT(*) FROM test.usertable" +``` + ## Contributing Contributions are welcomed and greatly appreciated. See [CONTRIBUTING](./CONTRIBUTING.md) diff --git a/cmd/cmd.go b/cmd/cmd.go index 83355e5dd..3fa287ca5 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -86,16 +86,20 @@ func Init(cmd *cobra.Command) (err error) { err = e return } + tidbLogCfg := logutil.LogConfig{} if len(slowLogFilename) != 0 { - slowCfg := logutil.LogConfig{SlowQueryFile: slowLogFilename} - e = logutil.InitLogger(&slowCfg) - if e != nil { - err = e - return - } + tidbLogCfg.SlowQueryFile = slowLogFilename } else { // Hack! Discard slow log by setting log level to PanicLevel logutil.SlowQueryLogger.SetLevel(logrus.PanicLevel) + // Disable annoying TiDB Log. + // TODO: some error logs outputs randomly, we need to fix them in TiDB. 
+ tidbLogCfg.Level = "fatal" + } + e = logutil.InitLogger(&tidbLogCfg) + if e != nil { + err = e + return } // Initialize the pprof server. diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 000000000..4d84c67fa --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,194 @@ +--- +# Source: tidb-docker-compose/templates/docker-compose.yml +version: '2.1' + +services: + control: + image: control:latest + build: + context: . + dockerfile: ./docker/Dockerfile + volumes: + - ./docker/data:/data + - ./docker/logs:/logs + command: -c "/usr/bin/tail -f /dev/null" + depends_on: + - "tidb" + restart: on-failure + + pd0: + image: pingcap/pd:latest + ports: + - "2379" + volumes: + - ./docker/config/pd.toml:/pd.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --name=pd0 + - --client-urls=http://0.0.0.0:2379 + - --peer-urls=http://0.0.0.0:2380 + - --advertise-client-urls=http://pd0:2379 + - --advertise-peer-urls=http://pd0:2380 + - --initial-cluster=pd0=http://pd0:2380 + - --data-dir=/data/pd0 + - --config=/pd.toml + - --log-file=/logs/pd0.log + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv0: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv0:20160 + - --data-dir=/data/tikv0 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv0.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv1: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv1:20160 + - --data-dir=/data/tikv1 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv1.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv2: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv2:20160 + - --data-dir=/data/tikv2 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv2.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv3: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv3:20160 + - --data-dir=/data/tikv3 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv3.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv4: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv4:20160 + - --data-dir=/data/tikv4 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv4.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tidb: + image: 
pingcap/tidb:latest + ports: + - "4000:4000" + - "10080:10080" + volumes: + - ./docker/config/tidb.toml:/tidb.toml:ro + - ./docker/logs:/logs + command: + - --store=tikv + - --path=pd0:2379 + - --config=/tidb.toml + - --log-file=/logs/tidb.log + - --advertise-address=tidb + depends_on: + - "tikv0" + - "tikv1" + - "tikv2" + - "tikv3" + - "tikv4" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tidb-vision: + image: pingcap/tidb-vision:latest + environment: + PD_ENDPOINT: pd0:2379 + ports: + - "8010:8010" + restart: on-failure diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 000000000..c93d22ab4 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,24 @@ +FROM golang:1.13.8-buster as builder + +# For loading data to TiDB +WORKDIR /go/src/github.com/pingcap/ +RUN git clone https://github.com/pingcap/go-ycsb.git && \ + cd go-ycsb && \ + make + +FROM golang:1.13.8-buster + +RUN apt-get update && apt-get install -y --no-install-recommends \ + git \ + curl \ + vim \ + less \ + default-mysql-client \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /go/src/github.com/pingcap/br +COPY . . + +COPY --from=builder /go/src/github.com/pingcap/go-ycsb/bin/go-ycsb /go/bin/go-ycsb + +ENTRYPOINT ["/bin/bash"] diff --git a/docker/config/pd.toml b/docker/config/pd.toml new file mode 100644 index 000000000..e6fb173d1 --- /dev/null +++ b/docker/config/pd.toml @@ -0,0 +1,18 @@ +# PD Configuration. +[schedule] +# Disbale Region Merge +max-merge-region-size = 0 +max-merge-region-key = 0 +merge-schedule-limit = 0 + +max-snapshot-count = 10 +max-pending-peer-count = 32 +max-store-down-time = "30m" +leader-schedule-limit = 4 +region-schedule-limit = 4 +replica-schedule-limit = 8 +tolerant-size-ratio = 5.0 + +[replication] +# The number of replicas for each region. +max-replicas = 3 diff --git a/docker/config/tidb.toml b/docker/config/tidb.toml new file mode 100644 index 000000000..3ef20cc07 --- /dev/null +++ b/docker/config/tidb.toml @@ -0,0 +1,9 @@ +# Run ddl worker on this tidb-server. +run-ddl = true + +# Schema lease duration, very dangerous to change only if you know what you do. +lease = "360s" + +# When create table, split a separated region for it. It is recommended to +# turn off this option if there will be a large number of tables created. +split-table = true diff --git a/docker/config/tikv.toml b/docker/config/tikv.toml new file mode 100644 index 000000000..6528e447f --- /dev/null +++ b/docker/config/tikv.toml @@ -0,0 +1,22 @@ +[raftstore] +# true (default value) for high reliability, this can prevent data loss when power failure. +sync-log = true + +[coprocessor] +# Make region split more aggressive. +region-max-keys = 100 +region-split-keys = 80 + +[rocksdb] +# Number of open files that can be used by the DB. You may need to +# increase this if your database has a large working set. Value -1 means +# files opened are always kept open. You can estimate number of files based +# on target_file_size_base and target_file_size_multiplier for level-based +# compaction. +# If max-open-files = -1, RocksDB will prefetch index and filter blocks into +# block cache at startup, so if your database has a large working set, it will +# take several minutes to open the db. 
+max-open-files = 1024 + +[raftdb] +max-open-files = 1024 From 237fe5b9012a333b7c8da4c94209c4c839d6b7bd Mon Sep 17 00:00:00 2001 From: 3pointer Date: Tue, 10 Mar 2020 15:05:46 +0800 Subject: [PATCH 17/46] *: update tidb dependency build with go1.14 (#176) --- Makefile | 17 +++++++++-------- cmd/backup.go | 5 +++-- cmd/restore.go | 5 +++-- cmd/validate.go | 5 +++-- go.mod | 8 ++++---- go.sum | 36 ++++++++++++++++++++---------------- main.go | 4 +--- pkg/restore/util.go | 5 +++++ pkg/storage/parse_test.go | 2 +- pkg/storage/s3_test.go | 2 +- 10 files changed, 50 insertions(+), 39 deletions(-) diff --git a/Makefile b/Makefile index eea680b74..01458c70c 100644 --- a/Makefile +++ b/Makefile @@ -12,27 +12,28 @@ LDFLAGS += -X "$(BR_PKG)/pkg/utils.BRBuildTS=$(shell date -u '+%Y-%m-%d %I:%M:%S LDFLAGS += -X "$(BR_PKG)/pkg/utils.BRGitHash=$(shell git rev-parse HEAD)" LDFLAGS += -X "$(BR_PKG)/pkg/utils.BRGitBranch=$(shell git rev-parse --abbrev-ref HEAD)" -all: check test build +ifeq ("$(WITH_RACE)", "1") + RACEFLAG = -race +endif -release: - GO111MODULE=on go build -ldflags '$(LDFLAGS)' -o bin/br +all: check test build build: - GO111MODULE=on go build -ldflags '$(LDFLAGS)' -race -o bin/br + GO111MODULE=on go build -ldflags '$(LDFLAGS)' ${RACEFLAG} -o bin/br build_for_integration_test: GO111MODULE=on go test -c -cover -covermode=count \ -coverpkg=$(BR_PKG)/... \ -o bin/br.test # build key locker - GO111MODULE=on go build -race -o bin/locker tests/br_key_locked/*.go + GO111MODULE=on go build ${RACEFLAG} -o bin/locker tests/br_key_locked/*.go # build gc - GO111MODULE=on go build -race -o bin/gc tests/br_z_gc_safepoint/*.go + GO111MODULE=on go build ${RACEFLAG} -o bin/gc tests/br_z_gc_safepoint/*.go # build rawkv client - GO111MODULE=on go build -race -o bin/rawkv tests/br_rawkv/*.go + GO111MODULE=on go build ${RACEFLAG} -o bin/rawkv tests/br_rawkv/*.go test: - GO111MODULE=on go test -race -tags leak ./... + GO111MODULE=on go test ${RACEFLAG} -tags leak ./... testcover: GO111MODULE=on retool do overalls \ diff --git a/cmd/backup.go b/cmd/backup.go index a0a6bcecb..5dc6e3a32 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -29,8 +29,9 @@ func runBackupRawCommand(command *cobra.Command, cmdName string) error { // NewBackupCommand return a full backup subcommand. func NewBackupCommand() *cobra.Command { command := &cobra.Command{ - Use: "backup", - Short: "backup a TiDB cluster", + Use: "backup", + Short: "backup a TiDB cluster", + SilenceUsage: false, PersistentPreRunE: func(c *cobra.Command, args []string) error { if err := Init(c); err != nil { return err diff --git a/cmd/restore.go b/cmd/restore.go index 9f7c47bdb..f508f0342 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -20,8 +20,9 @@ func runRestoreCommand(command *cobra.Command, cmdName string) error { // NewRestoreCommand returns a restore subcommand func NewRestoreCommand() *cobra.Command { command := &cobra.Command{ - Use: "restore", - Short: "restore a TiKV cluster from a backup", + Use: "restore", + Short: "restore a TiKV cluster from a backup", + SilenceUsage: false, PersistentPreRunE: func(c *cobra.Command, args []string) error { if err := Init(c); err != nil { return err diff --git a/cmd/validate.go b/cmd/validate.go index d358995a3..91ee645ac 100644 --- a/cmd/validate.go +++ b/cmd/validate.go @@ -27,8 +27,9 @@ import ( // NewValidateCommand return a debug subcommand. 
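
With the Makefile change in this patch, `-race` is no longer baked into `build`; it becomes opt-in through the `WITH_RACE` variable. A usage sketch, assuming GNU make picks the variable up from the command line or the environment:

```sh
# Plain build, no race detector (the behavior the removed `release` target had).
make build

# Opt back into the race detector for development builds and tests.
make build WITH_RACE=1
make test WITH_RACE=1
```
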
func NewValidateCommand() *cobra.Command { meta := &cobra.Command{ - Use: "validate ", - Short: "commands to check/debug backup data", + Use: "validate ", + Short: "commands to check/debug backup data", + SilenceUsage: false, PersistentPreRunE: func(c *cobra.Command, args []string) error { if err := Init(c); err != nil { return err diff --git a/go.mod b/go.mod index ebad44174..0cd8e7a99 100644 --- a/go.mod +++ b/go.mod @@ -20,11 +20,11 @@ require ( github.com/onsi/gomega v1.8.1 // indirect github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 - github.com/pingcap/kvproto v0.0.0-20200217103621-528e82bf7248 + github.com/pingcap/kvproto v0.0.0-20200221125103-35b65c96516e github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd - github.com/pingcap/parser v0.0.0-20200218113622-517beb2e39c2 + github.com/pingcap/parser v0.0.0-20200301092054-bfc519c0a57f github.com/pingcap/pd v1.1.0-beta.0.20200106144140-f5a7aa985497 - github.com/pingcap/tidb v1.1.0-beta.0.20200223044457-aedea3ec5e1e + github.com/pingcap/tidb v1.1.0-beta.0.20200305113516-ac15dd336c93 github.com/pingcap/tidb-tools v4.0.0-beta+incompatible github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60 github.com/prometheus/client_golang v1.0.0 @@ -36,7 +36,7 @@ require ( github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 go.opencensus.io v0.22.2 // indirect - go.uber.org/zap v1.13.0 + go.uber.org/zap v1.14.0 golang.org/x/net v0.0.0-20191011234655-491137f69257 // indirect golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 golang.org/x/tools v0.0.0-20200226224502-204d844ad48d // indirect diff --git a/go.sum b/go.sum index 26526840e..65a64ffea 100644 --- a/go.sum +++ b/go.sum @@ -138,8 +138,8 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= @@ -309,22 +309,22 @@ github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20191213111810-93cb7c623c8b/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20200217103621-528e82bf7248 h1:DhGKu4ACa5v0Z70J1NWrc9ti+OqihhxmyzsK7YDTpVQ= -github.com/pingcap/kvproto v0.0.0-20200217103621-528e82bf7248/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto 
v0.0.0-20200221125103-35b65c96516e h1:z7j9uyuG/6I4god5h5NbsbMDSfhoOYAvVW6JxhwdHHw= +github.com/pingcap/kvproto v0.0.0-20200221125103-35b65c96516e/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd h1:CV3VsP3Z02MVtdpTMfEgRJ4T9NGgGTxdHpJerent7rM= github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20200218113622-517beb2e39c2 h1:DsymejjOFdljM1q0BJ8yBZUYQ7718+7JTO046Rqd/3k= -github.com/pingcap/parser v0.0.0-20200218113622-517beb2e39c2/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= +github.com/pingcap/parser v0.0.0-20200301092054-bfc519c0a57f h1:SfzX0ZDTyXgzLExMsJ385DTMIaX7CeBQMCGQKdQYO7o= +github.com/pingcap/parser v0.0.0-20200301092054-bfc519c0a57f/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= github.com/pingcap/pd v1.1.0-beta.0.20200106144140-f5a7aa985497 h1:FzLErYtcXnSxtC469OuVDlgBbh0trJZzNxw0mNKzyls= github.com/pingcap/pd v1.1.0-beta.0.20200106144140-f5a7aa985497/go.mod h1:cfT/xu4Zz+Tkq95QrLgEBZ9ikRcgzy4alHqqoaTftqI= github.com/pingcap/sysutil v0.0.0-20191216090214-5f9620d22b3b h1:EEyo/SCRswLGuSk+7SB86Ak1p8bS6HL1Mi4Dhyuv6zg= github.com/pingcap/sysutil v0.0.0-20191216090214-5f9620d22b3b/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= -github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd h1:k7CIHMFVKjHsda3PKkiN4zv++NEnexlUwiJEhryWpG0= -github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= -github.com/pingcap/tidb v1.1.0-beta.0.20200223044457-aedea3ec5e1e h1:HPSJdnkI6mt0qEIbSkJzVsq99929Ki5VblkJMmlqhI0= -github.com/pingcap/tidb v1.1.0-beta.0.20200223044457-aedea3ec5e1e/go.mod h1:zzO/kysmwHMkr0caH2NmuSAKLdsySXKDQGTCYrb7Gx8= +github.com/pingcap/sysutil v0.0.0-20200302022240-21c8c70d0ab1 h1:YUnUZ914SHFMsOSe/xgH5DKK/thtRma8X8hcszRo3CA= +github.com/pingcap/sysutil v0.0.0-20200302022240-21c8c70d0ab1/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= +github.com/pingcap/tidb v1.1.0-beta.0.20200305113516-ac15dd336c93 h1:UT35i5wbPOj99EMzf/pkqrq2asguXmZD5Q1P6mnRb9U= +github.com/pingcap/tidb v1.1.0-beta.0.20200305113516-ac15dd336c93/go.mod h1:fgAq363ZYMeSvCc1jzOEHeG9001fRjoFQiBIRdXdPKo= github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible h1:H1jg0aDWz2SLRh3hNBo2HFtnuHtudIUvBumU7syRkic= github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= github.com/pingcap/tidb-tools v4.0.0-beta+incompatible h1:+XJdcVLCM8GDgXiMS6lFV59N3XPVOqtNHeWNLVrb2pg= @@ -437,8 +437,8 @@ go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM= -go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/automaxprocs v1.2.0/go.mod 
h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4= go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= @@ -447,6 +447,8 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -454,8 +456,8 @@ go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.12.0 h1:dySoUQPFBGj6xwjmBzageVL8jGi8uxc6bEmJQjA06bw= go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.0 h1:/pduUoebOeeJzTDFuoMgC6nRkiasr1sBCIEorly7m4o= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -572,9 +574,9 @@ golang.org/x/tools v0.0.0-20191107010934-f79515f33823 h1:akkRBeitX2EZP59KdtKw310 golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200206050830-dd0d5d485177 h1:E2vxBajJgSA3TcJhDGTh/kP3VnsvXKl9jSijv+h7svQ= -golang.org/x/tools v0.0.0-20200206050830-dd0d5d485177/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200226224502-204d844ad48d h1:loGv/4fxITSrCD4t2P8ZF4oUC4RlRFDAsczcoUS2g6c= golang.org/x/tools v0.0.0-20200226224502-204d844ad48d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -647,6 +649,8 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools 
v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/main.go b/main.go index 103699614..7b7cbfc97 100644 --- a/main.go +++ b/main.go @@ -7,7 +7,6 @@ import ( "os/signal" "syscall" - "github.com/pingcap/errors" "github.com/spf13/cobra" "github.com/pingcap/br/cmd" @@ -42,7 +41,7 @@ func main() { Use: "br", Short: "br is a TiDB/TiKV cluster backup restore tool.", TraverseChildren: true, - SilenceUsage: true, + SilenceUsage: false, } cmd.AddFlags(rootCmd) cmd.SetDefaultContext(ctx) @@ -53,7 +52,6 @@ func main() { ) rootCmd.SetArgs(os.Args[1:]) if err := rootCmd.Execute(); err != nil { - rootCmd.Println(errors.ErrorStack(err)) os.Exit(1) } } diff --git a/pkg/restore/util.go b/pkg/restore/util.go index eb59e625f..1054388f7 100644 --- a/pkg/restore/util.go +++ b/pkg/restore/util.go @@ -38,6 +38,11 @@ func (alloc *idAllocator) Alloc(tableID int64, n uint64, increment, offset int64 return alloc.id, alloc.id, nil } +func (alloc *idAllocator) AllocSeqCache(sequenceID int64) (min int64, max int64, round int64, err error) { + // TODO fix this function after support backup sequence + return 0, 0, 0, nil +} + func (alloc *idAllocator) Rebase(tableID, newBase int64, allocIDs bool) error { return nil } diff --git a/pkg/storage/parse_test.go b/pkg/storage/parse_test.go index d72b8a5b3..c5a53fbb0 100644 --- a/pkg/storage/parse_test.go +++ b/pkg/storage/parse_test.go @@ -19,7 +19,7 @@ var _ = Suite(&testStorageSuite{}) func (r *testStorageSuite) TestCreateStorage(c *C) { _, err := ParseBackend("1invalid:", nil) - c.Assert(err, ErrorMatches, "parse 1invalid:: first path segment in URL cannot contain colon") + c.Assert(err, ErrorMatches, "parse (.*)1invalid:(.*): first path segment in URL cannot contain colon") _, err = ParseBackend("net:storage", nil) c.Assert(err, ErrorMatches, "storage net not support yet") diff --git a/pkg/storage/s3_test.go b/pkg/storage/s3_test.go index 3eaf1c206..f1bb415a6 100644 --- a/pkg/storage/s3_test.go +++ b/pkg/storage/s3_test.go @@ -72,7 +72,7 @@ func (r *testStorageSuite) TestApply(c *C) { options: S3BackendOptions{ Endpoint: "!http:12345", }, - errMsg: "parse !http:12345: first path segment in URL cannot contain colon", + errMsg: "parse (.*)!http:12345(.*): first path segment in URL cannot contain colon", errReturn: true, }, } From 0672ab3e38406f5d68ea95021ea9986be9b0b136 Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Wed, 11 Mar 2020 12:30:36 +0800 Subject: [PATCH 18/46] *: add license header (#182) * rtree: move checkFile into backup Signed-off-by: Neil Shen * *: add license header Signed-off-by: Neil Shen * Update LICENSE.md Co-Authored-By: kennytm Co-authored-by: kennytm Co-authored-by: 3pointer --- LICENSE.md | 4 ++-- cmd/backup.go | 2 ++ cmd/cmd.go | 2 ++ cmd/restore.go | 2 ++ cmd/validate.go | 2 ++ pkg/{rtree => backup}/check.go | 12 ++++++++---- pkg/backup/client.go | 4 +++- pkg/backup/client_test.go | 2 ++ pkg/backup/metrics.go | 2 ++ pkg/backup/push.go | 2 ++ pkg/backup/safe_point.go | 2 ++ pkg/backup/safe_point_test.go | 2 ++ pkg/backup/schema.go | 2 ++ pkg/backup/schema_test.go | 2 ++ pkg/checksum/executor.go | 2 ++ 
pkg/checksum/executor_test.go | 2 ++ pkg/conn/conn.go | 2 ++ pkg/conn/conn_test.go | 2 ++ pkg/glue/glue.go | 2 ++ pkg/gluetidb/glue.go | 2 ++ pkg/mock/mock_cluster.go | 2 ++ pkg/mock/mock_cluster_test.go | 2 ++ pkg/restore/backoff.go | 2 ++ pkg/restore/backoff_test.go | 2 ++ pkg/restore/client.go | 2 ++ pkg/restore/client_test.go | 2 ++ pkg/restore/db.go | 2 ++ pkg/restore/db_test.go | 2 ++ pkg/restore/import.go | 2 ++ pkg/restore/range.go | 2 ++ pkg/restore/range_test.go | 2 ++ pkg/restore/split.go | 2 ++ pkg/restore/split_client.go | 2 ++ pkg/restore/split_test.go | 2 ++ pkg/restore/util.go | 2 ++ pkg/restore/util_test.go | 2 ++ pkg/rtree/rtree.go | 13 +------------ pkg/rtree/rtree_test.go | 13 +------------ pkg/storage/flags.go | 2 ++ pkg/storage/gcs.go | 2 ++ pkg/storage/gcs_test.go | 2 ++ pkg/storage/local.go | 2 ++ pkg/storage/local_unix.go | 2 ++ pkg/storage/local_windows.go | 2 ++ pkg/storage/noop.go | 2 ++ pkg/storage/parse.go | 2 ++ pkg/storage/parse_test.go | 2 ++ pkg/storage/s3.go | 2 ++ pkg/storage/s3_test.go | 2 ++ pkg/storage/storage.go | 2 ++ pkg/summary/collector.go | 2 ++ pkg/summary/collector_test.go | 2 ++ pkg/summary/summary.go | 2 ++ pkg/task/backup.go | 2 ++ pkg/task/backup_raw.go | 2 ++ pkg/task/common.go | 2 ++ pkg/task/restore.go | 2 ++ pkg/utils/key.go | 2 ++ pkg/utils/key_test.go | 2 ++ pkg/utils/progress.go | 2 ++ pkg/utils/progress_test.go | 2 ++ pkg/utils/retry.go | 2 ++ pkg/utils/schema.go | 2 ++ pkg/utils/schema_test.go | 2 ++ pkg/utils/tso.go | 2 ++ pkg/utils/unit.go | 2 ++ pkg/utils/unit_test.go | 2 ++ pkg/utils/utils_test.go | 2 ++ pkg/utils/version.go | 2 ++ pkg/utils/worker.go | 2 ++ 70 files changed, 145 insertions(+), 31 deletions(-) rename pkg/{rtree => backup}/check.go (67%) diff --git a/LICENSE.md b/LICENSE.md index 675c2ec95..4eedc0116 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -186,7 +186,7 @@ Apache License same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -198,4 +198,4 @@ Apache License distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file + limitations under the License. diff --git a/cmd/backup.go b/cmd/backup.go index 5dc6e3a32..83c2348e0 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package cmd import ( diff --git a/cmd/cmd.go b/cmd/cmd.go index 3fa287ca5..5b2801894 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package cmd import ( diff --git a/cmd/restore.go b/cmd/restore.go index f508f0342..948ffdc3c 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package cmd import ( diff --git a/cmd/validate.go b/cmd/validate.go index 91ee645ac..d2ec1c6ec 100644 --- a/cmd/validate.go +++ b/cmd/validate.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package cmd import ( diff --git a/pkg/rtree/check.go b/pkg/backup/check.go similarity index 67% rename from pkg/rtree/check.go rename to pkg/backup/check.go index 08c98d2f4..38b2d927d 100644 --- a/pkg/rtree/check.go +++ b/pkg/backup/check.go @@ -1,4 +1,6 @@ -package rtree +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package backup import ( "encoding/hex" @@ -6,14 +8,16 @@ import ( "github.com/google/btree" "github.com/pingcap/log" "go.uber.org/zap" + + "github.com/pingcap/br/pkg/rtree" ) -// CheckDupFiles checks if there are any files are duplicated. -func CheckDupFiles(rangeTree *RangeTree) { +// checkDupFiles checks whether any files are duplicated. +func checkDupFiles(rangeTree *rtree.RangeTree) { // Name -> SHA256 files := make(map[string][]byte) rangeTree.Ascend(func(i btree.Item) bool { - rg := i.(*Range) + rg := i.(*rtree.Range) for _, f := range rg.Files { old, ok := files[f.Name] if ok { diff --git a/pkg/backup/client.go b/pkg/backup/client.go index 1b5b6b645..aa16d072f 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( @@ -429,7 +431,7 @@ func (bc *Client) BackupRange( }) // Check if there are duplicated files. - rtree.CheckDupFiles(&results) + checkDupFiles(&results) return nil } diff --git a/pkg/backup/client_test.go b/pkg/backup/client_test.go index ddff45299..bf2700cd4 100644 --- a/pkg/backup/client_test.go +++ b/pkg/backup/client_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( diff --git a/pkg/backup/metrics.go b/pkg/backup/metrics.go index fb982cc24..67d5fe1e5 100644 --- a/pkg/backup/metrics.go +++ b/pkg/backup/metrics.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( diff --git a/pkg/backup/push.go b/pkg/backup/push.go index 803a8ec92..4aaffa7e2 100644 --- a/pkg/backup/push.go +++ b/pkg/backup/push.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( diff --git a/pkg/backup/safe_point.go b/pkg/backup/safe_point.go index bb73bc7d9..aa2e812b2 100644 --- a/pkg/backup/safe_point.go +++ b/pkg/backup/safe_point.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( diff --git a/pkg/backup/safe_point_test.go b/pkg/backup/safe_point_test.go index 5a4939191..16f40db26 100644 --- a/pkg/backup/safe_point_test.go +++ b/pkg/backup/safe_point_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( diff --git a/pkg/backup/schema.go b/pkg/backup/schema.go index 66e4beec7..18583d094 100644 --- a/pkg/backup/schema.go +++ b/pkg/backup/schema.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( diff --git a/pkg/backup/schema_test.go b/pkg/backup/schema_test.go index a1514ba4a..3b3bef897 100644 --- a/pkg/backup/schema_test.go +++ b/pkg/backup/schema_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package backup import ( diff --git a/pkg/checksum/executor.go b/pkg/checksum/executor.go index 30e8f11c8..fac944fa0 100644 --- a/pkg/checksum/executor.go +++ b/pkg/checksum/executor.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
+ package checksum import ( diff --git a/pkg/checksum/executor_test.go b/pkg/checksum/executor_test.go index e9db6267b..43c90761d 100644 --- a/pkg/checksum/executor_test.go +++ b/pkg/checksum/executor_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package checksum import ( diff --git a/pkg/conn/conn.go b/pkg/conn/conn.go index 2ab0a0232..c32546796 100644 --- a/pkg/conn/conn.go +++ b/pkg/conn/conn.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package conn import ( diff --git a/pkg/conn/conn_test.go b/pkg/conn/conn_test.go index 90516ae92..c120697dd 100644 --- a/pkg/conn/conn_test.go +++ b/pkg/conn/conn_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package conn import ( diff --git a/pkg/glue/glue.go b/pkg/glue/glue.go index b680370aa..a0cd74e64 100644 --- a/pkg/glue/glue.go +++ b/pkg/glue/glue.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package glue import ( diff --git a/pkg/gluetidb/glue.go b/pkg/gluetidb/glue.go index 27ae01c37..94efb1e0c 100644 --- a/pkg/gluetidb/glue.go +++ b/pkg/gluetidb/glue.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package gluetidb import ( diff --git a/pkg/mock/mock_cluster.go b/pkg/mock/mock_cluster.go index aee9666ed..6779b5bc6 100644 --- a/pkg/mock/mock_cluster.go +++ b/pkg/mock/mock_cluster.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package mock import ( diff --git a/pkg/mock/mock_cluster_test.go b/pkg/mock/mock_cluster_test.go index e7ffc6e85..1db0f5a8c 100644 --- a/pkg/mock/mock_cluster_test.go +++ b/pkg/mock/mock_cluster_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package mock import ( diff --git a/pkg/restore/backoff.go b/pkg/restore/backoff.go index 44a493138..ae5cddd66 100644 --- a/pkg/restore/backoff.go +++ b/pkg/restore/backoff.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( diff --git a/pkg/restore/backoff_test.go b/pkg/restore/backoff_test.go index 73161a9f6..11accedd2 100644 --- a/pkg/restore/backoff_test.go +++ b/pkg/restore/backoff_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 46cdcaa24..38cf2d5a6 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( diff --git a/pkg/restore/client_test.go b/pkg/restore/client_test.go index b67bbcfd7..3f8cb71f8 100644 --- a/pkg/restore/client_test.go +++ b/pkg/restore/client_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( diff --git a/pkg/restore/db.go b/pkg/restore/db.go index 22a1a4794..7251b9f24 100644 --- a/pkg/restore/db.go +++ b/pkg/restore/db.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( diff --git a/pkg/restore/db_test.go b/pkg/restore/db_test.go index b1e9e947c..3f77a53dd 100644 --- a/pkg/restore/db_test.go +++ b/pkg/restore/db_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package restore import ( diff --git a/pkg/restore/import.go b/pkg/restore/import.go index b8928418d..fb537584f 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( diff --git a/pkg/restore/range.go b/pkg/restore/range.go index 97e2469dc..0d5192ca9 100644 --- a/pkg/restore/range.go +++ b/pkg/restore/range.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( diff --git a/pkg/restore/range_test.go b/pkg/restore/range_test.go index 371e79ebb..37561f6b4 100644 --- a/pkg/restore/range_test.go +++ b/pkg/restore/range_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( diff --git a/pkg/restore/split.go b/pkg/restore/split.go index 4642ab853..dc0bab80a 100644 --- a/pkg/restore/split.go +++ b/pkg/restore/split.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( diff --git a/pkg/restore/split_client.go b/pkg/restore/split_client.go index 9ab3ed7f1..14cde92a3 100644 --- a/pkg/restore/split_client.go +++ b/pkg/restore/split_client.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( diff --git a/pkg/restore/split_test.go b/pkg/restore/split_test.go index 61896b114..75b27f378 100644 --- a/pkg/restore/split_test.go +++ b/pkg/restore/split_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( diff --git a/pkg/restore/util.go b/pkg/restore/util.go index 1054388f7..03af2b3c0 100644 --- a/pkg/restore/util.go +++ b/pkg/restore/util.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( diff --git a/pkg/restore/util_test.go b/pkg/restore/util_test.go index 1b5e86b96..d1a738fdb 100644 --- a/pkg/restore/util_test.go +++ b/pkg/restore/util_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package restore import ( diff --git a/pkg/rtree/rtree.go b/pkg/rtree/rtree.go index e3c136803..08b757af5 100644 --- a/pkg/rtree/rtree.go +++ b/pkg/rtree/rtree.go @@ -1,15 +1,4 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. package rtree diff --git a/pkg/rtree/rtree_test.go b/pkg/rtree/rtree_test.go index f4ec4f201..d3e151e25 100644 --- a/pkg/rtree/rtree_test.go +++ b/pkg/rtree/rtree_test.go @@ -1,15 +1,4 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. 
+// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. package rtree diff --git a/pkg/storage/flags.go b/pkg/storage/flags.go index 2340467ba..c828f57a1 100644 --- a/pkg/storage/flags.go +++ b/pkg/storage/flags.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/storage/gcs.go b/pkg/storage/gcs.go index 2eb310c3a..7e105929f 100644 --- a/pkg/storage/gcs.go +++ b/pkg/storage/gcs.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/storage/gcs_test.go b/pkg/storage/gcs_test.go index 10bb44371..60a26f616 100644 --- a/pkg/storage/gcs_test.go +++ b/pkg/storage/gcs_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/storage/local.go b/pkg/storage/local.go index 77ca7f6a4..d2555a978 100644 --- a/pkg/storage/local.go +++ b/pkg/storage/local.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/storage/local_unix.go b/pkg/storage/local_unix.go index be0050e83..aedf7c637 100644 --- a/pkg/storage/local_unix.go +++ b/pkg/storage/local_unix.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + // +build !windows package storage diff --git a/pkg/storage/local_windows.go b/pkg/storage/local_windows.go index a3ab2b784..cb784fad4 100644 --- a/pkg/storage/local_windows.go +++ b/pkg/storage/local_windows.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + // +build windows package storage diff --git a/pkg/storage/noop.go b/pkg/storage/noop.go index 17b1dea55..1ee698342 100644 --- a/pkg/storage/noop.go +++ b/pkg/storage/noop.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import "context" diff --git a/pkg/storage/parse.go b/pkg/storage/parse.go index c470d5458..d75e7663d 100644 --- a/pkg/storage/parse.go +++ b/pkg/storage/parse.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/storage/parse_test.go b/pkg/storage/parse_test.go index c5a53fbb0..3f1bc4d4f 100644 --- a/pkg/storage/parse_test.go +++ b/pkg/storage/parse_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go index 8e04769b5..4758cad6d 100644 --- a/pkg/storage/s3.go +++ b/pkg/storage/s3.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/storage/s3_test.go b/pkg/storage/s3_test.go index f1bb415a6..bd35b6faf 100644 --- a/pkg/storage/s3_test.go +++ b/pkg/storage/s3_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index f9ae368ae..91143ca54 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package storage import ( diff --git a/pkg/summary/collector.go b/pkg/summary/collector.go index 0fb1dfcf9..42488cb82 100644 --- a/pkg/summary/collector.go +++ b/pkg/summary/collector.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package summary import ( diff --git a/pkg/summary/collector_test.go b/pkg/summary/collector_test.go index 6a8704db2..7dff32dd1 100644 --- a/pkg/summary/collector_test.go +++ b/pkg/summary/collector_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package summary import ( diff --git a/pkg/summary/summary.go b/pkg/summary/summary.go index 88d3fb143..08a16c00a 100644 --- a/pkg/summary/summary.go +++ b/pkg/summary/summary.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package summary import "time" diff --git a/pkg/task/backup.go b/pkg/task/backup.go index 4a2b6da12..b4ece838d 100644 --- a/pkg/task/backup.go +++ b/pkg/task/backup.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package task import ( diff --git a/pkg/task/backup_raw.go b/pkg/task/backup_raw.go index 721902afb..aa980b1a0 100644 --- a/pkg/task/backup_raw.go +++ b/pkg/task/backup_raw.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package task import ( diff --git a/pkg/task/common.go b/pkg/task/common.go index 1e03177cb..57134c60a 100644 --- a/pkg/task/common.go +++ b/pkg/task/common.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package task import ( diff --git a/pkg/task/restore.go b/pkg/task/restore.go index f5020403a..f8333d7ff 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package task import ( diff --git a/pkg/utils/key.go b/pkg/utils/key.go index 8ed1109b0..ecaa5fce2 100644 --- a/pkg/utils/key.go +++ b/pkg/utils/key.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/key_test.go b/pkg/utils/key_test.go index 092962135..e314fbeb5 100644 --- a/pkg/utils/key_test.go +++ b/pkg/utils/key_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/progress.go b/pkg/utils/progress.go index 8c66093f0..3ed147f0a 100644 --- a/pkg/utils/progress.go +++ b/pkg/utils/progress.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/progress_test.go b/pkg/utils/progress_test.go index 7c1d9c947..0d76abd8f 100644 --- a/pkg/utils/progress_test.go +++ b/pkg/utils/progress_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/retry.go b/pkg/utils/retry.go index a8f446764..1dbbcdad2 100644 --- a/pkg/utils/retry.go +++ b/pkg/utils/retry.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/schema.go b/pkg/utils/schema.go index e1aec1225..bc22768e5 100644 --- a/pkg/utils/schema.go +++ b/pkg/utils/schema.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/schema_test.go b/pkg/utils/schema_test.go index 336b6d4f8..22456be83 100644 --- a/pkg/utils/schema_test.go +++ b/pkg/utils/schema_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/tso.go b/pkg/utils/tso.go index 6f9ca1aa4..c90cd3575 100644 --- a/pkg/utils/tso.go +++ b/pkg/utils/tso.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ package utils import ( diff --git a/pkg/utils/unit.go b/pkg/utils/unit.go index a12dcb6c2..253d97eb6 100644 --- a/pkg/utils/unit.go +++ b/pkg/utils/unit.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils // unit of storage diff --git a/pkg/utils/unit_test.go b/pkg/utils/unit_test.go index 5b3c00530..6cf89e316 100644 --- a/pkg/utils/unit_test.go +++ b/pkg/utils/unit_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/utils_test.go b/pkg/utils/utils_test.go index f82e28c69..ff8affa7c 100644 --- a/pkg/utils/utils_test.go +++ b/pkg/utils/utils_test.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/version.go b/pkg/utils/version.go index 13a3c7a92..e3d46e301 100644 --- a/pkg/utils/version.go +++ b/pkg/utils/version.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( diff --git a/pkg/utils/worker.go b/pkg/utils/worker.go index a77bae090..2d800ddcd 100644 --- a/pkg/utils/worker.go +++ b/pkg/utils/worker.go @@ -1,3 +1,5 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + package utils import ( From 9caa6de14d1a153c54d93f342dcb99926fad48aa Mon Sep 17 00:00:00 2001 From: kennytm Date: Wed, 11 Mar 2020 15:54:20 +0800 Subject: [PATCH 19/46] conn: support not shutting down the storage when closing the connection (#185) Co-authored-by: 3pointer --- pkg/conn/conn.go | 21 +++++++++++++-------- pkg/glue/glue.go | 4 ++++ pkg/gluetidb/glue.go | 5 +++++ 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/pkg/conn/conn.go b/pkg/conn/conn.go index c32546796..87d2e9580 100644 --- a/pkg/conn/conn.go +++ b/pkg/conn/conn.go @@ -57,6 +57,7 @@ type Mgr struct { mu sync.Mutex clis map[uint64]*grpc.ClientConn } + ownsStorage bool } type pdHTTPRequest func(context.Context, string, string, *http.Client, string, io.Reader) ([]byte, error) @@ -167,10 +168,11 @@ func NewMgr( } mgr := &Mgr{ - pdClient: pdClient, - storage: storage, - dom: dom, - tlsConf: tlsConf, + pdClient: pdClient, + storage: storage, + dom: dom, + tlsConf: tlsConf, + ownsStorage: g.OwnsStorage(), } mgr.pdHTTP.addrs = processedAddrs mgr.pdHTTP.cli = cli @@ -388,11 +390,14 @@ func (mgr *Mgr) Close() { // Gracefully shutdown domain so it does not affect other TiDB DDL. // Must close domain before closing storage, otherwise it gets stuck forever. - if mgr.dom != nil { - mgr.dom.Close() + if mgr.ownsStorage { + if mgr.dom != nil { + mgr.dom.Close() + } + + atomic.StoreUint32(&tikv.ShuttingDown, 1) + mgr.storage.Close() } - atomic.StoreUint32(&tikv.ShuttingDown, 1) - mgr.storage.Close() mgr.pdClient.Close() } diff --git a/pkg/glue/glue.go b/pkg/glue/glue.go index a0cd74e64..5d5281335 100644 --- a/pkg/glue/glue.go +++ b/pkg/glue/glue.go @@ -17,6 +17,10 @@ type Glue interface { BootstrapSession(store kv.Storage) (*domain.Domain, error) CreateSession(store kv.Storage) (Session, error) Open(path string, option pd.SecurityOption) (kv.Storage, error) + + // OwnsStorage returns whether the storage returned by Open() is owned by this glue. + // If this method returns false, the connection manager will never close the storage. + OwnsStorage() bool } // Session is an abstraction of the session.Session interface.
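Note: OwnsStorage is the hook that lets a host process embed BR without having a shared kv.Storage torn down by Mgr.Close. Below is a minimal sketch of such a glue, assuming the host (not BR) creates and closes the storage; the package name "embedded" and the reuse of gluetidb.Glue are illustrative, not part of this patch.

package embedded

import "github.com/pingcap/br/pkg/gluetidb"

// Glue reuses the stock TiDB glue but marks the storage as borrowed,
// so Mgr.Close skips the domain shutdown and the storage.Close call.
type Glue struct {
	gluetidb.Glue
}

// OwnsStorage implements glue.Glue. Returning false tells the
// connection manager that the storage lifecycle belongs to the caller.
func (Glue) OwnsStorage() bool {
	return false
}

With such a glue, Mgr.Close still closes the PD client; only the domain and storage shutdown are skipped.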
diff --git a/pkg/gluetidb/glue.go b/pkg/gluetidb/glue.go index 94efb1e0c..da14459dd 100644 --- a/pkg/gluetidb/glue.go +++ b/pkg/gluetidb/glue.go @@ -52,6 +52,11 @@ func (Glue) Open(path string, option pd.SecurityOption) (kv.Storage, error) { return tikv.Driver{}.Open(path) } +// OwnsStorage implements glue.Glue +func (Glue) OwnsStorage() bool { + return true +} + // Execute implements glue.Session func (gs *tidbSession) Execute(ctx context.Context, sql string) error { _, err := gs.se.Execute(ctx, sql) From 7d59284f4d17fd68628149901316fa64a400cab5 Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Wed, 11 Mar 2020 16:09:39 +0800 Subject: [PATCH 20/46] conn: use GetDomain to avoid some TiDB breaking changes (#186) * conn: use GetDomain to avoid some TiDB breaking changes Signed-off-by: Neil Shen * minor usability improvement Signed-off-by: Neil Shen Co-authored-by: kennytm --- cmd/backup.go | 2 +- cmd/restore.go | 2 +- pkg/conn/conn.go | 2 +- pkg/glue/glue.go | 2 +- pkg/gluetidb/glue.go | 6 +++--- pkg/utils/progress.go | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cmd/backup.go b/cmd/backup.go index 83c2348e0..844003a6d 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -32,7 +32,7 @@ func runBackupRawCommand(command *cobra.Command, cmdName string) error { func NewBackupCommand() *cobra.Command { command := &cobra.Command{ Use: "backup", - Short: "backup a TiDB cluster", + Short: "backup a TiDB/TiKV cluster", SilenceUsage: false, PersistentPreRunE: func(c *cobra.Command, args []string) error { if err := Init(c); err != nil { diff --git a/cmd/restore.go b/cmd/restore.go index 948ffdc3c..0b2792a25 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -23,7 +23,7 @@ func runRestoreCommand(command *cobra.Command, cmdName string) error { func NewRestoreCommand() *cobra.Command { command := &cobra.Command{ Use: "restore", - Short: "restore a TiKV cluster from a backup", + Short: "restore a TiDB/TiKV cluster", SilenceUsage: false, PersistentPreRunE: func(c *cobra.Command, args []string) error { if err := Init(c); err != nil { diff --git a/pkg/conn/conn.go b/pkg/conn/conn.go index 87d2e9580..4e38a0499 100644 --- a/pkg/conn/conn.go +++ b/pkg/conn/conn.go @@ -162,7 +162,7 @@ func NewMgr( return nil, errors.Errorf("tikv cluster not health %+v", stores) } - dom, err := g.BootstrapSession(storage) + dom, err := g.GetDomain(storage) if err != nil { return nil, errors.Trace(err) } diff --git a/pkg/glue/glue.go b/pkg/glue/glue.go index 5d5281335..e2016732d 100644 --- a/pkg/glue/glue.go +++ b/pkg/glue/glue.go @@ -14,7 +14,7 @@ import ( // Glue is an abstraction of TiDB function calls used in BR. 
type Glue interface { - BootstrapSession(store kv.Storage) (*domain.Domain, error) + GetDomain(store kv.Storage) (*domain.Domain, error) CreateSession(store kv.Storage) (Session, error) Open(path string, option pd.SecurityOption) (kv.Storage, error) diff --git a/pkg/gluetidb/glue.go b/pkg/gluetidb/glue.go index da14459dd..aa96c145f 100644 --- a/pkg/gluetidb/glue.go +++ b/pkg/gluetidb/glue.go @@ -26,9 +26,9 @@ type tidbSession struct { se session.Session } -// BootstrapSession implements glue.Glue -func (Glue) BootstrapSession(store kv.Storage) (*domain.Domain, error) { - return session.BootstrapSession(store) +// GetDomain implements glue.Glue +func (Glue) GetDomain(store kv.Storage) (*domain.Domain, error) { + return session.GetDomain(store) } // CreateSession implements glue.Glue diff --git a/pkg/utils/progress.go b/pkg/utils/progress.go index 3ed147f0a..da6b20364 100644 --- a/pkg/utils/progress.go +++ b/pkg/utils/progress.go @@ -57,7 +57,7 @@ func (pp *ProgressPrinter) goPrintProgress( bar.Set(pb.Color, true) bar.SetWriter(&wrappedWriter{name: pp.name}) } else { - tmpl := `{{string . "barName" | red}} {{ bar . "<" "-" (cycle . "-" "\\" "|" "/" ) "." ">"}} {{percent .}}` + tmpl := `{{string . "barName" | green}} {{ bar . "<" "-" (cycle . "-" "\\" "|" "/" ) "." ">"}} {{percent .}}` bar = pb.ProgressBarTemplate(tmpl).Start64(pp.total) bar.Set("barName", pp.name) } From 0e2549699ac515610cd785ba70c9a6585b97949d Mon Sep 17 00:00:00 2001 From: 5kbpers <20279863+5kbpers@users.noreply.github.com> Date: Wed, 11 Mar 2020 19:39:44 +0800 Subject: [PATCH 21/46] fix check safepoint & unhide experimental features (#175) * backup: check safepoint for last backup ts Signed-off-by: 5kbpers * check lastbackupts > 0 Signed-off-by: 5kbpers * unhide experimental features Signed-off-by: 5kbpers * address comment Signed-off-by: 5kbpers * Update tests/br_z_gc_safepoint/run.sh Co-Authored-By: kennytm Co-authored-by: kennytm --- cmd/backup.go | 3 ++- pkg/backup/client.go | 11 +++++++++-- pkg/storage/gcs.go | 27 +++++---------------------- pkg/storage/s3.go | 21 ++++++++------------- pkg/task/backup.go | 8 ++++++-- pkg/task/restore.go | 5 ++--- tests/br_z_gc_safepoint/run.sh | 22 +++++++++++++++++++++- 7 files changed, 53 insertions(+), 44 deletions(-) diff --git a/cmd/backup.go b/cmd/backup.go index 844003a6d..b856dae38 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -102,9 +102,10 @@ func newTableBackupCommand() *cobra.Command { // newRawBackupCommand return a raw kv range backup subcommand. func newRawBackupCommand() *cobra.Command { + // TODO: remove experimental tag if it's stable command := &cobra.Command{ Use: "raw", - Short: "backup a raw kv range from TiKV cluster", + Short: "(experimental) backup a raw kv range from TiKV cluster", RunE: func(command *cobra.Command, _ []string) error { return runBackupRawCommand(command, "Raw backup") }, diff --git a/pkg/backup/client.go b/pkg/backup/client.go index aa16d072f..07a8fb5f1 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -323,8 +323,8 @@ func (bc *Client) BackupRanges( close(errCh) }() - // Check GC safepoint every 30s. - t := time.NewTicker(time.Second * 30) + // Check GC safepoint every 5s. 
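+ // The tighter interval (was 30s) lets the backup abort soon after the GC safepoint advances past the backup ts.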
+ t := time.NewTicker(time.Second * 5) defer t.Stop() finished := false @@ -334,6 +334,13 @@ func (bc *Client) BackupRanges( log.Error("check GC safepoint failed", zap.Error(err)) return err } + if req.StartVersion > 0 { + err = CheckGCSafepoint(ctx, bc.mgr.GetPDClient(), req.StartVersion) + if err != nil { + log.Error("Check gc safepoint for last backup ts failed", zap.Error(err)) + return err + } + } if finished { // Return error (if there is any) before finishing backup. return err diff --git a/pkg/storage/gcs.go b/pkg/storage/gcs.go index 7e105929f..4af3ea059 100644 --- a/pkg/storage/gcs.go +++ b/pkg/storage/gcs.go @@ -48,28 +48,11 @@ func (options *GCSBackendOptions) apply(gcs *backup.GCS) error { } func defineGCSFlags(flags *pflag.FlagSet) { - flags.String(gcsEndpointOption, "", "Set the GCS endpoint URL") - flags.String(gcsStorageClassOption, "", - `Specify the GCS storage class for objects. -If it is not set, objects uploaded are -followed by the default storage class of the bucket. -See https://cloud.google.com/storage/docs/storage-classes -for valid values.`) - flags.String(gcsPredefinedACL, "", - `Specify the GCS predefined acl for objects. -If it is not set, objects uploaded are -followed by the acl of bucket scope. -See https://cloud.google.com/storage/docs/access-control/lists#predefined-acl -for valid values.`) - flags.String(gcsCredentialsFile, "", - `Set the GCS credentials file path. -You can get one from -https://console.cloud.google.com/apis/credentials.`) - - _ = flags.MarkHidden(gcsEndpointOption) - _ = flags.MarkHidden(gcsStorageClassOption) - _ = flags.MarkHidden(gcsPredefinedACL) - _ = flags.MarkHidden(gcsCredentialsFile) + // TODO: remove experimental tag if it's stable + flags.String(gcsEndpointOption, "", "(experimental) Set the GCS endpoint URL") + flags.String(gcsStorageClassOption, "", "(experimental) Specify the GCS storage class for objects") + flags.String(gcsPredefinedACL, "", "(experimental) Specify the GCS predefined acl for objects") + flags.String(gcsCredentialsFile, "", "(experimental) Set the GCS credentials file path") } func (options *GCSBackendOptions) parseFromFlags(flags *pflag.FlagSet) error { diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go index 4758cad6d..bf24b9a2b 100644 --- a/pkg/storage/s3.go +++ b/pkg/storage/s3.go @@ -104,19 +104,14 @@ func (options *S3BackendOptions) apply(s3 *backup.S3) error { } func defineS3Flags(flags *pflag.FlagSet) { - flags.String(s3EndpointOption, "", "Set the S3 endpoint URL, please specify the http or https scheme explicitly") - flags.String(s3RegionOption, "", "Set the S3 region, e.g. us-east-1") - flags.String(s3StorageClassOption, "", "Set the S3 storage class, e.g. STANDARD") - flags.String(s3SSEOption, "", "Set the S3 server-side encryption algorithm, e.g. AES256") - flags.String(s3ACLOption, "", "Set the S3 canned ACLs, e.g. authenticated-read") - flags.String(s3ProviderOption, "", "Set the S3 provider, e.g. aws, alibaba, ceph") - - _ = flags.MarkHidden(s3EndpointOption) - _ = flags.MarkHidden(s3RegionOption) - _ = flags.MarkHidden(s3StorageClassOption) - _ = flags.MarkHidden(s3SSEOption) - _ = flags.MarkHidden(s3ACLOption) - _ = flags.MarkHidden(s3ProviderOption) + // TODO: remove experimental tag if it's stable + flags.String(s3EndpointOption, "", + "(experimental) Set the S3 endpoint URL, please specify the http or https scheme explicitly") + flags.String(s3RegionOption, "", "(experimental) Set the S3 region, e.g. 
us-east-1") + flags.String(s3StorageClassOption, "", "(experimental) Set the S3 storage class, e.g. STANDARD") + flags.String(s3SSEOption, "", "(experimental) Set the S3 server-side encryption algorithm, e.g. AES256") + flags.String(s3ACLOption, "", "(experimental) Set the S3 canned ACLs, e.g. authenticated-read") + flags.String(s3ProviderOption, "", "(experimental) Set the S3 provider, e.g. aws, alibaba, ceph") } func (options *S3BackendOptions) parseFromFlags(flags *pflag.FlagSet) error { diff --git a/pkg/task/backup.go b/pkg/task/backup.go index b4ece838d..bee2102f5 100644 --- a/pkg/task/backup.go +++ b/pkg/task/backup.go @@ -42,8 +42,8 @@ func DefineBackupFlags(flags *pflag.FlagSet) { flagBackupTimeago, 0, "The history version of the backup task, e.g. 1m, 1h. Do not exceed GCSafePoint") - flags.Uint64(flagLastBackupTS, 0, "the last time backup ts") - _ = flags.MarkHidden(flagLastBackupTS) + // TODO: remove experimental tag if it's stable + flags.Uint64(flagLastBackupTS, 0, "(experimental) the last backup ts") @@ -111,6 +111,10 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig ddlJobs := make([]*model.Job, 0) if cfg.LastBackupTS > 0 { + if backupTS < cfg.LastBackupTS { + log.Error("LastBackupTS is larger than current TS") + return errors.New("LastBackupTS is larger than current TS") + } err = backup.CheckGCSafepoint(ctx, mgr.GetPDClient(), cfg.LastBackupTS) if err != nil { log.Error("Check gc safepoint for last backup ts failed", zap.Error(err)) diff --git a/pkg/task/restore.go b/pkg/task/restore.go index f8333d7ff..c759fe8d5 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -48,9 +48,8 @@ type RestoreConfig struct { // DefineRestoreFlags defines common flags for the restore command. func DefineRestoreFlags(flags *pflag.FlagSet) { - flags.Bool("online", false, "Whether online when restore") - // TODO remove hidden flag if it's stable - _ = flags.MarkHidden("online") + // TODO remove experimental tag if it's stable + flags.Bool("online", false, "(experimental) Whether to restore online") } // ParseFromFlags parses the restore-related flags from the flag set. diff --git a/tests/br_z_gc_safepoint/run.sh b/tests/br_z_gc_safepoint/run.sh index 916ca1fa8..a76e97501 100755 --- a/tests/br_z_gc_safepoint/run.sh +++ b/tests/br_z_gc_safepoint/run.sh @@ -23,6 +23,8 @@ set -eu DB="$TEST_NAME" TABLE="usertable" +MAX_INT64=9223372036854775807 + run_sql "CREATE DATABASE $DB;" go-ycsb load mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB @@ -39,7 +41,25 @@ echo "backup start (expect fail)..." run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --ratelimit 1 --ratelimit-unit 1 || backup_gc_fail=1 if [ "$backup_gc_fail" -ne "1" ];then - echo "TEST: [$TEST_NAME] failed!" + echo "TEST: [$TEST_NAME] test check backup ts failed!" exit 1 fi

backup_gc_fail=0
echo "incremental backup start (expect fail)..."
run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --lastbackupts 1 --ratelimit 1 --ratelimit-unit 1 || backup_gc_fail=1

if [ "$backup_gc_fail" -ne "1" ];then
+ echo "TEST: [$TEST_NAME] test check last backup ts failed!"
+ exit 1
+fi
+
+backup_gc_fail=0
+echo "incremental backup with max_int64 start (expect fail)..."
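+# $MAX_INT64 is larger than any TSO the cluster could have issued, so br must reject it: lastbackupts can never exceed the current backup ts.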
+run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --lastbackupts $MAX_INT64 --ratelimit 1 --ratelimit-unit 1 || backup_gc_fail=1 + +if [ "$backup_gc_fail" -ne "1" ];then + echo "TEST: [$TEST_NAME] test check max backup ts failed!" exit 1 fi From 3419d8a9c67c1dc44473820992f074d876c6d51e Mon Sep 17 00:00:00 2001 From: 3pointer Date: Thu, 12 Mar 2020 15:04:16 +0800 Subject: [PATCH 22/46] support backupts (#172) * support backupts * address comment * address comment * fix space --- pkg/backup/client.go | 44 +++++++++++++++++++++--------------- pkg/backup/client_test.go | 17 +++++++++----- pkg/task/backup.go | 47 +++++++++++++++++++++++++++++++++++++-- pkg/task/backup_test.go | 36 ++++++++++++++++++++++++++++++ pkg/task/common.go | 2 +- 5 files changed, 120 insertions(+), 26 deletions(-) create mode 100644 pkg/task/backup_test.go diff --git a/pkg/backup/client.go b/pkg/backup/client.go index 07a8fb5f1..fdc51157e 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -73,25 +73,33 @@ func NewBackupClient(ctx context.Context, mgr ClientMgr) (*Client, error) { } // GetTS returns the latest timestamp. -func (bc *Client) GetTS(ctx context.Context, duration time.Duration) (uint64, error) { - p, l, err := bc.mgr.GetPDClient().GetTS(ctx) - if err != nil { - return 0, errors.Trace(err) - } - backupTS := oracle.ComposeTS(p, l) - - switch { - case duration < 0: - return 0, errors.New("negative timeago is not allowed") - case duration > 0: - log.Info("backup time ago", zap.Duration("timeago", duration)) - - backupTime := oracle.GetTimeFromTS(backupTS) - backupAgo := backupTime.Add(-duration) - if backupTS < oracle.ComposeTS(oracle.GetPhysical(backupAgo), l) { - return 0, errors.New("backup ts overflow please choose a smaller timeago") +func (bc *Client) GetTS(ctx context.Context, duration time.Duration, ts uint64) (uint64, error) { + var ( + backupTS uint64 + err error + ) + if ts > 0 { + backupTS = ts + } else { + p, l, err := bc.mgr.GetPDClient().GetTS(ctx) + if err != nil { + return 0, errors.Trace(err) + } + backupTS = oracle.ComposeTS(p, l) + + switch { + case duration < 0: + return 0, errors.New("negative timeago is not allowed") + case duration > 0: + log.Info("backup time ago", zap.Duration("timeago", duration)) + + backupTime := oracle.GetTimeFromTS(backupTS) + backupAgo := backupTime.Add(-duration) + if backupTS < oracle.ComposeTS(oracle.GetPhysical(backupAgo), l) { + return 0, errors.New("backup ts overflow, please choose a smaller timeago") + } + backupTS = oracle.ComposeTS(oracle.GetPhysical(backupAgo), l) } - backupTS = oracle.ComposeTS(oracle.GetPhysical(backupAgo), l) } // check backup time do not exceed GCSafePoint diff --git a/pkg/backup/client_test.go b/pkg/backup/client_test.go index bf2700cd4..63f3d5d5f 100644 --- a/pkg/backup/client_test.go +++ b/pkg/backup/client_test.go @@ -55,7 +55,7 @@ func (r *testBackup) TestGetTS(c *C) { // timeago not work expectedDuration := 0 currentTs := time.Now().UnixNano() / int64(time.Millisecond) - ts, err := r.backupClient.GetTS(r.ctx, 0) + ts, err := r.backupClient.GetTS(r.ctx, 0, 0) c.Assert(err, IsNil) pdTs := oracle.ExtractPhysical(ts) duration := int(currentTs - pdTs) @@ -65,7 +65,7 @@ func (r *testBackup) TestGetTS(c *C) { // timeago = "1.5m" expectedDuration = 90000 currentTs = time.Now().UnixNano() / int64(time.Millisecond) - ts, err = r.backupClient.GetTS(r.ctx, 90*time.Second) + ts, err = r.backupClient.GetTS(r.ctx, 90*time.Second, 0) c.Assert(err, IsNil) pdTs = oracle.ExtractPhysical(ts) duration =
int(currentTs - pdTs) @@ -73,11 +73,11 @@ func (r *testBackup) TestGetTS(c *C) { c.Assert(duration, Less, expectedDuration+deviation) // timeago = "-1m" - _, err = r.backupClient.GetTS(r.ctx, -time.Minute) + _, err = r.backupClient.GetTS(r.ctx, -time.Minute, 0) c.Assert(err, ErrorMatches, "negative timeago is not allowed") // timeago = "1000000h" overflows - _, err = r.backupClient.GetTS(r.ctx, 1000000*time.Hour) + _, err = r.backupClient.GetTS(r.ctx, 1000000*time.Hour, 0) c.Assert(err, ErrorMatches, "backup ts overflow.*") // timeago = "10h" exceed GCSafePoint @@ -86,8 +86,15 @@ func (r *testBackup) TestGetTS(c *C) { now := oracle.ComposeTS(p, l) _, err = r.backupClient.mgr.GetPDClient().UpdateGCSafePoint(r.ctx, now) c.Assert(err, IsNil) - _, err = r.backupClient.GetTS(r.ctx, 10*time.Hour) + _, err = r.backupClient.GetTS(r.ctx, 10*time.Hour, 0) c.Assert(err, ErrorMatches, "GC safepoint [0-9]+ exceed TS [0-9]+") + + // when timeago and backupts are both set, backupts takes precedence + backupts := oracle.ComposeTS(p+10, l) + ts, err = r.backupClient.GetTS(r.ctx, time.Minute, backupts) + c.Assert(err, IsNil) + c.Assert(ts, Equals, backupts) + } func (r *testBackup) TestBuildTableRange(c *C) { diff --git a/pkg/task/backup.go b/pkg/task/backup.go index bee2102f5..ab22c9039 100644 --- a/pkg/task/backup.go +++ b/pkg/task/backup.go @@ -4,13 +4,18 @@ package task import ( "context" + "strconv" "time" "github.com/pingcap/errors" kvproto "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/log" "github.com/pingcap/parser/model" + "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb-tools/pkg/filter" + "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/types" "github.com/spf13/pflag" "go.uber.org/zap" @@ -23,6 +28,7 @@ const ( flagBackupTimeago = "timeago" + flagBackupTS = "backupts" flagLastBackupTS = "lastbackupts" defaultBackupConcurrency = 4 @@ -33,6 +39,7 @@ type BackupConfig struct { Config TimeAgo time.Duration `json:"time-ago" toml:"time-ago"` + BackupTS uint64 `json:"backup-ts" toml:"backup-ts"` LastBackupTS uint64 `json:"last-backup-ts" toml:"last-backup-ts"` } @@ -43,7 +50,10 @@ func DefineBackupFlags(flags *pflag.FlagSet) { flagBackupTimeago, 0, "The history version of the backup task, e.g. 1m, 1h. Do not exceed GCSafePoint") // TODO: remove experimental tag if it's stable - flags.Uint64(flagLastBackupTS, 0, "(experimental) the last backup ts") + flags.Uint64(flagLastBackupTS, 0, "(experimental) the last backup ts,"+ " used for incremental backup, supports TSO only") + flags.String(flagBackupTS, "", "the backup ts, supports TSO or datetime,"+ " e.g. '400036290571534337', '2018-05-11 01:42:23'") } // ParseFromFlags parses the backup-related flags from the flag set.
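Note: the backupts flag defined above accepts either a raw TSO or a datetime; the parseTSString helper added later in this patch converts the latter by composing a TSO from the parsed wall-clock time. A TiDB TSO packs the physical time in milliseconds into the high bits and an 18-bit logical counter into the low bits. The following is a self-contained sketch of that encoding; the real code uses oracle.ComposeTS and variable.GoTimeToTS, and this standalone snippet is only illustrative:

package main

import (
	"fmt"
	"time"
)

// logicalBits is the number of low bits a TSO reserves for the logical counter.
const logicalBits = 18

// composeTS builds a TSO from a physical timestamp in milliseconds,
// leaving the logical part at zero.
func composeTS(physicalMillis int64) uint64 {
	return uint64(physicalMillis) << logicalBits
}

// physicalTime recovers the wall-clock part of a TSO.
func physicalTime(ts uint64) time.Time {
	millis := int64(ts >> logicalBits)
	return time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
}

func main() {
	t, _ := time.ParseInLocation("2006-01-02 15:04:05", "2018-05-11 01:42:23", time.Local)
	ts := composeTS(t.UnixNano() / int64(time.Millisecond))
	fmt.Println(ts, physicalTime(ts)) // the datetime round-trips through the TSO
}

This also explains the test expectation 400032515489792000-(offset*1000)<<18 in backup_test.go below: the timezone offset in milliseconds, shifted by the 18 logical bits, adjusts the expected physical part.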
@@ -60,6 +70,15 @@ func (cfg *BackupConfig) ParseFromFlags(flags *pflag.FlagSet) error { if err != nil { return errors.Trace(err) } + backupTS, err := flags.GetString(flagBackupTS) + if err != nil { + return errors.Trace(err) + } + cfg.BackupTS, err = parseTSString(backupTS) + if err != nil { + return errors.Trace(err) + } + if err = cfg.Config.ParseFromFlags(flags); err != nil { return errors.Trace(err) } @@ -96,7 +115,7 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig return err } - backupTS, err := client.GetTS(ctx, cfg.TimeAgo) + backupTS, err := client.GetTS(ctx, cfg.TimeAgo, cfg.BackupTS) if err != nil { return err } @@ -198,3 +217,27 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig } return nil } + +// parseTSString is ported from tidb's setSnapshotTS +func parseTSString(ts string) (uint64, error) { + if len(ts) == 0 { + return 0, nil + } + if tso, err := strconv.ParseUint(ts, 10, 64); err == nil { + return tso, nil + } + + loc := time.Local + sc := &stmtctx.StatementContext{ + TimeZone: loc, + } + t, err := types.ParseTime(sc, ts, mysql.TypeTimestamp, types.MaxFsp) + if err != nil { + return 0, errors.Trace(err) + } + t1, err := t.GoTime(loc) + if err != nil { + return 0, errors.Trace(err) + } + return variable.GoTimeToTS(t1), nil +} diff --git a/pkg/task/backup_test.go b/pkg/task/backup_test.go new file mode 100644 index 000000000..6bd60515b --- /dev/null +++ b/pkg/task/backup_test.go @@ -0,0 +1,36 @@ +package task + +import ( + "testing" + "time" + + . "github.com/pingcap/check" +) + +var _ = Suite(&testBackupSuite{}) + +func TestT(t *testing.T) { + TestingT(t) +} + +type testBackupSuite struct{} + +func (s *testBackupSuite) TestParseTSString(c *C) { + var ( + ts uint64 + err error + ) + + ts, err = parseTSString("") + c.Assert(err, IsNil) + c.Assert(int(ts), Equals, 0) + + ts, err = parseTSString("400036290571534337") + c.Assert(err, IsNil) + c.Assert(int(ts), Equals, 400036290571534337) + + _, offset := time.Now().Local().Zone() + ts, err = parseTSString("2018-05-11 01:42:23") + c.Assert(err, IsNil) + c.Assert(int(ts), Equals, 400032515489792000-(offset*1000)<<18) } diff --git a/pkg/task/common.go b/pkg/task/common.go index 57134c60a..94e3d8c87 100644 --- a/pkg/task/common.go +++ b/pkg/task/common.go @@ -95,7 +95,7 @@ type Config struct { // DefineCommonFlags defines the flags common to all BRIE commands.
func DefineCommonFlags(flags *pflag.FlagSet) { flags.BoolP(flagSendCreds, "c", true, "Whether send credentials to tikv") - flags.StringP(flagStorage, "s", "", `specify the url where backup storage, eg, "local:///path/to/save"`) + flags.StringP(flagStorage, "s", "", `specify the url of backup storage, eg, "s3:///path/to/save"`) flags.StringSliceP(flagPD, "u", []string{"127.0.0.1:2379"}, "PD address") flags.String(flagCA, "", "CA certificate path for TLS connection") flags.String(flagCert, "", "Certificate path for TLS connection") From e476c8251b2545152ddb01189c899d6e1dc7d76e Mon Sep 17 00:00:00 2001 From: kennytm Date: Thu, 12 Mar 2020 19:16:18 +0800 Subject: [PATCH 23/46] *: update pd deps to v4 (#184) Co-authored-by: 3pointer --- cmd/validate.go | 2 +- go.mod | 13 +-- go.sum | 168 +++++++++++++++++++++++++++------- pkg/backup/client.go | 2 +- pkg/backup/safe_point.go | 2 +- pkg/backup/safe_point_test.go | 2 +- pkg/conn/conn.go | 2 +- pkg/conn/conn_test.go | 4 +- pkg/glue/glue.go | 2 +- pkg/gluetidb/glue.go | 2 +- pkg/mock/mock_cluster.go | 4 +- pkg/restore/client.go | 2 +- pkg/restore/import.go | 2 +- pkg/restore/split_client.go | 4 +- pkg/restore/split_test.go | 4 +- pkg/task/common.go | 2 +- tests/br_key_locked/codec.go | 2 +- tests/br_key_locked/locker.go | 2 +- tests/br_z_gc_safepoint/gc.go | 2 +- 19 files changed, 161 insertions(+), 62 deletions(-) diff --git a/cmd/validate.go b/cmd/validate.go index d2ec1c6ec..baf30200f 100644 --- a/cmd/validate.go +++ b/cmd/validate.go @@ -16,7 +16,7 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/log" "github.com/pingcap/parser/model" - "github.com/pingcap/pd/pkg/mock/mockid" + "github.com/pingcap/pd/v4/pkg/mock/mockid" "github.com/spf13/cobra" "go.uber.org/zap" diff --git a/go.mod b/go.mod index 0cd8e7a99..50b81eed8 100644 --- a/go.mod +++ b/go.mod @@ -20,12 +20,12 @@ require ( github.com/onsi/gomega v1.8.1 // indirect github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 - github.com/pingcap/kvproto v0.0.0-20200221125103-35b65c96516e + github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5 github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd - github.com/pingcap/parser v0.0.0-20200301092054-bfc519c0a57f - github.com/pingcap/pd v1.1.0-beta.0.20200106144140-f5a7aa985497 - github.com/pingcap/tidb v1.1.0-beta.0.20200305113516-ac15dd336c93 - github.com/pingcap/tidb-tools v4.0.0-beta+incompatible + github.com/pingcap/parser v0.0.0-20200305120128-bde9faa0df84 + github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 + github.com/pingcap/tidb v1.1.0-beta.0.20200310133602-7c39e5e5e0bc + github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60 github.com/prometheus/client_golang v1.0.0 github.com/prometheus/common v0.4.1 @@ -37,10 +37,7 @@ require ( go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 go.opencensus.io v0.22.2 // indirect go.uber.org/zap v1.14.0 - golang.org/x/net v0.0.0-20191011234655-491137f69257 // indirect golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 - golang.org/x/tools v0.0.0-20200226224502-204d844ad48d // indirect google.golang.org/api v0.14.0 google.golang.org/grpc v1.25.1 - gopkg.in/yaml.v2 v2.2.7 // indirect ) diff --git a/go.sum b/go.sum index 65a64ffea..8c5ae9cff 100644 --- a/go.sum +++ b/go.sum @@ -20,17 +20,25 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/appleboy/gin-jwt/v2 v2.6.3/go.mod h1:MfPYA4ogzvOcVkRwAxT7quHOtQmVKDpTwxyUrC2DNw0= +github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go v1.26.1 h1:JGQggXhOiNJIqsmbYUl3cYtJZUffeOWlHtxfzGK7WPI= github.com/aws/aws-sdk-go v1.26.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= @@ -68,6 +76,7 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/golex v0.0.0-20181122101858-9c343928389c/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= @@ -80,6 +89,7 @@ github.com/cznic/y v0.0.0-20170802143616-045f81c6662a/go.mod h1:1rk5VM7oSnA4vjp+ github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/dgraph-io/ristretto v0.0.1 h1:cJwdnj42uV8Jg4+KLrYovLiCgIfz9wtWm6E6KA+1tLs= github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXhuXRyumBd6RE= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= @@ -98,6 +108,7 @@ github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3C github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= @@ -108,13 +119,37 @@ github.com/fsouza/fake-gcs-server v1.15.0 h1:ss/ztlt10Y64A5qslmxZKsiqW/i28t5DkRt github.com/fsouza/fake-gcs-server v1.15.0/go.mod h1:HNxAJ/+FY/XSsxuwz8iIYdp2GtMmPbJ8WQjjGMxd6Qk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= +github.com/gin-contrib/gzip v0.0.1/go.mod h1:fGBJBCdt6qCZuCAOwWuFhBB4OOq9EFqlo5dEaFhhu5w= +github.com/gin-contrib/sse v0.0.0-20170109093832-22d885f9ecc7/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= +github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= +github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= 
+github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/overalls v0.0.0-20180201144345-22ec1a223b7c/go.mod h1:UqxAgEOt89sCiXlrc/ycnx00LVvUO/eS8tMUkWX4R7w= -github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -123,6 +158,7 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -167,12 +203,6 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod 
h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -187,6 +217,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/gtank/cryptopasta v0.0.0-20170601214702-1f550f6f2f69/go.mod h1:YLEMZOtU+AZ7dhN9T/IpGhXVGly2bvkJQ+zxj3WeVQo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -198,13 +229,21 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jeremywohl/flatten v0.0.0-20190921043622-d936035e55cf h1:Ut4tTtPNmInWiEWJRernsWm688R0RN6PFO8sZhwI0sk= github.com/jeremywohl/flatten v0.0.0-20190921043622-d936035e55cf/go.mod h1:4AmD/VxjWcI5SRB0n6szE2A6s2fsNHDLO0nAlMHgfLQ= +github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/joomcode/errorx v1.0.1/go.mod h1:kgco15ekB6cs+4Xjzo7SPeXzx38PbJzBwbnu9qfVNHQ= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY= @@ -226,17 +265,26 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text 
v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= @@ -245,6 +293,7 @@ github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -286,10 +335,12 @@ github.com/pelletier/go-toml v1.3.0 h1:e5+lF2E4Y2WCIxBefVowBuB0iHrUH4HZ8q+6mGF7f github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d h1:U+PMnTlV2tu7RuMK5etusZG3Cf+rpow5hqQByeCzJ2g= github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d/go.mod h1:lXfE4PvvTW5xOjO6Mba8zDPyw8M93B6AQ7frTGnMlA8= +github.com/pingcap-incubator/tidb-dashboard v0.0.0-20200302022638-35a6e979dca9/go.mod h1:YUceA4BHY/MTtp63yZLTYP22waFSwMNo9lXq2FDtzVw= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= 
github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4 h1:iRtOAQ6FXkY/BGvst3CDfTva4nTqh6CL8WXvanLdbu0= github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= +github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 h1:R8gStypOBmpnHEx1qi//SaqxJVI4inOqljg/Aj5/390= github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 h1:KH4f4Si9XK6/IW50HtoaiLIFHGkapOM6w83za47UYik= @@ -308,33 +359,35 @@ github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d/go.mod h1:fMRU1BA1y+r89 github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E= github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20191213111810-93cb7c623c8b/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20200221125103-35b65c96516e h1:z7j9uyuG/6I4god5h5NbsbMDSfhoOYAvVW6JxhwdHHw= -github.com/pingcap/kvproto v0.0.0-20200221125103-35b65c96516e/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20200214064158-62d31900d88e/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20200221034943-a2aa1d1e20a8/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5 h1:knEvP4R5v5b2T107/Q6VzB0C8/6T7NXB/V7Vl1FtQsg= +github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd h1:CV3VsP3Z02MVtdpTMfEgRJ4T9NGgGTxdHpJerent7rM= github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20200301092054-bfc519c0a57f h1:SfzX0ZDTyXgzLExMsJ385DTMIaX7CeBQMCGQKdQYO7o= -github.com/pingcap/parser v0.0.0-20200301092054-bfc519c0a57f/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= -github.com/pingcap/pd v1.1.0-beta.0.20200106144140-f5a7aa985497 h1:FzLErYtcXnSxtC469OuVDlgBbh0trJZzNxw0mNKzyls= -github.com/pingcap/pd v1.1.0-beta.0.20200106144140-f5a7aa985497/go.mod h1:cfT/xu4Zz+Tkq95QrLgEBZ9ikRcgzy4alHqqoaTftqI= -github.com/pingcap/sysutil v0.0.0-20191216090214-5f9620d22b3b h1:EEyo/SCRswLGuSk+7SB86Ak1p8bS6HL1Mi4Dhyuv6zg= -github.com/pingcap/sysutil v0.0.0-20191216090214-5f9620d22b3b/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= +github.com/pingcap/parser v0.0.0-20200305120128-bde9faa0df84 h1:u5FOwUw9muF8mBTZVV1dQhoAKiEo2Ci54CxN9XchEEY= +github.com/pingcap/parser v0.0.0-20200305120128-bde9faa0df84/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= +github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 h1:Yrp99FnjHAEuDrSBql2l0IqCtJX7KwJbTsD5hIArkvk= +github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3/go.mod h1:25GfNw6+Jcr9kca5rtmTb4gKCJ4jOpow2zV2S9Dgafs= 
+github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= github.com/pingcap/sysutil v0.0.0-20200302022240-21c8c70d0ab1 h1:YUnUZ914SHFMsOSe/xgH5DKK/thtRma8X8hcszRo3CA= github.com/pingcap/sysutil v0.0.0-20200302022240-21c8c70d0ab1/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= -github.com/pingcap/tidb v1.1.0-beta.0.20200305113516-ac15dd336c93 h1:UT35i5wbPOj99EMzf/pkqrq2asguXmZD5Q1P6mnRb9U= -github.com/pingcap/tidb v1.1.0-beta.0.20200305113516-ac15dd336c93/go.mod h1:fgAq363ZYMeSvCc1jzOEHeG9001fRjoFQiBIRdXdPKo= -github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible h1:H1jg0aDWz2SLRh3hNBo2HFtnuHtudIUvBumU7syRkic= -github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= -github.com/pingcap/tidb-tools v4.0.0-beta+incompatible h1:+XJdcVLCM8GDgXiMS6lFV59N3XPVOqtNHeWNLVrb2pg= -github.com/pingcap/tidb-tools v4.0.0-beta+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= +github.com/pingcap/sysutil v0.0.0-20200309085538-962fd285f3bb h1:bDbgLaNTRNK6Qw7KjvEqqfCQstY8WMEcXyXTU7yzYKg= +github.com/pingcap/sysutil v0.0.0-20200309085538-962fd285f3bb/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= +github.com/pingcap/tidb v1.1.0-beta.0.20200310133602-7c39e5e5e0bc h1:1aW3qTRJZjnosvXt1b75KL73b28XRJWBx6jtTtHsybg= +github.com/pingcap/tidb v1.1.0-beta.0.20200310133602-7c39e5e5e0bc/go.mod h1:WTmfs5zrUGMPw3Enn5FI3buzkU8BDuJ6BhsO/JC239U= +github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible h1:84F7MFMfdAYObrznvRslmVu43aoihrlL+7mMyMlOi0o= +github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= github.com/pingcap/tipb v0.0.0-20190428032612-535e1abaa330/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60 h1:aJPXrT1u4VfUSGFA2oQVwl4pOXzqe+YI6wed01cjDH4= github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -355,7 +408,10 @@ github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qq github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/satori/go.uuid v1.2.0/go.mod 
h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.19.10+incompatible h1:lA4Pi29JEVIQIgATSeftHSY0rMGI9CLrl2ZvDLiahto= github.com/shirou/gopsutil v2.19.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -363,6 +419,7 @@ github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+D github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 h1:SWV2fHctRpRrp49VXJ6UZja7gU9QLHwRpIPBN89SKEo= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca h1:3fECS8atRjByijiI8yYiuwLwQ2ZxXobW7ua/8GRB3pI= github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= @@ -390,17 +447,27 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14/go.mod h1:gxQT6pBGRuIGunNf/+tSOB5OHvguWi8Tbt82WOkf35E= +github.com/swaggo/gin-swagger v1.2.0/go.mod h1:qlH2+W7zXGZkczuL+r2nEBR2JTT+/lX05Nn6vPhc7OI= +github.com/swaggo/http-swagger v0.0.0-20200103000832-0e9263c4b516/go.mod h1:O1lAbCgAAX/KZ80LM/OXwtWFI/5TvZlwxSg8Cq08PV0= +github.com/swaggo/swag v1.5.1/go.mod h1:1Bl9F/ZBpVWh22nY0zmYyASPO1lI/zIwRDrpZU+tv8Y= +github.com/swaggo/swag v1.6.3/go.mod h1:wcc83tB4Mb2aNiL/HP4MFeQdpHUrca+Rp/DRNgWAUio= +github.com/swaggo/swag v1.6.5/go.mod h1:Y7ZLSS0d0DdxhWGVhQdu+Bu1QhaF5k0RD7FKdiAykeY= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d h1:4J9HCZVpvDmj2tiKGSTUnb3Ok/9CEQb9oqu9LHKQQpc= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285 h1:uSDYjYejelKyceA6DiCsngFof9jAyeaSyX9XC5a1a7Q= github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU= +github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= +github.com/tidwall/match v1.0.1/go.mod 
h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6 h1:lYIiVDtZnyTWlNwiAxLj0bbpTcx1BWCFhXjfsvmPdNc= github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -412,11 +479,16 @@ github.com/uber/jaeger-client-go v2.15.0+incompatible h1:NP3qsSqNxh8VYr956ur1N/1 github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v1.5.0 h1:OHbgr8l656Ub3Fw5k9SWnBfIEwvoHQ+W2y+Aa9D1Uyo= github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.5-pre/go.mod h1:FwP/aQVg39TXzItUBMwnWp9T9gPQnXw4Poh4/oBQZ/0= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2tfCQDUqRd8fI= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= -github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4QUw6lhHsVymzwSDJOZcE0s2X8S20= -github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/negroni v0.3.0 h1:PaXOb61mWeZJxc1Ji2xJjpVg9QfPo0rrB+lHyBxGNSU= github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= @@ -426,6 +498,7 @@ github.com/yookoala/realpath v1.0.0 h1:7OA9pj4FZd+oZDsyvXWQvjn5oBdcHRTV44PpdMSuI github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 h1:lWF4f9Nypl1ZqSb4gLeh/DGvBYVaUYHuiB93teOmwgc= go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -456,14 +529,18 @@ go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.12.0 h1:dySoUQPFBGj6xwjmBzageVL8jGi8uxc6bEmJQjA06bw= go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.0 
h1:/pduUoebOeeJzTDFuoMgC6nRkiasr1sBCIEorly7m4o= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -484,8 +561,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -493,6 +570,8 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -506,11 +585,15 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191011234655-491137f69257 h1:ry8e2D+cwaV6hk7lb3aRTjjZo24shrbK0e11QEOkTIg= -golang.org/x/net v0.0.0-20191011234655-491137f69257/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= @@ -521,12 +604,15 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -535,11 
+621,14 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ= -golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1 h1:gZpLHxUX5BdYLA08Lj4YCJNN/jk7KtquiArPoeX0WvA= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -560,7 +649,10 @@ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606050223-4d9ae51c2468/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -574,14 +666,18 @@ golang.org/x/tools v0.0.0-20191107010934-f79515f33823 h1:akkRBeitX2EZP59KdtKw310 golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200226224502-204d844ad48d h1:loGv/4fxITSrCD4t2P8ZF4oUC4RlRFDAsczcoUS2g6c= -golang.org/x/tools v0.0.0-20200226224502-204d844ad48d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200301222351-066e0c02454c h1:FD7jysxM+EJqg5UYYy3XYDsAiUickFsn4UiaanJkf8c= +golang.org/x/tools v0.0.0-20200301222351-066e0c02454c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb h1:iKlO7ROJc6SttHKlxzwGytRtBUqX4VARrNTgP2YLX5M= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -629,6 +725,10 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/go-playground/validator.v9 v9.31.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= @@ -643,6 +743,8 @@ gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/backup/client.go 
b/pkg/backup/client.go index fdc51157e..15be96bf4 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -17,7 +17,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "github.com/pingcap/parser/model" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb-tools/pkg/filter" "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/domain" diff --git a/pkg/backup/safe_point.go b/pkg/backup/safe_point.go index aa2e812b2..d4d431ded 100644 --- a/pkg/backup/safe_point.go +++ b/pkg/backup/safe_point.go @@ -7,7 +7,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v4/client" "go.uber.org/zap" ) diff --git a/pkg/backup/safe_point_test.go b/pkg/backup/safe_point_test.go index 16f40db26..cdc071686 100644 --- a/pkg/backup/safe_point_test.go +++ b/pkg/backup/safe_point_test.go @@ -7,7 +7,7 @@ import ( "sync" . "github.com/pingcap/check" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/br/pkg/mock" diff --git a/pkg/conn/conn.go b/pkg/conn/conn.go index 4e38a0499..66c68dc49 100644 --- a/pkg/conn/conn.go +++ b/pkg/conn/conn.go @@ -21,7 +21,7 @@ import ( "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/util/codec" diff --git a/pkg/conn/conn_test.go b/pkg/conn/conn_test.go index c120697dd..9cbb963e7 100644 --- a/pkg/conn/conn_test.go +++ b/pkg/conn/conn_test.go @@ -15,8 +15,8 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/pd/server/core" - "github.com/pingcap/pd/server/statistics" + "github.com/pingcap/pd/v4/server/core" + "github.com/pingcap/pd/v4/server/statistics" "github.com/pingcap/tidb/util/codec" ) diff --git a/pkg/glue/glue.go b/pkg/glue/glue.go index e2016732d..f2f3ff55e 100644 --- a/pkg/glue/glue.go +++ b/pkg/glue/glue.go @@ -6,7 +6,7 @@ import ( "context" "github.com/pingcap/parser/model" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta/autoid" diff --git a/pkg/gluetidb/glue.go b/pkg/gluetidb/glue.go index aa96c145f..333053b97 100644 --- a/pkg/gluetidb/glue.go +++ b/pkg/gluetidb/glue.go @@ -7,7 +7,7 @@ import ( "context" "github.com/pingcap/parser/model" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/executor" diff --git a/pkg/mock/mock_cluster.go b/pkg/mock/mock_cluster.go index 6779b5bc6..f43702ed3 100644 --- a/pkg/mock/mock_cluster.go +++ b/pkg/mock/mock_cluster.go @@ -16,8 +16,8 @@ import ( "github.com/go-sql-driver/mysql" "github.com/pingcap/log" - pd "github.com/pingcap/pd/client" - "github.com/pingcap/pd/pkg/tempurl" + pd "github.com/pingcap/pd/v4/client" + "github.com/pingcap/pd/v4/pkg/tempurl" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 38cf2d5a6..26873a22e 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -16,7 +16,7 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" 
"github.com/pingcap/log" "github.com/pingcap/parser/model" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/store/tikv/oracle" diff --git a/pkg/restore/import.go b/pkg/restore/import.go index fb537584f..f98e0fc13 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -15,7 +15,7 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/log" - "github.com/pingcap/pd/pkg/codec" + "github.com/pingcap/pd/v4/pkg/codec" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/credentials" diff --git a/pkg/restore/split_client.go b/pkg/restore/split_client.go index 14cde92a3..d9b5f8677 100644 --- a/pkg/restore/split_client.go +++ b/pkg/restore/split_client.go @@ -20,8 +20,8 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/kvproto/pkg/tikvpb" - pd "github.com/pingcap/pd/client" - "github.com/pingcap/pd/server/schedule/placement" + pd "github.com/pingcap/pd/v4/client" + "github.com/pingcap/pd/v4/server/schedule/placement" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) diff --git a/pkg/restore/split_test.go b/pkg/restore/split_test.go index 75b27f378..b21cbf781 100644 --- a/pkg/restore/split_test.go +++ b/pkg/restore/split_test.go @@ -12,8 +12,8 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" - "github.com/pingcap/pd/server/core" - "github.com/pingcap/pd/server/schedule/placement" + "github.com/pingcap/pd/v4/server/core" + "github.com/pingcap/pd/v4/server/schedule/placement" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/br/pkg/rtree" diff --git a/pkg/task/common.go b/pkg/task/common.go index 94e3d8c87..80f5eb258 100644 --- a/pkg/task/common.go +++ b/pkg/task/common.go @@ -12,7 +12,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/backup" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb-tools/pkg/filter" "github.com/pingcap/tidb/store/tikv" "github.com/spf13/cobra" diff --git a/tests/br_key_locked/codec.go b/tests/br_key_locked/codec.go index cd02c35d7..39ff110e5 100644 --- a/tests/br_key_locked/codec.go +++ b/tests/br_key_locked/codec.go @@ -20,7 +20,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb/util/codec" ) diff --git a/tests/br_key_locked/locker.go b/tests/br_key_locked/locker.go index 25f5b526f..9825faff7 100644 --- a/tests/br_key_locked/locker.go +++ b/tests/br_key_locked/locker.go @@ -33,7 +33,7 @@ import ( "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/log" "github.com/pingcap/parser/model" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/store/tikv/tikvrpc" diff --git a/tests/br_z_gc_safepoint/gc.go b/tests/br_z_gc_safepoint/gc.go index a18367259..d5a30361e 100644 --- a/tests/br_z_gc_safepoint/gc.go +++ b/tests/br_z_gc_safepoint/gc.go @@ -21,7 +21,7 @@ import ( "time" "github.com/pingcap/log" - pd "github.com/pingcap/pd/client" + pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb/store/tikv/oracle" "go.uber.org/zap" ) From 
9c2bf9da3ddcd4791dc3f96012e7f6811f16a50c Mon Sep 17 00:00:00 2001
From: disksing
Date: Fri, 13 Mar 2020 11:53:48 +0800
Subject: [PATCH 24/46] restore: support online restore (#114)

Signed-off-by: disksing
---
 pkg/restore/client.go           | 174 +++++++++++++++++++++++++++++++-
 pkg/task/restore.go             |  41 ++++++++
 tests/br_db_online/run.sh       |  54 ++++++++++
 tests/br_db_online_newkv/run.sh |  77 ++++++++++++++
 tests/config/restore-tikv.toml  |  17 ++++
 5 files changed, 358 insertions(+), 5 deletions(-)
 create mode 100755 tests/br_db_online/run.sh
 create mode 100755 tests/br_db_online_newkv/run.sh
 create mode 100644 tests/config/restore-tikv.toml

diff --git a/pkg/restore/client.go b/pkg/restore/client.go
index 26873a22e..93cc97567 100644
--- a/pkg/restore/client.go
+++ b/pkg/restore/client.go
@@ -5,21 +5,28 @@ package restore
 import (
 	"context"
 	"crypto/tls"
+	"encoding/hex"
 	"encoding/json"
+	"fmt"
 	"math"
 	"sort"
+	"strconv"
 	"sync"
 	"time"

 	"github.com/pingcap/errors"
 	"github.com/pingcap/kvproto/pkg/backup"
 	"github.com/pingcap/kvproto/pkg/import_sstpb"
+	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/log"
 	"github.com/pingcap/parser/model"
 	pd "github.com/pingcap/pd/v4/client"
+	"github.com/pingcap/pd/v4/server/schedule/placement"
 	"github.com/pingcap/tidb/domain"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/store/tikv/oracle"
+	"github.com/pingcap/tidb/tablecodec"
+	"github.com/pingcap/tidb/util/codec"
 	"go.uber.org/zap"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/backoff"
@@ -42,6 +49,7 @@ type Client struct {
 	cancel context.CancelFunc

 	pdClient     pd.Client
+	toolClient   SplitClient
 	fileImporter FileImporter
 	workerPool   *utils.WorkerPool
 	tlsConf      *tls.Config
@@ -53,6 +61,8 @@ type Client struct {
 	rateLimit       uint64
 	isOnline        bool
 	hasSpeedLimited bool
+
+	restoreStores []uint64
 }

 // NewRestoreClient returns a new RestoreClient
@@ -71,11 +81,12 @@ func NewRestoreClient(
 	}

 	return &Client{
-		ctx:      ctx,
-		cancel:   cancel,
-		pdClient: pdClient,
-		db:       db,
-		tlsConf:  tlsConf,
+		ctx:        ctx,
+		cancel:     cancel,
+		pdClient:   pdClient,
+		toolClient: NewSplitClient(pdClient, tlsConf),
+		db:         db,
+		tlsConf:    tlsConf,
 	}, nil
 }

@@ -457,6 +468,159 @@ func (rc *Client) ValidateChecksum(
 	return nil
 }

+const (
+	restoreLabelKey   = "exclusive"
+	restoreLabelValue = "restore"
+)
+
+// LoadRestoreStores loads the stores used to restore data.
+func (rc *Client) LoadRestoreStores(ctx context.Context) error {
+	if !rc.isOnline {
+		return nil
+	}
+
+	stores, err := rc.pdClient.GetAllStores(ctx)
+	if err != nil {
+		return err
+	}
+	for _, s := range stores {
+		if s.GetState() != metapb.StoreState_Up {
+			continue
+		}
+		for _, l := range s.GetLabels() {
+			if l.GetKey() == restoreLabelKey && l.GetValue() == restoreLabelValue {
+				rc.restoreStores = append(rc.restoreStores, s.GetId())
+				break
+			}
+		}
+	}
+	log.Info("load restore stores", zap.Uint64s("store-ids", rc.restoreStores))
+	return nil
+}
+
+// ResetRestoreLabels removes the exclusive labels of the restore stores.
+func (rc *Client) ResetRestoreLabels(ctx context.Context) error {
+	if !rc.isOnline {
+		return nil
+	}
+	log.Info("start resetting store labels")
+	return rc.toolClient.SetStoresLabel(ctx, rc.restoreStores, restoreLabelKey, "")
+}
+
+// SetupPlacementRules sets rules for the tables' regions.
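+// It copies PD's default placement rule, raises its priority (Index 100 with
+// Override), constrains replicas to stores labeled exclusive=restore, and
+// installs one such rule per table over the key range [t.ID, t.ID+1).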
+func (rc *Client) SetupPlacementRules(ctx context.Context, tables []*model.TableInfo) error {
+	if !rc.isOnline || len(rc.restoreStores) == 0 {
+		return nil
+	}
+	log.Info("start setting placement rules")
+	rule, err := rc.toolClient.GetPlacementRule(ctx, "pd", "default")
+	if err != nil {
+		return err
+	}
+	rule.Index = 100
+	rule.Override = true
+	rule.LabelConstraints = append(rule.LabelConstraints, placement.LabelConstraint{
+		Key:    restoreLabelKey,
+		Op:     "in",
+		Values: []string{restoreLabelValue},
+	})
+	for _, t := range tables {
+		rule.ID = rc.getRuleID(t.ID)
+		rule.StartKeyHex = hex.EncodeToString(codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(t.ID)))
+		rule.EndKeyHex = hex.EncodeToString(codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(t.ID+1)))
+		err = rc.toolClient.SetPlacementRule(ctx, rule)
+		if err != nil {
+			return err
+		}
+	}
+	log.Info("finish setting placement rules")
+	return nil
+}
+
+// WaitPlacementSchedule waits for PD to move tables to the restore stores.
+func (rc *Client) WaitPlacementSchedule(ctx context.Context, tables []*model.TableInfo) error {
+	if !rc.isOnline || len(rc.restoreStores) == 0 {
+		return nil
+	}
+	log.Info("start waiting placement schedule")
+	ticker := time.NewTicker(time.Second * 10)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			ok, progress, err := rc.checkRegions(ctx, tables)
+			if err != nil {
+				return err
+			}
+			if ok {
+				log.Info("finish waiting placement schedule")
+				return nil
+			}
+			log.Info("placement schedule progress: " + progress)
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	}
+}
+
+func (rc *Client) checkRegions(ctx context.Context, tables []*model.TableInfo) (bool, string, error) {
+	for i, t := range tables {
+		start := codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(t.ID))
+		end := codec.EncodeBytes([]byte{}, tablecodec.EncodeTablePrefix(t.ID+1))
+		ok, regionProgress, err := rc.checkRange(ctx, start, end)
+		if err != nil {
+			return false, "", err
+		}
+		if !ok {
+			return false, fmt.Sprintf("table %v/%v, %s", i, len(tables), regionProgress), nil
+		}
+	}
+	return true, "", nil
+}
+
+func (rc *Client) checkRange(ctx context.Context, start, end []byte) (bool, string, error) {
+	regions, err := rc.toolClient.ScanRegions(ctx, start, end, -1)
+	if err != nil {
+		return false, "", err
+	}
+	for i, r := range regions {
+	NEXT_PEER:
+		for _, p := range r.Region.GetPeers() {
+			for _, storeID := range rc.restoreStores {
+				if p.GetStoreId() == storeID {
+					continue NEXT_PEER
+				}
+			}
+			return false, fmt.Sprintf("region %v/%v", i, len(regions)), nil
+		}
+	}
+	return true, "", nil
+}
+
+// ResetPlacementRules removes placement rules for tables.
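+// It deletes the per-table rules installed by SetupPlacementRules and returns
+// an error listing any table IDs whose rules could not be removed.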
+func (rc *Client) ResetPlacementRules(ctx context.Context, tables []*model.TableInfo) error {
+	if !rc.isOnline || len(rc.restoreStores) == 0 {
+		return nil
+	}
+	log.Info("start resetting placement rules")
+	var failedTables []int64
+	for _, t := range tables {
+		err := rc.toolClient.DeletePlacementRule(ctx, "pd", rc.getRuleID(t.ID))
+		if err != nil {
+			log.Info("failed to delete placement rule for table", zap.Int64("table-id", t.ID))
+			failedTables = append(failedTables, t.ID)
+		}
+	}
+	if len(failedTables) > 0 {
+		return errors.Errorf("failed to delete placement rules for tables %v", failedTables)
+	}
+	return nil
+}
+
+func (rc *Client) getRuleID(tableID int64) string {
+	return "restore-t" + strconv.FormatInt(tableID, 10)
+}
+
 // IsIncremental returns whether this backup is incremental
 func (rc *Client) IsIncremental() bool {
 	return !(rc.backupMeta.StartVersion == rc.backupMeta.EndVersion ||
diff --git a/pkg/task/restore.go b/pkg/task/restore.go
index c759fe8d5..a02e49cf1 100644
--- a/pkg/task/restore.go
+++ b/pkg/task/restore.go
@@ -8,6 +8,7 @@ import (
 	"github.com/pingcap/errors"
 	"github.com/pingcap/kvproto/pkg/backup"
 	"github.com/pingcap/log"
+	"github.com/pingcap/parser/model"
 	"github.com/pingcap/tidb-tools/pkg/filter"
 	"github.com/spf13/pflag"
 	"go.uber.org/zap"
@@ -91,6 +92,10 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf
 	if cfg.Online {
 		client.EnableOnline()
 	}
+	err = client.LoadRestoreStores(ctx)
+	if err != nil {
+		return err
+	}

 	defer summary.Summary(cmdName)

@@ -137,6 +142,10 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf
 	}
 	summary.CollectInt("restore ranges", len(ranges))

+	if err = splitPrepareWork(ctx, client, newTables); err != nil {
+		return err
+	}
+
 	ranges = restore.AttachFilesToRanges(files, ranges)

 	// Redirect to log if there is no log file to avoid unreadable output.
@@ -203,6 +212,10 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf
 		return err
 	}

+	if err = splitPostWork(ctx, client, newTables); err != nil {
+		return err
+	}
+
 	// Restore has finished.
 	close(updateCh)

@@ -305,3 +318,31 @@ func addPDLeaderScheduler(ctx context.Context, mgr *conn.Mgr, removedSchedulers
 	}
 	return nil
 }
+
+func splitPrepareWork(ctx context.Context, client *restore.Client, tables []*model.TableInfo) error {
+	err := client.SetupPlacementRules(ctx, tables)
+	if err != nil {
+		log.Error("setup placement rules failed", zap.Error(err))
+		return errors.Trace(err)
+	}
+
+	err = client.WaitPlacementSchedule(ctx, tables)
+	if err != nil {
+		log.Error("wait placement schedule failed", zap.Error(err))
+		return errors.Trace(err)
+	}
+	return nil
+}
+
+func splitPostWork(ctx context.Context, client *restore.Client, tables []*model.TableInfo) error {
+	err := client.ResetPlacementRules(ctx, tables)
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	err = client.ResetRestoreLabels(ctx)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	return nil
+}
diff --git a/tests/br_db_online/run.sh b/tests/br_db_online/run.sh
new file mode 100755
index 000000000..95c3121d4
--- /dev/null
+++ b/tests/br_db_online/run.sh
@@ -0,0 +1,54 @@
+#!/bin/sh
+#
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu
+DB="$TEST_NAME"
+
+run_sql "CREATE DATABASE $DB;"
+
+run_sql "CREATE TABLE $DB.usertable1 ( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.usertable1 VALUES (\"a\", \"b\");"
+run_sql "INSERT INTO $DB.usertable1 VALUES (\"aa\", \"b\");"
+
+run_sql "CREATE TABLE $DB.usertable2 ( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.usertable2 VALUES (\"c\", \"d\");"
+
+# backup db
+echo "backup start..."
+run_br --pd $PD_ADDR backup db --db "$DB" -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4
+
+run_sql "DROP DATABASE $DB;"
+
+# restore db
+echo "restore start..."
+run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR --online
+
+table_count=$(run_sql "use $DB; show tables;" | grep "Tables_in" | wc -l)
+if [ "$table_count" -ne "2" ];then
+    echo "TEST: [$TEST_NAME] failed!"
+    exit 1
+fi
+
+run_sql "DROP DATABASE $DB;"
diff --git a/tests/br_db_online_newkv/run.sh b/tests/br_db_online_newkv/run.sh
new file mode 100755
index 000000000..d8c3f15ff
--- /dev/null
+++ b/tests/br_db_online_newkv/run.sh
@@ -0,0 +1,77 @@
+#!/bin/sh
+#
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eu
+DB="$TEST_NAME"
+
+run_sql "CREATE DATABASE $DB;"
+
+run_sql "CREATE TABLE $DB.usertable1 ( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.usertable1 VALUES (\"a\", \"b\");"
+run_sql "INSERT INTO $DB.usertable1 VALUES (\"aa\", \"b\");"
+
+run_sql "CREATE TABLE $DB.usertable2 ( \
+    YCSB_KEY varchar(64) NOT NULL, \
+    FIELD0 varchar(1) DEFAULT NULL, \
+    PRIMARY KEY (YCSB_KEY) \
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;"
+
+run_sql "INSERT INTO $DB.usertable2 VALUES (\"c\", \"d\");"
+
+# backup db
+echo "backup start..."
+run_br --pd $PD_ADDR backup db --db "$DB" -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4
+
+run_sql "DROP DATABASE $DB;"
+
+# enable placement rules
+echo "config set enable-placement-rules true" | pd-ctl
+
+# add new tikv for restore
+# actual tikv addresses are TIKV_ADDR${i}
+TIKV_ADDR="127.0.0.1:2017"
+TIKV_STATUS_ADDR="127.0.0.1:2019"
+TIKV_COUNT=3
+
+echo "Starting restore TiKV..."
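+# Start TIKV_COUNT extra TiKV instances dedicated to the restore; each loads
+# tests/config/restore-tikv.toml, which carries the exclusive=restore store
+# label that online restore schedules the restored regions to.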
+for i in $(seq $TIKV_COUNT); do + tikv-server \ + --pd "$PD_ADDR" \ + -A "$TIKV_ADDR$i" \ + --status-addr "$TIKV_STATUS_ADDR$i" \ + --log-file "$TEST_DIR/restore-tikv${i}.log" \ + -C "tests/config/restore-tikv.toml" \ + -s "$TEST_DIR/restore-tikv${i}" & +done +sleep 5 + +# restore db +echo "restore start..." +run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR --online + +table_count=$(run_sql "use $DB; show tables;" | grep "Tables_in" | wc -l) +if [ "$table_count" -ne "2" ];then + echo "TEST: [$TEST_NAME] failed!" + exit 1 +fi + +echo "config set enable-placement-rules false" | pd-ctl + +run_sql "DROP DATABASE $DB;" diff --git a/tests/config/restore-tikv.toml b/tests/config/restore-tikv.toml new file mode 100644 index 000000000..010711cd4 --- /dev/null +++ b/tests/config/restore-tikv.toml @@ -0,0 +1,17 @@ +# config of tikv + +[server] +labels = { exclusive = "restore" } + +[coprocessor] +region-max-keys = 20 +region-split-keys = 12 + +[rocksdb] +max-open-files = 4096 +[raftdb] +max-open-files = 4096 +[raftstore] +# true (default value) for high reliability, this can prevent data loss when power failure. +sync-log = false +capacity = "10GB" \ No newline at end of file From 512855ddcdbc9ff0297263e42d01a06ebf212be5 Mon Sep 17 00:00:00 2001 From: WangXiangUSTC Date: Sun, 15 Mar 2020 20:18:22 +0800 Subject: [PATCH 25/46] metrics: add grafana scripts (#140) * add grafana scripts * fix Co-authored-by: 3pointer Co-authored-by: glorv Co-authored-by: kennytm --- metrics/grafana/br.json | 1690 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 1690 insertions(+) create mode 100644 metrics/grafana/br.json diff --git a/metrics/grafana/br.json b/metrics/grafana/br.json new file mode 100644 index 000000000..d211b4914 --- /dev/null +++ b/metrics/grafana/br.json @@ -0,0 +1,1690 @@ +{ + "__inputs": [ + { + "name": "DS_TEST-CLUSTER", + "label": "test-cluster", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "6.1.6" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "panel", + "id": "heatmap", + "name": "Heatmap", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "${DS_TEST-CLUSTER}", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 16, + "iteration": 1577953179687, + "links": [], + "panels": [ + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 15, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": 
"null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(tikv_thread_cpu_seconds_total{instance=~\"$instance\", name=~\"backup_worker.*\"}[1m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "backup-worker", + "metric": "tikv_thread_cpu_seconds_total", + "refId": "A", + "step": 4 + }, + { + "expr": "sum(rate(tikv_thread_cpu_seconds_total{instance=~\"$instance\", name=~\"backup_endpoint\"}[1m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "backup-endpoint", + "metric": "tikv_thread_cpu_seconds_total", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Backup CPU Utilization", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 13, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": true, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(node_disk_io_time_seconds_total[1m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} - {{device}}", + "metric": "tikv_thread_cpu_seconds_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "IO Utilization", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 2, + "description": "", + "fill": 1, + "gridPos": { + "h": 7, + "w": 7, + "x": 0, + "y": 8 + }, + "id": 10, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + 
"nullPointMode": "null as zero", + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(tikv_backup_error_counter[1m])", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{error}} {{instance}}", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Backup Errors", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 7, + "w": 9, + "x": 7, + "y": 8 + }, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(tikv_backup_range_size_bytes_sum{instance=~\"$instance\"}[1m]))", + "format": "time_series", + "hide": true, + "intervalFactor": 2, + "legendFormat": "backup-flow", + "metric": "", + "refId": "A", + "step": 4 + }, + { + "expr": "rate(tikv_backup_range_size_bytes_sum[1m])", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "metric": "", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "BackupSST Generation Throughput", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 8 + }, + "id": 6, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + 
"steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_backup_range_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}} - 99%", + "metric": "", + "refId": "A", + "step": 4 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(tikv_backup_range_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}} - 95%", + "refId": "B", + "step": 4 + }, + { + "expr": "sum(rate(tikv_backup_range_duration_seconds_sum{instance=~\"$instance\"}[1m])) by (type) / sum(rate(tikv_backup_range_duration_seconds_count{instance=~\"$instance\"}[1m])) by (type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{type}} - avg", + "refId": "C", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "One Backup Range Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 15 + }, + "id": 8, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": true, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_backup_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": " 99%", + "metric": "", + "refId": "A", + "step": 4 + }, + { + "expr": "histogram_quantile(0.95, sum(rate(tikv_backup_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "95%", + "refId": "B", + "step": 4 + }, + { + "expr": "sum(rate(tikv_backup_request_duration_seconds_sum{instance=~\"$instance\"}[1m])) / sum(rate(tikv_backup_request_duration_seconds_count{instance=~\"$instance\"}[1m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "avg", + "refId": "C", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "One Backup Subtask Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": 
"short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "description": "", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 15 + }, + "id": 12, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(1, sum(rate(tikv_coprocessor_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le,req))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{req}}-100%", + "refId": "E" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_coprocessor_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le,req))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{req}}-99%", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Checksum Request Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 1, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Backup", + "type": "row" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 17, + "panels": [], + "title": "Restore", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 2 + }, + "id": 21, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "max", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(tikv_thread_cpu_seconds_total{instance=~\"$instance\"}[1m])) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": 
"individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 2 + }, + "id": 19, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(node_disk_io_time_seconds_total[1m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}} - {{device}}", + "metric": "tikv_thread_cpu_seconds_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "IO Utilization", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "description": " \tThe number of leaders on each TiKV instance", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 10 + }, + "id": 25, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "lines": false + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(tikv_raftstore_region_count{instance=~\"$instance\", type=\"leader\"}) by (instance)", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + }, + { + "expr": "delta(tikv_raftstore_region_count{instance=~\"$instance\", type=\"leader\"}[30s]) < -10", + "format": "time_series", + "hide": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + 
"title": "Leader", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "description": "The number of Regions on each TiKV instance", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 10 + }, + "id": 29, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(tikv_raftstore_region_count{instance=~\"$instance\", type=\"region\"}) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Region", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 18 + }, + "id": 33, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_import_download_duration_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{type}}-99%", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.95, sum(rate(tikv_import_download_duration_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{type}}-95%", + "refId": "B" + }, + { + "expr": "sum(rate(tikv_import_download_duration_bucket{instance=~\"$instance\"}[1m])) by (type) / sum(rate(tikv_import_download_duration_bucket{instance=~\"$instance\"}[1m])) by (type)", + 
"format": "time_series", + "hide": true, + "intervalFactor": 1, + "legendFormat": "{{type}}-avg", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_import_ingest_duration_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{type}}-99%", + "refId": "D" + }, + { + "expr": "histogram_quantile(0.95, sum(rate(tikv_import_ingest_duration_bucket{instance=~\"$instance\"}[1m])) by (le, type))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{type}}-95%", + "refId": "E" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Process SST Duration", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 18 + }, + "id": 31, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(tikv_import_download_bytes_sum{instance=~\"$instance\"}[1m]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "download-flow", + "refId": "A" + }, + { + "expr": "rate(tikv_import_download_bytes_sum[1m])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DownLoad SST Throughput", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "fill": 1, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 26 + }, + "id": 27, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(tikv_import_error_counter[1m])", + "format": "time_series", 
+ "hide": true, + "intervalFactor": 2, + "legendFormat": "{{error}}-{{instance}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Restore Errors", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_TEST-CLUSTER}", + "decimals": 1, + "description": "", + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 26 + }, + "id": 23, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": false, + "max": true, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(1, sum(rate(tikv_coprocessor_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le,req))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{req}}-100%", + "refId": "E" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(tikv_coprocessor_request_duration_seconds_bucket{instance=~\"$instance\"}[1m])) by (le,req))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{req}}-99%", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Checksum Request Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 1, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 18, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": ".*", + "current": {}, + "datasource": "${DS_TEST-CLUSTER}", + "definition": "label_values(tikv_engine_size_bytes, instance)", + "hide": 0, + "includeAll": true, + "label": "Instance", + "multi": false, + "name": "instance", + "options": [], + "query": "label_values(tikv_engine_size_bytes, instance)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + 
"12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Test-Cluster-Backup & Restore", + "uid": "AzvioWLWz", + "version": 25 +} From 6b88e51ae6429c75f8515eb45d0fd448913e9440 Mon Sep 17 00:00:00 2001 From: kennytm Date: Mon, 16 Mar 2020 14:13:20 +0800 Subject: [PATCH 26/46] filter out all TiFlash nodes when retrieving lists of stores from PD (#187) * conn: ignore nodes with label engine=tiflash * conn: disallow TiFlash on restore, only skip TiFlash on backup --- pkg/backup/client.go | 3 +- pkg/conn/conn.go | 51 +++++++++++++++++++++++- pkg/conn/conn_test.go | 88 ++++++++++++++++++++++++++++++++++++++++++ pkg/restore/client.go | 5 ++- pkg/task/backup.go | 3 +- pkg/task/backup_raw.go | 3 +- pkg/task/common.go | 10 ++++- pkg/task/restore.go | 2 +- 8 files changed, 155 insertions(+), 10 deletions(-) diff --git a/pkg/backup/client.go b/pkg/backup/client.go index 15be96bf4..13236388b 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -31,6 +31,7 @@ import ( "github.com/pingcap/tidb/util/ranger" "go.uber.org/zap" + "github.com/pingcap/br/pkg/conn" "github.com/pingcap/br/pkg/rtree" "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" @@ -395,7 +396,7 @@ func (bc *Client) BackupRange( defer cancel() var allStores []*metapb.Store - allStores, err = bc.mgr.GetPDClient().GetAllStores(ctx, pd.WithExcludeTombstone()) + allStores, err = conn.GetAllTiKVStores(ctx, bc.mgr.GetPDClient(), conn.SkipTiFlash) if err != nil { return errors.Trace(err) } diff --git a/pkg/conn/conn.go b/pkg/conn/conn.go index 66c68dc49..cdfc78168 100644 --- a/pkg/conn/conn.go +++ b/pkg/conn/conn.go @@ -92,6 +92,51 @@ func pdRequest( return r, nil } +// UnexpectedStoreBehavior is the action to do in GetAllTiKVStores when a +// non-TiKV store (e.g. TiFlash store) is found. +type UnexpectedStoreBehavior uint8 + +const ( + // ErrorOnTiFlash causes GetAllTiKVStores to return error when the store is + // found to be a TiFlash node. + ErrorOnTiFlash UnexpectedStoreBehavior = 0 + // SkipTiFlash causes GetAllTiKVStores to skip the store when it is found to + // be a TiFlash node. + SkipTiFlash UnexpectedStoreBehavior = 1 +) + +// GetAllTiKVStores returns all TiKV stores registered to the PD client. The +// stores must not be a tombstone and must never contain a label `engine=tiflash`. +func GetAllTiKVStores( + ctx context.Context, + pdClient pd.Client, + unexpectedStoreBehavior UnexpectedStoreBehavior, +) ([]*metapb.Store, error) { + // get all live stores. + stores, err := pdClient.GetAllStores(ctx, pd.WithExcludeTombstone()) + if err != nil { + return nil, err + } + + // filter out all stores which are TiFlash. + j := 0 +skipStore: + for _, store := range stores { + for _, label := range store.Labels { + if label.Key == "engine" && label.Value == "tiflash" { + if unexpectedStoreBehavior == SkipTiFlash { + continue skipStore + } + return nil, errors.Errorf( + "cannot restore to a cluster with active TiFlash stores (store %d at %s)", store.Id, store.Address) + } + } + stores[j] = store + j++ + } + return stores[:j], nil +} + // NewMgr creates a new Mgr. 
func NewMgr( ctx context.Context, @@ -99,7 +144,9 @@ func NewMgr( pdAddrs string, storage tikv.Storage, tlsConf *tls.Config, - securityOption pd.SecurityOption) (*Mgr, error) { + securityOption pd.SecurityOption, + unexpectedStoreBehavior UnexpectedStoreBehavior, +) (*Mgr, error) { addrs := strings.Split(pdAddrs, ",") failure := errors.Errorf("pd address (%s) has wrong format", pdAddrs) @@ -143,7 +190,7 @@ func NewMgr( log.Info("new mgr", zap.String("pdAddrs", pdAddrs)) // Check live tikv. - stores, err := pdClient.GetAllStores(ctx, pd.WithExcludeTombstone()) + stores, err := GetAllTiKVStores(ctx, pdClient, unexpectedStoreBehavior) if err != nil { log.Error("fail to get store", zap.Error(err)) return nil, err diff --git a/pkg/conn/conn_test.go b/pkg/conn/conn_test.go index 9cbb963e7..572798e23 100644 --- a/pkg/conn/conn_test.go +++ b/pkg/conn/conn_test.go @@ -15,6 +15,7 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" + pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/pd/v4/server/core" "github.com/pingcap/pd/v4/server/statistics" "github.com/pingcap/tidb/util/codec" @@ -149,3 +150,90 @@ func (s *testClientSuite) TestRegionCount(c *C) { c.Assert(err, IsNil) c.Assert(resp, Equals, 2) } + +type fakePDClient struct { + pd.Client + stores []*metapb.Store +} + +func (fpdc fakePDClient) GetAllStores(context.Context, ...pd.GetStoreOption) ([]*metapb.Store, error) { + return append([]*metapb.Store{}, fpdc.stores...), nil +} + +func (s *testClientSuite) TestGetAllTiKVStores(c *C) { + testCases := []struct { + stores []*metapb.Store + unexpectedStoreBehavior UnexpectedStoreBehavior + expectedStores map[uint64]int + expectedError string + }{ + { + stores: []*metapb.Store{ + {Id: 1}, + }, + unexpectedStoreBehavior: SkipTiFlash, + expectedStores: map[uint64]int{1: 1}, + }, + { + stores: []*metapb.Store{ + {Id: 1}, + }, + unexpectedStoreBehavior: ErrorOnTiFlash, + expectedStores: map[uint64]int{1: 1}, + }, + { + stores: []*metapb.Store{ + {Id: 1}, + {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, + }, + unexpectedStoreBehavior: SkipTiFlash, + expectedStores: map[uint64]int{1: 1}, + }, + { + stores: []*metapb.Store{ + {Id: 1}, + {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, + }, + unexpectedStoreBehavior: ErrorOnTiFlash, + expectedError: "cannot restore to a cluster with active TiFlash stores.*", + }, + { + stores: []*metapb.Store{ + {Id: 1}, + {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, + {Id: 3}, + {Id: 4, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tikv"}}}, + {Id: 5, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tikv"}, {Key: "engine", Value: "tiflash"}}}, + {Id: 6, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tiflash"}, {Key: "engine", Value: "tikv"}}}, + }, + unexpectedStoreBehavior: SkipTiFlash, + expectedStores: map[uint64]int{1: 1, 3: 1, 4: 1, 6: 1}, + }, + { + stores: []*metapb.Store{ + {Id: 1}, + {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, + {Id: 3}, + {Id: 4, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tikv"}}}, + {Id: 5, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tikv"}, {Key: "engine", Value: "tiflash"}}}, + {Id: 6, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tiflash"}, {Key: "engine", Value: "tikv"}}}, + }, + unexpectedStoreBehavior: ErrorOnTiFlash, + expectedError: "cannot restore to a cluster with active TiFlash stores.*", + }, + } + + for _, testCase := range 
testCases { + pdClient := fakePDClient{stores: testCase.stores} + stores, err := GetAllTiKVStores(context.Background(), pdClient, testCase.unexpectedStoreBehavior) + if len(testCase.expectedError) != 0 { + c.Assert(err, ErrorMatches, testCase.expectedError) + continue + } + foundStores := make(map[uint64]int) + for _, store := range stores { + foundStores[store.Id]++ + } + c.Assert(foundStores, DeepEquals, testCase.expectedStores) + } +} diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 93cc97567..914b94607 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/keepalive" "github.com/pingcap/br/pkg/checksum" + "github.com/pingcap/br/pkg/conn" "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" @@ -261,7 +262,7 @@ func (rc *Client) ExecDDLs(ddlJobs []*model.Job) error { func (rc *Client) setSpeedLimit() error { if !rc.hasSpeedLimited && rc.rateLimit != 0 { - stores, err := rc.pdClient.GetAllStores(rc.ctx, pd.WithExcludeTombstone()) + stores, err := conn.GetAllTiKVStores(rc.ctx, rc.pdClient, conn.ErrorOnTiFlash) if err != nil { return err } @@ -345,7 +346,7 @@ func (rc *Client) SwitchToNormalMode(ctx context.Context) error { } func (rc *Client) switchTiKVMode(ctx context.Context, mode import_sstpb.SwitchMode) error { - stores, err := rc.pdClient.GetAllStores(ctx, pd.WithExcludeTombstone()) + stores, err := conn.GetAllTiKVStores(ctx, rc.pdClient, conn.ErrorOnTiFlash) if err != nil { return errors.Trace(err) } diff --git a/pkg/task/backup.go b/pkg/task/backup.go index ab22c9039..8d9613047 100644 --- a/pkg/task/backup.go +++ b/pkg/task/backup.go @@ -20,6 +20,7 @@ import ( "go.uber.org/zap" "github.com/pingcap/br/pkg/backup" + "github.com/pingcap/br/pkg/conn" "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" @@ -101,7 +102,7 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig if err != nil { return err } - mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS) + mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.SkipTiFlash) if err != nil { return err } diff --git a/pkg/task/backup_raw.go b/pkg/task/backup_raw.go index aa980b1a0..a51e80e95 100644 --- a/pkg/task/backup_raw.go +++ b/pkg/task/backup_raw.go @@ -12,6 +12,7 @@ import ( "github.com/spf13/pflag" "github.com/pingcap/br/pkg/backup" + "github.com/pingcap/br/pkg/conn" "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/rtree" "github.com/pingcap/br/pkg/storage" @@ -89,7 +90,7 @@ func RunBackupRaw(c context.Context, g glue.Glue, cmdName string, cfg *BackupRaw if err != nil { return err } - mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS) + mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.SkipTiFlash) if err != nil { return err } diff --git a/pkg/task/common.go b/pkg/task/common.go index 80f5eb258..859c4206d 100644 --- a/pkg/task/common.go +++ b/pkg/task/common.go @@ -208,7 +208,13 @@ func (cfg *Config) ParseFromFlags(flags *pflag.FlagSet) error { } // newMgr creates a new mgr at the given PD address. 
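// The added unexpectedStoreBehavior parameter below lets each task decide how
// TiFlash stores are treated: the backup tasks pass conn.SkipTiFlash, while
// restore passes conn.ErrorOnTiFlash.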
-func newMgr(ctx context.Context, g glue.Glue, pds []string, tlsConfig TLSConfig) (*conn.Mgr, error) {
+func newMgr(
+	ctx context.Context,
+	g glue.Glue,
+	pds []string,
+	tlsConfig TLSConfig,
+	unexpectedStoreBehavior conn.UnexpectedStoreBehavior,
+) (*conn.Mgr, error) {
 	var (
 		tlsConf *tls.Config
 		err     error
@@ -234,7 +240,7 @@ func newMgr(ctx context.Context, g glue.Glue, pds []string, tlsConfig TLSConfig)
 	if err != nil {
 		return nil, err
 	}
-	return conn.NewMgr(ctx, g, pdAddress, store.(tikv.Storage), tlsConf, securityOption)
+	return conn.NewMgr(ctx, g, pdAddress, store.(tikv.Storage), tlsConf, securityOption, unexpectedStoreBehavior)
 }

 // GetStorage gets the storage backend from the config.
diff --git a/pkg/task/restore.go b/pkg/task/restore.go
index a02e49cf1..c443b4c0f 100644
--- a/pkg/task/restore.go
+++ b/pkg/task/restore.go
@@ -75,7 +75,7 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf
 	ctx, cancel := context.WithCancel(c)
 	defer cancel()

-	mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS)
+	mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.ErrorOnTiFlash)
 	if err != nil {
 		return err
 	}

From 156f003da8e24089f7ecc68483301ba095fb7774 Mon Sep 17 00:00:00 2001
From: kennytm
Date: Mon, 16 Mar 2020 14:28:24 +0800
Subject: [PATCH 27/46] Create integration test for S3 storage (#174)

---
 Makefile                  |  1 +
 tests/README.md           |  3 +-
 tests/_utils/run_services |  6 +--
 tests/br_s3/run.sh        | 93 +++++++++++++++++++++++++++++++++++++++
 tests/br_s3/workload      | 12 +++++
 tests/download_tools.sh   | 57 ++++++++++++++++++++++++
 tests/run.sh              |  6 +--
 7 files changed, 171 insertions(+), 7 deletions(-)
 create mode 100755 tests/br_s3/run.sh
 create mode 100644 tests/br_s3/workload
 create mode 100755 tests/download_tools.sh

diff --git a/Makefile b/Makefile
index 01458c70c..779bfdb10 100644
--- a/Makefile
+++ b/Makefile
@@ -49,6 +49,7 @@ integration_test: build build_for_integration_test
 	@which bin/pd-server
 	@which bin/pd-ctl
 	@which bin/go-ycsb
+	@which bin/minio
 	@which bin/br
 	tests/run.sh

diff --git a/tests/README.md b/tests/README.md
index 9f307a8a6..814241b4a 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -18,6 +18,7 @@ programs.
     * `mysql` (the CLI client)
     * `curl`
+    * `s3cmd`

 3. The user executing the tests must have permission to create the folder
    `/tmp/backup_restore_test`. All test artifacts will be written into this folder.
@@ -45,4 +46,4 @@ The script should exit with a nonzero error code on failure.

 Several convenient commands are provided:

-* `run_sql <SQL>` — Executes an SQL query on the TiDB database
\ No newline at end of file
+* `run_sql <SQL>` — Executes an SQL query on the TiDB database
diff --git a/tests/_utils/run_services b/tests/_utils/run_services
index 769b9b22a..07fe1a2ad 100644
--- a/tests/_utils/run_services
+++ b/tests/_utils/run_services
@@ -90,7 +90,7 @@ start_services() {
 i=0
 while ! 
curl -o /dev/null -sf "http://$TIDB_IP:10080/status"; do
  i=$((i+1))
-  if [ "$i" -gt 20 ]; then
+  if [ "$i" -gt 50 ]; then
   echo 'Failed to start TiDB'
   exit 1
  fi
@@ -178,7 +178,7 @@ start_services_withTLS() {
  --key $1/certificates/client-key.pem \
  -o /dev/null -sf "https://$TIDB_IP:10080/status"; do
  i=$((i+1))
-  if [ "$i" -gt 20 ]; then
+  if [ "$i" -gt 50 ]; then
   echo 'Failed to start TiDB'
   exit 1
  fi
@@ -197,4 +197,4 @@ start_services_withTLS() {
  fi
  sleep 3
 done
-}
\ No newline at end of file
+}
diff --git a/tests/br_s3/run.sh b/tests/br_s3/run.sh
new file mode 100755
index 000000000..422a1270d
--- /dev/null
+++ b/tests/br_s3/run.sh
@@ -0,0 +1,93 @@
+#!/bin/bash
+#
+# Copyright 2020 PingCAP, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# See the License for the specific language governing permissions and
+# limitations under the License.

+set -eux
+DB="$TEST_NAME"
+TABLE="usertable"
+DB_COUNT=3
+
+# start the s3 server
+export MINIO_ACCESS_KEY=brs3accesskey
+export MINIO_SECRET_KEY=brs3secretkey
+export MINIO_BROWSER=off
+export AWS_ACCESS_KEY_ID=$MINIO_ACCESS_KEY
+export AWS_SECRET_ACCESS_KEY=$MINIO_SECRET_KEY
+export S3_ENDPOINT=127.0.0.1:24927
+rm -rf "$TEST_DIR/$DB"
+mkdir -p "$TEST_DIR/$DB"
+bin/minio server --address $S3_ENDPOINT "$TEST_DIR/$DB" &
+MINIO_PID=$!
+i=0
+while ! curl -o /dev/null -v -s "http://$S3_ENDPOINT/"; do
+ i=$(($i+1))
+ if [ $i -gt 7 ]; then
+  echo 'Failed to start minio'
+  exit 1
+ fi
+ sleep 2
+done
+
+stop_minio() {
+ kill -2 $MINIO_PID
+}
+trap stop_minio EXIT
+
+s3cmd --access_key=$MINIO_ACCESS_KEY --secret_key=$MINIO_SECRET_KEY --host=$S3_ENDPOINT --host-bucket=$S3_ENDPOINT --no-ssl mb s3://mybucket
+
+# Fill in the database
+for i in $(seq $DB_COUNT); do
+ run_sql "CREATE DATABASE $DB${i};"
+ go-ycsb load mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB${i}
+done
+
+for i in $(seq $DB_COUNT); do
+ row_count_ori[${i}]=$(run_sql "SELECT COUNT(*) FROM $DB${i}.$TABLE;" | awk '/COUNT/{print $2}')
+done
+
+# backup full
+echo "backup start..."
+run_br --pd $PD_ADDR backup full -s "s3://mybucket/$DB" --s3.endpoint="http://$S3_ENDPOINT"
+
+for i in $(seq $DB_COUNT); do
+ run_sql "DROP DATABASE $DB${i};"
+done
+
+# restore full
+echo "restore start..."
+run_br restore full -s "s3://mybucket/$DB" --pd $PD_ADDR --s3.endpoint="http://$S3_ENDPOINT"
+
+for i in $(seq $DB_COUNT); do
+ row_count_new[${i}]=$(run_sql "SELECT COUNT(*) FROM $DB${i}.$TABLE;" | awk '/COUNT/{print $2}')
+done
+
+fail=false
+for i in $(seq $DB_COUNT); do
+ if [ "${row_count_ori[i]}" != "${row_count_new[i]}" ];then
+  fail=true
+  echo "TEST: [$TEST_NAME] fail on database $DB${i}"
+ fi
+ echo "database $DB${i} [original] row count: ${row_count_ori[i]}, [after br] row count: ${row_count_new[i]}"
+done
+
+if $fail; then
+ echo "TEST: [$TEST_NAME] failed!"
+ exit 1
+else
+ echo "TEST: [$TEST_NAME] succeeded!"
+fi + +for i in $(seq $DB_COUNT); do + run_sql "DROP DATABASE $DB${i};" +done diff --git a/tests/br_s3/workload b/tests/br_s3/workload new file mode 100644 index 000000000..19336335e --- /dev/null +++ b/tests/br_s3/workload @@ -0,0 +1,12 @@ +recordcount=5000 +operationcount=0 +workload=core + +readallfields=true + +readproportion=0 +updateproportion=0 +scanproportion=0 +insertproportion=0 + +requestdistribution=uniform diff --git a/tests/download_tools.sh b/tests/download_tools.sh new file mode 100755 index 000000000..e0689dd61 --- /dev/null +++ b/tests/download_tools.sh @@ -0,0 +1,57 @@ +#!/bin/sh +# +# Copyright 2020 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +# Download tools for running the integration test + +set -eu + +BIN="$(dirname "$0")/../bin" + +if [ "$(uname -s)" != Linux ]; then + echo 'Can only automatically download binaries on Linux.' + exit 1 +fi + +MISSING_TIDB_COMPONENTS= +for COMPONENT in tidb-server pd-server tikv-server pd-ctl; do + if [ ! -e "$BIN/$COMPONENT" ]; then + MISSING_TIDB_COMPONENTS="$MISSING_TIDB_COMPONENTS tidb-latest-linux-amd64/bin/$COMPONENT" + fi +done + +if [ -n "$MISSING_TIDB_COMPONENTS" ]; then + echo "Downloading latest TiDB bundle..." + # TODO: the url is going to change from 'latest' to 'nightly' someday. + curl -L -f -o "$BIN/tidb.tar.gz" "https://download.pingcap.org/tidb-latest-linux-amd64.tar.gz" + tar -x -f "$BIN/tidb.tar.gz" -C "$BIN/" $MISSING_TIDB_COMPONENTS + rm "$BIN/tidb.tar.gz" + mv "$BIN"/tidb-latest-linux-amd64/bin/* "$BIN/" + rmdir "$BIN/tidb-latest-linux-amd64/bin" + rmdir "$BIN/tidb-latest-linux-amd64" +fi + +if [ ! -e "$BIN/go-ycsb" ]; then + # TODO: replace this once there's a public downloadable release. + echo 'go-ycsb is missing. Please build manually following https://github.com/pingcap/go-ycsb#getting-started' + exit 1 +fi + +if [ ! -e "$BIN/minio" ]; then + echo "Downloading minio..." + curl -L -f -o "$BIN/minio" "https://dl.min.io/server/minio/release/linux-amd64/minio" + chmod a+x "$BIN/minio" +fi + +echo "All binaries are now available." diff --git a/tests/run.sh b/tests/run.sh index 053013352..21d6b27ed 100755 --- a/tests/run.sh +++ b/tests/run.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # # Copyright 2019 PingCAP, Inc. # @@ -28,7 +28,7 @@ if [ "${1-}" = '--debug' ]; then read line fi -for script in tests/*/run.sh; do +for script in tests/${TEST_NAME-*}/run.sh; do echo "*===== Running test $script... 
=====*" TEST_DIR="$TEST_DIR" \ PD_ADDR="$PD_ADDR" \ @@ -39,5 +39,5 @@ for script in tests/*/run.sh; do TIKV_ADDR="$TIKV_ADDR" \ PATH="tests/_utils:bin:$PATH" \ TEST_NAME="$(basename "$(dirname "$script")")" \ - sh "$script" + bash "$script" done From bbedfc8a1b23534307f0ee60fa0412bce6ba8298 Mon Sep 17 00:00:00 2001 From: 3pointer Date: Mon, 16 Mar 2020 23:44:11 +0800 Subject: [PATCH 28/46] Fix summary log (#191) * *: fix restore summary log after restore logic changed to files * fix * fix * fix Co-authored-by: kennytm --- pkg/backup/client.go | 6 ++-- pkg/restore/client.go | 7 ++--- pkg/restore/import.go | 4 +-- pkg/summary/collector.go | 62 ++++++++++++++++++---------------------- pkg/summary/summary.go | 4 +-- pkg/task/restore.go | 1 - 6 files changed, 38 insertions(+), 46 deletions(-) diff --git a/pkg/backup/client.go b/pkg/backup/client.go index 13236388b..9693b6b5f 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -384,7 +384,7 @@ func (bc *Client) BackupRange( if err != nil { summary.CollectFailureUnit(key, err) } else { - summary.CollectSuccessUnit(key, elapsed) + summary.CollectSuccessUnit(key, 1, elapsed) } }() log.Info("backup started", @@ -771,8 +771,8 @@ func (bc *Client) FastChecksum() (bool, error) { totalBytes += file.TotalBytes } - summary.CollectSuccessUnit(summary.TotalKV, totalKvs) - summary.CollectSuccessUnit(summary.TotalBytes, totalBytes) + summary.CollectSuccessUnit(summary.TotalKV, 1, totalKvs) + summary.CollectSuccessUnit(summary.TotalBytes, 1, totalBytes) if schema.Crc64Xor == checksum && schema.TotalKvs == totalKvs && schema.TotalBytes == totalBytes { log.Info("fast checksum success", zap.Stringer("db", dbInfo.Name), zap.Stringer("table", tblInfo.Name)) diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 914b94607..ba409ec32 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -289,9 +289,7 @@ func (rc *Client) RestoreFiles( if err == nil { log.Info("Restore Files", zap.Int("files", len(files)), zap.Duration("take", elapsed)) - summary.CollectSuccessUnit("files", elapsed) - } else { - summary.CollectFailureUnit("files", err) + summary.CollectSuccessUnit("files", len(files), elapsed) } }() @@ -320,9 +318,10 @@ func (rc *Client) RestoreFiles( } }) } - for range files { + for i := range files { err := <-errCh if err != nil { + summary.CollectFailureUnit(fmt.Sprintf("file:%d", i), err) rc.cancel() wg.Wait() log.Error( diff --git a/pkg/restore/import.go b/pkg/restore/import.go index f98e0fc13..9b96509ea 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -264,8 +264,8 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul zap.Error(errIngest)) return errIngest } - summary.CollectSuccessUnit(summary.TotalKV, file.TotalKvs) - summary.CollectSuccessUnit(summary.TotalBytes, file.TotalBytes) + summary.CollectSuccessUnit(summary.TotalKV, 1, file.TotalKvs) + summary.CollectSuccessUnit(summary.TotalBytes, 1, file.TotalBytes) } return nil }, newImportSSTBackoffer()) diff --git a/pkg/summary/collector.go b/pkg/summary/collector.go index 42488cb82..ee465d60b 100644 --- a/pkg/summary/collector.go +++ b/pkg/summary/collector.go @@ -27,7 +27,7 @@ const ( type LogCollector interface { SetUnit(unit string) - CollectSuccessUnit(name string, arg interface{}) + CollectSuccessUnit(name string, unitCount int, arg interface{}) CollectFailureUnit(name string, reason error) @@ -43,27 +43,29 @@ type logFunc func(msg string, fields ...zap.Field) var collector = newLogCollector(log.Info) type logCollector struct 
{ - mu sync.Mutex - unit string - unitCount int - successCosts map[string]time.Duration - successData map[string]uint64 - failureReasons map[string]error - durations map[string]time.Duration - ints map[string]int + mu sync.Mutex + unit string + successUnitCount int + failureUnitCount int + successCosts map[string]time.Duration + successData map[string]uint64 + failureReasons map[string]error + durations map[string]time.Duration + ints map[string]int log logFunc } func newLogCollector(log logFunc) LogCollector { return &logCollector{ - unitCount: 0, - successCosts: make(map[string]time.Duration), - successData: make(map[string]uint64), - failureReasons: make(map[string]error), - durations: make(map[string]time.Duration), - ints: make(map[string]int), - log: log, + successUnitCount: 0, + failureUnitCount: 0, + successCosts: make(map[string]time.Duration), + successData: make(map[string]uint64), + failureReasons: make(map[string]error), + durations: make(map[string]time.Duration), + ints: make(map[string]int), + log: log, } } @@ -73,7 +75,7 @@ func (tc *logCollector) SetUnit(unit string) { tc.unit = unit } -func (tc *logCollector) CollectSuccessUnit(name string, arg interface{}) { +func (tc *logCollector) CollectSuccessUnit(name string, unitCount int, arg interface{}) { tc.mu.Lock() defer tc.mu.Unlock() @@ -81,7 +83,7 @@ func (tc *logCollector) CollectSuccessUnit(name string, arg interface{}) { case time.Duration: if _, ok := tc.successCosts[name]; !ok { tc.successCosts[name] = v - tc.unitCount++ + tc.successUnitCount += unitCount } else { tc.successCosts[name] += v } @@ -99,7 +101,7 @@ func (tc *logCollector) CollectFailureUnit(name string, reason error) { defer tc.mu.Unlock() if _, ok := tc.failureReasons[name]; !ok { tc.failureReasons[name] = reason - tc.unitCount++ + tc.failureUnitCount++ } } @@ -129,16 +131,10 @@ func (tc *logCollector) Summary(name string) { switch tc.unit { case BackupUnit: msg = fmt.Sprintf("total backup ranges: %d, total success: %d, total failed: %d", - tc.unitCount, len(tc.successCosts), len(tc.failureReasons)) - if len(tc.failureReasons) != 0 { - msg += ", failed ranges" - } + tc.failureUnitCount+tc.successUnitCount, tc.successUnitCount, tc.failureUnitCount) case RestoreUnit: - msg = fmt.Sprintf("total restore tables: %d, total success: %d, total failed: %d", - tc.unitCount, len(tc.successCosts), len(tc.failureReasons)) - if len(tc.failureReasons) != 0 { - msg += ", failed tables" - } + msg = fmt.Sprintf("total restore files: %d, total success: %d, total failed: %d", + tc.failureUnitCount+tc.successUnitCount, tc.successUnitCount, tc.failureUnitCount) } logFields := make([]zap.Field, 0, len(tc.durations)+len(tc.ints)) @@ -150,12 +146,10 @@ func (tc *logCollector) Summary(name string) { } if len(tc.failureReasons) != 0 { - names := make([]string, 0, len(tc.failureReasons)) - for name := range tc.failureReasons { - names = append(names, name) + for unitName, reason := range tc.failureReasons { + logFields = append(logFields, zap.String("unitName", unitName), zap.Error(reason)) } - logFields = append(logFields, zap.Strings(msg, names)) - log.Info(name+" summary", logFields...) + log.Info(name+" Failed summary : "+msg, logFields...) return } totalCost := time.Duration(0) @@ -178,7 +172,7 @@ func (tc *logCollector) Summary(name string) { msg += fmt.Sprintf(", %s: %d", name, data) } - tc.log(name+" summary: "+msg, logFields...) + tc.log(name+" Success summary: "+msg, logFields...) 
} // SetLogCollector allow pass LogCollector outside diff --git a/pkg/summary/summary.go b/pkg/summary/summary.go index 08a16c00a..3ffdedf8a 100644 --- a/pkg/summary/summary.go +++ b/pkg/summary/summary.go @@ -10,8 +10,8 @@ func SetUnit(unit string) { } // CollectSuccessUnit collects success time costs -func CollectSuccessUnit(name string, arg interface{}) { - collector.CollectSuccessUnit(name, arg) +func CollectSuccessUnit(name string, unitCount int, arg interface{}) { + collector.CollectSuccessUnit(name, unitCount, arg) } // CollectFailureUnit collects fail reason diff --git a/pkg/task/restore.go b/pkg/task/restore.go index c443b4c0f..4dac5f869 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -114,7 +114,6 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf if len(files) == 0 { return errors.New("all files are filtered out from the backup archive, nothing to restore") } - summary.CollectInt("restore files", len(files)) var newTS uint64 if client.IsIncremental() { From c3d26d91a294ef2526198cac7460026c0e7693b8 Mon Sep 17 00:00:00 2001 From: MyonKeminta <9948422+MyonKeminta@users.noreply.github.com> Date: Tue, 17 Mar 2020 10:35:50 +0800 Subject: [PATCH 29/46] Implement Raw Restore (#104) * Update kvproto * Implement raw restore * fix build * Set range for file importer Signed-off-by: MyonKeminta * Remove unnecessary comments Signed-off-by: MyonKeminta * check cf and support multi ranges in BackupMeta Signed-off-by: MyonKeminta * Check files' cf; address comments * adjust structure to keep consistent with master * Fix build Signed-off-by: MyonKeminta * Fix build and make check, avoid accessing TiDB in rawkv mode * Fix test Signed-off-by: MyonKeminta * Fix tests Signed-off-by: MyonKeminta * Fix broken logic after merging master * Update pkg/task/restore_raw.go Co-Authored-By: Neil Shen * Address comments * Address comments * Mark raw restore as experimental * Fix build * Address comments * test: Add check for deleting data and partial backup * Fix build * Add license header * fix ci * fix ci Co-authored-by: MyonKeminta Co-authored-by: 3pointer Co-authored-by: Neil Shen Co-authored-by: pingcap-github-bot --- cmd/backup.go | 5 +- cmd/restore.go | 25 +++++++ pkg/gluetidb/glue.go | 18 ++--- pkg/gluetikv/glue.go | 43 ++++++++++++ pkg/restore/client.go | 146 ++++++++++++++++++++++++++++++++++++---- pkg/restore/db.go | 4 ++ pkg/restore/import.go | 82 +++++++++++++++++++++- pkg/task/backup_raw.go | 8 +-- pkg/task/restore.go | 4 ++ pkg/task/restore_raw.go | 131 +++++++++++++++++++++++++++++++++++ pkg/utils/key.go | 18 +++++ pkg/utils/key_test.go | 20 ++++++ tests/br_rawkv/run.sh | 63 ++++++++++++----- 13 files changed, 520 insertions(+), 47 deletions(-) create mode 100644 pkg/gluetikv/glue.go create mode 100644 pkg/task/restore_raw.go diff --git a/cmd/backup.go b/cmd/backup.go index b856dae38..3aed2147f 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -7,6 +7,7 @@ import ( "github.com/pingcap/tidb/session" "github.com/spf13/cobra" + "github.com/pingcap/br/pkg/gluetikv" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" @@ -21,11 +22,11 @@ func runBackupCommand(command *cobra.Command, cmdName string) error { } func runBackupRawCommand(command *cobra.Command, cmdName string) error { - cfg := task.BackupRawConfig{Config: task.Config{LogProgress: HasLogFile()}} + cfg := task.RawKvConfig{Config: task.Config{LogProgress: HasLogFile()}} if err := cfg.ParseFromFlags(command.Flags()); err != nil { return err } - 
return task.RunBackupRaw(GetDefaultContext(), tidbGlue, cmdName, &cfg) + return task.RunBackupRaw(GetDefaultContext(), gluetikv.Glue{}, cmdName, &cfg) } // NewBackupCommand return a full backup subcommand. diff --git a/cmd/restore.go b/cmd/restore.go index 0b2792a25..6353719f2 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -6,6 +6,7 @@ import ( "github.com/pingcap/tidb/session" "github.com/spf13/cobra" + "github.com/pingcap/br/pkg/gluetikv" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" @@ -19,6 +20,16 @@ func runRestoreCommand(command *cobra.Command, cmdName string) error { return task.RunRestore(GetDefaultContext(), tidbGlue, cmdName, &cfg) } +func runRestoreRawCommand(command *cobra.Command, cmdName string) error { + cfg := task.RestoreRawConfig{ + RawKvConfig: task.RawKvConfig{Config: task.Config{LogProgress: HasLogFile()}}, + } + if err := cfg.ParseFromFlags(command.Flags()); err != nil { + return err + } + return task.RunRestoreRaw(GetDefaultContext(), gluetikv.Glue{}, cmdName, &cfg) +} + // NewRestoreCommand returns a restore subcommand func NewRestoreCommand() *cobra.Command { command := &cobra.Command{ @@ -43,6 +54,7 @@ func NewRestoreCommand() *cobra.Command { newFullRestoreCommand(), newDbRestoreCommand(), newTableRestoreCommand(), + newRawRestoreCommand(), ) task.DefineRestoreFlags(command.PersistentFlags()) @@ -83,3 +95,16 @@ func newTableRestoreCommand() *cobra.Command { task.DefineTableFlags(command) return command } + +func newRawRestoreCommand() *cobra.Command { + command := &cobra.Command{ + Use: "raw", + Short: "(experimental) restore a raw kv range to TiKV cluster", + RunE: func(cmd *cobra.Command, _ []string) error { + return runRestoreRawCommand(cmd, "Raw restore") + }, + } + + task.DefineRawRestoreFlags(command) + return command +} diff --git a/pkg/gluetidb/glue.go b/pkg/gluetidb/glue.go index 333053b97..80756d2c2 100644 --- a/pkg/gluetidb/glue.go +++ b/pkg/gluetidb/glue.go @@ -8,19 +8,20 @@ import ( "github.com/pingcap/parser/model" pd "github.com/pingcap/pd/v4/client" - "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/session" - "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/gluetikv" ) // Glue is an implementation of glue.Glue using a new TiDB session. -type Glue struct{} +type Glue struct { + tikvGlue gluetikv.Glue +} type tidbSession struct { se session.Session @@ -41,15 +42,8 @@ func (Glue) CreateSession(store kv.Storage) (glue.Session, error) { } // Open implements glue.Glue -func (Glue) Open(path string, option pd.SecurityOption) (kv.Storage, error) { - if option.CAPath != "" { - conf := config.GetGlobalConfig() - conf.Security.ClusterSSLCA = option.CAPath - conf.Security.ClusterSSLCert = option.CertPath - conf.Security.ClusterSSLKey = option.KeyPath - config.StoreGlobalConfig(conf) - } - return tikv.Driver{}.Open(path) +func (g Glue) Open(path string, option pd.SecurityOption) (kv.Storage, error) { + return g.tikvGlue.Open(path, option) } // OwnsStorage implements glue.Glue diff --git a/pkg/gluetikv/glue.go b/pkg/gluetikv/glue.go new file mode 100644 index 000000000..e63b35b95 --- /dev/null +++ b/pkg/gluetikv/glue.go @@ -0,0 +1,43 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
+ +package gluetikv + +import ( + pd "github.com/pingcap/pd/v4/client" + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv" + + "github.com/pingcap/br/pkg/glue" +) + +// Glue is an implementation of glue.Glue that accesses only TiKV without TiDB. +type Glue struct{} + +// GetDomain implements glue.Glue +func (Glue) GetDomain(store kv.Storage) (*domain.Domain, error) { + return nil, nil +} + +// CreateSession implements glue.Glue +func (Glue) CreateSession(store kv.Storage) (glue.Session, error) { + return nil, nil +} + +// Open implements glue.Glue +func (Glue) Open(path string, option pd.SecurityOption) (kv.Storage, error) { + if option.CAPath != "" { + conf := config.GetGlobalConfig() + conf.Security.ClusterSSLCA = option.CAPath + conf.Security.ClusterSSLCert = option.CertPath + conf.Security.ClusterSSLKey = option.KeyPath + config.StoreGlobalConfig(conf) + } + return tikv.Driver{}.Open(path) +} + +// OwnsStorage implements glue.Glue +func (Glue) OwnsStorage() bool { + return true +} diff --git a/pkg/restore/client.go b/pkg/restore/client.go index ba409ec32..97467d913 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -3,6 +3,7 @@ package restore import ( + "bytes" "context" "crypto/tls" "encoding/hex" @@ -108,33 +109,97 @@ func (rc *Client) IsOnline() bool { // Close a client func (rc *Client) Close() { - rc.db.Close() + // rc.db can be nil in raw kv mode. + if rc.db != nil { + rc.db.Close() + } rc.cancel() log.Info("Restore client closed") } // InitBackupMeta loads schemas from BackupMeta to initialize RestoreClient func (rc *Client) InitBackupMeta(backupMeta *backup.BackupMeta, backend *backup.StorageBackend) error { - databases, err := utils.LoadBackupTables(backupMeta) - if err != nil { - return errors.Trace(err) - } - var ddlJobs []*model.Job - err = json.Unmarshal(backupMeta.GetDdls(), &ddlJobs) - if err != nil { - return errors.Trace(err) + if !backupMeta.IsRawKv { + databases, err := utils.LoadBackupTables(backupMeta) + if err != nil { + return errors.Trace(err) + } + rc.databases = databases + + var ddlJobs []*model.Job + err = json.Unmarshal(backupMeta.GetDdls(), &ddlJobs) + if err != nil { + return errors.Trace(err) + } + rc.ddlJobs = ddlJobs } - rc.databases = databases - rc.ddlJobs = ddlJobs rc.backupMeta = backupMeta log.Info("load backupmeta", zap.Int("databases", len(rc.databases)), zap.Int("jobs", len(rc.ddlJobs))) metaClient := NewSplitClient(rc.pdClient, rc.tlsConf) importClient := NewImportClient(metaClient, rc.tlsConf) - rc.fileImporter = NewFileImporter(rc.ctx, metaClient, importClient, backend, rc.rateLimit) + rc.fileImporter = NewFileImporter(rc.ctx, metaClient, importClient, backend, backupMeta.IsRawKv, rc.rateLimit) return nil } +// IsRawKvMode checks whether the backup data is in raw kv format, in which case transactional recover is forbidden. +func (rc *Client) IsRawKvMode() bool { + return rc.backupMeta.IsRawKv +} + +// GetFilesInRawRange gets all files that are in the given range or intersects with the given range. +func (rc *Client) GetFilesInRawRange(startKey []byte, endKey []byte, cf string) ([]*backup.File, error) { + if !rc.IsRawKvMode() { + return nil, errors.New("the backup data is not in raw kv mode") + } + + for _, rawRange := range rc.backupMeta.RawRanges { + // First check whether the given range is backup-ed. If not, we cannot perform the restore. 
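// [Editor's note] "Fully covered" is the property enforced below: the
// restore range [startKey, endKey) must lie inside one backed-up raw range
// of the same cf, with an empty end key meaning "unbounded". The acceptance
// test reduces to this sketch (not part of the patch):
//
//    bytes.Compare(startKey, rawRange.StartKey) >= 0 &&
//        utils.CompareEndKey(endKey, rawRange.EndKey) <= 0
//
// For example, a backup of [0x31, 0x33) accepts a restore of [0x31, 0x32)
// but rejects [0x32, 0x34), which only partially overlaps it.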
+ if rawRange.Cf != cf { + continue + } + + if (len(rawRange.EndKey) > 0 && bytes.Compare(startKey, rawRange.EndKey) >= 0) || + (len(endKey) > 0 && bytes.Compare(rawRange.StartKey, endKey) >= 0) { + // The restoring range is totally out of the current range. Skip it. + continue + } + + if bytes.Compare(startKey, rawRange.StartKey) < 0 || + utils.CompareEndKey(endKey, rawRange.EndKey) > 0 { + // Only partial of the restoring range is in the current backup-ed range. So the given range can't be fully + // restored. + return nil, errors.New("the given range to restore is not fully covered by the range that was backed up") + } + + // We have found the range that contains the given range. Find all necessary files. + files := make([]*backup.File, 0) + + for _, file := range rc.backupMeta.Files { + if file.Cf != cf { + continue + } + + if len(file.EndKey) > 0 && bytes.Compare(file.EndKey, startKey) < 0 { + // The file is before the range to be restored. + continue + } + if len(endKey) > 0 && bytes.Compare(endKey, file.StartKey) <= 0 { + // The file is after the range to be restored. + // The specified endKey is exclusive, so when it equals to a file's startKey, the file is still skipped. + continue + } + + files = append(files, file) + } + + // There should be at most one backed up range that covers the restoring range. + return files, nil + } + + return nil, errors.New("no backup data in the range") +} + // SetConcurrency sets the concurrency of dbs tables files func (rc *Client) SetConcurrency(c uint) { rc.workerPool = utils.NewWorkerPool(c, "file") @@ -334,6 +399,63 @@ func (rc *Client) RestoreFiles( return nil } +// RestoreRaw tries to restore raw keys in the specified range. +func (rc *Client) RestoreRaw(startKey []byte, endKey []byte, files []*backup.File, updateCh chan<- struct{}) error { + start := time.Now() + defer func() { + elapsed := time.Since(start) + log.Info("Restore Raw", + zap.String("startKey", hex.EncodeToString(startKey)), + zap.String("endKey", hex.EncodeToString(endKey)), + zap.Duration("take", elapsed)) + }() + errCh := make(chan error, len(files)) + wg := new(sync.WaitGroup) + defer close(errCh) + + err := rc.fileImporter.SetRawRange(startKey, endKey) + if err != nil { + + return errors.Trace(err) + } + + emptyRules := &RewriteRules{} + for _, file := range files { + wg.Add(1) + fileReplica := file + rc.workerPool.Apply( + func() { + defer wg.Done() + select { + case <-rc.ctx.Done(): + errCh <- nil + case errCh <- rc.fileImporter.Import(fileReplica, emptyRules): + updateCh <- struct{}{} + } + }) + } + for range files { + err := <-errCh + if err != nil { + rc.cancel() + wg.Wait() + log.Error( + "restore raw range failed", + zap.String("startKey", hex.EncodeToString(startKey)), + zap.String("endKey", hex.EncodeToString(endKey)), + zap.Error(err), + ) + return err + } + } + log.Info( + "finish to restore raw range", + zap.String("startKey", hex.EncodeToString(startKey)), + zap.String("endKey", hex.EncodeToString(endKey)), + ) + return nil +} + //SwitchToImportMode switch tikv cluster to import mode func (rc *Client) SwitchToImportMode(ctx context.Context) error { return rc.switchTiKVMode(ctx, import_sstpb.SwitchMode_Import) diff --git a/pkg/restore/db.go b/pkg/restore/db.go index 7251b9f24..d4a4a0a41 100644 --- a/pkg/restore/db.go +++ b/pkg/restore/db.go @@ -29,6 +29,10 @@ func NewDB(g glue.Glue, store kv.Storage) (*DB, error) { if err != nil { return nil, errors.Trace(err) } + // The session may be nil in raw kv mode + if se == nil { + return nil, nil + } // Set SQL mode to 
None for avoiding SQL compatibility problem err = se.Execute(context.Background(), "set @@sql_mode=''") if err != nil { diff --git a/pkg/restore/import.go b/pkg/restore/import.go index 9b96509ea..fec07a870 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -3,6 +3,7 @@ package restore import ( + "bytes" "context" "crypto/tls" "strings" @@ -134,6 +135,10 @@ type FileImporter struct { backend *backup.StorageBackend rateLimit uint64 + isRawKvMode bool + rawStartKey []byte + rawEndKey []byte + ctx context.Context cancel context.CancelFunc } @@ -144,6 +149,7 @@ func NewFileImporter( metaClient SplitClient, importClient ImporterClient, backend *backup.StorageBackend, + isRawKvMode bool, rateLimit uint64, ) FileImporter { ctx, cancel := context.WithCancel(ctx) @@ -153,16 +159,34 @@ func NewFileImporter( ctx: ctx, cancel: cancel, importClient: importClient, + isRawKvMode: isRawKvMode, rateLimit: rateLimit, } } +// SetRawRange sets the range to be restored in raw kv mode. +func (importer *FileImporter) SetRawRange(startKey, endKey []byte) error { + if !importer.isRawKvMode { + return errors.New("file importer is not in raw kv mode") + } + importer.rawStartKey = startKey + importer.rawEndKey = endKey + return nil +} + // Import tries to import a file. // All rules must contain encoded keys. func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRules) error { log.Debug("import file", zap.Stringer("file", file)) // Rewrite the start key and end key of file to scan regions - startKey, endKey, err := rewriteFileKeys(file, rewriteRules) + var startKey, endKey []byte + var err error + if importer.isRawKvMode { + startKey = file.StartKey + endKey = file.EndKey + } else { + startKey, endKey, err = rewriteFileKeys(file, rewriteRules) + } if err != nil { return err } @@ -187,7 +211,11 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul var downloadMeta *import_sstpb.SSTMeta errDownload := utils.WithRetry(importer.ctx, func() error { var e error - downloadMeta, e = importer.downloadSST(info, file, rewriteRules) + if importer.isRawKvMode { + downloadMeta, e = importer.downloadRawKVSST(info, file) + } else { + downloadMeta, e = importer.downloadSST(info, file, rewriteRules) + } return e }, newDownloadSSTBackoffer()) if errDownload != nil { @@ -303,6 +331,7 @@ func (importer *FileImporter) downloadSST( NewKeyPrefix: encodeKeyPrefix(regionRule.GetNewKeyPrefix()), } sstMeta := getSSTMetaFromFile(id, file, regionInfo.Region, &rule) + req := &import_sstpb.DownloadRequest{ Sst: sstMeta, StorageBackend: importer.backend, @@ -328,6 +357,55 @@ func (importer *FileImporter) downloadSST( return &sstMeta, nil } +func (importer *FileImporter) downloadRawKVSST( + regionInfo *RegionInfo, + file *backup.File, +) (*import_sstpb.SSTMeta, error) { + id, err := uuid.New().MarshalBinary() + if err != nil { + return nil, errors.Trace(err) + } + // Empty rule + var rule import_sstpb.RewriteRule + sstMeta := getSSTMetaFromFile(id, file, regionInfo.Region, &rule) + + // Cut the SST file's range to fit in the restoring range. + if bytes.Compare(importer.rawStartKey, sstMeta.Range.GetStart()) > 0 { + sstMeta.Range.Start = importer.rawStartKey + } + // TODO: importer.RawEndKey is exclusive but sstMeta.Range.End is inclusive. How to exclude importer.RawEndKey? 
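// [Editor's note] A concrete instance of the TODO above: with
// rawEndKey = 0x62 the clamp below sets Range.End = 0x62, but since
// Range.End is inclusive while the restore's end key is exclusive, the
// download may still bring in key 0x62 itself; one extra boundary key can
// slip into the restored data.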
+ if len(importer.rawEndKey) > 0 && bytes.Compare(importer.rawEndKey, sstMeta.Range.GetEnd()) < 0 { + sstMeta.Range.End = importer.rawEndKey + } + if bytes.Compare(sstMeta.Range.GetStart(), sstMeta.Range.GetEnd()) > 0 { + return nil, errors.Trace(errRangeIsEmpty) + } + + req := &import_sstpb.DownloadRequest{ + Sst: sstMeta, + StorageBackend: importer.backend, + Name: file.GetName(), + RewriteRule: rule, + } + log.Debug("download SST", + zap.Stringer("sstMeta", &sstMeta), + zap.Stringer("region", regionInfo.Region), + ) + var resp *import_sstpb.DownloadResponse + for _, peer := range regionInfo.Region.GetPeers() { + resp, err = importer.importClient.DownloadSST(importer.ctx, peer.GetStoreId(), req) + if err != nil { + return nil, extractDownloadSSTError(err) + } + if resp.GetIsEmpty() { + return nil, errors.Trace(errRangeIsEmpty) + } + } + sstMeta.Range.Start = resp.Range.GetStart() + sstMeta.Range.End = resp.Range.GetEnd() + return &sstMeta, nil +} + func (importer *FileImporter) ingestSST( sstMeta *import_sstpb.SSTMeta, regionInfo *RegionInfo, diff --git a/pkg/task/backup_raw.go b/pkg/task/backup_raw.go index a51e80e95..55299bbb1 100644 --- a/pkg/task/backup_raw.go +++ b/pkg/task/backup_raw.go @@ -27,8 +27,8 @@ const ( flagEndKey = "end" ) -// BackupRawConfig is the configuration specific for backup tasks. -type BackupRawConfig struct { +// RawKvConfig is the common config for rawkv backup and restore. +type RawKvConfig struct { Config StartKey []byte `json:"start-key" toml:"start-key"` @@ -45,7 +45,7 @@ func DefineRawBackupFlags(command *cobra.Command) { } // ParseFromFlags parses the backup-related flags from the flag set. -func (cfg *BackupRawConfig) ParseFromFlags(flags *pflag.FlagSet) error { +func (cfg *RawKvConfig) ParseFromFlags(flags *pflag.FlagSet) error { format, err := flags.GetString(flagKeyFormat) if err != nil { return err @@ -82,7 +82,7 @@ func (cfg *BackupRawConfig) ParseFromFlags(flags *pflag.FlagSet) error { } // RunBackupRaw starts a backup task inside the current goroutine. -func RunBackupRaw(c context.Context, g glue.Glue, cmdName string, cfg *BackupRawConfig) error { +func RunBackupRaw(c context.Context, g glue.Glue, cmdName string, cfg *RawKvConfig) error { ctx, cancel := context.WithCancel(c) defer cancel() diff --git a/pkg/task/restore.go b/pkg/task/restore.go index 4dac5f869..f2143764c 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -107,6 +107,10 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf return err } + if client.IsRawKvMode() { + return errors.New("cannot do transactional restore from raw kv data") + } + files, tables, err := filterRestoreFiles(client, cfg) if err != nil { return err diff --git a/pkg/task/restore_raw.go b/pkg/task/restore_raw.go new file mode 100644 index 000000000..8511003a1 --- /dev/null +++ b/pkg/task/restore_raw.go @@ -0,0 +1,131 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package task + +import ( + "context" + + "github.com/pingcap/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/restore" + "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/utils" +) + +// RestoreRawConfig is the configuration specific for raw kv restore tasks. +type RestoreRawConfig struct { + RawKvConfig + + Online bool `json:"online" toml:"online"` +} + +// DefineRawRestoreFlags defines common flags for the backup command. 
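// [Editor's note] With these flags the subcommand is invoked as in
// tests/br_rawkv/run.sh further below (start key inclusive, end key
// exclusive):
//
//    br restore raw --pd $PD_ADDR -s "local://$TEST_DIR/raw_backup" \
//        --start 31 --end 3130303030303030 --format hex --concurrency 4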
+func DefineRawRestoreFlags(command *cobra.Command) { + command.Flags().StringP(flagKeyFormat, "", "hex", "start/end key format, support raw|escaped|hex") + command.Flags().StringP(flagTiKVColumnFamily, "", "default", "restore specify cf, correspond to tikv cf") + command.Flags().StringP(flagStartKey, "", "", "restore raw kv start key, key is inclusive") + command.Flags().StringP(flagEndKey, "", "", "restore raw kv end key, key is exclusive") + + command.Flags().Bool(flagOnline, false, "Whether online when restore") + // TODO remove hidden flag if it's stable + _ = command.Flags().MarkHidden(flagOnline) +} + +// ParseFromFlags parses the backup-related flags from the flag set. +func (cfg *RestoreRawConfig) ParseFromFlags(flags *pflag.FlagSet) error { + var err error + cfg.Online, err = flags.GetBool(flagOnline) + if err != nil { + return errors.Trace(err) + } + return cfg.RawKvConfig.ParseFromFlags(flags) +} + +// RunRestoreRaw starts a raw kv restore task inside the current goroutine. +func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreRawConfig) error { + ctx, cancel := context.WithCancel(c) + defer cancel() + + mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.ErrorOnTiFlash) + if err != nil { + return err + } + defer mgr.Close() + + client, err := restore.NewRestoreClient(ctx, g, mgr.GetPDClient(), mgr.GetTiKV(), mgr.GetTLSConfig()) + if err != nil { + return err + } + defer client.Close() + client.SetRateLimit(cfg.RateLimit) + client.SetConcurrency(uint(cfg.Concurrency)) + if cfg.Online { + client.EnableOnline() + } + + defer summary.Summary(cmdName) + + u, _, backupMeta, err := ReadBackupMeta(ctx, &cfg.Config) + if err != nil { + return err + } + if err = client.InitBackupMeta(backupMeta, u); err != nil { + return err + } + + if !client.IsRawKvMode() { + return errors.New("cannot do raw restore from transactional data") + } + + files, err := client.GetFilesInRawRange(cfg.StartKey, cfg.EndKey, cfg.CF) + if err != nil { + return errors.Trace(err) + } + + if len(files) == 0 { + return errors.New("all files are filtered out from the backup archive, nothing to restore") + } + summary.CollectInt("restore files", len(files)) + + ranges, err := restore.ValidateFileRanges(files, nil) + if err != nil { + return errors.Trace(err) + } + + // Redirect to log if there is no log file to avoid unreadable output. + // TODO: How to show progress? + updateCh := utils.StartProgress( + ctx, + "Raw Restore", + // Split/Scatter + Download/Ingest + int64(len(ranges)+len(files)), + !cfg.LogProgress) + + err = restore.SplitRanges(ctx, client, ranges, nil, updateCh) + if err != nil { + return errors.Trace(err) + } + + removedSchedulers, err := restorePreWork(ctx, client, mgr) + if err != nil { + return errors.Trace(err) + } + + err = client.RestoreRaw(cfg.StartKey, cfg.EndKey, files, updateCh) + if err != nil { + return errors.Trace(err) + } + + err = restorePostWork(ctx, client, mgr, removedSchedulers) + if err != nil { + return errors.Trace(err) + } + // Restore has finished. + close(updateCh) + + return nil +} diff --git a/pkg/utils/key.go b/pkg/utils/key.go index ecaa5fce2..8caeb2833 100644 --- a/pkg/utils/key.go +++ b/pkg/utils/key.go @@ -70,3 +70,21 @@ func unescapedKey(text string) ([]byte, error) { } return buf, nil } + +// CompareEndKey compared two keys that BOTH represent the EXCLUSIVE ending of some range. An empty end key is the very +// end, so an empty key is greater than any other keys. 
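// [Editor's example] Hence CompareEndKey("1", "2") < 0, while
// CompareEndKey("1", "") < 0 and CompareEndKey("", "1") > 0, because ""
// sorts above every concrete end key; CompareEndKey("", "") == 0 (see the
// unit test below).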
+// Please note that this function is not applicable if any one argument is not an EXCLUSIVE ending of a range. +func CompareEndKey(a, b []byte) int { + if len(a) == 0 { + if len(b) == 0 { + return 0 + } + return 1 + } + + if len(b) == 0 { + return -1 + } + + return bytes.Compare(a, b) +} diff --git a/pkg/utils/key_test.go b/pkg/utils/key_test.go index e314fbeb5..3e20bae24 100644 --- a/pkg/utils/key_test.go +++ b/pkg/utils/key_test.go @@ -32,3 +32,23 @@ func (r *testKeySuite) TestParseKey(c *C) { c.Assert(err, ErrorMatches, "*unknown format*") } + +func (r *testKeySuite) TestCompareEndKey(c *C) { + res := CompareEndKey([]byte("1"), []byte("2")) + c.Assert(res, Less, 0) + + res = CompareEndKey([]byte("1"), []byte("1")) + c.Assert(res, Equals, 0) + + res = CompareEndKey([]byte("2"), []byte("1")) + c.Assert(res, Greater, 0) + + res = CompareEndKey([]byte("1"), []byte("")) + c.Assert(res, Less, 0) + + res = CompareEndKey([]byte(""), []byte("")) + c.Assert(res, Equals, 0) + + res = CompareEndKey([]byte(""), []byte("1")) + c.Assert(res, Greater, 0) +} diff --git a/tests/br_rawkv/run.sh b/tests/br_rawkv/run.sh index a3f62311f..f57e76827 100644 --- a/tests/br_rawkv/run.sh +++ b/tests/br_rawkv/run.sh @@ -17,13 +17,22 @@ set -eu BACKUP_DIR="raw_backup" +checksum() { + bin/rawkv --pd $PD_ADDR --mode checksum --start-key $1 --end-key $2 | grep result | awk '{print $3}' +} + +fail_and_exit() { + echo "TEST: [$TEST_NAME] failed!" + exit 1 +} + +checksum_empty=$(checksum 31 3130303030303030) + # generate raw kv randomly in range[start-key, end-key) in 10s bin/rawkv --pd $PD_ADDR --mode rand-gen --start-key 31 --end-key 3130303030303030 --duration 10 -# output checksum -bin/rawkv --pd $PD_ADDR --mode checksum --start-key 31 --end-key 3130303030303030 > /$TEST_DIR/checksum.out - -checksum_ori=$(cat /$TEST_DIR/checksum.out | grep result | awk '{print $3}') +checksum_ori=$(checksum 31 3130303030303030) +checksum_partial=$(checksum 311111 311122) # backup rawkv echo "backup start..." @@ -32,21 +41,45 @@ run_br --pd $PD_ADDR backup raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 31 -- # delete data in range[start-key, end-key) bin/rawkv --pd $PD_ADDR --mode delete --start-key 31 --end-key 3130303030303030 -# TODO: Finish check after restore ready +# Ensure the data is deleted +checksum_new=$(checksum 31 3130303030303030) + +if [ "$checksum_new" != "$checksum_empty" ];then + echo "failed to delete data in range" + fail_and_exit +fi + # restore rawkv -# echo "restore start..." -# run_br --pd $PD_ADDR restore raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 31 --end 3130303030303030 --format hex --concurrency 4 +echo "restore start..." +run_br --pd $PD_ADDR restore raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 31 --end 3130303030303030 --format hex --concurrency 4 -# output checksum after restore -# bin/rawkv --pd $PD_ADDR --mode checksum --start-key 31 --end-key 3130303030303030 > /$TEST_DIR/checksum.out +checksum_new=$(checksum 31 3130303030303030) -checksum_new=$(cat /$TEST_DIR/checksum.out | grep result | awk '{print $3}') +if [ "$checksum_new" != "$checksum_ori" ];then + echo "checksum failed after restore" + fail_and_exit +fi -if [ "$checksum_ori" == "$checksum_new" ];then - echo "TEST: [$TEST_NAME] successed!" -else - echo "TEST: [$TEST_NAME] failed!" 
- exit 1 +# delete data in range[start-key, end-key) +bin/rawkv --pd $PD_ADDR --mode delete --start-key 31 --end-key 3130303030303030 + +# Ensure the data is deleted +checksum_new=$(checksum 31 3130303030303030) + +if [ "$checksum_new" != "$checksum_empty" ];then + echo "failed to delete data in range" + fail_and_exit fi +# FIXME restore rawkv partially after change endkey to inclusive +# echo "restore start..." +# run_br --pd $PD_ADDR restore raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 311111 --end 311122 --format hex --concurrency 4 +# +# checksum_new=$(checksum 31 3130303030303030) +# +# if [ "$checksum_new" != "$checksum_partial" ];then +# echo "checksum failed after restore" +# fail_and_exit +# fi +echo "TEST: [$TEST_NAME] successed!" From 6268cde45e6a05b5e46d1208b31ea18a1770b8ba Mon Sep 17 00:00:00 2001 From: 5kbpers <20279863+5kbpers@users.noreply.github.com> Date: Wed, 18 Mar 2020 13:49:01 +0800 Subject: [PATCH 30/46] restore: remove tiflash replica before restore (#194) * restore: remove tiflash replica before restore Signed-off-by: 5kbpers * rename errSplit variable Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * check replica count by region info Signed-off-by: 5kbpers * cleanup Signed-off-by: 5kbpers * save tiflash replica count to backupmeta Signed-off-by: 5kbpers * fix save crcxor Signed-off-by: 5kbpers * fix decode the key of placement rule Signed-off-by: 5kbpers * address lint Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * close domain after restoring tiflash-replica Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * address comments Signed-off-by: 5kbpers * Update pkg/task/restore.go Co-Authored-By: 3pointer Co-authored-by: 3pointer --- cmd/restore.go | 21 ++++++++ cmd/validate.go | 6 +-- go.mod | 3 +- go.sum | 5 ++ pkg/conn/conn.go | 31 +++++++---- pkg/conn/conn_test.go | 46 ++++++++++------ pkg/restore/backoff.go | 10 ++-- pkg/restore/client.go | 105 +++++++++++++++++++++++++++++++++++-- pkg/restore/db.go | 46 ++++++++++++++-- pkg/restore/split.go | 103 ++++++++++++++++++++++++++++++------ pkg/restore/split_test.go | 2 +- pkg/restore/util.go | 12 ++++- pkg/task/common.go | 7 +-- pkg/task/restore.go | 72 ++++++++++++++++++++++++- pkg/task/restore_raw.go | 2 +- pkg/utils/pd.go | 107 ++++++++++++++++++++++++++++++++++++++ pkg/utils/schema.go | 28 +++++----- pkg/utils/tso.go | 49 ----------------- 18 files changed, 526 insertions(+), 129 deletions(-) create mode 100644 pkg/utils/pd.go delete mode 100644 pkg/utils/tso.go diff --git a/cmd/restore.go b/cmd/restore.go index 6353719f2..bc74bea84 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -30,6 +30,15 @@ func runRestoreRawCommand(command *cobra.Command, cmdName string) error { return task.RunRestoreRaw(GetDefaultContext(), gluetikv.Glue{}, cmdName, &cfg) } +func runRestoreTiflashReplicaCommand(command *cobra.Command, cmdName string) error { + cfg := task.RestoreConfig{Config: task.Config{LogProgress: HasLogFile()}} + if err := cfg.ParseFromFlags(command.Flags()); err != nil { + return err + } + + return task.RunRestoreTiflashReplica(GetDefaultContext(), tidbGlue, cmdName, &cfg) +} + // NewRestoreCommand returns a restore subcommand func NewRestoreCommand() *cobra.Command { command := &cobra.Command{ @@ -55,6 +64,7 @@ func NewRestoreCommand() *cobra.Command { newDbRestoreCommand(), newTableRestoreCommand(), newRawRestoreCommand(), + newTiflashReplicaRestoreCommand(), ) task.DefineRestoreFlags(command.PersistentFlags()) @@ -96,6 +106,17 @@ func 
newTableRestoreCommand() *cobra.Command { return command } +func newTiflashReplicaRestoreCommand() *cobra.Command { + command := &cobra.Command{ + Use: "tiflash-replica", + Short: "restore the tiflash replica before the last restore, it must only be used after the last restore failed", + RunE: func(cmd *cobra.Command, _ []string) error { + return runRestoreTiflashReplicaCommand(cmd, "Restore TiFlash Replica") + }, + } + return command +} + func newRawRestoreCommand() *cobra.Command { command := &cobra.Command{ Use: "raw", diff --git a/cmd/validate.go b/cmd/validate.go index baf30200f..386a7bb47 100644 --- a/cmd/validate.go +++ b/cmd/validate.go @@ -63,7 +63,7 @@ func newCheckSumCommand() *cobra.Command { return err } - _, s, backupMeta, err := task.ReadBackupMeta(ctx, &cfg) + _, s, backupMeta, err := task.ReadBackupMeta(ctx, utils.MetaFile, &cfg) if err != nil { return err } @@ -151,7 +151,7 @@ func newBackupMetaCommand() *cobra.Command { if err = cfg.ParseFromFlags(cmd.Flags()); err != nil { return err } - _, _, backupMeta, err := task.ReadBackupMeta(ctx, &cfg) + _, _, backupMeta, err := task.ReadBackupMeta(ctx, utils.MetaFile, &cfg) if err != nil { log.Error("read backupmeta failed", zap.Error(err)) return err @@ -242,7 +242,7 @@ func decodeBackupMetaCommand() *cobra.Command { if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { return err } - _, s, backupMeta, err := task.ReadBackupMeta(ctx, &cfg) + _, s, backupMeta, err := task.ReadBackupMeta(ctx, utils.MetaFile, &cfg) if err != nil { return err } diff --git a/go.mod b/go.mod index 50b81eed8..94f4022f9 100644 --- a/go.mod +++ b/go.mod @@ -16,11 +16,12 @@ require ( github.com/google/uuid v1.1.1 github.com/klauspost/cpuid v1.2.0 // indirect github.com/mattn/go-runewidth v0.0.7 // indirect + github.com/montanaflynn/stats v0.5.0 // indirect github.com/onsi/ginkgo v1.11.0 // indirect github.com/onsi/gomega v1.8.1 // indirect github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 - github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5 + github.com/pingcap/kvproto v0.0.0-20200317112120-78042b285b75 github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd github.com/pingcap/parser v0.0.0-20200305120128-bde9faa0df84 github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 diff --git a/go.sum b/go.sum index 8c5ae9cff..31fd50bcc 100644 --- a/go.sum +++ b/go.sum @@ -309,6 +309,8 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb github.com/montanaflynn/stats v0.0.0-20151014174947-eeaced052adb/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 h1:pmpDGKLw4n82EtrNiLqB+xSz/JQwFOaZuMALYUHwX5s= github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.5.0 h1:2EkzeTSqBB4V4bJwWrt5gIIrZmpJBcoIRGS2kWLgzmk= +github.com/montanaflynn/stats v0.5.0/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c= github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI= @@ -363,6 +365,8 @@ github.com/pingcap/kvproto v0.0.0-20200214064158-62d31900d88e/go.mod h1:IOdRDPLy github.com/pingcap/kvproto 
v0.0.0-20200221034943-a2aa1d1e20a8/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5 h1:knEvP4R5v5b2T107/Q6VzB0C8/6T7NXB/V7Vl1FtQsg= github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20200317112120-78042b285b75 h1:DB3NTM0ilba/6sW+vccdEnP10bVvrVunDwWvRa0hSKc= +github.com/pingcap/kvproto v0.0.0-20200317112120-78042b285b75/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd h1:CV3VsP3Z02MVtdpTMfEgRJ4T9NGgGTxdHpJerent7rM= @@ -486,6 +490,7 @@ github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljT github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2tfCQDUqRd8fI= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3 h1:ZsIlNwu/G0zbChIZaWOeZ2TPGNmKMt46jZLXi3e8LFc= github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= diff --git a/pkg/conn/conn.go b/pkg/conn/conn.go index cdfc78168..c7c4a9c9e 100644 --- a/pkg/conn/conn.go +++ b/pkg/conn/conn.go @@ -92,17 +92,20 @@ func pdRequest( return r, nil } -// UnexpectedStoreBehavior is the action to do in GetAllTiKVStores when a -// non-TiKV store (e.g. TiFlash store) is found. -type UnexpectedStoreBehavior uint8 +// StoreBehavior is the action to do in GetAllTiKVStores when a non-TiKV +// store (e.g. TiFlash store) is found. +type StoreBehavior uint8 const ( // ErrorOnTiFlash causes GetAllTiKVStores to return error when the store is // found to be a TiFlash node. - ErrorOnTiFlash UnexpectedStoreBehavior = 0 + ErrorOnTiFlash StoreBehavior = 0 // SkipTiFlash causes GetAllTiKVStores to skip the store when it is found to // be a TiFlash node. - SkipTiFlash UnexpectedStoreBehavior = 1 + SkipTiFlash StoreBehavior = 1 + // TiFlashOnly caused GetAllTiKVStores to skip the store which is not a + // TiFlash node. + TiFlashOnly StoreBehavior = 2 ) // GetAllTiKVStores returns all TiKV stores registered to the PD client. The @@ -110,7 +113,7 @@ const ( func GetAllTiKVStores( ctx context.Context, pdClient pd.Client, - unexpectedStoreBehavior UnexpectedStoreBehavior, + storeBehavior StoreBehavior, ) ([]*metapb.Store, error) { // get all live stores. 
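// [Editor's note] The three behaviors in the loop below: ErrorOnTiFlash
// aborts on the first TiFlash store, SkipTiFlash filters TiFlash stores out
// of the result, and the new TiFlashOnly inverts the filter so callers such
// as restore.SplitRanges (later in this patch) can collect the TiFlash
// store IDs that regions must vacate before SSTs are ingested.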
stores, err := pdClient.GetAllStores(ctx, pd.WithExcludeTombstone()) @@ -122,15 +125,21 @@ func GetAllTiKVStores( j := 0 skipStore: for _, store := range stores { + var isTiFlash bool for _, label := range store.Labels { if label.Key == "engine" && label.Value == "tiflash" { - if unexpectedStoreBehavior == SkipTiFlash { + if storeBehavior == SkipTiFlash { continue skipStore + } else if storeBehavior == ErrorOnTiFlash { + return nil, errors.Errorf( + "cannot restore to a cluster with active TiFlash stores (store %d at %s)", store.Id, store.Address) } - return nil, errors.Errorf( - "cannot restore to a cluster with active TiFlash stores (store %d at %s)", store.Id, store.Address) + isTiFlash = true } } + if !isTiFlash && storeBehavior == TiFlashOnly { + continue skipStore + } stores[j] = store j++ } @@ -145,7 +154,7 @@ func NewMgr( storage tikv.Storage, tlsConf *tls.Config, securityOption pd.SecurityOption, - unexpectedStoreBehavior UnexpectedStoreBehavior, + storeBehavior StoreBehavior, ) (*Mgr, error) { addrs := strings.Split(pdAddrs, ",") @@ -190,7 +199,7 @@ func NewMgr( log.Info("new mgr", zap.String("pdAddrs", pdAddrs)) // Check live tikv. - stores, err := GetAllTiKVStores(ctx, pdClient, unexpectedStoreBehavior) + stores, err := GetAllTiKVStores(ctx, pdClient, storeBehavior) if err != nil { log.Error("fail to get store", zap.Error(err)) return nil, err diff --git a/pkg/conn/conn_test.go b/pkg/conn/conn_test.go index 572798e23..26278035b 100644 --- a/pkg/conn/conn_test.go +++ b/pkg/conn/conn_test.go @@ -162,40 +162,40 @@ func (fpdc fakePDClient) GetAllStores(context.Context, ...pd.GetStoreOption) ([] func (s *testClientSuite) TestGetAllTiKVStores(c *C) { testCases := []struct { - stores []*metapb.Store - unexpectedStoreBehavior UnexpectedStoreBehavior - expectedStores map[uint64]int - expectedError string + stores []*metapb.Store + storeBehavior StoreBehavior + expectedStores map[uint64]int + expectedError string }{ { stores: []*metapb.Store{ {Id: 1}, }, - unexpectedStoreBehavior: SkipTiFlash, - expectedStores: map[uint64]int{1: 1}, + storeBehavior: SkipTiFlash, + expectedStores: map[uint64]int{1: 1}, }, { stores: []*metapb.Store{ {Id: 1}, }, - unexpectedStoreBehavior: ErrorOnTiFlash, - expectedStores: map[uint64]int{1: 1}, + storeBehavior: ErrorOnTiFlash, + expectedStores: map[uint64]int{1: 1}, }, { stores: []*metapb.Store{ {Id: 1}, {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, }, - unexpectedStoreBehavior: SkipTiFlash, - expectedStores: map[uint64]int{1: 1}, + storeBehavior: SkipTiFlash, + expectedStores: map[uint64]int{1: 1}, }, { stores: []*metapb.Store{ {Id: 1}, {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, }, - unexpectedStoreBehavior: ErrorOnTiFlash, - expectedError: "cannot restore to a cluster with active TiFlash stores.*", + storeBehavior: ErrorOnTiFlash, + expectedError: "cannot restore to a cluster with active TiFlash stores.*", }, { stores: []*metapb.Store{ @@ -206,8 +206,8 @@ func (s *testClientSuite) TestGetAllTiKVStores(c *C) { {Id: 5, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tikv"}, {Key: "engine", Value: "tiflash"}}}, {Id: 6, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tiflash"}, {Key: "engine", Value: "tikv"}}}, }, - unexpectedStoreBehavior: SkipTiFlash, - expectedStores: map[uint64]int{1: 1, 3: 1, 4: 1, 6: 1}, + storeBehavior: SkipTiFlash, + expectedStores: map[uint64]int{1: 1, 3: 1, 4: 1, 6: 1}, }, { stores: []*metapb.Store{ @@ -218,14 +218,26 @@ func (s *testClientSuite) TestGetAllTiKVStores(c 
*C) { {Id: 5, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tikv"}, {Key: "engine", Value: "tiflash"}}}, {Id: 6, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tiflash"}, {Key: "engine", Value: "tikv"}}}, }, - unexpectedStoreBehavior: ErrorOnTiFlash, - expectedError: "cannot restore to a cluster with active TiFlash stores.*", + storeBehavior: ErrorOnTiFlash, + expectedError: "cannot restore to a cluster with active TiFlash stores.*", + }, + { + stores: []*metapb.Store{ + {Id: 1}, + {Id: 2, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}}}, + {Id: 3}, + {Id: 4, Labels: []*metapb.StoreLabel{{Key: "engine", Value: "tikv"}}}, + {Id: 5, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tikv"}, {Key: "engine", Value: "tiflash"}}}, + {Id: 6, Labels: []*metapb.StoreLabel{{Key: "else", Value: "tiflash"}, {Key: "engine", Value: "tikv"}}}, + }, + storeBehavior: TiFlashOnly, + expectedStores: map[uint64]int{2: 1, 5: 1}, }, } for _, testCase := range testCases { pdClient := fakePDClient{stores: testCase.stores} - stores, err := GetAllTiKVStores(context.Background(), pdClient, testCase.unexpectedStoreBehavior) + stores, err := GetAllTiKVStores(context.Background(), pdClient, testCase.storeBehavior) if len(testCase.expectedError) != 0 { c.Assert(err, ErrorMatches, testCase.expectedError) continue diff --git a/pkg/restore/backoff.go b/pkg/restore/backoff.go index ae5cddd66..21048dd13 100644 --- a/pkg/restore/backoff.go +++ b/pkg/restore/backoff.go @@ -90,21 +90,21 @@ func (bo *importerBackoffer) Attempt() int { return bo.attempt } -type resetTSBackoffer struct { +type pdReqBackoffer struct { attempt int delayTime time.Duration maxDelayTime time.Duration } -func newResetTSBackoffer() utils.Backoffer { - return &resetTSBackoffer{ +func newPDReqBackoffer() utils.Backoffer { + return &pdReqBackoffer{ attempt: resetTsRetryTime, delayTime: resetTSWaitInterval, maxDelayTime: resetTSMaxWaitInterval, } } -func (bo *resetTSBackoffer) NextBackoff(err error) time.Duration { +func (bo *pdReqBackoffer) NextBackoff(err error) time.Duration { bo.delayTime = 2 * bo.delayTime bo.attempt-- if bo.delayTime > bo.maxDelayTime { @@ -113,6 +113,6 @@ func (bo *resetTSBackoffer) NextBackoff(err error) time.Duration { return bo.delayTime } -func (bo *resetTSBackoffer) Attempt() int { +func (bo *pdReqBackoffer) Attempt() int { return bo.attempt } diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 97467d913..2453f2974 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -15,6 +15,7 @@ import ( "sync" "time" + "github.com/gogo/protobuf/proto" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/kvproto/pkg/import_sstpb" @@ -37,6 +38,7 @@ import ( "github.com/pingcap/br/pkg/checksum" "github.com/pingcap/br/pkg/conn" "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" ) @@ -65,6 +67,9 @@ type Client struct { hasSpeedLimited bool restoreStores []uint64 + + storage storage.ExternalStorage + backend *backup.StorageBackend } // NewRestoreClient returns a new RestoreClient @@ -97,6 +102,17 @@ func (rc *Client) SetRateLimit(rateLimit uint64) { rc.rateLimit = rateLimit } +// SetStorage set ExternalStorage for client +func (rc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend, sendCreds bool) error { + var err error + rc.storage, err = storage.Create(ctx, backend, sendCreds) + if err != nil { + return err + } + rc.backend = backend + return nil +} + 
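// [Editor's sketch] For reference, the contract behind newPDReqBackoffer
// above; this shape is reconstructed from the two methods it implements and
// is an assumption about pkg/utils, not new API:
//
//    type Backoffer interface {
//        NextBackoff(err error) time.Duration // next delay: doubled each try, capped
//        Attempt() int                        // attempts left; retrying stops at 0
//    }
//
// utils.WithRetry re-runs its closure, sleeping NextBackoff between
// failures, until success or the attempts run out.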
// GetPDClient returns a pd client. func (rc *Client) GetPDClient() pd.Client { return rc.pdClient @@ -232,8 +248,23 @@ func (rc *Client) ResetTS(pdAddrs []string) error { i := 0 return utils.WithRetry(rc.ctx, func() error { idx := i % len(pdAddrs) + i++ return utils.ResetTS(pdAddrs[idx], restoreTS, rc.tlsConf) - }, newResetTSBackoffer()) + }, newPDReqBackoffer()) +} + +// GetPlacementRules return the current placement rules +func (rc *Client) GetPlacementRules(pdAddrs []string) ([]placement.Rule, error) { + var placementRules []placement.Rule + i := 0 + errRetry := utils.WithRetry(rc.ctx, func() error { + var err error + idx := i % len(pdAddrs) + i++ + placementRules, err = utils.GetPlacementRules(pdAddrs[idx], rc.tlsConf) + return err + }, newPDReqBackoffer()) + return placementRules, errRetry } // GetDatabases returns all databases. @@ -305,6 +336,74 @@ func (rc *Client) CreateTables( return rewriteRules, newTables, nil } +// RemoveTiFlashReplica removes all the tiflash replicas of a table +// TODO: remove this after tiflash supports restore +func (rc *Client) RemoveTiFlashReplica(tables []*utils.Table, placementRules []placement.Rule) error { + schemas := make([]*backup.Schema, 0, len(tables)) + var updateReplica bool + for _, table := range tables { + if rule := utils.SearchPlacementRule(table.Info.ID, placementRules, placement.Learner); rule != nil { + table.TiFlashReplicas = rule.Count + updateReplica = true + } + tableData, err := json.Marshal(table.Info) + if err != nil { + return errors.Trace(err) + } + dbData, err := json.Marshal(table.Db) + if err != nil { + return errors.Trace(err) + } + schemas = append(schemas, &backup.Schema{ + Db: dbData, + Table: tableData, + Crc64Xor: table.Crc64Xor, + TotalKvs: table.TotalKvs, + TotalBytes: table.TotalBytes, + TiflashReplicas: uint32(table.TiFlashReplicas), + }) + } + + if updateReplica { + // Update backup meta + rc.backupMeta.Schemas = schemas + backupMetaData, err := proto.Marshal(rc.backupMeta) + if err != nil { + return errors.Trace(err) + } + backendURL := storage.FormatBackendURL(rc.backend) + log.Info("update backup meta", zap.Stringer("path", &backendURL)) + err = rc.storage.Write(rc.ctx, utils.SavedMetaFile, backupMetaData) + if err != nil { + return errors.Trace(err) + } + } + + for _, table := range tables { + if table.TiFlashReplicas > 0 { + err := rc.db.AlterTiflashReplica(rc.ctx, table, 0) + if err != nil { + return errors.Trace(err) + } + } + } + return nil +} + +// RecoverTiFlashReplica recovers all the tiflash replicas of a table +// TODO: remove this after tiflash supports restore +func (rc *Client) RecoverTiFlashReplica(tables []*utils.Table) error { + for _, table := range tables { + if table.TiFlashReplicas > 0 { + err := rc.db.AlterTiflashReplica(rc.ctx, table, table.TiFlashReplicas) + if err != nil { + return errors.Trace(err) + } + } + } + return nil +} + // ExecDDLs executes the queries of the ddl jobs. func (rc *Client) ExecDDLs(ddlJobs []*model.Job) error { // Sort the ddl jobs by schema version in ascending order. 
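// [Editor's note] Condensed from RunRestore in pkg/task/restore.go later in
// this patch, the intended ordering around the two helpers above is:
//
//    rules, err := client.GetPlacementRules(cfg.PD)
//    if err != nil {
//        return err
//    }
//    // snapshot replica counts into backupmeta.bak, then set them to 0
//    if err = client.RemoveTiFlashReplica(tables, rules); err != nil {
//        return err
//    }
//    defer func() { _ = client.RecoverTiFlashReplica(tables) }()
//
// so the saved counts survive a failed restore and can be re-applied later
// with `br restore tiflash-replica`.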
@@ -327,7 +426,7 @@ func (rc *Client) ExecDDLs(ddlJobs []*model.Job) error { func (rc *Client) setSpeedLimit() error { if !rc.hasSpeedLimited && rc.rateLimit != 0 { - stores, err := conn.GetAllTiKVStores(rc.ctx, rc.pdClient, conn.ErrorOnTiFlash) + stores, err := conn.GetAllTiKVStores(rc.ctx, rc.pdClient, conn.SkipTiFlash) if err != nil { return err } @@ -467,7 +566,7 @@ func (rc *Client) SwitchToNormalMode(ctx context.Context) error { } func (rc *Client) switchTiKVMode(ctx context.Context, mode import_sstpb.SwitchMode) error { - stores, err := conn.GetAllTiKVStores(ctx, rc.pdClient, conn.ErrorOnTiFlash) + stores, err := conn.GetAllTiKVStores(ctx, rc.pdClient, conn.SkipTiFlash) if err != nil { return errors.Trace(err) } diff --git a/pkg/restore/db.go b/pkg/restore/db.go index d4a4a0a41..9ce3b10c7 100644 --- a/pkg/restore/db.go +++ b/pkg/restore/db.go @@ -84,13 +84,13 @@ func (db *DB) CreateDatabase(ctx context.Context, schema *model.DBInfo) error { // CreateTable executes a CREATE TABLE SQL. func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error { - schema := table.Info - createSQL, err := db.se.ShowCreateTable(schema, newIDAllocator(schema.AutoIncID)) + tableInfo := table.Info + createSQL, err := db.se.ShowCreateTable(tableInfo, newIDAllocator(tableInfo.AutoIncID)) if err != nil { log.Error( "build create table SQL failed", zap.Stringer("db", table.Db.Name), - zap.Stringer("table", schema.Name), + zap.Stringer("table", tableInfo.Name), zap.Error(err)) return errors.Trace(err) } @@ -119,8 +119,8 @@ func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error { } alterAutoIncIDSQL := fmt.Sprintf( "alter table %s auto_increment = %d", - utils.EncloseName(schema.Name.O), - schema.AutoIncID) + utils.EncloseName(tableInfo.Name.O), + tableInfo.AutoIncID) err = db.se.Execute(ctx, alterAutoIncIDSQL) if err != nil { log.Error("alter AutoIncID failed", @@ -129,9 +129,45 @@ func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error { zap.Stringer("table", table.Info.Name), zap.Error(err)) } + return errors.Trace(err) } +// AlterTiflashReplica alters the replica count of tiflash +func (db *DB) AlterTiflashReplica(ctx context.Context, table *utils.Table, count int) error { + switchDbSQL := fmt.Sprintf("use %s;", utils.EncloseName(table.Db.Name.O)) + err := db.se.Execute(ctx, switchDbSQL) + if err != nil { + log.Error("switch db failed", + zap.String("SQL", switchDbSQL), + zap.Stringer("db", table.Db.Name), + zap.Error(err)) + return errors.Trace(err) + } + alterTiFlashSQL := fmt.Sprintf( + "alter table %s set tiflash replica %d", + utils.EncloseName(table.Info.Name.O), + count, + ) + err = db.se.Execute(ctx, alterTiFlashSQL) + if err != nil { + log.Error("alter tiflash replica failed", + zap.String("query", alterTiFlashSQL), + zap.Stringer("db", table.Db.Name), + zap.Stringer("table", table.Info.Name), + zap.Error(err)) + return err + } else if table.TiFlashReplicas > 0 { + log.Warn("alter tiflash replica done", + zap.Stringer("db", table.Db.Name), + zap.Stringer("table", table.Info.Name), + zap.Int("originalReplicaCount", table.TiFlashReplicas), + zap.Int("replicaCount", count)) + + } + return nil +} + // Close closes the connection func (db *DB) Close() { db.se.Close() diff --git a/pkg/restore/split.go b/pkg/restore/split.go index dc0bab80a..03153097a 100644 --- a/pkg/restore/split.go +++ b/pkg/restore/split.go @@ -32,8 +32,11 @@ const ( ScatterWaitMaxRetryTimes = 64 ScatterWaitInterval = 50 * time.Millisecond ScatterMaxWaitInterval = time.Second - 
ScatterWaitUpperInterval = 180 * time.Second + + RejectStoreCheckRetryTimes = 64 + RejectStoreCheckInterval = 100 * time.Millisecond + RejectStoreMaxCheckInterval = 2 * time.Second ) // RegionSplitter is a executor of region split by rules. @@ -60,6 +63,7 @@ func (rs *RegionSplitter) Split( ctx context.Context, ranges []rtree.Range, rewriteRules *RewriteRules, + rejectStores map[uint64]bool, onSplit OnSplitFunc, ) error { if len(ranges) == 0 { @@ -67,9 +71,9 @@ func (rs *RegionSplitter) Split( } startTime := time.Now() // Sort the range for getting the min and max key of the ranges - sortedRanges, err := sortRanges(ranges, rewriteRules) - if err != nil { - return errors.Trace(err) + sortedRanges, errSplit := sortRanges(ranges, rewriteRules) + if errSplit != nil { + return errors.Trace(errSplit) } minKey := codec.EncodeBytes([]byte{}, sortedRanges[0].StartKey) maxKey := codec.EncodeBytes([]byte{}, sortedRanges[len(sortedRanges)-1].EndKey) @@ -91,12 +95,14 @@ func (rs *RegionSplitter) Split( } interval := SplitRetryInterval scatterRegions := make([]*RegionInfo, 0) + allRegions := make([]*RegionInfo, 0) SplitRegions: for i := 0; i < SplitRetryTimes; i++ { - regions, err1 := paginateScanRegion(ctx, rs.client, minKey, maxKey, scanRegionPaginationLimit) - if err1 != nil { - return errors.Trace(err1) + regions, errScan := paginateScanRegion(ctx, rs.client, minKey, maxKey, scanRegionPaginationLimit) + if errScan != nil { + return errors.Trace(errScan) } + allRegions = append(allRegions, regions...) if len(regions) == 0 { log.Warn("cannot scan any region") return nil @@ -109,16 +115,16 @@ SplitRegions: for regionID, keys := range splitKeyMap { var newRegions []*RegionInfo region := regionMap[regionID] - newRegions, err = rs.splitAndScatterRegions(ctx, region, keys) - if err != nil { - if strings.Contains(err.Error(), "no valid key") { + newRegions, errSplit = rs.splitAndScatterRegions(ctx, region, keys) + if errSplit != nil { + if strings.Contains(errSplit.Error(), "no valid key") { for _, key := range keys { log.Error("no valid key", zap.Binary("startKey", region.Region.StartKey), zap.Binary("endKey", region.Region.EndKey), zap.Binary("key", codec.EncodeBytes([]byte{}, key))) } - return errors.Trace(err) + return errors.Trace(errSplit) } interval = 2 * interval if interval > SplitMaxRetryInterval { @@ -126,7 +132,7 @@ SplitRegions: } time.Sleep(interval) if i > 3 { - log.Warn("splitting regions failed, retry it", zap.Error(err), zap.ByteStrings("keys", keys)) + log.Warn("splitting regions failed, retry it", zap.Error(errSplit), zap.ByteStrings("keys", keys)) } continue SplitRegions } @@ -136,10 +142,23 @@ SplitRegions: } break } - if err != nil { - return errors.Trace(err) + if errSplit != nil { + return errors.Trace(errSplit) } - log.Info("splitting regions done, wait for scattering regions", + if len(rejectStores) > 0 { + startTime = time.Now() + log.Info("start to wait for removing rejected stores", zap.Reflect("rejectStores", rejectStores)) + for _, region := range allRegions { + if !rs.waitForRemoveRejectStores(ctx, region, rejectStores) { + log.Error("waiting for removing rejected stores failed", + zap.Stringer("region", region.Region)) + return errors.New("waiting for removing rejected stores failed") + } + } + log.Info("waiting for removing rejected stores done", + zap.Int("regions", len(allRegions)), zap.Duration("take", time.Since(startTime))) + } + log.Info("start to wait for scattering regions", zap.Int("regions", len(scatterRegions)), zap.Duration("take", time.Since(startTime))) 
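// [Editor's note] The rejectStores pass above runs before any scatter
// waiting: each scanned region is polled with a doubling interval
// (RejectStoreCheckInterval up to RejectStoreMaxCheckInterval, at most
// RejectStoreCheckRetryTimes times) until none of its peers sits on a
// rejected store. The rejected set is the TiFlash stores, which are
// excluded until TiFlash supports restore (see the TODOs in
// pkg/restore/client.go).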
startTime = time.Now() scatterCount := 0 @@ -192,6 +211,30 @@ func (rs *RegionSplitter) isScatterRegionFinished(ctx context.Context, regionID return ok, nil } +func (rs *RegionSplitter) hasRejectStorePeer( + ctx context.Context, + regionID uint64, + rejectStores map[uint64]bool, +) (bool, error) { + regionInfo, err := rs.client.GetRegionByID(ctx, regionID) + if err != nil { + return false, err + } + if regionInfo == nil { + return false, nil + } + for _, peer := range regionInfo.Region.GetPeers() { + if rejectStores[peer.GetStoreId()] { + return true, nil + } + } + retryTimes := ctx.Value(retryTimes).(int) + if retryTimes > 10 { + log.Warn("get region info", zap.Stringer("region", regionInfo.Region)) + } + return false, nil +} + func (rs *RegionSplitter) waitForSplit(ctx context.Context, regionID uint64) { interval := SplitCheckInterval for i := 0; i < SplitCheckMaxRetryTimes; i++ { @@ -237,6 +280,36 @@ func (rs *RegionSplitter) waitForScatterRegion(ctx context.Context, regionInfo * } } +func (rs *RegionSplitter) waitForRemoveRejectStores( + ctx context.Context, + regionInfo *RegionInfo, + rejectStores map[uint64]bool, +) bool { + interval := RejectStoreCheckInterval + regionID := regionInfo.Region.GetId() + for i := 0; i < RejectStoreCheckRetryTimes; i++ { + ctx1 := context.WithValue(ctx, retryTimes, i) + ok, err := rs.hasRejectStorePeer(ctx1, regionID, rejectStores) + if err != nil { + log.Warn("wait for rejecting store failed", + zap.Stringer("region", regionInfo.Region), + zap.Error(err)) + return false + } + // Do not have any peer in the rejected store, return true + if !ok { + return true + } + interval = 2 * interval + if interval > RejectStoreMaxCheckInterval { + interval = RejectStoreMaxCheckInterval + } + time.Sleep(interval) + } + + return false +} + func (rs *RegionSplitter) splitAndScatterRegions( ctx context.Context, regionInfo *RegionInfo, keys [][]byte, ) ([]*RegionInfo, error) { diff --git a/pkg/restore/split_test.go b/pkg/restore/split_test.go index b21cbf781..06dab1cf1 100644 --- a/pkg/restore/split_test.go +++ b/pkg/restore/split_test.go @@ -193,7 +193,7 @@ func (s *testRestoreUtilSuite) TestSplit(c *C) { regionSplitter := NewRegionSplitter(client) ctx := context.Background() - err := regionSplitter.Split(ctx, ranges, rewriteRules, func(key [][]byte) {}) + err := regionSplitter.Split(ctx, ranges, rewriteRules, map[uint64]bool{}, func(key [][]byte) {}) if err != nil { c.Assert(err, IsNil, Commentf("split regions failed: %v", err)) } diff --git a/pkg/restore/util.go b/pkg/restore/util.go index 03af2b3c0..c49c07994 100644 --- a/pkg/restore/util.go +++ b/pkg/restore/util.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" + "github.com/pingcap/br/pkg/conn" "github.com/pingcap/br/pkg/rtree" "github.com/pingcap/br/pkg/summary" ) @@ -327,7 +328,16 @@ func SplitRanges( summary.CollectDuration("split region", elapsed) }() splitter := NewRegionSplitter(NewSplitClient(client.GetPDClient(), client.GetTLSConfig())) - return splitter.Split(ctx, ranges, rewriteRules, func(keys [][]byte) { + tiflashStores, err := conn.GetAllTiKVStores(ctx, client.GetPDClient(), conn.TiFlashOnly) + if err != nil { + return errors.Trace(err) + } + storeMap := make(map[uint64]bool) + for _, store := range tiflashStores { + storeMap[store.GetId()] = true + } + + return splitter.Split(ctx, ranges, rewriteRules, storeMap, func(keys [][]byte) { for range keys { updateCh <- struct{}{} } diff --git a/pkg/task/common.go b/pkg/task/common.go index 859c4206d..61186abe1 100644 --- 
a/pkg/task/common.go +++ b/pkg/task/common.go @@ -213,7 +213,7 @@ func newMgr( g glue.Glue, pds []string, tlsConfig TLSConfig, - unexpectedStoreBehavior conn.UnexpectedStoreBehavior, + storeBehavior conn.StoreBehavior, ) (*conn.Mgr, error) { var ( tlsConf *tls.Config @@ -240,7 +240,7 @@ func newMgr( if err != nil { return nil, err } - return conn.NewMgr(ctx, g, pdAddress, store.(tikv.Storage), tlsConf, securityOption, unexpectedStoreBehavior) + return conn.NewMgr(ctx, g, pdAddress, store.(tikv.Storage), tlsConf, securityOption, storeBehavior) } // GetStorage gets the storage backend from the config. @@ -262,13 +262,14 @@ func GetStorage( // ReadBackupMeta reads the backupmeta file from the storage. func ReadBackupMeta( ctx context.Context, + fileName string, cfg *Config, ) (*backup.StorageBackend, storage.ExternalStorage, *backup.BackupMeta, error) { u, s, err := GetStorage(ctx, cfg) if err != nil { return nil, nil, nil, err } - metaData, err := s.Read(ctx, utils.MetaFile) + metaData, err := s.Read(ctx, fileName) if err != nil { return nil, nil, nil, errors.Annotate(err, "load backupmeta failed") } diff --git a/pkg/task/restore.go b/pkg/task/restore.go index f2143764c..a4598baa9 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -17,6 +17,7 @@ import ( "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/restore" "github.com/pingcap/br/pkg/rtree" + "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" ) @@ -75,7 +76,7 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf ctx, cancel := context.WithCancel(c) defer cancel() - mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.ErrorOnTiFlash) + mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.SkipTiFlash) if err != nil { return err } @@ -87,6 +88,13 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf } defer client.Close() + u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions) + if err != nil { + return err + } + if err = client.SetStorage(ctx, u, cfg.SendCreds); err != nil { + return err + } client.SetRateLimit(cfg.RateLimit) client.SetConcurrency(uint(cfg.Concurrency)) if cfg.Online { @@ -99,7 +107,7 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf defer summary.Summary(cmdName) - u, _, backupMeta, err := ReadBackupMeta(ctx, &cfg.Config) + u, _, backupMeta, err := ReadBackupMeta(ctx, utils.MetaFile, &cfg.Config) if err != nil { return err } @@ -138,6 +146,18 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf if err != nil { return err } + placementRules, err := client.GetPlacementRules(cfg.PD) + if err != nil { + return err + } + err = client.RemoveTiFlashReplica(tables, placementRules) + if err != nil { + return err + } + + defer func() { + _ = client.RecoverTiFlashReplica(tables) + }() ranges, err := restore.ValidateFileRanges(files, rewriteRules) if err != nil { @@ -349,3 +369,51 @@ func splitPostWork(ctx context.Context, client *restore.Client, tables []*model. } return nil } + +// RunRestoreTiflashReplica restores the replica of tiflash saved in the last restore. 
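// [Editor's note] This backs the `br restore tiflash-replica` subcommand
// added in cmd/restore.go above: it reloads backupmeta.bak
// (utils.SavedMetaFile) and re-issues `alter table ... set tiflash replica
// n` for every table whose saved count is positive, which is why it must
// only be run after a failed restore.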
+func RunRestoreTiflashReplica(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConfig) error { + defer summary.Summary(cmdName) + ctx, cancel := context.WithCancel(c) + defer cancel() + + mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.SkipTiFlash) + if err != nil { + return err + } + defer mgr.Close() + + // Load saved backupmeta + _, _, backupMeta, err := ReadBackupMeta(ctx, utils.SavedMetaFile, &cfg.Config) + if err != nil { + return err + } + dbs, err := utils.LoadBackupTables(backupMeta) + if err != nil { + return err + } + se, err := restore.NewDB(g, mgr.GetTiKV()) + if err != nil { + return err + } + + tables := make([]*utils.Table, 0) + for _, db := range dbs { + tables = append(tables, db.Tables...) + } + updateCh := utils.StartProgress( + ctx, "RecoverTiflashReplica", int64(len(tables)), !cfg.LogProgress) + for _, t := range tables { + log.Info("get table", zap.Stringer("name", t.Info.Name), + zap.Int("replica", t.TiFlashReplicas)) + if t.TiFlashReplicas > 0 { + err := se.AlterTiflashReplica(ctx, t, t.TiFlashReplicas) + if err != nil { + return err + } + updateCh <- struct{}{} + } + } + summary.CollectInt("recover tables", len(tables)) + + return nil +} diff --git a/pkg/task/restore_raw.go b/pkg/task/restore_raw.go index 8511003a1..3d60df1cd 100644 --- a/pkg/task/restore_raw.go +++ b/pkg/task/restore_raw.go @@ -69,7 +69,7 @@ func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreR defer summary.Summary(cmdName) - u, _, backupMeta, err := ReadBackupMeta(ctx, &cfg.Config) + u, _, backupMeta, err := ReadBackupMeta(ctx, utils.MetaFile, &cfg.Config) if err != nil { return err } diff --git a/pkg/utils/pd.go b/pkg/utils/pd.go new file mode 100644 index 000000000..7a65a2ac4 --- /dev/null +++ b/pkg/utils/pd.go @@ -0,0 +1,107 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. 
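Putting the restore changes above together: `RunRestore` drops the TiFlash replicas before ingesting data (TiFlash stores are skipped entirely, note the switch to `conn.SkipTiFlash`) and re-adds them in a `defer`, while the standalone `RunRestoreTiflashReplica` command can replay the counts recorded in the saved backup meta if that deferred recovery never ran. A sketch of the remove-then-deferred-recover shape, with hypothetical helpers standing in for `RemoveTiFlashReplica` and `RecoverTiFlashReplica`:

```go
package main

import "fmt"

type table struct {
	name            string
	tiflashReplicas int // replica count recorded in the backup meta
}

// removeReplicas is a hypothetical stand-in for client.RemoveTiFlashReplica.
func removeReplicas(tables []table) error {
	for _, t := range tables {
		fmt.Printf("remove %d tiflash replica(s) of %s\n", t.tiflashReplicas, t.name)
	}
	return nil
}

// recoverReplicas is a hypothetical stand-in for client.RecoverTiFlashReplica.
func recoverReplicas(tables []table) {
	for _, t := range tables {
		if t.tiflashReplicas > 0 {
			fmt.Printf("recover %d tiflash replica(s) of %s\n", t.tiflashReplicas, t.name)
		}
	}
}

func runRestore(tables []table) (err error) {
	if err = removeReplicas(tables); err != nil {
		return err
	}
	// Deferred right after the removal succeeds, so the recovery runs on
	// every exit path and a failed restore still puts the replicas back.
	defer recoverReplicas(tables)

	// ... split ranges, download and ingest SSTs ...
	return nil
}

func main() {
	_ = runRestore([]table{{name: "test.usertable", tiflashReplicas: 1}})
}
```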
+ +package utils + +import ( + "bytes" + "crypto/tls" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/pd/v4/pkg/codec" + "github.com/pingcap/pd/v4/server/schedule/placement" + "github.com/pingcap/tidb/tablecodec" +) + +const ( + resetTSURL = "/pd/api/v1/admin/reset-ts" + placementRuleURL = "/pd/api/v1/config/rules" +) + +// ResetTS resets the timestamp of PD to a bigger value +func ResetTS(pdAddr string, ts uint64, tlsConf *tls.Config) error { + req, err := json.Marshal(struct { + TSO string `json:"tso,omitempty"` + }{TSO: fmt.Sprintf("%d", ts)}) + if err != nil { + return err + } + cli := &http.Client{Timeout: 30 * time.Second} + prefix := "http://" + if tlsConf != nil { + prefix = "https://" + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = tlsConf + cli.Transport = transport + } + reqURL := prefix + pdAddr + resetTSURL + resp, err := cli.Post(reqURL, "application/json", strings.NewReader(string(req))) + if err != nil { + return errors.Trace(err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusForbidden { + buf := new(bytes.Buffer) + _, _ = buf.ReadFrom(resp.Body) + return errors.Errorf("pd resets TS failed: req=%v, resp=%v, err=%v", string(req), buf.String(), err) + } + return nil +} + +// GetPlacementRules return the current placement rules +func GetPlacementRules(pdAddr string, tlsConf *tls.Config) ([]placement.Rule, error) { + cli := &http.Client{Timeout: 30 * time.Second} + prefix := "http://" + if tlsConf != nil { + prefix = "https://" + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = tlsConf + cli.Transport = transport + } + reqURL := prefix + pdAddr + placementRuleURL + resp, err := cli.Get(reqURL) + if err != nil { + return nil, errors.Trace(err) + } + defer resp.Body.Close() + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(resp.Body) + if err != nil { + return nil, errors.Trace(err) + } + if resp.StatusCode == http.StatusPreconditionFailed { + return []placement.Rule{}, nil + } + if resp.StatusCode != http.StatusOK { + return nil, errors.Errorf("get placement rules failed: resp=%v, err=%v, code=%d", buf.String(), err, resp.StatusCode) + } + var rules []placement.Rule + err = json.Unmarshal(buf.Bytes(), &rules) + if err != nil { + return nil, errors.Trace(err) + } + return rules, nil +} + +// SearchPlacementRule returns the placement rule matched to the table or nil +func SearchPlacementRule(tableID int64, placementRules []placement.Rule, role placement.PeerRoleType) *placement.Rule { + for _, rule := range placementRules { + key, err := hex.DecodeString(rule.StartKeyHex) + if err != nil { + continue + } + _, decoded, err := codec.DecodeBytes(key) + if err != nil { + continue + } + if rule.Role == role && tableID == tablecodec.DecodeTableID(decoded) { + return &rule + } + } + return nil +} diff --git a/pkg/utils/schema.go b/pkg/utils/schema.go index bc22768e5..5ac439e36 100644 --- a/pkg/utils/schema.go +++ b/pkg/utils/schema.go @@ -18,16 +18,19 @@ const ( MetaFile = "backupmeta" // MetaJSONFile represents backup meta json file name MetaJSONFile = "backupmeta.json" + // SavedMetaFile represents saved meta file name for recovering later + SavedMetaFile = "backupmeta.bak" ) // Table wraps the schema and files of a table. 
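The two HTTP helpers in the new `pkg/utils/pd.go` share one shape: pick the scheme from the optional `tls.Config`, clone the default transport when TLS is on, then decode PD's JSON response. A condensed sketch of that pattern follows; the endpoint path matches `placementRuleURL` above, but status handling is simplified and an untyped slice stands in for `[]placement.Rule`.

```go
package main

import (
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"
)

// getJSON fetches a PD HTTP endpoint, honoring an optional TLS config, and
// unmarshals the JSON body into out.
func getJSON(pdAddr, path string, tlsConf *tls.Config, out interface{}) error {
	cli := &http.Client{Timeout: 30 * time.Second}
	prefix := "http://"
	if tlsConf != nil {
		prefix = "https://"
		transport := http.DefaultTransport.(*http.Transport).Clone()
		transport.TLSClientConfig = tlsConf
		cli.Transport = transport
	}
	resp, err := cli.Get(prefix + pdAddr + path)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("pd returned %d: %s", resp.StatusCode, body)
	}
	return json.Unmarshal(body, out)
}

func main() {
	// Untyped stand-in for []placement.Rule.
	var rules []map[string]interface{}
	if err := getJSON("127.0.0.1:2379", "/pd/api/v1/config/rules", nil, &rules); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("got %d placement rule(s)\n", len(rules))
}
```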
type Table struct { - Db *model.DBInfo - Info *model.TableInfo - Crc64Xor uint64 - TotalKvs uint64 - TotalBytes uint64 - Files []*backup.File + Db *model.DBInfo + Info *model.TableInfo + Crc64Xor uint64 + TotalKvs uint64 + TotalBytes uint64 + Files []*backup.File + TiFlashReplicas int } // Database wraps the schema and tables of a database. @@ -92,12 +95,13 @@ func LoadBackupTables(meta *backup.BackupMeta) (map[string]*Database, error) { } } table := &Table{ - Db: dbInfo, - Info: tableInfo, - Crc64Xor: schema.Crc64Xor, - TotalKvs: schema.TotalKvs, - TotalBytes: schema.TotalBytes, - Files: tableFiles, + Db: dbInfo, + Info: tableInfo, + Crc64Xor: schema.Crc64Xor, + TotalKvs: schema.TotalKvs, + TotalBytes: schema.TotalBytes, + Files: tableFiles, + TiFlashReplicas: int(schema.TiflashReplicas), } db.Tables = append(db.Tables, table) } diff --git a/pkg/utils/tso.go b/pkg/utils/tso.go deleted file mode 100644 index c90cd3575..000000000 --- a/pkg/utils/tso.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. - -package utils - -import ( - "bytes" - "crypto/tls" - "encoding/json" - "fmt" - "net/http" - "strings" - "time" - - "github.com/pingcap/errors" -) - -const ( - resetTSURL = "/pd/api/v1/admin/reset-ts" -) - -// ResetTS resets the timestamp of PD to a bigger value -func ResetTS(pdAddr string, ts uint64, tlsConf *tls.Config) error { - req, err := json.Marshal(struct { - TSO string `json:"tso,omitempty"` - }{TSO: fmt.Sprintf("%d", ts)}) - if err != nil { - return err - } - cli := &http.Client{Timeout: 30 * time.Second} - prefix := "http://" - if tlsConf != nil { - prefix = "https://" - transport := http.DefaultTransport.(*http.Transport).Clone() - transport.TLSClientConfig = tlsConf - cli.Transport = transport - } - reqURL := prefix + pdAddr + resetTSURL - resp, err := cli.Post(reqURL, "application/json", strings.NewReader(string(req))) - if err != nil { - return errors.Trace(err) - } - defer resp.Body.Close() - if resp.StatusCode != 200 && resp.StatusCode != 403 { - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(resp.Body) - return errors.Errorf("pd resets TS failed: req=%v, resp=%v, err=%v", string(req), buf.String(), err) - } - return nil -} From 4ea6c1c83e1eeb448e3fb7707987820baadb3118 Mon Sep 17 00:00:00 2001 From: 3pointer Date: Thu, 19 Mar 2020 19:29:28 +0800 Subject: [PATCH 31/46] summary: put summary log at last (#197) * summary: put summary log at last * fix switch sql --- pkg/restore/db.go | 2 +- pkg/task/backup.go | 3 +-- pkg/task/backup_raw.go | 3 +-- pkg/task/restore.go | 3 +-- pkg/task/restore_raw.go | 3 +-- 5 files changed, 5 insertions(+), 9 deletions(-) diff --git a/pkg/restore/db.go b/pkg/restore/db.go index 9ce3b10c7..be24a1ad9 100644 --- a/pkg/restore/db.go +++ b/pkg/restore/db.go @@ -47,7 +47,7 @@ func NewDB(g glue.Glue, store kv.Storage) (*DB, error) { func (db *DB) ExecDDL(ctx context.Context, ddlJob *model.Job) error { var err error if ddlJob.BinlogInfo.TableInfo != nil { - switchDbSQL := fmt.Sprintf("use %s;", ddlJob.SchemaName) + switchDbSQL := fmt.Sprintf("use %s;", utils.EncloseName(ddlJob.SchemaName)) err = db.se.Execute(ctx, switchDbSQL) if err != nil { log.Error("switch db failed", diff --git a/pkg/task/backup.go b/pkg/task/backup.go index 8d9613047..5944a22a0 100644 --- a/pkg/task/backup.go +++ b/pkg/task/backup.go @@ -91,6 +91,7 @@ func (cfg *BackupConfig) ParseFromFlags(flags *pflag.FlagSet) error { // RunBackup starts a backup task inside the current goroutine. 
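The one-line `db.go` fix in this patch wraps the schema name with `utils.EncloseName` before building the `use` statement, so names containing backticks or reserved words can no longer produce malformed SQL. A sketch of that style of identifier quoting; this is an illustrative re-implementation, not BR's actual helper:

```go
package main

import (
	"fmt"
	"strings"
)

// encloseName wraps a MySQL identifier in backticks, doubling any embedded
// backtick so the quoting cannot be broken out of from inside the name.
func encloseName(name string) string {
	return "`" + strings.ReplaceAll(name, "`", "``") + "`"
}

func main() {
	for _, db := range []string{"test", "weird`db", "order"} {
		fmt.Printf("use %s;\n", encloseName(db))
	}
	// use `test`;
	// use `weird``db`;
	// use `order`;
}
```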
func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig) error { + defer summary.Summary(cmdName) ctx, cancel := context.WithCancel(c) defer cancel() @@ -121,8 +122,6 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig return err } - defer summary.Summary(cmdName) - ranges, backupSchemas, err := backup.BuildBackupRangeAndSchema( mgr.GetDomain(), mgr.GetTiKV(), tableFilter, backupTS) if err != nil { diff --git a/pkg/task/backup_raw.go b/pkg/task/backup_raw.go index 55299bbb1..d9deaccba 100644 --- a/pkg/task/backup_raw.go +++ b/pkg/task/backup_raw.go @@ -83,6 +83,7 @@ func (cfg *RawKvConfig) ParseFromFlags(flags *pflag.FlagSet) error { // RunBackupRaw starts a backup task inside the current goroutine. func RunBackupRaw(c context.Context, g glue.Glue, cmdName string, cfg *RawKvConfig) error { + defer summary.Summary(cmdName) ctx, cancel := context.WithCancel(c) defer cancel() @@ -104,8 +105,6 @@ func RunBackupRaw(c context.Context, g glue.Glue, cmdName string, cfg *RawKvConf return err } - defer summary.Summary(cmdName) - backupRange := rtree.Range{StartKey: cfg.StartKey, EndKey: cfg.EndKey} // The number of regions need to backup diff --git a/pkg/task/restore.go b/pkg/task/restore.go index a4598baa9..7d5dd6846 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -73,6 +73,7 @@ func (cfg *RestoreConfig) ParseFromFlags(flags *pflag.FlagSet) error { // RunRestore starts a restore task inside the current goroutine. func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConfig) error { + defer summary.Summary(cmdName) ctx, cancel := context.WithCancel(c) defer cancel() @@ -105,8 +106,6 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf return err } - defer summary.Summary(cmdName) - u, _, backupMeta, err := ReadBackupMeta(ctx, utils.MetaFile, &cfg.Config) if err != nil { return err diff --git a/pkg/task/restore_raw.go b/pkg/task/restore_raw.go index 3d60df1cd..308a44b4e 100644 --- a/pkg/task/restore_raw.go +++ b/pkg/task/restore_raw.go @@ -47,6 +47,7 @@ func (cfg *RestoreRawConfig) ParseFromFlags(flags *pflag.FlagSet) error { // RunRestoreRaw starts a raw kv restore task inside the current goroutine. 
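The point of moving `defer summary.Summary(cmdName)` to the first line of each `Run*` function is that a `defer` only fires if it was registered before the function returned; with the old placement, any early `return err` above the `defer` skipped the summary entirely. The difference in miniature, where the hypothetical `summarize` stands in for `summary.Summary`:

```go
package main

import (
	"errors"
	"fmt"
)

func summarize(name string) { fmt.Println("summary:", name) }

// Before: the defer is registered after the first fallible step, so an early
// failure returns without printing any summary.
func runBefore(fail bool) error {
	if fail {
		return errors.New("boom") // summary skipped
	}
	defer summarize("before")
	return nil
}

// After: deferring on the first line guarantees the summary is emitted on
// every exit path, success or failure.
func runAfter(fail bool) error {
	defer summarize("after")
	if fail {
		return errors.New("boom") // summary still printed
	}
	return nil
}

func main() {
	_ = runBefore(true) // prints nothing
	_ = runAfter(true)  // prints "summary: after"
}
```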
func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreRawConfig) error { + defer summary.Summary(cmdName) ctx, cancel := context.WithCancel(c) defer cancel() @@ -67,8 +68,6 @@ func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreR client.EnableOnline() } - defer summary.Summary(cmdName) - u, _, backupMeta, err := ReadBackupMeta(ctx, utils.MetaFile, &cfg.Config) if err != nil { return err From 2669204c2894752166af5df067643e85a4ee3086 Mon Sep 17 00:00:00 2001 From: kennytm Date: Fri, 20 Mar 2020 14:21:29 +0800 Subject: [PATCH 32/46] *: abstract the progress channel (updateCh) into the glue package (#196) * *: abstract the progress channel (updateCh) into the glue package * restore: fix crash in truncateTS() when the bound is unlimited * task: fix comment Co-authored-by: Ian --- pkg/backup/client.go | 9 +++++---- pkg/backup/push.go | 5 +++-- pkg/backup/schema.go | 7 ++++--- pkg/backup/schema_test.go | 27 +++++++++++++++++++++++---- pkg/glue/glue.go | 12 ++++++++++++ pkg/gluetidb/glue.go | 5 +++++ pkg/gluetikv/glue.go | 22 ++++++++++++++++++++++ pkg/restore/client.go | 12 ++++++------ pkg/restore/util.go | 8 ++++++-- pkg/task/backup.go | 9 ++++----- pkg/task/backup_raw.go | 4 ++-- pkg/task/restore.go | 13 +++++++------ pkg/task/restore_raw.go | 4 ++-- 13 files changed, 101 insertions(+), 36 deletions(-) diff --git a/pkg/backup/client.go b/pkg/backup/client.go index 9693b6b5f..219f58550 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -32,6 +32,7 @@ import ( "go.uber.org/zap" "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/rtree" "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" @@ -309,7 +310,7 @@ func (bc *Client) BackupRanges( ctx context.Context, ranges []rtree.Range, req kvproto.BackupRequest, - updateCh chan<- struct{}, + updateCh glue.Progress, ) error { start := time.Now() defer func() { @@ -374,7 +375,7 @@ func (bc *Client) BackupRange( ctx context.Context, startKey, endKey []byte, req kvproto.BackupRequest, - updateCh chan<- struct{}, + updateCh glue.Progress, ) (err error) { start := time.Now() defer func() { @@ -486,7 +487,7 @@ func (bc *Client) fineGrainedBackup( rateLimit uint64, concurrency uint32, rangeTree rtree.RangeTree, - updateCh chan<- struct{}, + updateCh glue.Progress, ) error { bo := tikv.NewBackoffer(ctx, backupFineGrainedMaxBackoff) for { @@ -561,7 +562,7 @@ func (bc *Client) fineGrainedBackup( rangeTree.Put(resp.StartKey, resp.EndKey, resp.Files) // Update progress - updateCh <- struct{}{} + updateCh.Inc() } } diff --git a/pkg/backup/push.go b/pkg/backup/push.go index 4aaffa7e2..d329f7088 100644 --- a/pkg/backup/push.go +++ b/pkg/backup/push.go @@ -12,6 +12,7 @@ import ( "github.com/pingcap/log" "go.uber.org/zap" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/rtree" ) @@ -38,7 +39,7 @@ func newPushDown(ctx context.Context, mgr ClientMgr, cap int) *pushDown { func (push *pushDown) pushBackup( req backup.BackupRequest, stores []*metapb.Store, - updateCh chan<- struct{}, + updateCh glue.Progress, ) (rtree.RangeTree, error) { // Push down backup tasks to all tikv instances. 
res := rtree.NewRangeTree() @@ -90,7 +91,7 @@ func (push *pushDown) pushBackup( resp.GetStartKey(), resp.GetEndKey(), resp.GetFiles()) // Update progress - updateCh <- struct{}{} + updateCh.Inc() } else { errPb := resp.GetError() switch v := errPb.Detail.(type) { diff --git a/pkg/backup/schema.go b/pkg/backup/schema.go index 18583d094..73a62477d 100644 --- a/pkg/backup/schema.go +++ b/pkg/backup/schema.go @@ -18,6 +18,7 @@ import ( "go.uber.org/zap" "github.com/pingcap/br/pkg/checksum" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/utils" ) @@ -67,7 +68,7 @@ func (pending *Schemas) Start( store kv.Storage, backupTS uint64, concurrency uint, - updateCh chan<- struct{}, + updateCh glue.Progress, ) { workerPool := utils.NewWorkerPool(concurrency, "Schemas") go func() { @@ -82,7 +83,7 @@ func (pending *Schemas) Start( if pending.skipChecksum { pending.backupSchemaCh <- schema - updateCh <- struct{}{} + updateCh.Inc() return } @@ -110,7 +111,7 @@ func (pending *Schemas) Start( zap.Duration("take", time.Since(start))) pending.backupSchemaCh <- schema - updateCh <- struct{}{} + updateCh.Inc() }) } pending.wg.Wait() diff --git a/pkg/backup/schema_test.go b/pkg/backup/schema_test.go index 3b3bef897..d3c82f172 100644 --- a/pkg/backup/schema_test.go +++ b/pkg/backup/schema_test.go @@ -5,6 +5,7 @@ package backup import ( "context" "math" + "sync/atomic" . "github.com/pingcap/check" "github.com/pingcap/tidb-tools/pkg/filter" @@ -30,6 +31,24 @@ func (s *testBackupSchemaSuite) TearDownSuite(c *C) { testleak.AfterTest(c)() } +type simpleProgress struct { + counter int64 +} + +func (sp *simpleProgress) Inc() { + atomic.AddInt64(&sp.counter, 1) +} + +func (sp *simpleProgress) Close() {} + +func (sp *simpleProgress) reset() { + atomic.StoreInt64(&sp.counter, 0) +} + +func (sp *simpleProgress) get() int64 { + return atomic.LoadInt64(&sp.counter) +} + func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { c.Assert(s.mock.Start(), IsNil) defer s.mock.Stop() @@ -73,10 +92,10 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { s.mock.Domain, s.mock.Storage, testFilter, math.MaxUint64) c.Assert(err, IsNil) c.Assert(backupSchemas.Len(), Equals, 1) - updateCh := make(chan struct{}, 2) + updateCh := new(simpleProgress) backupSchemas.Start(context.Background(), s.mock.Storage, math.MaxUint64, 1, updateCh) schemas, err := backupSchemas.finishTableChecksum() - <-updateCh + c.Assert(updateCh.get(), Equals, int64(1)) c.Assert(err, IsNil) c.Assert(len(schemas), Equals, 1) // Cluster returns a dummy checksum (all fields are 1). @@ -93,10 +112,10 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { s.mock.Domain, s.mock.Storage, noFilter, math.MaxUint64) c.Assert(err, IsNil) c.Assert(backupSchemas.Len(), Equals, 2) + updateCh.reset() backupSchemas.Start(context.Background(), s.mock.Storage, math.MaxUint64, 2, updateCh) schemas, err = backupSchemas.finishTableChecksum() - <-updateCh - <-updateCh + c.Assert(updateCh.get(), Equals, int64(2)) c.Assert(err, IsNil) c.Assert(len(schemas), Equals, 2) // Cluster returns a dummy checksum (all fields are 1). diff --git a/pkg/glue/glue.go b/pkg/glue/glue.go index f2f3ff55e..88a05c5c3 100644 --- a/pkg/glue/glue.go +++ b/pkg/glue/glue.go @@ -21,6 +21,8 @@ type Glue interface { // OwnsStorage returns whether the storage returned by Open() is owned // If this method returns false, the connection manager will never close the storage. 
OwnsStorage() bool + + StartProgress(ctx context.Context, cmdName string, total int64, redirectLog bool) Progress } // Session is an abstraction of the session.Session interface. @@ -30,3 +32,13 @@ type Session interface { ShowCreateTable(table *model.TableInfo, allocator autoid.Allocator) (string, error) Close() } + +// Progress is an interface recording the current execution progress. +type Progress interface { + // Inc increases the progress. This method must be goroutine-safe, and can + // be called from any goroutine. + Inc() + // Close marks the progress as 100% complete and that Inc() can no longer be + // called. + Close() +} diff --git a/pkg/gluetidb/glue.go b/pkg/gluetidb/glue.go index 80756d2c2..5f4aff6fa 100644 --- a/pkg/gluetidb/glue.go +++ b/pkg/gluetidb/glue.go @@ -51,6 +51,11 @@ func (Glue) OwnsStorage() bool { return true } +// StartProgress implements glue.Glue +func (g Glue) StartProgress(ctx context.Context, cmdName string, total int64, redirectLog bool) glue.Progress { + return g.tikvGlue.StartProgress(ctx, cmdName, total, redirectLog) +} + // Execute implements glue.Session func (gs *tidbSession) Execute(ctx context.Context, sql string) error { _, err := gs.se.Execute(ctx, sql) diff --git a/pkg/gluetikv/glue.go b/pkg/gluetikv/glue.go index e63b35b95..ab3c0d57f 100644 --- a/pkg/gluetikv/glue.go +++ b/pkg/gluetikv/glue.go @@ -3,6 +3,8 @@ package gluetikv import ( + "context" + pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" @@ -10,6 +12,7 @@ import ( "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/utils" ) // Glue is an implementation of glue.Glue that accesses only TiKV without TiDB. @@ -41,3 +44,22 @@ func (Glue) Open(path string, option pd.SecurityOption) (kv.Storage, error) { func (Glue) OwnsStorage() bool { return true } + +// StartProgress implements glue.Glue +func (Glue) StartProgress(ctx context.Context, cmdName string, total int64, redirectLog bool) glue.Progress { + return progress{ch: utils.StartProgress(ctx, cmdName, total, redirectLog)} +} + +type progress struct { + ch chan<- struct{} +} + +// Inc implements glue.Progress +func (p progress) Inc() { + p.ch <- struct{}{} +} + +// Close implements glue.Progress +func (p progress) Close() { + close(p.ch) +} diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 2453f2974..6b88f5a6d 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -445,7 +445,7 @@ func (rc *Client) setSpeedLimit() error { func (rc *Client) RestoreFiles( files []*backup.File, rewriteRules *RewriteRules, - updateCh chan<- struct{}, + updateCh glue.Progress, ) (err error) { start := time.Now() defer func() { @@ -478,7 +478,7 @@ func (rc *Client) RestoreFiles( case <-rc.ctx.Done(): errCh <- nil case errCh <- rc.fileImporter.Import(fileReplica, rewriteRules): - updateCh <- struct{}{} + updateCh.Inc() } }) } @@ -499,7 +499,7 @@ func (rc *Client) RestoreFiles( } // RestoreRaw tries to restore raw keys in the specified range. 
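The `glue.Progress` interface above decouples progress reporting from its rendering: producers only call `Inc()` and `Close()`, and each glue decides what those mean. A compact sketch of the same abstraction with a channel-backed implementation shaped like the `gluetikv` one:

```go
package main

import (
	"fmt"
	"sync"
)

// Progress mirrors the glue.Progress contract above: Inc must be safe to
// call from any goroutine, Close marks the work complete.
type Progress interface {
	Inc()
	Close()
}

// chanProgress adapts a plain channel to Progress, the way the gluetikv glue
// wraps the channel returned by utils.StartProgress.
type chanProgress struct{ ch chan<- struct{} }

func (p chanProgress) Inc()   { p.ch <- struct{}{} }
func (p chanProgress) Close() { close(p.ch) }

func main() {
	ch := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // consumer draining the channel, e.g. a progress bar
		defer wg.Done()
		n := 0
		for range ch {
			n++
		}
		fmt.Println("units completed:", n)
	}()

	var p Progress = chanProgress{ch: ch}
	for i := 0; i < 5; i++ {
		p.Inc()
	}
	p.Close()
	wg.Wait()
}
```

The test suite's `simpleProgress` in `schema_test.go` is the other side of the same coin: an atomic counter implementing the interface, so assertions can read exact counts instead of draining a channel.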
-func (rc *Client) RestoreRaw(startKey []byte, endKey []byte, files []*backup.File, updateCh chan<- struct{}) error { +func (rc *Client) RestoreRaw(startKey []byte, endKey []byte, files []*backup.File, updateCh glue.Progress) error { start := time.Now() defer func() { elapsed := time.Since(start) @@ -529,7 +529,7 @@ func (rc *Client) RestoreRaw(startKey []byte, endKey []byte, files []*backup.Fil case <-rc.ctx.Done(): errCh <- nil case errCh <- rc.fileImporter.Import(fileReplica, emptyRules): - updateCh <- struct{}{} + updateCh.Inc() } }) } @@ -617,7 +617,7 @@ func (rc *Client) ValidateChecksum( kvClient kv.Client, tables []*utils.Table, newTables []*model.TableInfo, - updateCh chan<- struct{}, + updateCh glue.Progress, ) error { start := time.Now() defer func() { @@ -674,7 +674,7 @@ func (rc *Client) ValidateChecksum( return } - updateCh <- struct{}{} + updateCh.Inc() }) } wg.Wait() diff --git a/pkg/restore/util.go b/pkg/restore/util.go index c49c07994..698de6aec 100644 --- a/pkg/restore/util.go +++ b/pkg/restore/util.go @@ -22,6 +22,7 @@ import ( "go.uber.org/zap" "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/rtree" "github.com/pingcap/br/pkg/summary" ) @@ -309,6 +310,9 @@ func matchNewPrefix(key []byte, rewriteRules *RewriteRules) *import_sstpb.Rewrit } func truncateTS(key []byte) []byte { + if len(key) == 0 { + return nil + } return key[:len(key)-8] } @@ -320,7 +324,7 @@ func SplitRanges( client *Client, ranges []rtree.Range, rewriteRules *RewriteRules, - updateCh chan<- struct{}, + updateCh glue.Progress, ) error { start := time.Now() defer func() { @@ -339,7 +343,7 @@ func SplitRanges( return splitter.Split(ctx, ranges, rewriteRules, storeMap, func(keys [][]byte) { for range keys { - updateCh <- struct{}{} + updateCh.Inc() } }) } diff --git a/pkg/task/backup.go b/pkg/task/backup.go index 5944a22a0..bf90c6739 100644 --- a/pkg/task/backup.go +++ b/pkg/task/backup.go @@ -24,7 +24,6 @@ import ( "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" - "github.com/pingcap/br/pkg/utils" ) const ( @@ -160,7 +159,7 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig // Backup // Redirect to log if there is no log file to avoid unreadable output. 
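The `truncateTS` guard above fixes the crash mentioned in this patch's title: the function strips the trailing 8-byte MVCC timestamp, and an unlimited range bound arrives as an empty key, so the unguarded `key[:len(key)-8]` slice expression panics. The fix in isolation:

```go
package main

import "fmt"

// truncateTS drops the 8-byte timestamp suffix from an encoded key. An empty
// key is an unlimited range bound with no suffix to drop, so it is returned
// as nil instead of panicking on key[:len(key)-8].
func truncateTS(key []byte) []byte {
	if len(key) == 0 {
		return nil
	}
	return key[:len(key)-8]
}

func main() {
	fmt.Printf("%q\n", truncateTS([]byte("rawkey12345678"))) // "rawkey"
	fmt.Printf("%v\n", truncateTS(nil))                      // [] (no panic)
}
```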
- updateCh := utils.StartProgress( + updateCh := g.StartProgress( ctx, cmdName, int64(approximateRegions), !cfg.LogProgress) req := kvproto.BackupRequest{ @@ -175,14 +174,14 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig return err } // Backup has finished - close(updateCh) + updateCh.Close() // Checksum backupSchemasConcurrency := backup.DefaultSchemaConcurrency if backupSchemas.Len() < backupSchemasConcurrency { backupSchemasConcurrency = backupSchemas.Len() } - updateCh = utils.StartProgress( + updateCh = g.StartProgress( ctx, "Checksum", int64(backupSchemas.Len()), !cfg.LogProgress) backupSchemas.SetSkipChecksum(!cfg.Checksum) backupSchemas.Start( @@ -209,7 +208,7 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig log.Info("Skip fast checksum in incremental backup") } // Checksum has finished - close(updateCh) + updateCh.Close() err = client.SaveBackupMeta(ctx, ddlJobs) if err != nil { diff --git a/pkg/task/backup_raw.go b/pkg/task/backup_raw.go index d9deaccba..e879523b5 100644 --- a/pkg/task/backup_raw.go +++ b/pkg/task/backup_raw.go @@ -117,7 +117,7 @@ func RunBackupRaw(c context.Context, g glue.Glue, cmdName string, cfg *RawKvConf // Backup // Redirect to log if there is no log file to avoid unreadable output. - updateCh := utils.StartProgress( + updateCh := g.StartProgress( ctx, cmdName, int64(approximateRegions), !cfg.LogProgress) req := kvproto.BackupRequest{ @@ -134,7 +134,7 @@ func RunBackupRaw(c context.Context, g glue.Glue, cmdName string, cfg *RawKvConf return err } // Backup has finished - close(updateCh) + updateCh.Close() // Checksum err = client.SaveBackupMeta(ctx, nil) diff --git a/pkg/task/restore.go b/pkg/task/restore.go index 7d5dd6846..c75d53f89 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -171,7 +171,7 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf ranges = restore.AttachFilesToRanges(files, ranges) // Redirect to log if there is no log file to avoid unreadable output. - updateCh := utils.StartProgress( + updateCh := g.StartProgress( ctx, cmdName, // Split/Scatter + Download/Ingest @@ -239,17 +239,17 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf } // Restore has finished. - close(updateCh) + updateCh.Close() // Checksum - updateCh = utils.StartProgress( + updateCh = g.StartProgress( ctx, "Checksum", int64(len(newTables)), !cfg.LogProgress) err = client.ValidateChecksum( ctx, mgr.GetTiKV().GetClient(), tables, newTables, updateCh) if err != nil { return err } - close(updateCh) + updateCh.Close() return nil } @@ -399,7 +399,7 @@ func RunRestoreTiflashReplica(c context.Context, g glue.Glue, cmdName string, cf for _, db := range dbs { tables = append(tables, db.Tables...) 
} - updateCh := utils.StartProgress( + updateCh := g.StartProgress( ctx, "RecoverTiflashReplica", int64(len(tables)), !cfg.LogProgress) for _, t := range tables { log.Info("get table", zap.Stringer("name", t.Info.Name), @@ -409,9 +409,10 @@ func RunRestoreTiflashReplica(c context.Context, g glue.Glue, cmdName string, cf if err != nil { return err } - updateCh <- struct{}{} + updateCh.Inc() } } + updateCh.Close() summary.CollectInt("recover tables", len(tables)) return nil diff --git a/pkg/task/restore_raw.go b/pkg/task/restore_raw.go index 308a44b4e..ccd6bd17a 100644 --- a/pkg/task/restore_raw.go +++ b/pkg/task/restore_raw.go @@ -97,7 +97,7 @@ func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreR // Redirect to log if there is no log file to avoid unreadable output. // TODO: How to show progress? - updateCh := utils.StartProgress( + updateCh := g.StartProgress( ctx, "Raw Restore", // Split/Scatter + Download/Ingest @@ -124,7 +124,7 @@ func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreR return errors.Trace(err) } // Restore has finished. - close(updateCh) + updateCh.Close() return nil } From f9f6e1915285891f91c54c9bd22d518384f41cf1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BA=84=E5=A4=A9=E7=BF=BC?= Date: Fri, 20 Mar 2020 18:35:28 +0800 Subject: [PATCH 33/46] *: refline logs (#189) --- cmd/cmd.go | 15 +++++++++++++-- pkg/summary/collector.go | 19 ++++++++++++++++++- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/cmd/cmd.go b/cmd/cmd.go index 5b2801894..3e6abff67 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -6,8 +6,11 @@ import ( "context" "net/http" "net/http/pprof" + "os" + "path/filepath" "sync" "sync/atomic" + "time" "github.com/pingcap/log" "github.com/pingcap/tidb/util/logutil" @@ -16,6 +19,7 @@ import ( "go.uber.org/zap" "github.com/pingcap/br/pkg/gluetidb" + "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) @@ -41,6 +45,10 @@ const ( flagVersionShort = "V" ) +func timestampLogFileName() string { + return filepath.Join(os.TempDir(), "br-"+time.Now().Format(time.RFC3339)) +} + // AddFlags adds flags to the given cmd. func AddFlags(cmd *cobra.Command) { cmd.Version = utils.BRInfo() @@ -49,8 +57,8 @@ func AddFlags(cmd *cobra.Command) { cmd.PersistentFlags().StringP(FlagLogLevel, "L", "info", "Set the log level") - cmd.PersistentFlags().String(FlagLogFile, "", - "Set the log file path. If not set, logs will output to stdout") + cmd.PersistentFlags().String(FlagLogFile, timestampLogFileName(), + "Set the log file path. If not set, logs will output to temp file") cmd.PersistentFlags().String(FlagStatusAddr, "", "Set the HTTP listening address for the status report service. Set to empty string to disable") task.DefineCommonFlags(cmd.PersistentFlags()) @@ -75,6 +83,9 @@ func Init(cmd *cobra.Command) (err error) { } if len(conf.File.Filename) != 0 { atomic.StoreUint64(&hasLogFile, 1) + summary.InitCollector(true) + } else { + cmd.Printf("log file: %s\n", conf.File.Filename) } lg, p, e := log.InitLogger(conf) if e != nil { diff --git a/pkg/summary/collector.go b/pkg/summary/collector.go index ee465d60b..e1d722a18 100644 --- a/pkg/summary/collector.go +++ b/pkg/summary/collector.go @@ -40,7 +40,24 @@ type LogCollector interface { type logFunc func(msg string, fields ...zap.Field) -var collector = newLogCollector(log.Info) +var collector LogCollector = newLogCollector(log.Info) + +// InitCollector initilize global collector instance. 
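With this patch BR writes its detailed log to a timestamped file under the temp directory by default, and `InitCollector`, whose definition follows, duplicates each summary record to stdout so the outcome stays visible without opening the log file. A simplified sketch of that two-sink idea using zap; the logger construction here is reduced, whereas BR builds its loggers through `log.InitLogger`.

```go
package main

import (
	"os"
	"path/filepath"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// timestampLogFileName mirrors the helper above: one log file per run,
// named by the current time, in the system temp directory.
func timestampLogFileName() string {
	return filepath.Join(os.TempDir(), "br-"+time.Now().Format(time.RFC3339))
}

func main() {
	enc := zapcore.NewConsoleEncoder(zap.NewProductionEncoderConfig())

	f, err := os.Create(timestampLogFileName())
	if err != nil {
		panic(err)
	}
	defer f.Close()

	fileLogger := zap.New(zapcore.NewCore(enc, zapcore.AddSync(f), zap.InfoLevel))
	termLogger := zap.New(zapcore.NewCore(enc, zapcore.AddSync(os.Stdout), zap.InfoLevel))

	// The summary path fans each record out to both sinks; this is the
	// essence of what InitCollector(true) arranges.
	logBoth := func(msg string, fields ...zap.Field) {
		fileLogger.Info(msg, fields...)
		termLogger.Info(msg, fields...)
	}
	logBoth("backup summary", zap.Int("ranges", 42))
}
```

Note that `time.RFC3339` puts colons in the file name, which is fine on Unix-like systems but would be rejected by Windows file systems.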
+func InitCollector(hasLogFile bool) { + logF := log.L().Info + if hasLogFile { + conf := new(log.Config) + // Always duplicate summary to stdout. + logger, _, err := log.InitLogger(conf) + if err == nil { + logF = func(msg string, fields ...zap.Field) { + logger.Info(msg, fields...) + log.Info(msg, fields...) + } + } + } + collector = newLogCollector(logF) +} type logCollector struct { mu sync.Mutex From e168a60288ade404a97b47147dbc64ec0b63b9e6 Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Tue, 24 Mar 2020 20:49:42 +0800 Subject: [PATCH 34/46] tests: disable TLS test (#204) Signed-off-by: Neil Shen --- tests/run.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/run.sh b/tests/run.sh index 21d6b27ed..3bb5bd57b 100755 --- a/tests/run.sh +++ b/tests/run.sh @@ -29,6 +29,13 @@ if [ "${1-}" = '--debug' ]; then fi for script in tests/${TEST_NAME-*}/run.sh; do + TEST_NAME="$(basename "$(dirname "$script")")" + if [ $TEST_NAME = "br_tls" ]; then + echo "FIXME enable br_tls test" + echo "TiKV master (ed71f20f445e10595553d2bf3d1a1eb645b9a61a) aborts when TLS is enabled" + continue + fi + echo "*===== Running test $script... =====*" TEST_DIR="$TEST_DIR" \ PD_ADDR="$PD_ADDR" \ From 4bc66c396a510865bde913c629e554b02bb02379 Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Wed, 25 Mar 2020 10:49:39 +0800 Subject: [PATCH 35/46] *: add S3 quick start and few enhancement of log (#202) * README, docker: add quick start about S3 storage Signed-off-by: Neil Shen * pkg/summary: make sure to output correct summary Signed-off-by: Neil Shen * cmd, tests: log to terminal if BR_LOG_TO_TERM is set Signed-off-by: Neil Shen * Update pkg/task/common.go Co-Authored-By: kennytm * address comments Signed-off-by: Neil Shen * address comments Signed-off-by: Neil Shen * tests: cat log if br fails Signed-off-by: Neil Shen Co-authored-by: kennytm --- README.md | 16 +++++++++++++++- cmd/cmd.go | 19 ++++++++++++------- docker-compose.yaml | 26 ++++++++++++++++++++++++-- docker/Dockerfile | 9 ++++++--- docker/minio.env | 6 ++++++ pkg/summary/collector.go | 15 +++++++++++++-- pkg/summary/collector_test.go | 1 + pkg/summary/summary.go | 5 +++++ pkg/task/backup.go | 3 +++ pkg/task/backup_raw.go | 3 +++ pkg/task/common.go | 2 +- pkg/task/restore.go | 4 ++++ pkg/task/restore_raw.go | 2 ++ tests/br_full_ddl/run.sh | 5 ++++- tests/br_full_index/run.sh | 5 ++++- tests/run.sh | 1 + 16 files changed, 104 insertions(+), 18 deletions(-) create mode 100644 docker/minio.env diff --git a/README.md b/README.md index 6207d98eb..408b09749 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ go-ycsb load mysql -p workload=core \ mysql -uroot -htidb -P4000 -E -e "SELECT COUNT(*) FROM test.usertable" # Build BR and backup! -make release && \ +make build && \ bin/br backup full --pd pd0:2379 --storage "local:///data/backup/full" \ --log-file "/logs/br_backup.log" @@ -69,6 +69,20 @@ bin/br restore full --pd pd0:2379 --storage "local:///data/backup/full" \ # How many rows do we get again? Expected to be 100000 rows. mysql -uroot -htidb -P4000 -E -e "SELECT COUNT(*) FROM test.usertable" + +# Test S3 compatible storage (MinIO). +# Create a bucket to save backup by mc (a MinIO Client). +mc config host add minio $S3_ENDPOINT $MINIO_ACCESS_KEY $MINIO_SECRET_KEY && \ +mc mb minio/mybucket + +# Backup to S3 compatible storage. +bin/br backup full --pd pd0:2379 --storage "s3://mybucket/full" \ + --s3.endpoint="$S3_ENDPOINT" + +# Drop database and restore! 
+mysql -uroot -htidb -P4000 -E -e "DROP DATABASE test; SHOW DATABASES;" && \ +bin/br restore full --pd pd0:2379 --storage "s3://mybucket/full" \ + --s3.endpoint="$S3_ENDPOINT" ``` ## Contributing diff --git a/cmd/cmd.go b/cmd/cmd.go index 3e6abff67..87a8aadc9 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -25,10 +25,11 @@ import ( ) var ( - initOnce = sync.Once{} - defaultContext context.Context - hasLogFile uint64 - tidbGlue = gluetidb.Glue{} + initOnce = sync.Once{} + defaultContext context.Context + hasLogFile uint64 + tidbGlue = gluetidb.Glue{} + envLogToTermKey = "BR_LOG_TO_TERM" ) const ( @@ -46,7 +47,7 @@ const ( ) func timestampLogFileName() string { - return filepath.Join(os.TempDir(), "br-"+time.Now().Format(time.RFC3339)) + return filepath.Join(os.TempDir(), "br.log."+time.Now().Format(time.RFC3339)) } // AddFlags adds flags to the given cmd. @@ -81,11 +82,15 @@ func Init(cmd *cobra.Command) (err error) { if err != nil { return } + _, outputLogToTerm := os.LookupEnv(envLogToTermKey) + if outputLogToTerm { + // Log to term if env `BR_LOG_TO_TERM` is set. + conf.File.Filename = "" + } if len(conf.File.Filename) != 0 { atomic.StoreUint64(&hasLogFile, 1) summary.InitCollector(true) - } else { - cmd.Printf("log file: %s\n", conf.File.Filename) + cmd.Printf("Detial BR log in %s\n", conf.File.Filename) } lg, p, e := log.InitLogger(conf) if e != nil { diff --git a/docker-compose.yaml b/docker-compose.yaml index 4d84c67fa..ab6360d6d 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,6 +1,6 @@ --- # Source: tidb-docker-compose/templates/docker-compose.yml -version: '2.1' +version: '3.2' services: control: @@ -10,11 +10,13 @@ services: dockerfile: ./docker/Dockerfile volumes: - ./docker/data:/data - - ./docker/logs:/logs + - ./docker/logs:/tmp command: -c "/usr/bin/tail -f /dev/null" depends_on: - "tidb" restart: on-failure + env_file: + - ./docker/minio.env pd0: image: pingcap/pd:latest @@ -64,6 +66,8 @@ services: # soft: 1000000 # hard: 1000000 restart: on-failure + env_file: + - ./docker/minio.env tikv1: image: pingcap/tikv:latest @@ -87,6 +91,8 @@ services: # soft: 1000000 # hard: 1000000 restart: on-failure + env_file: + - ./docker/minio.env tikv2: image: pingcap/tikv:latest @@ -110,6 +116,8 @@ services: # soft: 1000000 # hard: 1000000 restart: on-failure + env_file: + - ./docker/minio.env tikv3: image: pingcap/tikv:latest @@ -133,6 +141,8 @@ services: # soft: 1000000 # hard: 1000000 restart: on-failure + env_file: + - ./docker/minio.env tikv4: image: pingcap/tikv:latest @@ -156,6 +166,8 @@ services: # soft: 1000000 # hard: 1000000 restart: on-failure + env_file: + - ./docker/minio.env tidb: image: pingcap/tidb:latest @@ -185,6 +197,16 @@ services: # hard: 1000000 restart: on-failure + minio: + image: minio/minio + ports: + - 24927:24927 + volumes: + - ./docker/data/s3:/data/s3 + command: server --address=:24927 /data/s3 + env_file: + - ./docker/minio.env + tidb-vision: image: pingcap/tidb-vision:latest environment: diff --git a/docker/Dockerfile b/docker/Dockerfile index c93d22ab4..14c577fcf 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,11 +1,13 @@ -FROM golang:1.13.8-buster as builder - # For loading data to TiDB +FROM golang:1.13.8-buster as go-ycsb-builder WORKDIR /go/src/github.com/pingcap/ RUN git clone https://github.com/pingcap/go-ycsb.git && \ cd go-ycsb && \ make +# For operating minio S3 compatible storage +FROM minio/mc as mc-builder + FROM golang:1.13.8-buster RUN apt-get update && apt-get install -y --no-install-recommends \ @@ -19,6 +21,7 @@ 
RUN apt-get update && apt-get install -y --no-install-recommends \ WORKDIR /go/src/github.com/pingcap/br COPY . . -COPY --from=builder /go/src/github.com/pingcap/go-ycsb/bin/go-ycsb /go/bin/go-ycsb +COPY --from=go-ycsb-builder /go/src/github.com/pingcap/go-ycsb/bin/go-ycsb /go/bin/go-ycsb +COPY --from=mc-builder /usr/bin/mc /usr/bin/mc ENTRYPOINT ["/bin/bash"] diff --git a/docker/minio.env b/docker/minio.env new file mode 100644 index 000000000..d865b2474 --- /dev/null +++ b/docker/minio.env @@ -0,0 +1,6 @@ +MINIO_ACCESS_KEY=brs3accesskey +MINIO_SECRET_KEY=brs3secretkey +MINIO_BROWSER=off +AWS_ACCESS_KEY_ID=brs3accesskey +AWS_SECRET_ACCESS_KEY=brs3secretkey +S3_ENDPOINT=http://minio:24927 diff --git a/pkg/summary/collector.go b/pkg/summary/collector.go index e1d722a18..76dd8a121 100644 --- a/pkg/summary/collector.go +++ b/pkg/summary/collector.go @@ -35,6 +35,8 @@ type LogCollector interface { CollectInt(name string, t int) + SetSuccessStatus(success bool) + Summary(name string) } @@ -43,7 +45,9 @@ type logFunc func(msg string, fields ...zap.Field) var collector LogCollector = newLogCollector(log.Info) // InitCollector initilize global collector instance. -func InitCollector(hasLogFile bool) { +func InitCollector( // revive:disable-line:flag-parameter + hasLogFile bool, +) { logF := log.L().Info if hasLogFile { conf := new(log.Config) @@ -69,6 +73,7 @@ type logCollector struct { failureReasons map[string]error durations map[string]time.Duration ints map[string]int + successStatus bool log logFunc } @@ -134,6 +139,12 @@ func (tc *logCollector) CollectInt(name string, t int) { tc.ints[name] += t } +func (tc *logCollector) SetSuccessStatus(success bool) { + tc.mu.Lock() + defer tc.mu.Unlock() + tc.successStatus = success +} + func (tc *logCollector) Summary(name string) { tc.mu.Lock() defer func() { @@ -162,7 +173,7 @@ func (tc *logCollector) Summary(name string) { logFields = append(logFields, zap.Int(key, val)) } - if len(tc.failureReasons) != 0 { + if len(tc.failureReasons) != 0 || !tc.successStatus { for unitName, reason := range tc.failureReasons { logFields = append(logFields, zap.String("unitName", unitName), zap.Error(reason)) } diff --git a/pkg/summary/collector_test.go b/pkg/summary/collector_test.go index 7dff32dd1..165232f55 100644 --- a/pkg/summary/collector_test.go +++ b/pkg/summary/collector_test.go @@ -30,6 +30,7 @@ func (suit *testCollectorSuite) TestSumDurationInt(c *C) { col.CollectDuration("b", time.Second) col.CollectInt("c", 2) col.CollectInt("c", 2) + col.SetSuccessStatus(true) col.Summary("foo") c.Assert(len(fields), Equals, 3) diff --git a/pkg/summary/summary.go b/pkg/summary/summary.go index 3ffdedf8a..852e936a9 100644 --- a/pkg/summary/summary.go +++ b/pkg/summary/summary.go @@ -29,6 +29,11 @@ func CollectInt(name string, t int) { collector.CollectInt(name, t) } +// SetSuccessStatus sets final success status +func SetSuccessStatus(success bool) { + collector.SetSuccessStatus(success) +} + // Summary outputs summary log func Summary(name string) { collector.Summary(name) diff --git a/pkg/task/backup.go b/pkg/task/backup.go index bf90c6739..fe3021a33 100644 --- a/pkg/task/backup.go +++ b/pkg/task/backup.go @@ -214,6 +214,9 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig if err != nil { return err } + + // Set task summary to success status. 
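`SetSuccessStatus` closes a gap in the collector: a task that exits early may have recorded no per-unit error, yet it did not succeed. `Summary` therefore reports failure whenever the flag was never flipped, not only when `failureReasons` is non-empty. The gate in miniature:

```go
package main

import (
	"fmt"
	"sync"
)

type collector struct {
	mu             sync.Mutex
	failureReasons map[string]error
	successStatus  bool
}

func (c *collector) SetSuccessStatus(ok bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.successStatus = ok
}

func (c *collector) Summary(name string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	// Failure is reported when any unit recorded an error OR the task never
	// flipped the success flag (for example, it returned early).
	if len(c.failureReasons) != 0 || !c.successStatus {
		fmt.Println(name, "failed")
		return
	}
	fmt.Println(name, "succeeded")
}

func run(c *collector) error {
	// ... do the actual work; flip the flag only at the very end ...
	c.SetSuccessStatus(true)
	return nil
}

func main() {
	c := &collector{failureReasons: map[string]error{}}
	defer c.Summary("backup") // emitted last, after run returns
	_ = run(c)
}
```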
+ summary.SetSuccessStatus(true) return nil } diff --git a/pkg/task/backup_raw.go b/pkg/task/backup_raw.go index e879523b5..fefcc2cf1 100644 --- a/pkg/task/backup_raw.go +++ b/pkg/task/backup_raw.go @@ -141,5 +141,8 @@ func RunBackupRaw(c context.Context, g glue.Glue, cmdName string, cfg *RawKvConf if err != nil { return err } + + // Set task summary to success status. + summary.SetSuccessStatus(true) return nil } diff --git a/pkg/task/common.go b/pkg/task/common.go index 61186abe1..e3b48748e 100644 --- a/pkg/task/common.go +++ b/pkg/task/common.go @@ -95,7 +95,7 @@ type Config struct { // DefineCommonFlags defines the flags common to all BRIE commands. func DefineCommonFlags(flags *pflag.FlagSet) { flags.BoolP(flagSendCreds, "c", true, "Whether send credentials to tikv") - flags.StringP(flagStorage, "s", "", `specify the url where backup storage, eg, "s3:///path/to/save"`) + flags.StringP(flagStorage, "s", "", `specify the url where backup storage, eg, "s3://bucket/path/prefix"`) flags.StringSliceP(flagPD, "u", []string{"127.0.0.1:2379"}, "PD address") flags.String(flagCA, "", "CA certificate path for TLS connection") flags.String(flagCert, "", "Certificate path for TLS connection") diff --git a/pkg/task/restore.go b/pkg/task/restore.go index c75d53f89..5fffd7ce4 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -251,6 +251,8 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf } updateCh.Close() + // Set task summary to success status. + summary.SetSuccessStatus(true) return nil } @@ -415,5 +417,7 @@ func RunRestoreTiflashReplica(c context.Context, g glue.Glue, cmdName string, cf updateCh.Close() summary.CollectInt("recover tables", len(tables)) + // Set task summary to success status. + summary.SetSuccessStatus(true) return nil } diff --git a/pkg/task/restore_raw.go b/pkg/task/restore_raw.go index ccd6bd17a..ca7504f6f 100644 --- a/pkg/task/restore_raw.go +++ b/pkg/task/restore_raw.go @@ -126,5 +126,7 @@ func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreR // Restore has finished. updateCh.Close() + // Set task summary to success status. + summary.SetSuccessStatus(true) return nil } diff --git a/tests/br_full_ddl/run.sh b/tests/br_full_ddl/run.sh index e50ef1ecf..93c5b28fb 100755 --- a/tests/br_full_ddl/run.sh +++ b/tests/br_full_ddl/run.sh @@ -36,7 +36,10 @@ done # backup full echo "backup start..." -run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --log-file $LOG +# Do not log to terminal +unset BR_LOG_TO_TERM +run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --log-file $LOG || cat $LOG +BR_LOG_TO_TERM=1 checksum_count=$(cat $LOG | grep "fast checksum success" | wc -l | xargs) diff --git a/tests/br_full_index/run.sh b/tests/br_full_index/run.sh index 5069035e6..bb2486802 100755 --- a/tests/br_full_index/run.sh +++ b/tests/br_full_index/run.sh @@ -36,7 +36,10 @@ done # backup full echo "backup start..." 
-run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --log-file $LOG +# Do not log to terminal +unset BR_LOG_TO_TERM +run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 --log-file $LOG || cat $LOG +BR_LOG_TO_TERM=1 checksum_count=$(cat $LOG | grep "fast checksum success" | wc -l | xargs) diff --git a/tests/run.sh b/tests/run.sh index 3bb5bd57b..583d395c8 100755 --- a/tests/run.sh +++ b/tests/run.sh @@ -46,5 +46,6 @@ for script in tests/${TEST_NAME-*}/run.sh; do TIKV_ADDR="$TIKV_ADDR" \ PATH="tests/_utils:bin:$PATH" \ TEST_NAME="$(basename "$(dirname "$script")")" \ + BR_LOG_TO_TERM=1 \ bash "$script" done From 2f083c8e27767307c4c263752437533b69826918 Mon Sep 17 00:00:00 2001 From: 5kbpers <20279863+5kbpers@users.noreply.github.com> Date: Wed, 25 Mar 2020 13:06:15 +0800 Subject: [PATCH 36/46] restore: add error field to `DownloadResponse` (#195) * restore: add error field to `DownloadResponse` Signed-off-by: 5kbpers --- go.sum | 1 - pkg/restore/backoff.go | 13 +++---------- pkg/restore/backoff_test.go | 6 +++--- pkg/restore/import.go | 32 ++++++++++---------------------- tests/br_move_backup/run.sh | 9 +++++++++ 5 files changed, 25 insertions(+), 36 deletions(-) diff --git a/go.sum b/go.sum index 31fd50bcc..9c1c97052 100644 --- a/go.sum +++ b/go.sum @@ -363,7 +363,6 @@ github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17Xtb github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20200214064158-62d31900d88e/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/kvproto v0.0.0-20200221034943-a2aa1d1e20a8/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= -github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5 h1:knEvP4R5v5b2T107/Q6VzB0C8/6T7NXB/V7Vl1FtQsg= github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/kvproto v0.0.0-20200317112120-78042b285b75 h1:DB3NTM0ilba/6sW+vccdEnP10bVvrVunDwWvRa0hSKc= github.com/pingcap/kvproto v0.0.0-20200317112120-78042b285b75/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= diff --git a/pkg/restore/backoff.go b/pkg/restore/backoff.go index 21048dd13..a84014c11 100644 --- a/pkg/restore/backoff.go +++ b/pkg/restore/backoff.go @@ -15,18 +15,11 @@ import ( var ( errEpochNotMatch = errors.NewNoStackError("epoch not match") errKeyNotInRegion = errors.NewNoStackError("key not in region") - errRegionNotFound = errors.NewNoStackError("region not found") - errResp = errors.NewNoStackError("response error") errRewriteRuleNotFound = errors.NewNoStackError("rewrite rule not found") errRangeIsEmpty = errors.NewNoStackError("range is empty") errGrpc = errors.NewNoStackError("gRPC error") - - // TODO: add `error` field to `DownloadResponse` for distinguish the errors of gRPC - // and the errors of request - errBadFormat = errors.NewNoStackError("bad format") - errWrongKeyPrefix = errors.NewNoStackError("wrong key prefix") - errFileCorrupted = errors.NewNoStackError("file corrupted") - errCannotRead = errors.NewNoStackError("cannot read externel storage") + errDownloadFailed = errors.NewNoStackError("download sst failed") + errIngestFailed = errors.NewNoStackError("ingest sst failed") ) const ( @@ -67,7 +60,7 @@ func newDownloadSSTBackoffer() utils.Backoffer { func (bo *importerBackoffer) NextBackoff(err error) time.Duration { switch errors.Cause(err) { - case 
errResp, errGrpc, errEpochNotMatch: + case errGrpc, errEpochNotMatch, errIngestFailed: bo.delayTime = 2 * bo.delayTime bo.attempt-- case errRangeIsEmpty, errRewriteRuleNotFound: diff --git a/pkg/restore/backoff_test.go b/pkg/restore/backoff_test.go index 11accedd2..a07c0839b 100644 --- a/pkg/restore/backoff_test.go +++ b/pkg/restore/backoff_test.go @@ -37,7 +37,7 @@ func (s *testBackofferSuite) TestImporterBackoffer(c *C) { case 0: return errGrpc case 1: - return errResp + return errEpochNotMatch case 2: return errRangeIsEmpty } @@ -54,8 +54,8 @@ func (s *testBackofferSuite) TestImporterBackoffer(c *C) { } err = utils.WithRetry(context.Background(), func() error { defer func() { counter++ }() - return errResp + return errEpochNotMatch }, &backoffer) c.Assert(counter, Equals, 10) - c.Assert(err, Equals, errResp) + c.Assert(err, Equals, errEpochNotMatch) } diff --git a/pkg/restore/import.go b/pkg/restore/import.go index fec07a870..405af050c 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -6,7 +6,6 @@ import ( "bytes" "context" "crypto/tls" - "strings" "sync" "time" @@ -272,14 +271,12 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul // 2. retry ingest errIngest = errors.AddStack(errEpochNotMatch) break ingestRetry - case errPb.RegionNotFound != nil: - errIngest = errors.AddStack(errRegionNotFound) - break ingestRetry case errPb.KeyNotInRegion != nil: errIngest = errors.AddStack(errKeyNotInRegion) break ingestRetry default: - errIngest = errors.Errorf("ingest error %s", errPb) + // Other errors like `ServerIsBusy`, `RegionNotFound`, etc. should be retryable + errIngest = errors.Annotatef(errIngestFailed, "ingest error %s", errPb) break ingestRetry } } @@ -346,7 +343,10 @@ func (importer *FileImporter) downloadSST( for _, peer := range regionInfo.Region.GetPeers() { resp, err = importer.importClient.DownloadSST(importer.ctx, peer.GetStoreId(), req) if err != nil { - return nil, extractDownloadSSTError(err) + return nil, errors.Annotatef(errGrpc, "%s", err) + } + if resp.GetError() != nil { + return nil, errors.Annotate(errDownloadFailed, resp.GetError().GetMessage()) } if resp.GetIsEmpty() { return nil, errors.Trace(errRangeIsEmpty) @@ -395,7 +395,10 @@ func (importer *FileImporter) downloadRawKVSST( for _, peer := range regionInfo.Region.GetPeers() { resp, err = importer.importClient.DownloadSST(importer.ctx, peer.GetStoreId(), req) if err != nil { - return nil, extractDownloadSSTError(err) + return nil, errors.Annotatef(errGrpc, "%s", err) + } + if resp.GetError() != nil { + return nil, errors.Annotate(errDownloadFailed, resp.GetError().GetMessage()) } if resp.GetIsEmpty() { return nil, errors.Trace(errRangeIsEmpty) @@ -439,18 +442,3 @@ func checkRegionEpoch(new, old *RegionInfo) bool { } return false } - -func extractDownloadSSTError(e error) error { - err := errGrpc - switch { - case strings.Contains(e.Error(), "bad format"): - err = errBadFormat - case strings.Contains(e.Error(), "wrong prefix"): - err = errWrongKeyPrefix - case strings.Contains(e.Error(), "corrupted"): - err = errFileCorrupted - case strings.Contains(e.Error(), "Cannot read"): - err = errCannotRead - } - return errors.Annotatef(err, "%s", e) -} diff --git a/tests/br_move_backup/run.sh b/tests/br_move_backup/run.sh index 43f27a9af..b85d25823 100755 --- a/tests/br_move_backup/run.sh +++ b/tests/br_move_backup/run.sh @@ -32,6 +32,15 @@ run_sql "DROP TABLE $DB.$TABLE;" # change backup path mv $TEST_DIR/$DB $TEST_DIR/another$DB +# restore table with old path +echo "restore 
with old path start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB" --pd $PD_ADDR || restore_old_fail=1 + +if [ "$restore_old_fail" -ne "1" ];then + echo "TEST: [$TEST_NAME] test restore with old path failed!" + exit 1 +fi + # restore table echo "restore start..." run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/another$DB" --pd $PD_ADDR From 1f27b57f535c8cb2e9dfd8e4bb247635d8fa3d45 Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Wed, 25 Mar 2020 15:15:29 +0800 Subject: [PATCH 37/46] restore: populate restore cancel error (#207) Signed-off-by: Neil Shen Co-authored-by: kennytm --- pkg/restore/client.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 6b88f5a6d..56bb83233 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -476,7 +476,7 @@ func (rc *Client) RestoreFiles( defer wg.Done() select { case <-rc.ctx.Done(): - errCh <- nil + errCh <- rc.ctx.Err() case errCh <- rc.fileImporter.Import(fileReplica, rewriteRules): updateCh.Inc() } @@ -527,7 +527,7 @@ func (rc *Client) RestoreRaw(startKey []byte, endKey []byte, files []*backup.Fil defer wg.Done() select { case <-rc.ctx.Done(): - errCh <- nil + errCh <- rc.ctx.Err() case errCh <- rc.fileImporter.Import(fileReplica, emptyRules): updateCh.Inc() } From ae7688aeafaf8059fbfa41b515ac5e3a2968359d Mon Sep 17 00:00:00 2001 From: 3pointer Date: Wed, 25 Mar 2020 18:34:57 +0800 Subject: [PATCH 38/46] enhance usability of br (#208) * silenceUsage only when parse cmd flags failed * udpate tidb Co-authored-by: kennytm --- cmd/backup.go | 4 +++- cmd/restore.go | 5 ++++- go.mod | 4 ++-- go.sum | 13 +++++++------ main.go | 2 +- 5 files changed, 17 insertions(+), 11 deletions(-) diff --git a/cmd/backup.go b/cmd/backup.go index 3aed2147f..d37229e0a 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -16,6 +16,7 @@ import ( func runBackupCommand(command *cobra.Command, cmdName string) error { cfg := task.BackupConfig{Config: task.Config{LogProgress: HasLogFile()}} if err := cfg.ParseFromFlags(command.Flags()); err != nil { + command.SilenceUsage = false return err } return task.RunBackup(GetDefaultContext(), tidbGlue, cmdName, &cfg) @@ -24,6 +25,7 @@ func runBackupCommand(command *cobra.Command, cmdName string) error { func runBackupRawCommand(command *cobra.Command, cmdName string) error { cfg := task.RawKvConfig{Config: task.Config{LogProgress: HasLogFile()}} if err := cfg.ParseFromFlags(command.Flags()); err != nil { + command.SilenceUsage = false return err } return task.RunBackupRaw(GetDefaultContext(), gluetikv.Glue{}, cmdName, &cfg) @@ -34,7 +36,7 @@ func NewBackupCommand() *cobra.Command { command := &cobra.Command{ Use: "backup", Short: "backup a TiDB/TiKV cluster", - SilenceUsage: false, + SilenceUsage: true, PersistentPreRunE: func(c *cobra.Command, args []string) error { if err := Init(c); err != nil { return err diff --git a/cmd/restore.go b/cmd/restore.go index bc74bea84..1e894b4ee 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -15,6 +15,7 @@ import ( func runRestoreCommand(command *cobra.Command, cmdName string) error { cfg := task.RestoreConfig{Config: task.Config{LogProgress: HasLogFile()}} if err := cfg.ParseFromFlags(command.Flags()); err != nil { + command.SilenceUsage = false return err } return task.RunRestore(GetDefaultContext(), tidbGlue, cmdName, &cfg) @@ -25,6 +26,7 @@ func runRestoreRawCommand(command *cobra.Command, cmdName string) error { RawKvConfig: task.RawKvConfig{Config: 
task.Config{LogProgress: HasLogFile()}}, } if err := cfg.ParseFromFlags(command.Flags()); err != nil { + command.SilenceUsage = false return err } return task.RunRestoreRaw(GetDefaultContext(), gluetikv.Glue{}, cmdName, &cfg) @@ -33,6 +35,7 @@ func runRestoreRawCommand(command *cobra.Command, cmdName string) error { func runRestoreTiflashReplicaCommand(command *cobra.Command, cmdName string) error { cfg := task.RestoreConfig{Config: task.Config{LogProgress: HasLogFile()}} if err := cfg.ParseFromFlags(command.Flags()); err != nil { + command.SilenceUsage = false return err } @@ -44,7 +47,7 @@ func NewRestoreCommand() *cobra.Command { command := &cobra.Command{ Use: "restore", Short: "restore a TiDB/TiKV cluster", - SilenceUsage: false, + SilenceUsage: true, PersistentPreRunE: func(c *cobra.Command, args []string) error { if err := Init(c); err != nil { return err diff --git a/go.mod b/go.mod index 94f4022f9..2659bdd03 100644 --- a/go.mod +++ b/go.mod @@ -23,9 +23,9 @@ require ( github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 github.com/pingcap/kvproto v0.0.0-20200317112120-78042b285b75 github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd - github.com/pingcap/parser v0.0.0-20200305120128-bde9faa0df84 + github.com/pingcap/parser v0.0.0-20200317021010-cd90cc2a7d87 github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 - github.com/pingcap/tidb v1.1.0-beta.0.20200310133602-7c39e5e5e0bc + github.com/pingcap/tidb v1.1.0-beta.0.20200325094938-30e1edae0897 github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible github.com/pingcap/tipb v0.0.0-20200212061130-c4d518eb1d60 github.com/prometheus/client_golang v1.0.0 diff --git a/go.sum b/go.sum index 9c1c97052..67b7e16fd 100644 --- a/go.sum +++ b/go.sum @@ -370,8 +370,8 @@ github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd h1:CV3VsP3Z02MVtdpTMfEgRJ4T9NGgGTxdHpJerent7rM= github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20200305120128-bde9faa0df84 h1:u5FOwUw9muF8mBTZVV1dQhoAKiEo2Ci54CxN9XchEEY= -github.com/pingcap/parser v0.0.0-20200305120128-bde9faa0df84/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= +github.com/pingcap/parser v0.0.0-20200317021010-cd90cc2a7d87 h1:533jEUp3mtfWjk0el+awLbyGVxiHcUIGWcR1Y7gB+fg= +github.com/pingcap/parser v0.0.0-20200317021010-cd90cc2a7d87/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 h1:Yrp99FnjHAEuDrSBql2l0IqCtJX7KwJbTsD5hIArkvk= github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3/go.mod h1:25GfNw6+Jcr9kca5rtmTb4gKCJ4jOpow2zV2S9Dgafs= github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= @@ -379,8 +379,8 @@ github.com/pingcap/sysutil v0.0.0-20200302022240-21c8c70d0ab1 h1:YUnUZ914SHFMsOS github.com/pingcap/sysutil v0.0.0-20200302022240-21c8c70d0ab1/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= github.com/pingcap/sysutil v0.0.0-20200309085538-962fd285f3bb h1:bDbgLaNTRNK6Qw7KjvEqqfCQstY8WMEcXyXTU7yzYKg= github.com/pingcap/sysutil v0.0.0-20200309085538-962fd285f3bb/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= -github.com/pingcap/tidb v1.1.0-beta.0.20200310133602-7c39e5e5e0bc 
h1:1aW3qTRJZjnosvXt1b75KL73b28XRJWBx6jtTtHsybg= -github.com/pingcap/tidb v1.1.0-beta.0.20200310133602-7c39e5e5e0bc/go.mod h1:WTmfs5zrUGMPw3Enn5FI3buzkU8BDuJ6BhsO/JC239U= +github.com/pingcap/tidb v1.1.0-beta.0.20200325094938-30e1edae0897 h1:wTNFJMM6GNmG09YoN3/3K8BqiK74zKkurjm4iY+m2mI= +github.com/pingcap/tidb v1.1.0-beta.0.20200325094938-30e1edae0897/go.mod h1:4CGOiKZSaOU/Da3QYMtp0c3uBE2SxpcLOpESXmeQhcs= github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible h1:84F7MFMfdAYObrznvRslmVu43aoihrlL+7mMyMlOi0o= github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= github.com/pingcap/tipb v0.0.0-20190428032612-535e1abaa330/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= @@ -500,6 +500,7 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yookoala/realpath v1.0.0 h1:7OA9pj4FZd+oZDsyvXWQvjn5oBdcHRTV44PpdMSuImQ= github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= @@ -675,8 +676,8 @@ golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200301222351-066e0c02454c h1:FD7jysxM+EJqg5UYYy3XYDsAiUickFsn4UiaanJkf8c= golang.org/x/tools v0.0.0-20200301222351-066e0c02454c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb h1:iKlO7ROJc6SttHKlxzwGytRtBUqX4VARrNTgP2YLX5M= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200313205530-4303120df7d8 h1:gkI/wGGwpcG5W4hLCzZNGxA4wzWBGGDStRI1MrjDl2Q= +golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/main.go b/main.go index 7b7cbfc97..4b369785f 100644 --- a/main.go +++ b/main.go @@ -41,7 +41,7 @@ func main() { Use: "br", Short: "br is a TiDB/TiKV cluster backup restore tool.", TraverseChildren: true, - SilenceUsage: false, + SilenceUsage: true, } cmd.AddFlags(rootCmd) cmd.SetDefaultContext(ctx) From 8638d9a05ca4ba55aae71431eabe618b40454da4 Mon Sep 17 00:00:00 2001 From: kennytm Date: Thu, 26 Mar 2020 14:58:05 +0800 Subject: [PATCH 39/46] task: do not run checksum if restore failed (#209) --- pkg/task/restore.go | 12 ++++++++---- pkg/task/restore_raw.go | 12 +++++++----- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/pkg/task/restore.go b/pkg/task/restore.go index 5fffd7ce4..ff2374fa8 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -229,12 +229,16 @@ func RunRestore(c 
context.Context, g glue.Glue, cmdName string, cfg *RestoreConf // Always run the post-work even on error, so we don't stuck in the import // mode or emptied schedulers - err = restorePostWork(ctx, client, mgr, clusterCfg) - if err != nil { - return err + if errRestorePostWork := restorePostWork(ctx, client, mgr, clusterCfg); err == nil { + err = errRestorePostWork } - if err = splitPostWork(ctx, client, newTables); err != nil { + if errSplitPostWork := splitPostWork(ctx, client, newTables); err == nil { + err = errSplitPostWork + } + + // If any error happened, return now, don't execute checksum. + if err != nil { return err } diff --git a/pkg/task/restore_raw.go b/pkg/task/restore_raw.go index ca7504f6f..03e987456 100644 --- a/pkg/task/restore_raw.go +++ b/pkg/task/restore_raw.go @@ -46,7 +46,7 @@ func (cfg *RestoreRawConfig) ParseFromFlags(flags *pflag.FlagSet) error { } // RunRestoreRaw starts a raw kv restore task inside the current goroutine. -func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreRawConfig) error { +func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreRawConfig) (err error) { defer summary.Summary(cmdName) ctx, cancel := context.WithCancel(c) defer cancel() @@ -113,16 +113,18 @@ func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreR if err != nil { return errors.Trace(err) } + defer func() { + errPostWork := restorePostWork(ctx, client, mgr, removedSchedulers) + if err == nil { + err = errPostWork + } + }() err = client.RestoreRaw(cfg.StartKey, cfg.EndKey, files, updateCh) if err != nil { return errors.Trace(err) } - err = restorePostWork(ctx, client, mgr, removedSchedulers) - if err != nil { - return errors.Trace(err) - } // Restore has finished. updateCh.Close() From f112da7eedc651db4b4c3c54dd2deec0aa95e16c Mon Sep 17 00:00:00 2001 From: 3pointer Date: Thu, 26 Mar 2020 20:41:37 +0800 Subject: [PATCH 40/46] fix incremental bug in llroad test (#199) * restore: filter same table ddl * *: do not return error when backup/restore data is empty * fix create database double during incremental restore * add tests * fix ci * address comment --- pkg/backup/client.go | 3 +- pkg/backup/schema_test.go | 8 +-- pkg/restore/db.go | 15 +++-- pkg/task/backup.go | 4 ++ pkg/task/restore.go | 30 +++++---- tests/br_incremental_only_ddl/run.sh | 72 +++++++++++++++++++++ tests/br_incremental_same_table/run.sh | 86 ++++++++++++++++++++++++++ 7 files changed, 198 insertions(+), 20 deletions(-) create mode 100755 tests/br_incremental_only_ddl/run.sh create mode 100755 tests/br_incremental_same_table/run.sh diff --git a/pkg/backup/client.go b/pkg/backup/client.go index 219f58550..6cd8abe45 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -255,7 +255,8 @@ func BuildBackupRangeAndSchema( } if backupSchemas.Len() == 0 { - return nil, nil, errors.New("nothing to backup") + log.Info("nothing to backup") + return nil, nil, nil } return ranges, backupSchemas, nil } diff --git a/pkg/backup/schema_test.go b/pkg/backup/schema_test.go index d3c82f172..98173dd55 100644 --- a/pkg/backup/schema_test.go +++ b/pkg/backup/schema_test.go @@ -62,7 +62,7 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { c.Assert(err, IsNil) _, backupSchemas, err := BuildBackupRangeAndSchema( s.mock.Domain, s.mock.Storage, testFilter, math.MaxUint64) - c.Assert(err, NotNil) + c.Assert(err, IsNil) c.Assert(backupSchemas, IsNil) // Database is not exist. 
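The contract change above means BuildBackupRangeAndSchema now reports an empty match as (nil, nil, nil) rather than an error, so every caller has to treat nil ranges as a no-op. A minimal runnable sketch of that caller contract, with stand-in names rather than BR's real signatures:

package main

import "fmt"

// buildRanges stands in for BuildBackupRangeAndSchema: an empty match is
// reported as (nil, nil), not as an error.
func buildRanges(tables []string, keep func(string) bool) ([]string, error) {
	var ranges []string
	for _, t := range tables {
		if keep(t) {
			ranges = append(ranges, t)
		}
	}
	if len(ranges) == 0 {
		return nil, nil // nothing to back up is no longer an error
	}
	return ranges, nil
}

func main() {
	ranges, err := buildRanges([]string{"test.t1"}, func(string) bool { return false })
	if err != nil {
		panic(err)
	}
	if ranges == nil {
		fmt.Println("nothing to backup") // counterpart of the early return in RunBackup
		return
	}
	fmt.Println("backing up", len(ranges), "ranges")
}

The `if ranges == nil { return nil }` guard added to RunBackup below is the real counterpart of this check.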
@@ -72,15 +72,15 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { c.Assert(err, IsNil) _, backupSchemas, err = BuildBackupRangeAndSchema( s.mock.Domain, s.mock.Storage, fooFilter, math.MaxUint64) - c.Assert(err, NotNil) + c.Assert(err, IsNil) c.Assert(backupSchemas, IsNil) - // Empty databse. + // Empty database. noFilter, err := filter.New(false, &filter.Rules{}) c.Assert(err, IsNil) _, backupSchemas, err = BuildBackupRangeAndSchema( s.mock.Domain, s.mock.Storage, noFilter, math.MaxUint64) - c.Assert(err, NotNil) + c.Assert(err, IsNil) c.Assert(backupSchemas, IsNil) tk.MustExec("use test") diff --git a/pkg/restore/db.go b/pkg/restore/db.go index be24a1ad9..ae90df371 100644 --- a/pkg/restore/db.go +++ b/pkg/restore/db.go @@ -203,19 +203,26 @@ func FilterDDLJobs(allDDLJobs []*model.Job, tables []*utils.Table) (ddlJobs []*m } } + type namePair struct { + db string + table string + } + for _, table := range tables { tableIDs := make(map[int64]bool) tableIDs[table.Info.ID] = true - tableNames := make(map[string]bool) - tableNames[table.Info.Name.String()] = true + tableNames := make(map[namePair]bool) + name := namePair{table.Db.Name.String(), table.Info.Name.String()} + tableNames[name] = true for _, job := range allDDLJobs { if job.BinlogInfo.TableInfo != nil { - if tableIDs[job.TableID] || tableNames[job.BinlogInfo.TableInfo.Name.String()] { + name := namePair{job.SchemaName, job.BinlogInfo.TableInfo.Name.String()} + if tableIDs[job.TableID] || tableNames[name] { ddlJobs = append(ddlJobs, job) tableIDs[job.TableID] = true // For truncate table, the id may be changed tableIDs[job.BinlogInfo.TableInfo.ID] = true - tableNames[job.BinlogInfo.TableInfo.Name.String()] = true + tableNames[name] = true } } } diff --git a/pkg/task/backup.go b/pkg/task/backup.go index fe3021a33..040be0444 100644 --- a/pkg/task/backup.go +++ b/pkg/task/backup.go @@ -126,6 +126,10 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig if err != nil { return err } + // nothing to backup + if ranges == nil { + return nil + } ddlJobs := make([]*model.Job, 0) if cfg.LastBackupTS > 0 { diff --git a/pkg/task/restore.go b/pkg/task/restore.go index ff2374fa8..33bac7a6c 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -118,13 +118,10 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf return errors.New("cannot do transactional restore from raw kv data") } - files, tables, err := filterRestoreFiles(client, cfg) + files, tables, dbs, err := filterRestoreFiles(client, cfg) if err != nil { return err } - if len(files) == 0 { - return errors.New("all files are filtered out from the backup archive, nothing to restore") - } var newTS uint64 if client.IsIncremental() { @@ -137,10 +134,25 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf if err != nil { return err } + // execute DDL first err = client.ExecDDLs(ddlJobs) if err != nil { return errors.Trace(err) } + + // nothing to restore, maybe only ddl changes in incremental restore + if len(files) == 0 { + log.Info("all files are filtered out from the backup archive, nothing to restore") + return nil + } + + for _, db := range dbs { + err = client.CreateDatabase(db.Info) + if err != nil { + return err + } + } + rewriteRules, newTables, err := client.CreateTables(mgr.GetDomain(), tables, newTS) if err != nil { return err @@ -263,10 +275,10 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf func filterRestoreFiles( client 
*restore.Client, cfg *RestoreConfig, -) (files []*backup.File, tables []*utils.Table, err error) { +) (files []*backup.File, tables []*utils.Table, dbs []*utils.Database, err error) { tableFilter, err := filter.New(cfg.CaseSensitive, &cfg.Filter) if err != nil { - return nil, nil, err + return nil, nil, nil, err } for _, db := range client.GetDatabases() { @@ -277,17 +289,13 @@ func filterRestoreFiles( } if !createdDatabase { - if err = client.CreateDatabase(db.Info); err != nil { - return nil, nil, err - } + dbs = append(dbs, db) createdDatabase = true } - files = append(files, table.Files...) tables = append(tables, table) } } - return } diff --git a/tests/br_incremental_only_ddl/run.sh b/tests/br_incremental_only_ddl/run.sh new file mode 100755 index 000000000..f525acda4 --- /dev/null +++ b/tests/br_incremental_only_ddl/run.sh @@ -0,0 +1,72 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +DB="$TEST_NAME" +TABLE="usertable" +ROW_COUNT=100 +PATH="tests/$TEST_NAME:bin:$PATH" + +echo "load data..." +# create database +run_sql "CREATE DATABASE IF NOT EXISTS $DB;" +# create table +run_sql "CREATE TABLE IF NOT EXISTS ${DB}.${TABLE} (c1 INT);" +# insert records +for i in $(seq $ROW_COUNT); do + run_sql "INSERT INTO ${DB}.${TABLE}(c1) VALUES ($i);" +done + +# full backup +echo "full backup start..." +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/full" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 +# run ddls +echo "run ddls..." +run_sql "RENAME TABLE ${DB}.${TABLE} to ${DB}.${TABLE}1;" +run_sql "DROP TABLE ${DB}.${TABLE}1;" +run_sql "DROP DATABASE ${DB};" +run_sql "CREATE DATABASE ${DB};" +run_sql "CREATE TABLE ${DB}.${TABLE}1 (c2 CHAR(255));" +run_sql "RENAME TABLE ${DB}.${TABLE}1 to ${DB}.${TABLE};" +run_sql "TRUNCATE TABLE ${DB}.${TABLE};" + # incremental backup echo "incremental backup start..." last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1) +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB/inc" --db $DB -t $TABLE --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts + +run_sql "DROP DATABASE $DB;" + +# full restore +echo "full restore start..." +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR +row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_full}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] full restore fail on database $DB" + exit 1 +fi +# incremental restore +echo "incremental restore start..." +fail=false +run_br restore table --db $DB --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR || fail=true +if $fail; then + echo "TEST: [$TEST_NAME] incremental restore fail on database $DB" + exit 1 +else + echo "TEST: [$TEST_NAME] succeeded!"
+fi + +run_sql "DROP DATABASE $DB;" diff --git a/tests/br_incremental_same_table/run.sh b/tests/br_incremental_same_table/run.sh new file mode 100755 index 000000000..797806837 --- /dev/null +++ b/tests/br_incremental_same_table/run.sh @@ -0,0 +1,86 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +DB="$TEST_NAME" +TABLE="usertable" +ROW_COUNT=100 +PATH="tests/$TEST_NAME:bin:$PATH" +DB_COUNT=3 + +echo "load data..." + +# create database +run_sql "CREATE DATABASE IF NOT EXISTS $DB;" +# create table +run_sql "CREATE TABLE IF NOT EXISTS ${DB}.${TABLE} (c1 INT);" +# insert records +for i in $(seq $ROW_COUNT); do + run_sql "INSERT INTO ${DB}.${TABLE}(c1) VALUES ($i);" +done + +# full backup +echo "full backup start..." +run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/full" --ratelimit 5 --concurrency 4 +# run ddls + +# create 3 databases, each db has one table with same name +for i in $(seq $DB_COUNT); do + # create database + run_sql "CREATE DATABASE $DB$i;" + # create table + run_sql "CREATE TABLE IF NOT EXISTS $DB$i.${TABLE} (c1 INT);" + # insert records + for j in $(seq $ROW_COUNT); do + run_sql "INSERT INTO $DB$i.${TABLE}(c1) VALUES ($j);" + done +done + +# incremental backup +echo "incremental backup start..." +last_backup_ts=$(br validate decode --field="end-version" -s "local://$TEST_DIR/$DB/full" | tail -n1) +run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$DB/inc" --ratelimit 5 --concurrency 4 --lastbackupts $last_backup_ts + +# cleanup env +run_sql "DROP DATABASE $DB;" +for i in $(seq $DB_COUNT); do + run_sql "DROP DATABASE $DB$i;" +done + +# full restore +echo "full restore start..." +run_br restore full -s "local://$TEST_DIR/$DB/full" --pd $PD_ADDR +row_count_full=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_full}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] full restore fail on database $DB" + exit 1 +fi + +# incremental restore only DB2.Table +echo "incremental restore start..." 
+run_br restore table --db ${DB}2 --table $TABLE -s "local://$TEST_DIR/$DB/inc" --pd $PD_ADDR +row_count_inc=$(run_sql "SELECT COUNT(*) FROM ${DB}2.$TABLE;" | awk '/COUNT/{print $2}') +# check full restore +if [ "${row_count_inc}" != "${ROW_COUNT}" ];then + echo "TEST: [$TEST_NAME] incremental restore fail on database $DB" + exit 1 +fi + +# cleanup env +run_sql "DROP DATABASE $DB;" +for i in $(seq $DB_COUNT); do + run_sql "DROP DATABASE IF EXISTS $DB$i;" +done From 8fbedbf39c13692c4efaeb15c6f4cb1a46d52245 Mon Sep 17 00:00:00 2001 From: 3pointer Date: Mon, 30 Mar 2020 17:46:16 +0800 Subject: [PATCH 41/46] add skip create sqls (#211) --- pkg/restore/client.go | 25 ++++++++++++-- pkg/task/restore.go | 19 +++++++++-- tests/br_db_skip/run.sh | 72 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 110 insertions(+), 6 deletions(-) create mode 100755 tests/br_db_skip/run.sh diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 56bb83233..76dbf5066 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -64,6 +64,7 @@ type Client struct { db *DB rateLimit uint64 isOnline bool + noSchema bool hasSpeedLimited bool restoreStores []uint64 @@ -305,6 +306,10 @@ func (rc *Client) GetTableSchema( // CreateDatabase creates a database. func (rc *Client) CreateDatabase(db *model.DBInfo) error { + if rc.IsSkipCreateSQL() { + log.Info("skip create database", zap.Stringer("database", db.Name)) + return nil + } return rc.db.CreateDatabase(rc.ctx, db) } @@ -320,9 +325,13 @@ func (rc *Client) CreateTables( } newTables := make([]*model.TableInfo, 0, len(tables)) for _, table := range tables { - err := rc.db.CreateTable(rc.ctx, table) - if err != nil { - return nil, nil, err + if rc.IsSkipCreateSQL() { + log.Info("skip create table and alter autoIncID", zap.Stringer("table", table.Info.Name)) + } else { + err := rc.db.CreateTable(rc.ctx, table) + if err != nil { + return nil, nil, err + } } newTableInfo, err := rc.GetTableSchema(dom, table.Db.Name, table.Info.Name) if err != nil { @@ -847,3 +856,13 @@ func (rc *Client) IsIncremental() bool { return !(rc.backupMeta.StartVersion == rc.backupMeta.EndVersion || rc.backupMeta.StartVersion == 0) } + +// EnableSkipCreateSQL sets switch of skip create schema and tables +func (rc *Client) EnableSkipCreateSQL() { + rc.noSchema = true +} + +// IsSkipCreateSQL returns whether we need skip create schema and tables in restore +func (rc *Client) IsSkipCreateSQL() bool { + return rc.noSchema +} diff --git a/pkg/task/restore.go b/pkg/task/restore.go index 33bac7a6c..336758dd7 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -23,7 +23,8 @@ import ( ) const ( - flagOnline = "online" + flagOnline = "online" + flagNoSchema = "no-schema" ) var schedulers = map[string]struct{}{ @@ -45,13 +46,18 @@ const ( type RestoreConfig struct { Config - Online bool `json:"online" toml:"online"` + Online bool `json:"online" toml:"online"` + NoSchema bool `json:"no-schema" toml:"no-schema"` } // DefineRestoreFlags defines common flags for the restore command. func DefineRestoreFlags(flags *pflag.FlagSet) { // TODO remove experimental tag if it's stable - flags.Bool("online", false, "(experimental) Whether online when restore") + flags.Bool(flagOnline, false, "(experimental) Whether online when restore") + flags.Bool(flagNoSchema, false, "skip creating schemas and tables, reuse existing empty ones") + + // Do not expose this flag + _ = flags.MarkHidden(flagNoSchema) } // ParseFromFlags parses the restore-related flags from the flag set. 
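The hidden-flag handling above is plain spf13/pflag. A self-contained sketch of the same define/hide/read pattern, reusing the no-schema flag name from this patch:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("restore", pflag.ContinueOnError)
	flags.Bool("no-schema", false, "skip creating schemas and tables, reuse existing empty ones")
	// Hide the flag from --help output; it still parses normally.
	_ = flags.MarkHidden("no-schema")

	if err := flags.Parse([]string{"--no-schema=true"}); err != nil {
		panic(err)
	}
	noSchema, err := flags.GetBool("no-schema")
	if err != nil {
		panic(err)
	}
	fmt.Println("no-schema:", noSchema) // no-schema: true
}

MarkHidden only removes the flag from help output, which is why the patch can keep `--no-schema` functional without advertising it.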
@@ -61,6 +67,10 @@ func (cfg *RestoreConfig) ParseFromFlags(flags *pflag.FlagSet) error { if err != nil { return errors.Trace(err) } + cfg.NoSchema, err = flags.GetBool(flagNoSchema) + if err != nil { + return errors.Trace(err) + } err = cfg.Config.ParseFromFlags(flags) if err != nil { return errors.Trace(err) } @@ -101,6 +111,9 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf if cfg.Online { client.EnableOnline() } + if cfg.NoSchema { + client.EnableSkipCreateSQL() + } err = client.LoadRestoreStores(ctx) if err != nil { return err } diff --git a/tests/br_db_skip/run.sh b/tests/br_db_skip/run.sh new file mode 100755 index 000000000..e126447c6 --- /dev/null +++ b/tests/br_db_skip/run.sh @@ -0,0 +1,72 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +DB="$TEST_NAME" + +run_sql "CREATE DATABASE $DB;" + +run_sql "CREATE TABLE $DB.usertable1 ( \ + YCSB_KEY varchar(64) NOT NULL, \ + FIELD0 varchar(1) DEFAULT NULL, \ + PRIMARY KEY (YCSB_KEY) \ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" + +run_sql "INSERT INTO $DB.usertable1 VALUES (\"a\", \"b\");" +run_sql "INSERT INTO $DB.usertable1 VALUES (\"aa\", \"b\");" + +# backup db +echo "backup start..." +run_br --pd $PD_ADDR backup db --db "$DB" -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 + +run_sql "DROP DATABASE $DB;" + +run_sql "CREATE DATABASE $DB;" +# restore db with skip-create-sql must fail +echo "restore start, this must fail" +fail=false +run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR --no-schema=true || fail=true +if $fail; then + # Error: [schema:1146]Table 'br_db_skip.usertable1' doesn't exist + echo "TEST: [$TEST_NAME] restore $DB with no-schema failed as expected" +else + echo "TEST: [$TEST_NAME] restore $DB with no-schema unexpectedly succeeded" + exit 1 +fi + + +run_sql "CREATE TABLE $DB.usertable1 ( \ + YCSB_KEY varchar(64) NOT NULL, \ + FIELD0 varchar(1) DEFAULT NULL, \ + PRIMARY KEY (YCSB_KEY) \ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" + +echo "restore start, this must succeed" +fail=false +run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR --no-schema=true || fail=true +if $fail; then + echo "TEST: [$TEST_NAME] restore $DB with no-schema failed" + exit 1 +else + echo "TEST: [$TEST_NAME] restore $DB with no-schema succeeded" +fi + +table_count=$(run_sql "use $DB; show tables;" | grep "Tables_in" | wc -l) +if [ "$table_count" -ne "1" ];then + echo "TEST: [$TEST_NAME] failed!" + exit 1 +fi + +run_sql "DROP DATABASE $DB;" From f031777add5baaf61945457f1afa7dc287e51bc4 Mon Sep 17 00:00:00 2001 From: kennytm Date: Tue, 31 Mar 2020 23:20:51 +0800 Subject: [PATCH 42/46] Revert "tests: disable TLS test (#204)" (#218) This reverts commit e168a60288ade404a97b47147dbc64ec0b63b9e6.
--- tests/run.sh | 7 ------- 1 file changed, 7 deletions(-) diff --git a/tests/run.sh b/tests/run.sh index 583d395c8..5b1111afd 100755 --- a/tests/run.sh +++ b/tests/run.sh @@ -29,13 +29,6 @@ if [ "${1-}" = '--debug' ]; then fi for script in tests/${TEST_NAME-*}/run.sh; do - TEST_NAME="$(basename "$(dirname "$script")")" - if [ $TEST_NAME = "br_tls" ]; then - echo "FIXME enable br_tls test" - echo "TiKV master (ed71f20f445e10595553d2bf3d1a1eb645b9a61a) aborts when TLS is enabled" - continue - fi - echo "*===== Running test $script... =====*" TEST_DIR="$TEST_DIR" \ PD_ADDR="$PD_ADDR" \ From f7dc2db6774b4ed32ea794c0febaea86d947af84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B1=B1=E5=B2=9A?= <36239017+YuJuncen@users.noreply.github.com> Date: Wed, 1 Apr 2020 12:44:07 +0800 Subject: [PATCH 43/46] doc: add `minio` to dependence list. (#221) The tests README omitted `minio` from the dependency list, which is needed to run the integration tests. Co-authored-by: Neil Shen --- tests/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/README.md b/tests/README.md index 814241b4a..fbb018505 100644 --- a/tests/README.md +++ b/tests/README.md @@ -11,6 +11,7 @@ programs. * `bin/pd-server` * `bin/pd-ctl` * `bin/go-ycsb` + * `bin/minio` The versions must be ≥2.1.0 as usual. @@ -33,7 +34,7 @@ Run `make integration_test` to execute the integration tests. This command will 2. Check that all 6 required executables and `br` executable exist 3. Execute `tests/run.sh` -If the first tow steps are done before, you could also run `tests/run.sh` directly. +If the first two steps are done before, you could also run `tests/run.sh` directly. This script will 1. Start PD, TiKV and TiDB in background with local storage From 01de3f521334539f7ceb072f67471d57f03d41de Mon Sep 17 00:00:00 2001 From: 3pointer Date: Wed, 1 Apr 2020 23:14:28 +0800 Subject: [PATCH 44/46] move waiting reject stores in import file (#222) * move wait rejectstores into import files * restore: use new table id to search placementRules * Update pkg/restore/import.go Co-Authored-By: Neil Shen * Update pkg/restore/import.go Co-Authored-By: kennytm * fix ci Co-authored-by: Neil Shen Co-authored-by: kennytm --- pkg/restore/client.go | 16 +++++---- pkg/restore/client_test.go | 4 +++ pkg/restore/import.go | 26 +++++++++++++- pkg/restore/split.go | 70 -------------------------------------- pkg/restore/split_test.go | 2 +- pkg/restore/util.go | 67 ++++++++++++++++++++++++++++++------ pkg/task/restore.go | 15 ++++++-- 7 files changed, 110 insertions(+), 90 deletions(-) diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 76dbf5066..fde382fb0 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -347,15 +347,18 @@ func (rc *Client) CreateTables( // RemoveTiFlashReplica removes all the tiflash replicas of a table // TODO: remove this after tiflash supports restore -func (rc *Client) RemoveTiFlashReplica(tables []*utils.Table, placementRules []placement.Rule) error { +func (rc *Client) RemoveTiFlashReplica( tables []*utils.Table, newTables []*model.TableInfo, placementRules []placement.Rule) error { schemas := make([]*backup.Schema, 0, len(tables)) var updateReplica bool - for _, table := range tables { - if rule := utils.SearchPlacementRule(table.Info.ID, placementRules, placement.Learner); rule != nil { + // must use new table id to search placement rules + // here newTables and tables must have the same order + for i, table := range tables { + if rule := utils.SearchPlacementRule(newTables[i].ID,
placementRules, placement.Learner); rule != nil { table.TiFlashReplicas = rule.Count updateReplica = true } - tableData, err := json.Marshal(table.Info) + tableData, err := json.Marshal(newTables[i]) if err != nil { return errors.Trace(err) } @@ -454,6 +457,7 @@ func (rc *Client) setSpeedLimit() error { func (rc *Client) RestoreFiles( files []*backup.File, rewriteRules *RewriteRules, + rejectStoreMap map[uint64]bool, updateCh glue.Progress, ) (err error) { start := time.Now() @@ -486,7 +490,7 @@ func (rc *Client) RestoreFiles( select { case <-rc.ctx.Done(): errCh <- rc.ctx.Err() - case errCh <- rc.fileImporter.Import(fileReplica, rewriteRules): + case errCh <- rc.fileImporter.Import(fileReplica, rejectStoreMap, rewriteRules): updateCh.Inc() } }) @@ -537,7 +541,7 @@ func (rc *Client) RestoreRaw(startKey []byte, endKey []byte, files []*backup.Fil select { case <-rc.ctx.Done(): errCh <- rc.ctx.Err() - case errCh <- rc.fileImporter.Import(fileReplica, emptyRules): + case errCh <- rc.fileImporter.Import(fileReplica, nil, emptyRules): updateCh.Inc() } }) diff --git a/pkg/restore/client_test.go b/pkg/restore/client_test.go index 3f8cb71f8..13b5caa0a 100644 --- a/pkg/restore/client_test.go +++ b/pkg/restore/client_test.go @@ -72,6 +72,10 @@ func (s *testRestoreClientSuite) TestCreateTables(c *C) { } rules, newTables, err := client.CreateTables(s.mock.Domain, tables, 0) c.Assert(err, IsNil) + // make sure tables and newTables have same order + for i, t := range tables { + c.Assert(newTables[i].Name, Equals, t.Info.Name) + } for _, nt := range newTables { c.Assert(nt.Name.String(), Matches, "test[0-3]") } diff --git a/pkg/restore/import.go b/pkg/restore/import.go index 405af050c..ee5cef6ca 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -175,7 +175,11 @@ func (importer *FileImporter) SetRawRange(startKey, endKey []byte) error { // Import tries to import a file. // All rules must contain encoded keys. 
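The select just shown is the same cancellation pattern patch 37 fixed: when the context is done, the worker must forward ctx.Err() rather than nil, otherwise the error collector reads a cancelled restore as a success. A compressed runnable sketch of that pattern, where doImport is a stand-in for fileImporter.Import:

package main

import (
	"context"
	"fmt"
	"time"
)

// doImport stands in for fileImporter.Import.
func doImport(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate the restore being cancelled before the worker runs

	errCh := make(chan error, 1)
	go func() {
		select {
		case <-ctx.Done():
			errCh <- ctx.Err() // forward the cancellation cause, never nil
		case errCh <- doImport(ctx):
		}
	}()
	fmt.Println("worker result:", <-errCh) // context.Canceled, not <nil>
}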
-func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRules) error { +func (importer *FileImporter) Import( + file *backup.File, + rejectStoreMap map[uint64]bool, + rewriteRules *RewriteRules, +) error { log.Debug("import file", zap.Stringer("file", file)) // Rewrite the start key and end key of file to scan regions var startKey, endKey []byte @@ -193,6 +197,9 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul zap.Stringer("file", file), zap.Binary("startKey", startKey), zap.Binary("endKey", endKey)) + + needReject := len(rejectStoreMap) > 0 + err = utils.WithRetry(importer.ctx, func() error { ctx, cancel := context.WithTimeout(importer.ctx, importScanRegionTime) defer cancel() @@ -202,6 +209,23 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul if errScanRegion != nil { return errors.Trace(errScanRegion) } + + if needReject { + // TODO remove when TiFlash support restore + startTime := time.Now() + log.Info("start to wait for removing rejected stores", zap.Reflect("rejectStores", rejectStoreMap)) + for _, region := range regionInfos { + if !waitForRemoveRejectStores(ctx, importer.metaClient, region, rejectStoreMap) { + log.Error("waiting for removing rejected stores failed", + zap.Stringer("region", region.Region)) + return errors.New("waiting for removing rejected stores failed") + } + } + log.Info("waiting for removing rejected stores done", + zap.Int("regions", len(regionInfos)), zap.Duration("take", time.Since(startTime))) + needReject = false + } + log.Debug("scan regions", zap.Stringer("file", file), zap.Int("count", len(regionInfos))) // Try to download and ingest the file in every region for _, regionInfo := range regionInfos { diff --git a/pkg/restore/split.go b/pkg/restore/split.go index 03153097a..4138d0012 100644 --- a/pkg/restore/split.go +++ b/pkg/restore/split.go @@ -63,7 +63,6 @@ func (rs *RegionSplitter) Split( ctx context.Context, ranges []rtree.Range, rewriteRules *RewriteRules, - rejectStores map[uint64]bool, onSplit OnSplitFunc, ) error { if len(ranges) == 0 { @@ -95,14 +94,12 @@ func (rs *RegionSplitter) Split( } interval := SplitRetryInterval scatterRegions := make([]*RegionInfo, 0) - allRegions := make([]*RegionInfo, 0) SplitRegions: for i := 0; i < SplitRetryTimes; i++ { regions, errScan := paginateScanRegion(ctx, rs.client, minKey, maxKey, scanRegionPaginationLimit) if errScan != nil { return errors.Trace(errScan) } - allRegions = append(allRegions, regions...) 
if len(regions) == 0 { log.Warn("cannot scan any region") return nil @@ -145,19 +142,6 @@ SplitRegions: if errSplit != nil { return errors.Trace(errSplit) } - if len(rejectStores) > 0 { - startTime = time.Now() - log.Info("start to wait for removing rejected stores", zap.Reflect("rejectStores", rejectStores)) - for _, region := range allRegions { - if !rs.waitForRemoveRejectStores(ctx, region, rejectStores) { - log.Error("waiting for removing rejected stores failed", - zap.Stringer("region", region.Region)) - return errors.New("waiting for removing rejected stores failed") - } - } - log.Info("waiting for removing rejected stores done", - zap.Int("regions", len(allRegions)), zap.Duration("take", time.Since(startTime))) - } log.Info("start to wait for scattering regions", zap.Int("regions", len(scatterRegions)), zap.Duration("take", time.Since(startTime))) startTime = time.Now() @@ -211,30 +195,6 @@ func (rs *RegionSplitter) isScatterRegionFinished(ctx context.Context, regionID return ok, nil } -func (rs *RegionSplitter) hasRejectStorePeer( - ctx context.Context, - regionID uint64, - rejectStores map[uint64]bool, -) (bool, error) { - regionInfo, err := rs.client.GetRegionByID(ctx, regionID) - if err != nil { - return false, err - } - if regionInfo == nil { - return false, nil - } - for _, peer := range regionInfo.Region.GetPeers() { - if rejectStores[peer.GetStoreId()] { - return true, nil - } - } - retryTimes := ctx.Value(retryTimes).(int) - if retryTimes > 10 { - log.Warn("get region info", zap.Stringer("region", regionInfo.Region)) - } - return false, nil -} - func (rs *RegionSplitter) waitForSplit(ctx context.Context, regionID uint64) { interval := SplitCheckInterval for i := 0; i < SplitCheckMaxRetryTimes; i++ { @@ -280,36 +240,6 @@ func (rs *RegionSplitter) waitForScatterRegion(ctx context.Context, regionInfo * } } -func (rs *RegionSplitter) waitForRemoveRejectStores( - ctx context.Context, - regionInfo *RegionInfo, - rejectStores map[uint64]bool, -) bool { - interval := RejectStoreCheckInterval - regionID := regionInfo.Region.GetId() - for i := 0; i < RejectStoreCheckRetryTimes; i++ { - ctx1 := context.WithValue(ctx, retryTimes, i) - ok, err := rs.hasRejectStorePeer(ctx1, regionID, rejectStores) - if err != nil { - log.Warn("wait for rejecting store failed", - zap.Stringer("region", regionInfo.Region), - zap.Error(err)) - return false - } - // Do not have any peer in the rejected store, return true - if !ok { - return true - } - interval = 2 * interval - if interval > RejectStoreMaxCheckInterval { - interval = RejectStoreMaxCheckInterval - } - time.Sleep(interval) - } - - return false -} - func (rs *RegionSplitter) splitAndScatterRegions( ctx context.Context, regionInfo *RegionInfo, keys [][]byte, ) ([]*RegionInfo, error) { diff --git a/pkg/restore/split_test.go b/pkg/restore/split_test.go index 06dab1cf1..b21cbf781 100644 --- a/pkg/restore/split_test.go +++ b/pkg/restore/split_test.go @@ -193,7 +193,7 @@ func (s *testRestoreUtilSuite) TestSplit(c *C) { regionSplitter := NewRegionSplitter(client) ctx := context.Background() - err := regionSplitter.Split(ctx, ranges, rewriteRules, map[uint64]bool{}, func(key [][]byte) {}) + err := regionSplitter.Split(ctx, ranges, rewriteRules, func(key [][]byte) {}) if err != nil { c.Assert(err, IsNil, Commentf("split regions failed: %v", err)) } diff --git a/pkg/restore/util.go b/pkg/restore/util.go index 698de6aec..d322c9de0 100644 --- a/pkg/restore/util.go +++ b/pkg/restore/util.go @@ -21,7 +21,6 @@ import ( "github.com/pingcap/tidb/util/codec" 
"go.uber.org/zap" - "github.com/pingcap/br/pkg/conn" "github.com/pingcap/br/pkg/glue" "github.com/pingcap/br/pkg/rtree" "github.com/pingcap/br/pkg/summary" @@ -332,16 +331,8 @@ func SplitRanges( summary.CollectDuration("split region", elapsed) }() splitter := NewRegionSplitter(NewSplitClient(client.GetPDClient(), client.GetTLSConfig())) - tiflashStores, err := conn.GetAllTiKVStores(ctx, client.GetPDClient(), conn.TiFlashOnly) - if err != nil { - return errors.Trace(err) - } - storeMap := make(map[uint64]bool) - for _, store := range tiflashStores { - storeMap[store.GetId()] = true - } - return splitter.Split(ctx, ranges, rewriteRules, storeMap, func(keys [][]byte) { + return splitter.Split(ctx, ranges, rewriteRules, func(keys [][]byte) { for range keys { updateCh.Inc() } @@ -416,3 +407,59 @@ func paginateScanRegion( } return regions, nil } + +func hasRejectStorePeer( + ctx context.Context, + client SplitClient, + regionID uint64, + rejectStores map[uint64]bool, +) (bool, error) { + regionInfo, err := client.GetRegionByID(ctx, regionID) + if err != nil { + return false, err + } + if regionInfo == nil { + return false, nil + } + for _, peer := range regionInfo.Region.GetPeers() { + if rejectStores[peer.GetStoreId()] { + return true, nil + } + } + retryTimes := ctx.Value(retryTimes).(int) + if retryTimes > 10 { + log.Warn("get region info", zap.Stringer("region", regionInfo.Region)) + } + return false, nil +} + +func waitForRemoveRejectStores( + ctx context.Context, + client SplitClient, + regionInfo *RegionInfo, + rejectStores map[uint64]bool, +) bool { + interval := RejectStoreCheckInterval + regionID := regionInfo.Region.GetId() + for i := 0; i < RejectStoreCheckRetryTimes; i++ { + ctx1 := context.WithValue(ctx, retryTimes, i) + ok, err := hasRejectStorePeer(ctx1, client, regionID, rejectStores) + if err != nil { + log.Warn("wait for rejecting store failed", + zap.Stringer("region", regionInfo.Region), + zap.Error(err)) + return false + } + // Do not have any peer in the rejected store, return true + if !ok { + return true + } + interval = 2 * interval + if interval > RejectStoreMaxCheckInterval { + interval = RejectStoreMaxCheckInterval + } + time.Sleep(interval) + } + + return false +} diff --git a/pkg/task/restore.go b/pkg/task/restore.go index 336758dd7..2486ad319 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -174,7 +174,8 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf if err != nil { return err } - err = client.RemoveTiFlashReplica(tables, placementRules) + + err = client.RemoveTiFlashReplica(tables, newTables, placementRules) if err != nil { return err } @@ -222,6 +223,16 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf if batchSize > maxRestoreBatchSizeLimit { batchSize = maxRestoreBatchSizeLimit // 256 } + + tiflashStores, err := conn.GetAllTiKVStores(ctx, client.GetPDClient(), conn.TiFlashOnly) + if err != nil { + return errors.Trace(err) + } + rejectStoreMap := make(map[uint64]bool) + for _, store := range tiflashStores { + rejectStoreMap[store.GetId()] = true + } + for { if len(ranges) == 0 { break @@ -246,7 +257,7 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf } // After split, we can restore backup files. 
- err = client.RestoreFiles(fileBatch, rewriteRules, updateCh) + err = client.RestoreFiles(fileBatch, rewriteRules, rejectStoreMap, updateCh) if err != nil { break } From 3341f4054ffec3de0ee04ba8a4139df1b1e5ed8e Mon Sep 17 00:00:00 2001 From: 3pointer Date: Thu, 2 Apr 2020 11:06:32 +0800 Subject: [PATCH 45/46] Max index length (#220) * restore: set max-index-length to max * restore: add max-index-length params * address comment * address comment --- pkg/task/restore.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/task/restore.go b/pkg/task/restore.go index 2486ad319..9dce5139e 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -10,6 +10,7 @@ import ( "github.com/pingcap/log" "github.com/pingcap/parser/model" "github.com/pingcap/tidb-tools/pkg/filter" + "github.com/pingcap/tidb/config" "github.com/spf13/pflag" "go.uber.org/zap" @@ -148,6 +149,15 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf return err } // execute DDL first + + // set max-index-length before executing DDLs and creating tables + // we set this value to the maximum (3072*4), otherwise we might fail to restore tables + // when both upstream and downstream set this value greater than the default (3072) + conf := config.GetGlobalConfig() + conf.MaxIndexLength = config.DefMaxOfMaxIndexLength + config.StoreGlobalConfig(conf) + log.Warn("set max-index-length to max(3072*4) to skip check index length in DDL") + err = client.ExecDDLs(ddlJobs) if err != nil { return errors.Trace(err) } From 7de169dd6ffa9f2e5debcda16848c73a62abe6f5 Mon Sep 17 00:00:00 2001 From: kennytm Date: Thu, 2 Apr 2020 11:38:19 +0800 Subject: [PATCH 46/46] glue: create schema/table directly with info (#216) * glue: create schema/table directly with info * go.mod: change to use the master version * gluetidb: fix failure to create schema * gluetidb: exclude non-public indices when restoring * go.mod: removed unused replace Co-authored-by: 3pointer --- go.mod | 8 ++--- go.sum | 21 ++++++----- pkg/backup/client.go | 10 ++++++ pkg/glue/glue.go | 5 ++- pkg/gluetidb/glue.go | 36 +++++++++++-------- pkg/restore/db.go | 44 ++++------------------- pkg/restore/util.go | 39 -------------------- tests/_utils/run_services | 7 ++-- tests/br_alter_pk_server/config/tidb.toml | 8 +++++ tests/br_alter_pk_server/config/tikv.toml | 14 ++++++++ tests/br_alter_pk_server/run.sh | 42 ++++++++++++++++++++++ 11 files changed, 123 insertions(+), 111 deletions(-) diff --git a/go.mod b/go.mod index 2659bdd03..5d3251f08 100644 --- a/go.mod +++ b/go.mod @@ -21,11 +21,11 @@ require ( github.com/onsi/gomega v1.8.1 // indirect github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 - github.com/pingcap/kvproto v0.0.0-20200317112120-78042b285b75 + github.com/pingcap/kvproto v0.0.0-20200330093347-98f910b71904 github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd - github.com/pingcap/parser v0.0.0-20200317021010-cd90cc2a7d87 + github.com/pingcap/parser v0.0.0-20200326020624-68d423641be5 github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 - github.com/pingcap/tidb v1.1.0-beta.0.20200325094938-30e1edae0897 + github.com/pingcap/tidb v0.0.0-20200401141416-959eca8f3a39 github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible github.com/pingcap/tipb
v0.0.0-20200212061130-c4d518eb1d60 github.com/prometheus/client_golang v1.0.0 @@ -37,7 +37,7 @@ require ( github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 go.opencensus.io v0.22.2 // indirect - go.uber.org/zap v1.14.0 + go.uber.org/zap v1.14.1 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 google.golang.org/api v0.14.0 google.golang.org/grpc v1.25.1 diff --git a/go.sum b/go.sum index 67b7e16fd..06ea01c73 100644 --- a/go.sum +++ b/go.sum @@ -363,15 +363,14 @@ github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17Xtb github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20200214064158-62d31900d88e/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/kvproto v0.0.0-20200221034943-a2aa1d1e20a8/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= -github.com/pingcap/kvproto v0.0.0-20200228095611-2cf9a243b8d5/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= -github.com/pingcap/kvproto v0.0.0-20200317112120-78042b285b75 h1:DB3NTM0ilba/6sW+vccdEnP10bVvrVunDwWvRa0hSKc= -github.com/pingcap/kvproto v0.0.0-20200317112120-78042b285b75/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20200330093347-98f910b71904 h1:pMFUXvhJ62hX8m0Q4RsL7L+hSW1mAMG26So5eFMoAtI= +github.com/pingcap/kvproto v0.0.0-20200330093347-98f910b71904/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd h1:CV3VsP3Z02MVtdpTMfEgRJ4T9NGgGTxdHpJerent7rM= github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20200317021010-cd90cc2a7d87 h1:533jEUp3mtfWjk0el+awLbyGVxiHcUIGWcR1Y7gB+fg= -github.com/pingcap/parser v0.0.0-20200317021010-cd90cc2a7d87/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= +github.com/pingcap/parser v0.0.0-20200326020624-68d423641be5 h1:fXVqoeYfV+xI8K2he5NNv00c6YksrjeM6+vkNo1ZK2Q= +github.com/pingcap/parser v0.0.0-20200326020624-68d423641be5/go.mod h1:9v0Edh8IbgjGYW2ArJr19E+bvL8zKahsFp+ixWeId+4= github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3 h1:Yrp99FnjHAEuDrSBql2l0IqCtJX7KwJbTsD5hIArkvk= github.com/pingcap/pd/v4 v4.0.0-beta.1.0.20200305072537-61d9f9cc35d3/go.mod h1:25GfNw6+Jcr9kca5rtmTb4gKCJ4jOpow2zV2S9Dgafs= github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= @@ -379,8 +378,8 @@ github.com/pingcap/sysutil v0.0.0-20200302022240-21c8c70d0ab1 h1:YUnUZ914SHFMsOS github.com/pingcap/sysutil v0.0.0-20200302022240-21c8c70d0ab1/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= github.com/pingcap/sysutil v0.0.0-20200309085538-962fd285f3bb h1:bDbgLaNTRNK6Qw7KjvEqqfCQstY8WMEcXyXTU7yzYKg= github.com/pingcap/sysutil v0.0.0-20200309085538-962fd285f3bb/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= -github.com/pingcap/tidb v1.1.0-beta.0.20200325094938-30e1edae0897 h1:wTNFJMM6GNmG09YoN3/3K8BqiK74zKkurjm4iY+m2mI= -github.com/pingcap/tidb v1.1.0-beta.0.20200325094938-30e1edae0897/go.mod h1:4CGOiKZSaOU/Da3QYMtp0c3uBE2SxpcLOpESXmeQhcs= +github.com/pingcap/tidb 
v0.0.0-20200401141416-959eca8f3a39 h1:nYRL69Qc4kuvp+tlDNB5wXjvDetX0J7g0DsW4RQxfXM= +github.com/pingcap/tidb v0.0.0-20200401141416-959eca8f3a39/go.mod h1:btnHsqUQvJnY18+OP2Z6MCRq1tX4B8JUCrmqctSKxOg= github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible h1:84F7MFMfdAYObrznvRslmVu43aoihrlL+7mMyMlOi0o= github.com/pingcap/tidb-tools v4.0.0-beta.1.0.20200306084441-875bd09aa3d5+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= github.com/pingcap/tipb v0.0.0-20190428032612-535e1abaa330/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= @@ -535,8 +534,8 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.12.0 h1:dySoUQPFBGj6xwjmBzageVL8jGi8uxc6bEmJQjA06bw= go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.14.0 h1:/pduUoebOeeJzTDFuoMgC6nRkiasr1sBCIEorly7m4o= -go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= +go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -676,8 +675,8 @@ golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200301222351-066e0c02454c h1:FD7jysxM+EJqg5UYYy3XYDsAiUickFsn4UiaanJkf8c= golang.org/x/tools v0.0.0-20200301222351-066e0c02454c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200313205530-4303120df7d8 h1:gkI/wGGwpcG5W4hLCzZNGxA4wzWBGGDStRI1MrjDl2Q= -golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200325203130-f53864d0dba1 h1:odiryKYJy7CjdrZxhrcE1Z8L9+kGyGZOnfpuauvdCeU= +golang.org/x/tools v0.0.0-20200325203130-f53864d0dba1/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/backup/client.go b/pkg/backup/client.go index 6cd8abe45..72563096b 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -224,6 +224,16 @@ func BuildBackupRangeAndSchema( zap.Stringer("table", tableInfo.Name), zap.Int64("AutoIncID", globalAutoID)) + // remove all non-public indices + n := 0 + for _, index := range tableInfo.Indices { + if index.State == model.StatePublic { + tableInfo.Indices[n] = index + n++ + } + } + tableInfo.Indices = tableInfo.Indices[:n] + if dbData == nil { dbData, err = json.Marshal(dbInfo) if err != nil { diff --git a/pkg/glue/glue.go b/pkg/glue/glue.go index 88a05c5c3..8e5bb8577 100644 --- a/pkg/glue/glue.go +++ b/pkg/glue/glue.go @@ -9,7 +9,6 @@ import ( pd "github.com/pingcap/pd/v4/client" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" - 
"github.com/pingcap/tidb/meta/autoid" ) // Glue is an abstraction of TiDB function calls used in BR. @@ -28,8 +27,8 @@ type Glue interface { // Session is an abstraction of the session.Session interface. type Session interface { Execute(ctx context.Context, sql string) error - ShowCreateDatabase(schema *model.DBInfo) (string, error) - ShowCreateTable(table *model.TableInfo, allocator autoid.Allocator) (string, error) + CreateDatabase(ctx context.Context, schema *model.DBInfo) error + CreateTable(ctx context.Context, dbName model.CIStr, table *model.TableInfo) error Close() } diff --git a/pkg/gluetidb/glue.go b/pkg/gluetidb/glue.go index 5f4aff6fa..73ef66e4f 100644 --- a/pkg/gluetidb/glue.go +++ b/pkg/gluetidb/glue.go @@ -3,15 +3,14 @@ package gluetidb import ( - "bytes" "context" "github.com/pingcap/parser/model" + "github.com/pingcap/parser/mysql" pd "github.com/pingcap/pd/v4/client" + "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain" - "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/kv" - "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/session" "github.com/pingcap/br/pkg/glue" @@ -62,22 +61,29 @@ func (gs *tidbSession) Execute(ctx context.Context, sql string) error { return err } -// ShowCreateDatabase implements glue.Session -func (gs *tidbSession) ShowCreateDatabase(schema *model.DBInfo) (string, error) { - var buf bytes.Buffer - if err := executor.ConstructResultOfShowCreateDatabase(gs.se, schema, true, &buf); err != nil { - return "", err +// CreateDatabase implements glue.Session +func (gs *tidbSession) CreateDatabase(ctx context.Context, schema *model.DBInfo) error { + d := domain.GetDomain(gs.se).DDL() + schema = schema.Clone() + if len(schema.Charset) == 0 { + schema.Charset = mysql.DefaultCharset } - return buf.String(), nil + return d.CreateSchemaWithInfo(gs.se, schema, ddl.OnExistIgnore, true) } -// ShowCreateTable implements glue.Session -func (gs *tidbSession) ShowCreateTable(table *model.TableInfo, allocator autoid.Allocator) (string, error) { - var buf bytes.Buffer - if err := executor.ConstructResultOfShowCreateTable(gs.se, table, allocator, &buf); err != nil { - return "", err +// CreateTable implements glue.Session +func (gs *tidbSession) CreateTable(ctx context.Context, dbName model.CIStr, table *model.TableInfo) error { + d := domain.GetDomain(gs.se).DDL() + + // Clone() does not clone partitions yet :( + table = table.Clone() + if table.Partition != nil { + newPartition := *table.Partition + newPartition.Definitions = append([]model.PartitionDefinition{}, table.Partition.Definitions...) + table.Partition = &newPartition } - return buf.String(), nil + + return d.CreateTableWithInfo(gs.se, dbName, table, ddl.OnExistIgnore, true) } // Close implements glue.Session diff --git a/pkg/restore/db.go b/pkg/restore/db.go index ae90df371..6197ff7a2 100644 --- a/pkg/restore/db.go +++ b/pkg/restore/db.go @@ -6,7 +6,6 @@ import ( "context" "fmt" "sort" - "strings" "github.com/pingcap/errors" "github.com/pingcap/log" @@ -70,57 +69,28 @@ func (db *DB) ExecDDL(ctx context.Context, ddlJob *model.Job) error { // CreateDatabase executes a CREATE DATABASE SQL. 
func (db *DB) CreateDatabase(ctx context.Context, schema *model.DBInfo) error { - createSQL, err := db.se.ShowCreateDatabase(schema) + err := db.se.CreateDatabase(ctx, schema) if err != nil { - log.Error("build create database SQL failed", zap.Stringer("db", schema.Name), zap.Error(err)) - return errors.Trace(err) - } - err = db.se.Execute(ctx, createSQL) - if err != nil { - log.Error("create database failed", zap.String("query", createSQL), zap.Error(err)) + log.Error("create database failed", zap.Stringer("db", schema.Name), zap.Error(err)) } return errors.Trace(err) } // CreateTable executes a CREATE TABLE SQL. func (db *DB) CreateTable(ctx context.Context, table *utils.Table) error { - tableInfo := table.Info - createSQL, err := db.se.ShowCreateTable(tableInfo, newIDAllocator(tableInfo.AutoIncID)) - if err != nil { - log.Error( - "build create table SQL failed", - zap.Stringer("db", table.Db.Name), - zap.Stringer("table", tableInfo.Name), - zap.Error(err)) - return errors.Trace(err) - } - switchDbSQL := fmt.Sprintf("use %s;", utils.EncloseName(table.Db.Name.O)) - err = db.se.Execute(ctx, switchDbSQL) - if err != nil { - log.Error("switch db failed", - zap.String("SQL", switchDbSQL), - zap.Stringer("db", table.Db.Name), - zap.Error(err)) - return errors.Trace(err) - } - // Insert `IF NOT EXISTS` statement to skip the created tables - words := strings.SplitN(createSQL, " ", 3) - if len(words) > 2 && strings.ToUpper(words[0]) == "CREATE" && strings.ToUpper(words[1]) == "TABLE" { - createSQL = "CREATE TABLE IF NOT EXISTS " + words[2] - } - err = db.se.Execute(ctx, createSQL) + err := db.se.CreateTable(ctx, table.Db.Name, table.Info) if err != nil { log.Error("create table failed", - zap.String("SQL", createSQL), zap.Stringer("db", table.Db.Name), zap.Stringer("table", table.Info.Name), zap.Error(err)) return errors.Trace(err) } alterAutoIncIDSQL := fmt.Sprintf( - "alter table %s auto_increment = %d", - utils.EncloseName(tableInfo.Name.O), - tableInfo.AutoIncID) + "alter table %s.%s auto_increment = %d", + utils.EncloseName(table.Db.Name.O), + utils.EncloseName(table.Info.Name.O), + table.Info.AutoIncID) err = db.se.Execute(ctx, alterAutoIncIDSQL) if err != nil { log.Error("alter AutoIncID failed", diff --git a/pkg/restore/util.go b/pkg/restore/util.go index d322c9de0..2652b1e7b 100644 --- a/pkg/restore/util.go +++ b/pkg/restore/util.go @@ -16,7 +16,6 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "github.com/pingcap/parser/model" - "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" @@ -28,44 +27,6 @@ import ( var recordPrefixSep = []byte("_r") -// idAllocator always returns a specified ID -type idAllocator struct { - id int64 -} - -func newIDAllocator(id int64) *idAllocator { - return &idAllocator{id: id} -} - -func (alloc *idAllocator) Alloc(tableID int64, n uint64, increment, offset int64) (min int64, max int64, err error) { - return alloc.id, alloc.id, nil -} - -func (alloc *idAllocator) AllocSeqCache(sequenceID int64) (min int64, max int64, round int64, err error) { - // TODO fix this function after support backup sequence - return 0, 0, 0, nil -} - -func (alloc *idAllocator) Rebase(tableID, newBase int64, allocIDs bool) error { - return nil -} - -func (alloc *idAllocator) Base() int64 { - return alloc.id -} - -func (alloc *idAllocator) End() int64 { - return alloc.id -} - -func (alloc *idAllocator) NextGlobalAutoID(tableID int64) (int64, error) { - return alloc.id, nil -} - 
-func (alloc *idAllocator) GetType() autoid.AllocatorType { - return autoid.RowIDAllocType -} - // GetRewriteRules returns the rewrite rule of the new table and the old table. func GetRewriteRules( newTable *model.TableInfo, diff --git a/tests/_utils/run_services b/tests/_utils/run_services index 07fe1a2ad..f31152932 100644 --- a/tests/_utils/run_services +++ b/tests/_utils/run_services @@ -38,6 +38,9 @@ stop_services() { start_services() { stop_services + TIDB_CONFIG="${1-tests}/config/tidb.toml" + TIKV_CONFIG="${1-tests}/config/tikv.toml" + echo "Starting PD..." mkdir -p "$TEST_DIR/pd" bin/pd-server \ @@ -63,7 +66,7 @@ start_services() { -A "$TIKV_ADDR$i" \ --status-addr "$TIKV_STATUS_ADDR$i" \ --log-file "$TEST_DIR/tikv${i}.log" \ - -C "tests/config/tikv.toml" \ + -C "$TIKV_CONFIG" \ -s "$TEST_DIR/tikv${i}" & done @@ -83,7 +86,7 @@ start_services() { --status 10080 \ --store tikv \ --path "$PD_ADDR" \ - --config "tests/config/tidb.toml" \ + --config "$TIDB_CONFIG" \ --log-file "$TEST_DIR/tidb.log" & echo "Verifying TiDB is started..." diff --git a/tests/br_alter_pk_server/config/tidb.toml b/tests/br_alter_pk_server/config/tidb.toml new file mode 100644 index 000000000..30b7d4869 --- /dev/null +++ b/tests/br_alter_pk_server/config/tidb.toml @@ -0,0 +1,8 @@ +# config of tidb + +# Schema lease duration +# There are lots of DDLs in the tests; setting this +# to 360s tests whether BR shuts down gracefully. +lease = "360s" + +alter-primary-key = true diff --git a/tests/br_alter_pk_server/config/tikv.toml b/tests/br_alter_pk_server/config/tikv.toml new file mode 100644 index 000000000..edcd02a98 --- /dev/null +++ b/tests/br_alter_pk_server/config/tikv.toml @@ -0,0 +1,14 @@ +# config of tikv + +[coprocessor] +region-max-keys = 20 +region-split-keys = 12 + +[rocksdb] +max-open-files = 4096 +[raftdb] +max-open-files = 4096 +[raftstore] +# true (default value) for high reliability, this can prevent data loss in case of power failure. +sync-log = false +capacity = "10GB" diff --git a/tests/br_alter_pk_server/run.sh b/tests/br_alter_pk_server/run.sh new file mode 100755 index 000000000..6485a43be --- /dev/null +++ b/tests/br_alter_pk_server/run.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# +# Copyright 2020 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu + +cur=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +source $cur/../_utils/run_services + +DB="$TEST_NAME" + +# prepare database +echo "Restart cluster with alter-primary-key = true" +start_services "$cur" + +run_sql "drop schema if exists $DB;" +run_sql "create schema $DB;" + +run_sql "create table $DB.a (a int primary key, b int unique);" +run_sql "insert into $DB.a values (42, 42);" + +# backup
run_br --pd $PD_ADDR backup db --db "$DB" -s "local://$TEST_DIR/$DB" + +# restore +run_sql "drop schema $DB;" +run_br --pd $PD_ADDR restore db --db "$DB" -s "local://$TEST_DIR/$DB" + +run_sql "drop schema $DB;" +echo "Restart service with alter-primary-key = false" +start_services