diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 000000000..872699919 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,29 @@ +--- +name: "🐛 Bug Report" +about: Something isn't working as expected +title: '' +labels: 'bug' +--- + +Please answer these questions before submitting your issue. Thanks! + +1. What did you do? +If possible, provide a recipe for reproducing the error. + + +2. What did you expect to see? + + + +3. What did you see instead? + + + +4. What version of BR and TiDB/TiKV/PD are you using? + + diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md new file mode 100644 index 000000000..e895af84d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -0,0 +1,19 @@ +--- +name: "🚀 Feature Request" +about: I have a suggestion +labels: enhancement +--- + +## Feature Request + +### Describe your feature request related problem: + + +### Describe the feature you'd like: + + +### Describe alternatives you've considered: + + +### Teachability, Documentation, Adoption, Migration Strategy: + diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000..0f6ae8de0 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,37 @@ + + +### What problem does this PR solve? + + +### What is changed and how it works? 
+ + +### Check List + +Tests + + - Unit test + - Integration test + - Manual test (add detailed scripts or steps below) + - No code + +Code changes + + - Has exported function/method change + - Has exported variable/fields change + - Has interface methods change + - Has persistent data change + +Side effects + + - Possible performance regression + - Increased code complexity + - Breaking backward compatibility + +Related changes + + - Need to cherry-pick to the release branch + - Need to update the documentation + - Need to be included in the release note diff --git a/.golangci.yml b/.golangci.yml index 969cac759..1b025678e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -9,7 +9,8 @@ issues: text: "Potential HTTP request made with variable url" linters: - gosec - - path: .go - text: "Use of weak random number generator" + # TODO Remove it. + - path: split_client.go + text: "SA1019:" linters: - - gosec + - staticcheck diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..6db4cfd21 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,18 @@ +# BR (Backup and Restore) Change Log +All notable changes to this project are documented in this file. +See also, +- [TiDB Changelog](https://github.com/pingcap/tidb/blob/master/CHANGELOG.md), +- [TiKV Changelog](https://github.com/tikv/tikv/blob/master/CHANGELOG.md), +- [PD Changelog](https://github.com/pingcap/pd/blob/master/CHANGELOG.md). 
+ +## [3.1.0-beta.1] - 2020-01-10 + +- Fix the inaccurate backup progress information [#127](https://github.com/pingcap/br/pull/127) +- Improve the performance of splitting Regions [#122](https://github.com/pingcap/br/pull/122) +- Add the backup and restore feature for partitioned tables [#137](https://github.com/pingcap/br/pull/137) +- Add the feature of automatically scheduling PD schedulers [#123](https://github.com/pingcap/br/pull/123) +- Fix the issue that data is overwritten after non `PKIsHandle` tables are restored [#139](https://github.com/pingcap/br/pull/139) + +## [3.1.0-beta] - 2019-12-20 + +Initial release of the distributed backup and restore tool diff --git a/Makefile b/Makefile index a03cedc54..839a27b9e 100644 --- a/Makefile +++ b/Makefile @@ -26,6 +26,8 @@ build_for_integration_test: -o bin/br.test # build key locker GO111MODULE=on go build -race -o bin/locker tests/br_key_locked/*.go + # build gc + GO111MODULE=on go build -race -o bin/gc tests/br_z_gc_safepoint/*.go test: GO111MODULE=on go test -race -tags leak ./... diff --git a/cmd/backup.go b/cmd/backup.go index 34e1a8970..39aa4fd28 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -1,165 +1,21 @@ package cmd import ( - "context" - - "github.com/pingcap/errors" - "github.com/pingcap/log" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/session" "github.com/spf13/cobra" - "github.com/spf13/pflag" - "github.com/pingcap/br/pkg/backup" - "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) -const ( - flagBackupTimeago = "timeago" - flagBackupRateLimit = "ratelimit" - flagBackupConcurrency = "concurrency" - flagBackupChecksum = "checksum" - flagLastBackupTS = "lastbackupts" -) - -func defineBackupFlags(flagSet *pflag.FlagSet) { - flagSet.StringP( - flagBackupTimeago, "", "", - "The history version of the backup task, e.g. 1m, 1h. 
Do not exceed GCSafePoint") - flagSet.Uint64P( - flagBackupRateLimit, "", 0, "The rate limit of the backup task, MB/s per node") - flagSet.Uint32P( - flagBackupConcurrency, "", 4, "The size of thread pool on each node that execute the backup task") - flagSet.BoolP(flagBackupChecksum, "", true, - "Run checksum after backup") - flagSet.Uint64P(flagLastBackupTS, "", 0, "the last time backup ts") - _ = flagSet.MarkHidden(flagLastBackupTS) -} - -func runBackup(flagSet *pflag.FlagSet, cmdName, db, table string) error { - ctx, cancel := context.WithCancel(defaultContext) - defer cancel() - - mgr, err := GetDefaultMgr() - if err != nil { - return err - } - defer mgr.Close() - - timeago, err := flagSet.GetString(flagBackupTimeago) - if err != nil { - return err - } - - ratelimit, err := flagSet.GetUint64(flagBackupRateLimit) - if err != nil { - return err - } - - concurrency, err := flagSet.GetUint32(flagBackupConcurrency) - if err != nil { - return err - } - if concurrency == 0 { - err = errors.New("at least one thread required") - return err - } - - checksum, err := flagSet.GetBool(flagBackupChecksum) - if err != nil { - return err - } - - lastBackupTS, err := flagSet.GetUint64(flagLastBackupTS) - if err != nil { - return nil - } - - u, err := storage.ParseBackendFromFlags(flagSet, FlagStorage) - if err != nil { - return err - } - - client, err := backup.NewBackupClient(ctx, mgr) - if err != nil { - return nil - } - - err = client.SetStorage(ctx, u) - if err != nil { - return err - } - - backupTS, err := client.GetTS(ctx, timeago) - if err != nil { - return err - } - - defer summary.Summary(cmdName) - - ranges, backupSchemas, err := backup.BuildBackupRangeAndSchema( - mgr.GetDomain(), mgr.GetTiKV(), backupTS, db, table) - if err != nil { - return err - } - - // The number of regions need to backup - approximateRegions := 0 - for _, r := range ranges { - var regionCount int - regionCount, err = mgr.GetRegionCount(ctx, r.StartKey, r.EndKey) - if err != nil { - return err - 
} - approximateRegions += regionCount - } - - summary.CollectInt("backup total regions", approximateRegions) - // Backup - // Redirect to log if there is no log file to avoid unreadable output. - updateCh := utils.StartProgress( - ctx, cmdName, int64(approximateRegions), !HasLogFile()) - err = client.BackupRanges( - ctx, ranges, lastBackupTS, backupTS, ratelimit, concurrency, updateCh) - if err != nil { - return err - } - // Backup has finished - close(updateCh) - - // Checksum - backupSchemasConcurrency := backup.DefaultSchemaConcurrency - if backupSchemas.Len() < backupSchemasConcurrency { - backupSchemasConcurrency = backupSchemas.Len() - } - updateCh = utils.StartProgress( - ctx, "Checksum", int64(backupSchemas.Len()), !HasLogFile()) - backupSchemas.SetSkipChecksum(!checksum) - backupSchemas.Start( - ctx, mgr.GetTiKV(), backupTS, uint(backupSchemasConcurrency), updateCh) - - err = client.CompleteMeta(backupSchemas) - if err != nil { - return err - } - - valid, err := client.FastChecksum() - if err != nil { - return err - } - if !valid { - log.Error("backup FastChecksum failed!") - } - // Checksum has finished - close(updateCh) - - err = client.SaveBackupMeta(ctx) - if err != nil { +func runBackupCommand(command *cobra.Command, cmdName string) error { + cfg := task.BackupConfig{Config: task.Config{LogProgress: HasLogFile()}} + if err := cfg.ParseFromFlags(command.Flags()); err != nil { return err } - return nil + return task.RunBackup(GetDefaultContext(), cmdName, &cfg) } // NewBackupCommand return a full backup subcommand. @@ -189,7 +45,7 @@ func NewBackupCommand() *cobra.Command { newTableBackupCommand(), ) - defineBackupFlags(command.PersistentFlags()) + task.DefineBackupFlags(command.PersistentFlags()) return command } @@ -200,7 +56,7 @@ func newFullBackupCommand() *cobra.Command { Short: "backup all database", RunE: func(command *cobra.Command, _ []string) error { // empty db/table means full backup. 
- return runBackup(command.Flags(), "Full backup", "", "") + return runBackupCommand(command, "Full backup") }, } return command @@ -212,19 +68,10 @@ func newDbBackupCommand() *cobra.Command { Use: "db", Short: "backup a database", RunE: func(command *cobra.Command, _ []string) error { - db, err := command.Flags().GetString(flagDatabase) - if err != nil { - return err - } - if len(db) == 0 { - return errors.Errorf("empty database name is not allowed") - } - return runBackup(command.Flags(), "Database backup", db, "") + return runBackupCommand(command, "Database backup") }, } - command.Flags().StringP(flagDatabase, "", "", "backup a table in the specific db") - _ = command.MarkFlagRequired(flagDatabase) - + task.DefineDatabaseFlags(command) return command } @@ -234,26 +81,9 @@ func newTableBackupCommand() *cobra.Command { Use: "table", Short: "backup a table", RunE: func(command *cobra.Command, _ []string) error { - db, err := command.Flags().GetString(flagDatabase) - if err != nil { - return err - } - if len(db) == 0 { - return errors.Errorf("empty database name is not allowed") - } - table, err := command.Flags().GetString(flagTable) - if err != nil { - return err - } - if len(table) == 0 { - return errors.Errorf("empty table name is not allowed") - } - return runBackup(command.Flags(), "Table backup", db, table) + return runBackupCommand(command, "Table backup") }, } - command.Flags().StringP(flagDatabase, "", "", "backup a table in the specific db") - command.Flags().StringP(flagTable, "t", "", "backup the specific table") - _ = command.MarkFlagRequired(flagDatabase) - _ = command.MarkFlagRequired(flagTable) + task.DefineTableFlags(command) return command } diff --git a/cmd/cmd.go b/cmd/cmd.go index e97adec43..fdadaa6f8 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -2,46 +2,28 @@ package cmd import ( "context" - "fmt" "net/http" "net/http/pprof" "sync" "sync/atomic" - "github.com/pingcap/errors" "github.com/pingcap/log" - "github.com/pingcap/tidb/kv" - 
"github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/util/logutil" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "go.uber.org/zap" - "github.com/pingcap/br/pkg/conn" - "github.com/pingcap/br/pkg/storage" + "github.com/pingcap/br/pkg/task" + "github.com/pingcap/br/pkg/utils" ) var ( initOnce = sync.Once{} defaultContext context.Context - pdAddress string hasLogFile uint64 - - connOnce = sync.Once{} - defaultMgr *conn.Mgr ) const ( - // FlagPD is the name of url flag. - FlagPD = "pd" - // FlagCA is the name of CA flag. - FlagCA = "ca" - // FlagCert is the name of cert flag. - FlagCert = "cert" - // FlagKey is the name of key flag. - FlagKey = "key" - // FlagStorage is the name of storage flag. - FlagStorage = "storage" // FlagLogLevel is the name of log-level flag. FlagLogLevel = "log-level" // FlagLogFile is the name of log-file flag. @@ -51,26 +33,23 @@ const ( // FlagSlowLogFile is the name of slow-log-file flag. FlagSlowLogFile = "slow-log-file" - flagDatabase = "db" - - flagTable = "table" + flagVersion = "version" + flagVersionShort = "V" ) // AddFlags adds flags to the given cmd. func AddFlags(cmd *cobra.Command) { - cmd.PersistentFlags().StringP(FlagPD, "u", "127.0.0.1:2379", "PD address") - cmd.PersistentFlags().String(FlagCA, "", "CA certificate path for TLS connection") - cmd.PersistentFlags().String(FlagCert, "", "Certificate path for TLS connection") - cmd.PersistentFlags().String(FlagKey, "", "Private key path for TLS connection") - cmd.PersistentFlags().StringP(FlagStorage, "s", "", - `specify the url where backup storage, eg, "local:///path/to/save"`) + cmd.Version = utils.BRInfo() + cmd.Flags().BoolP(flagVersion, flagVersionShort, false, "Display version information about BR") + cmd.SetVersionTemplate("{{printf \"%s\" .Version}}\n") + cmd.PersistentFlags().StringP(FlagLogLevel, "L", "info", "Set the log level") cmd.PersistentFlags().String(FlagLogFile, "", "Set the log file path. 
If not set, logs will output to stdout") cmd.PersistentFlags().String(FlagStatusAddr, "", "Set the HTTP listening address for the status report service. Set to empty string to disable") - storage.DefineFlags(cmd.PersistentFlags()) + task.DefineCommonFlags(cmd.PersistentFlags()) cmd.PersistentFlags().StringP(FlagSlowLogFile, "", "", "Set the slow log file path. If not set, discard slow logs") @@ -133,12 +112,6 @@ func Init(cmd *cobra.Command) (err error) { } } }() - // Set the PD server address. - pdAddress, e = cmd.Flags().GetString(FlagPD) - if e != nil { - err = e - return - } }) return err } @@ -148,30 +121,6 @@ func HasLogFile() bool { return atomic.LoadUint64(&hasLogFile) != uint64(0) } -// GetDefaultMgr returns the default mgr for command line usage. -func GetDefaultMgr() (*conn.Mgr, error) { - if pdAddress == "" { - return nil, errors.New("pd address can not be empty") - } - - // Lazy initialize and defaultMgr - var err error - connOnce.Do(func() { - var storage kv.Storage - storage, err = tikv.Driver{}.Open( - // Disable GC because TiDB enables GC already. - fmt.Sprintf("tikv://%s?disableGC=true", pdAddress)) - if err != nil { - return - } - defaultMgr, err = conn.NewMgr(defaultContext, pdAddress, storage.(tikv.Storage)) - }) - if err != nil { - return nil, err - } - return defaultMgr, nil -} - // SetDefaultContext sets the default context for command line usage. 
func SetDefaultContext(ctx context.Context) { defaultContext = ctx diff --git a/cmd/restore.go b/cmd/restore.go index eee65ba86..2dfec9846 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -1,33 +1,20 @@ package cmd import ( - "context" - "strings" - - "github.com/gogo/protobuf/proto" - "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/backup" - "github.com/pingcap/log" "github.com/pingcap/tidb/session" "github.com/spf13/cobra" - flag "github.com/spf13/pflag" - "go.uber.org/zap" - "github.com/pingcap/br/pkg/conn" - "github.com/pingcap/br/pkg/restore" - "github.com/pingcap/br/pkg/storage" "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) -var schedulers = map[string]struct{}{ - "balance-leader-scheduler": {}, - "balance-hot-region-scheduler": {}, - "balance-region-scheduler": {}, - - "shuffle-leader-scheduler": {}, - "shuffle-region-scheduler": {}, - "shuffle-hot-region-scheduler": {}, +func runRestoreCommand(command *cobra.Command, cmdName string) error { + cfg := task.RestoreConfig{Config: task.Config{LogProgress: HasLogFile()}} + if err := cfg.ParseFromFlags(command.Flags()); err != nil { + return err + } + return task.RunRestore(GetDefaultContext(), cmdName, &cfg) } // NewRestoreCommand returns a restore subcommand @@ -54,166 +41,17 @@ func NewRestoreCommand() *cobra.Command { newDbRestoreCommand(), newTableRestoreCommand(), ) - - command.PersistentFlags().Uint("concurrency", 128, - "The size of thread pool that execute the restore task") - command.PersistentFlags().Uint64("ratelimit", 0, - "The rate limit of the restore task, MB/s per node. 
Set to 0 for unlimited speed.") - command.PersistentFlags().BoolP("checksum", "", true, - "Run checksum after restore") - command.PersistentFlags().BoolP("online", "", false, - "Whether online when restore") - // TODO remove hidden flag if it's stable - _ = command.PersistentFlags().MarkHidden("online") + task.DefineRestoreFlags(command.PersistentFlags()) return command } -func runRestore(flagSet *flag.FlagSet, cmdName, dbName, tableName string) error { - ctx, cancel := context.WithCancel(GetDefaultContext()) - defer cancel() - - mgr, err := GetDefaultMgr() - if err != nil { - return err - } - defer mgr.Close() - - client, err := restore.NewRestoreClient( - ctx, mgr.GetPDClient(), mgr.GetTiKV()) - if err != nil { - return errors.Trace(err) - } - defer client.Close() - err = initRestoreClient(ctx, client, flagSet) - if err != nil { - return errors.Trace(err) - } - - files := make([]*backup.File, 0) - tables := make([]*utils.Table, 0) - - defer summary.Summary(cmdName) - - switch { - case len(dbName) == 0 && len(tableName) == 0: - // full restore - for _, db := range client.GetDatabases() { - err = client.CreateDatabase(db.Schema) - if err != nil { - return errors.Trace(err) - } - for _, table := range db.Tables { - files = append(files, table.Files...) - } - tables = append(tables, db.Tables...) - } - case len(dbName) != 0 && len(tableName) == 0: - // database restore - db := client.GetDatabase(dbName) - err = client.CreateDatabase(db.Schema) - if err != nil { - return errors.Trace(err) - } - for _, table := range db.Tables { - files = append(files, table.Files...) 
- } - tables = db.Tables - case len(dbName) != 0 && len(tableName) != 0: - // table restore - db := client.GetDatabase(dbName) - err = client.CreateDatabase(db.Schema) - if err != nil { - return errors.Trace(err) - } - table := db.GetTable(tableName) - files = table.Files - tables = append(tables, table) - default: - return errors.New("must set db when table was set") - } - var newTS uint64 - if client.IsIncremental() { - newTS, err = client.GetTS(ctx) - if err != nil { - return err - } - } - summary.CollectInt("restore files", len(files)) - rewriteRules, newTables, err := client.CreateTables(mgr.GetDomain(), tables, newTS) - if err != nil { - return errors.Trace(err) - } - ranges, err := restore.ValidateFileRanges(files, rewriteRules) - if err != nil { - return err - } - summary.CollectInt("restore ranges", len(ranges)) - - // Redirect to log if there is no log file to avoid unreadable output. - updateCh := utils.StartProgress( - ctx, - cmdName, - // Split/Scatter + Download/Ingest - int64(len(ranges)+len(files)), - !HasLogFile()) - - err = restore.SplitRanges(ctx, client, ranges, rewriteRules, updateCh) - if err != nil { - log.Error("split regions failed", zap.Error(err)) - return errors.Trace(err) - } - - if !client.IsIncremental() { - var pdAddr string - pdAddr, err = flagSet.GetString(FlagPD) - if err != nil { - return errors.Trace(err) - } - pdAddrs := strings.Split(pdAddr, ",") - err = client.ResetTS(pdAddrs) - if err != nil { - log.Error("reset pd TS failed", zap.Error(err)) - return errors.Trace(err) - } - } - - removedSchedulers, err := RestorePrepareWork(ctx, client, mgr) - if err != nil { - return errors.Trace(err) - } - - err = client.RestoreAll(rewriteRules, updateCh) - if err != nil { - return errors.Trace(err) - } - - err = RestorePostWork(ctx, client, mgr, removedSchedulers) - if err != nil { - return errors.Trace(err) - } - // Restore has finished. 
- close(updateCh) - - // Checksum - updateCh = utils.StartProgress( - ctx, "Checksum", int64(len(newTables)), !HasLogFile()) - err = client.ValidateChecksum( - ctx, mgr.GetTiKV().GetClient(), tables, newTables, updateCh) - if err != nil { - return err - } - close(updateCh) - - return nil -} - func newFullRestoreCommand() *cobra.Command { command := &cobra.Command{ Use: "full", Short: "restore all tables", RunE: func(cmd *cobra.Command, _ []string) error { - return runRestore(cmd.Flags(), "Full Restore", "", "") + return runRestoreCommand(cmd, "Full restore") }, } return command @@ -224,18 +62,10 @@ func newDbRestoreCommand() *cobra.Command { Use: "db", Short: "restore tables in a database", RunE: func(cmd *cobra.Command, _ []string) error { - db, err := cmd.Flags().GetString(flagDatabase) - if err != nil { - return err - } - if len(db) == 0 { - return errors.New("empty database name is not allowed") - } - return runRestore(cmd.Flags(), "Database Restore", db, "") + return runRestoreCommand(cmd, "Database restore") }, } - command.Flags().String(flagDatabase, "", "database name") - _ = command.MarkFlagRequired(flagDatabase) + task.DefineDatabaseFlags(command) return command } @@ -244,129 +74,9 @@ func newTableRestoreCommand() *cobra.Command { Use: "table", Short: "restore a table", RunE: func(cmd *cobra.Command, _ []string) error { - db, err := cmd.Flags().GetString(flagDatabase) - if err != nil { - return err - } - if len(db) == 0 { - return errors.New("empty database name is not allowed") - } - table, err := cmd.Flags().GetString(flagTable) - if err != nil { - return err - } - if len(table) == 0 { - return errors.New("empty table name is not allowed") - } - return runRestore(cmd.Flags(), "Table Restore", db, table) + return runRestoreCommand(cmd, "Table restore") }, } - - command.Flags().String(flagDatabase, "", "database name") - command.Flags().String(flagTable, "", "table name") - - _ = command.MarkFlagRequired(flagDatabase) - _ = 
command.MarkFlagRequired(flagTable) + task.DefineTableFlags(command) return command } - -func initRestoreClient(ctx context.Context, client *restore.Client, flagSet *flag.FlagSet) error { - u, err := storage.ParseBackendFromFlags(flagSet, FlagStorage) - if err != nil { - return err - } - rateLimit, err := flagSet.GetUint64("ratelimit") - if err != nil { - return err - } - client.SetRateLimit(rateLimit * utils.MB) - s, err := storage.Create(ctx, u) - if err != nil { - return errors.Trace(err) - } - metaData, err := s.Read(ctx, utils.MetaFile) - if err != nil { - return errors.Trace(err) - } - backupMeta := &backup.BackupMeta{} - err = proto.Unmarshal(metaData, backupMeta) - if err != nil { - return errors.Trace(err) - } - err = client.InitBackupMeta(backupMeta, u) - if err != nil { - return errors.Trace(err) - } - - concurrency, err := flagSet.GetUint("concurrency") - if err != nil { - return err - } - client.SetConcurrency(concurrency) - - isOnline, err := flagSet.GetBool("online") - if err != nil { - return err - } - if isOnline { - client.EnableOnline() - } - - return nil -} - -// RestorePrepareWork execute some prepare work before restore -func RestorePrepareWork(ctx context.Context, client *restore.Client, mgr *conn.Mgr) ([]string, error) { - if client.IsOnline() { - return nil, nil - } - err := client.SwitchToImportMode(ctx) - if err != nil { - return nil, errors.Trace(err) - } - existSchedulers, err := mgr.ListSchedulers(ctx) - if err != nil { - return nil, errors.Trace(err) - } - needRemoveSchedulers := make([]string, 0, len(existSchedulers)) - for _, s := range existSchedulers { - if _, ok := schedulers[s]; ok { - needRemoveSchedulers = append(needRemoveSchedulers, s) - } - } - return removePDLeaderScheduler(ctx, mgr, needRemoveSchedulers) -} - -func removePDLeaderScheduler(ctx context.Context, mgr *conn.Mgr, existSchedulers []string) ([]string, error) { - removedSchedulers := make([]string, 0, len(existSchedulers)) - for _, scheduler := range 
existSchedulers { - err := mgr.RemoveScheduler(ctx, scheduler) - if err != nil { - return nil, err - } - removedSchedulers = append(removedSchedulers, scheduler) - } - return removedSchedulers, nil -} - -// RestorePostWork execute some post work after restore -func RestorePostWork(ctx context.Context, client *restore.Client, mgr *conn.Mgr, removedSchedulers []string) error { - if client.IsOnline() { - return nil - } - err := client.SwitchToNormalMode(ctx) - if err != nil { - return errors.Trace(err) - } - return addPDLeaderScheduler(ctx, mgr, removedSchedulers) -} - -func addPDLeaderScheduler(ctx context.Context, mgr *conn.Mgr, removedSchedulers []string) error { - for _, scheduler := range removedSchedulers { - err := mgr.AddScheduler(ctx, scheduler) - if err != nil { - return err - } - } - return nil -} diff --git a/cmd/validate.go b/cmd/validate.go index dd1e11fb0..559cb9983 100644 --- a/cmd/validate.go +++ b/cmd/validate.go @@ -15,12 +15,11 @@ import ( "github.com/pingcap/log" "github.com/pingcap/parser/model" "github.com/pingcap/pd/pkg/mock/mockid" - restore_util "github.com/pingcap/tidb-tools/pkg/restore-util" "github.com/spf13/cobra" "go.uber.org/zap" "github.com/pingcap/br/pkg/restore" - "github.com/pingcap/br/pkg/storage" + "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" ) @@ -55,24 +54,14 @@ func newCheckSumCommand() *cobra.Command { ctx, cancel := context.WithCancel(GetDefaultContext()) defer cancel() - u, err := storage.ParseBackendFromFlags(cmd.Flags(), FlagStorage) - if err != nil { + var cfg task.Config + if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { return err } - s, err := storage.Create(ctx, u) - if err != nil { - return errors.Trace(err) - } - - metaData, err := s.Read(ctx, utils.MetaFile) - if err != nil { - return errors.Trace(err) - } - backupMeta := &backup.BackupMeta{} - err = proto.Unmarshal(metaData, backupMeta) + _, s, backupMeta, err := task.ReadBackupMeta(ctx, &cfg) if err != nil { - return errors.Trace(err) 
+ return err } dbs, err := utils.LoadBackupTables(backupMeta) @@ -153,24 +142,14 @@ func newBackupMetaCommand() *cobra.Command { if err != nil { return err } - u, err := storage.ParseBackendFromFlags(cmd.Flags(), FlagStorage) - if err != nil { - return err - } - s, err := storage.Create(ctx, u) - if err != nil { - log.Error("create storage failed", zap.Error(err)) - return errors.Trace(err) - } - data, err := s.Read(ctx, utils.MetaFile) - if err != nil { - log.Error("load backupmeta failed", zap.Error(err)) + + var cfg task.Config + if err = cfg.ParseFromFlags(cmd.Flags()); err != nil { return err } - backupMeta := &backup.BackupMeta{} - err = proto.Unmarshal(data, backupMeta) + _, _, backupMeta, err := task.ReadBackupMeta(ctx, &cfg) if err != nil { - log.Error("parse backupmeta failed", zap.Error(err)) + log.Error("read backupmeta failed", zap.Error(err)) return err } dbs, err := utils.LoadBackupTables(backupMeta) @@ -187,15 +166,15 @@ func newBackupMetaCommand() *cobra.Command { tables = append(tables, db.Tables...) 
} // Check if the ranges of files overlapped - rangeTree := restore_util.NewRangeTree() + rangeTree := restore.NewRangeTree() for _, file := range files { - if out := rangeTree.InsertRange(restore_util.Range{ + if out := rangeTree.InsertRange(restore.Range{ StartKey: file.GetStartKey(), EndKey: file.GetEndKey(), }); out != nil { log.Error( "file ranges overlapped", - zap.Stringer("out", out.(*restore_util.Range)), + zap.Stringer("out", out.(*restore.Range)), zap.Stringer("file", file), ) } @@ -206,7 +185,7 @@ func newBackupMetaCommand() *cobra.Command { for offset := uint64(0); offset < tableIDOffset; offset++ { _, _ = tableIDAllocator.Alloc() // Ignore error } - rewriteRules := &restore_util.RewriteRules{ + rewriteRules := &restore.RewriteRules{ Table: make([]*import_sstpb.RewriteRule, 0), Data: make([]*import_sstpb.RewriteRule, 0), } @@ -242,8 +221,7 @@ func newBackupMetaCommand() *cobra.Command { return nil }, } - command.Flags().String("path", "", "the path of backupmeta") - command.Flags().Uint64P("offset", "", 0, "the offset of table id alloctor") + command.Flags().Uint64("offset", 0, "the offset of table id alloctor") command.Hidden = true return command } @@ -255,24 +233,16 @@ func decodeBackupMetaCommand() *cobra.Command { RunE: func(cmd *cobra.Command, args []string) error { ctx, cancel := context.WithCancel(GetDefaultContext()) defer cancel() - u, err := storage.ParseBackendFromFlags(cmd.Flags(), FlagStorage) - if err != nil { - return errors.Trace(err) - } - s, err := storage.Create(ctx, u) - if err != nil { - return errors.Trace(err) + + var cfg task.Config + if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { + return err } - metaData, err := s.Read(ctx, utils.MetaFile) + _, s, backupMeta, err := task.ReadBackupMeta(ctx, &cfg) if err != nil { - return errors.Trace(err) + return err } - backupMeta := &backup.BackupMeta{} - err = proto.Unmarshal(metaData, backupMeta) - if err != nil { - return errors.Trace(err) - } backupMetaJSON, err := 
json.Marshal(backupMeta) if err != nil { return errors.Trace(err) @@ -310,14 +280,16 @@ func encodeBackupMetaCommand() *cobra.Command { RunE: func(cmd *cobra.Command, args []string) error { ctx, cancel := context.WithCancel(GetDefaultContext()) defer cancel() - u, err := storage.ParseBackendFromFlags(cmd.Flags(), FlagStorage) - if err != nil { - return errors.Trace(err) + + var cfg task.Config + if err := cfg.ParseFromFlags(cmd.Flags()); err != nil { + return err } - s, err := storage.Create(ctx, u) + _, s, err := task.GetStorage(ctx, &cfg) if err != nil { - return errors.Trace(err) + return err } + metaData, err := s.Read(ctx, utils.MetaJSONFile) if err != nil { return errors.Trace(err) diff --git a/cmd/version.go b/cmd/version.go deleted file mode 100644 index af4f7d386..000000000 --- a/cmd/version.go +++ /dev/null @@ -1,20 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" - - "github.com/pingcap/br/pkg/utils" -) - -// NewVersionCommand returns a restore subcommand -func NewVersionCommand() *cobra.Command { - bp := &cobra.Command{ - Use: "version", - Short: "output version information", - Args: cobra.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - utils.PrintBRInfo() - }, - } - return bp -} diff --git a/go.mod b/go.mod index da0689b44..1761ada78 100644 --- a/go.mod +++ b/go.mod @@ -6,30 +6,39 @@ require ( cloud.google.com/go/storage v1.4.0 github.com/aws/aws-sdk-go v1.26.1 github.com/cheggaaa/pb/v3 v3.0.1 + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect + github.com/fatih/color v1.9.0 // indirect github.com/fsouza/fake-gcs-server v1.15.0 github.com/go-sql-driver/mysql v1.4.1 github.com/gogo/protobuf v1.3.1 github.com/google/btree v1.0.0 github.com/google/uuid v1.1.1 + github.com/mattn/go-runewidth v0.0.7 // indirect github.com/onsi/ginkgo v1.10.3 // indirect github.com/onsi/gomega v1.7.1 // indirect - github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 - 
github.com/pingcap/errors v0.11.4 - github.com/pingcap/kvproto v0.0.0-20191230111320-549d10f19d46 - github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd - github.com/pingcap/parser v0.0.0-20191205054626-288fe5207ce6 - github.com/pingcap/pd v1.1.0-beta.0.20191115131715-6b7dc037010e - github.com/pingcap/tidb v1.1.0-beta.0.20191205065313-6083b21f986b - github.com/pingcap/tidb-tools v3.1.0-beta.0.20191230034204-f90021ce2de1+incompatible + github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 + github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 + github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 // indirect + github.com/pingcap/kvproto v0.0.0-20200214082216-7ccc45d0063f + github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 + github.com/pingcap/parser v0.0.0-20200213042211-e357ed5f237b + github.com/pingcap/pd v1.1.0-beta.0.20200213133706-fbbe75e180e6 + github.com/pingcap/tidb v1.1.0-beta.0.20200218111531-28c9efc12b19 + github.com/pingcap/tidb-tools v4.0.0-beta+incompatible github.com/pingcap/tipb v0.0.0-20191126033718-169898888b24 - github.com/prometheus/client_golang v1.1.0 + github.com/prometheus/client_golang v1.0.0 + github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect github.com/sirupsen/logrus v1.4.2 github.com/spf13/cobra v0.0.5 github.com/spf13/pflag v1.0.3 go.opencensus.io v0.22.2 // indirect - go.uber.org/zap v1.10.0 - golang.org/x/net v0.0.0-20191011234655-491137f69257 // indirect + go.uber.org/atomic v1.5.1 // indirect + go.uber.org/zap v1.13.0 + golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 + golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042 // indirect google.golang.org/api v0.14.0 - google.golang.org/grpc v1.24.0 + google.golang.org/grpc v1.25.1 + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect ) diff --git a/go.sum b/go.sum index 2c77de885..485a32d07 100644 --- a/go.sum +++ b/go.sum @@ -20,46 
+20,58 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f h1:5ZfJxyXo8KyX8DgGXC5B7ILL8y51fci/qYz2B4j8iLY= -github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod 
h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/aws/aws-sdk-go v1.26.1 h1:JGQggXhOiNJIqsmbYUl3cYtJZUffeOWlHtxfzGK7WPI= github.com/aws/aws-sdk-go v1.26.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d h1:rQlvB2AYWme2bIB18r/SipGiMEVJYE9U0z+MGoU/LtQ= github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cheggaaa/pb/v3 v3.0.1 h1:m0BngUk2LuSRYdx4fujDKNRXNDpbNCfptPfVT2m6OJY= github.com/cheggaaa/pb/v3 v3.0.1/go.mod h1:SqqeMF/pMOIu3xgGoxtPYhMNQP258xE4x/XRTYua+KU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20171208011716-f6d7a1f6fbf3/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram 
v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142 h1:3jFq2xL4ZajGK4aZY8jz+DAF0FHjI51BXjjSwCzS1Dk= github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= github.com/cznic/mathutil 
v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65 h1:hxuZop6tSoOi0sxFzoGGYdRqNrPubyaIf9KoBG9tPiE= -github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= +github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8 h1:LpMLYGyy67BoAFGda1NeOBQwqlv7nUXpm+rIVHGxZZ4= +github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -75,29 +87,60 @@ github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsouza/fake-gcs-server v1.15.0 h1:ss/ztlt10Y64A5qslmxZKsiqW/i28t5DkRtv6qSFaLQ= github.com/fsouza/fake-gcs-server v1.15.0/go.mod h1:HNxAJ/+FY/XSsxuwz8iIYdp2GtMmPbJ8WQjjGMxd6Qk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= +github.com/gin-contrib/cors v1.3.0/go.mod h1:artPvLlhkF7oG06nK8v3U8TNz6IeX+w1uzCSEId5/Vc= +github.com/gin-contrib/gzip v0.0.1/go.mod h1:fGBJBCdt6qCZuCAOwWuFhBB4OOq9EFqlo5dEaFhhu5w= +github.com/gin-contrib/sse v0.0.0-20170109093832-22d885f9ecc7/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= +github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= +github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= 
-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= +github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.6/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/overalls v0.0.0-20180201144345-22ec1a223b7c/go.mod h1:UqxAgEOt89sCiXlrc/ycnx00LVvUO/eS8tMUkWX4R7w= +github.com/go-playground/universal-translator v0.16.0/go.mod 
h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= @@ -106,19 +149,19 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -142,12 +185,6 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gorilla/context 
v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -158,11 +195,13 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.5.1 h1:3scN4iuXkNOyP98jF55Lv8a9j1o/IwvnDIZ0LHJK1nk= -github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 
h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -172,42 +211,64 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5i github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY= github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod 
h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= 
-github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -225,9 +286,11 @@ github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3 github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8= github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= github.com/onsi/gomega v1.7.1/go.mod 
h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= @@ -236,34 +299,42 @@ github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKw github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.3.0 h1:e5+lF2E4Y2WCIxBefVowBuB0iHrUH4HZ8q+6mGF7fJc= github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d h1:U+PMnTlV2tu7RuMK5etusZG3Cf+rpow5hqQByeCzJ2g= +github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d/go.mod h1:lXfE4PvvTW5xOjO6Mba8zDPyw8M93B6AQ7frTGnMlA8= +github.com/pingcap-incubator/tidb-dashboard v0.0.0-20200110133619-1c1c65dd8750/go.mod h1:Yx2Ix+adNvCO8F3tHgZmgt9sJhOjJy/B4CW/6filV4w= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg= github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= +github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4 h1:iRtOAQ6FXkY/BGvst3CDfTva4nTqh6CL8WXvanLdbu0= +github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= +github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12 h1:rfD9v3+ppLPzoQBgZev0qYCpegrwyFx/BUpkApEiKdY= +github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9 h1:KH4f4Si9XK6/IW50HtoaiLIFHGkapOM6w83za47UYik= github.com/pingcap/errcode v0.0.0-20180921232412-a1a7271709d9/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pingcap/errors v0.11.4 
h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= -github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c h1:hvQd3aOLKLF7xvRV6DzvPkKY4QXzfVbjU1BhW0d9yL8= -github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= -github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8= +github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011 h1:58naV4XMEqm0hl9LcYo6cZoGBGiLtefMQMF/vo3XLgQ= +github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d h1:F8vp38kTAckN+v8Jlc98uMBvKIzr1a+UhnLyVYn8Q5Q= +github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= -github.com/pingcap/kvproto v0.0.0-20191018025622-fbf07f9804da/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20191113105027-4f292e1801d8 h1:P9jGgwVkLHlbEGtgGKrY0k/yy6N8L8Gdj8dliFswllU= -github.com/pingcap/kvproto v0.0.0-20191113105027-4f292e1801d8/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/kvproto v0.0.0-20191230111320-549d10f19d46 h1:pY8O95L5XCLdX6HP3eo1SlckJrbhmOx+3l6T5B6gCwo= -github.com/pingcap/kvproto v0.0.0-20191230111320-549d10f19d46/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= -github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd h1:hWDol43WY5PGhsh3+8794bFHY1bPrmu6bTalpssCrGg= -github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw= -github.com/pingcap/parser v0.0.0-20191205054626-288fe5207ce6 h1:KrJorS9gGYMhsQjENNWAeB5ho28xbowZ74pfJWkOmFc= 
-github.com/pingcap/parser v0.0.0-20191205054626-288fe5207ce6/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= -github.com/pingcap/pd v1.1.0-beta.0.20191115131715-6b7dc037010e h1:6En0+9KDJ6CjkuzbGwHgo+P1YjEt22jn+nujl6cSAXA= -github.com/pingcap/pd v1.1.0-beta.0.20191115131715-6b7dc037010e/go.mod h1:ribyi6AyFNOElWgb6VnUsky4JFciEoGApSUzIcJxGSI= -github.com/pingcap/tidb v1.1.0-beta.0.20191205065313-6083b21f986b h1:Up6nsW7HGjXQUovsgsH6DbWkpyGI28oQPV8/JdNSnPA= -github.com/pingcap/tidb v1.1.0-beta.0.20191205065313-6083b21f986b/go.mod h1:DpKqioE0vAyiX9wFR8tG/HCnn0jj4/ZPMxEqLgUUMAI= +github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E= +github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= +github.com/pingcap/kvproto v0.0.0-20200213074014-83e827908584 h1:DhQfXNn9m36b2/4zUfPHDDR6CwS2VONbfPC4s+LMVj0= +github.com/pingcap/kvproto v0.0.0-20200213074014-83e827908584/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20200214082216-7ccc45d0063f h1:wi4TNMBfsgiMsOlTSHBq4JKFViabIA1W0d+owiLtp70= +github.com/pingcap/kvproto v0.0.0-20200214082216-7ccc45d0063f/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= +github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= +github.com/pingcap/parser v0.0.0-20200213042211-e357ed5f237b h1:oKql7mOA71N7NxMn3MHtYcxntXrFxNPDMDalF/dW3iM= +github.com/pingcap/parser v0.0.0-20200213042211-e357ed5f237b/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= +github.com/pingcap/pd v1.1.0-beta.0.20200213133706-fbbe75e180e6 h1:6Ut7/Gg6nO2tkrufRncFsI4WnYsmrLI0DN8xcGGOFL8= +github.com/pingcap/pd v1.1.0-beta.0.20200213133706-fbbe75e180e6/go.mod h1:zezAKmc5aqNUREQdxxeP4WuAx22FlPQL/p7xFYKoThU= +github.com/pingcap/tidb 
v1.1.0-beta.0.20200218111531-28c9efc12b19 h1:0BR+dr+e+LK7dCGpMMyY7pK5KccTl1JxLvS7flQZbOo= +github.com/pingcap/tidb v1.1.0-beta.0.20200218111531-28c9efc12b19/go.mod h1:NtZod8uyqDhHvo5Y85y2SI6rjPcfsDdTkq/Rs4Hkrn0= github.com/pingcap/tidb-tools v3.0.6-0.20191119150227-ff0a3c6e5763+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= -github.com/pingcap/tidb-tools v3.1.0-beta.0.20191230034204-f90021ce2de1+incompatible h1:Hci3A0Xx5J6sGEB1DwJnaAeFFo2+ztNlAkUnWcdsVNE= -github.com/pingcap/tidb-tools v3.1.0-beta.0.20191230034204-f90021ce2de1+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= +github.com/pingcap/tidb-tools v4.0.0-beta+incompatible h1:+XJdcVLCM8GDgXiMS6lFV59N3XPVOqtNHeWNLVrb2pg= +github.com/pingcap/tidb-tools v4.0.0-beta+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= github.com/pingcap/tipb v0.0.0-20191126033718-169898888b24 h1:9cdSUluc+Q4yGzGg8AeG46/e8Rw7pJ5jJz9Y4QRNvKE= github.com/pingcap/tipb v0.0.0-20191126033718-169898888b24/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -271,39 +342,33 @@ github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= 
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7 
h1:FUL3b97ZY2EPqg2NbXKuMHs5pXJB9hjj1fDHnF2vl28= github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook= +github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil v2.18.10+incompatible h1:cy84jW6EVRPa5g9HAHrlbxMSIjBhDSX0OFYyMYminYs= -github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v2.19.10+incompatible h1:lA4Pi29JEVIQIgATSeftHSY0rMGI9CLrl2ZvDLiahto= +github.com/shirou/gopsutil v2.19.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 h1:SWV2fHctRpRrp49VXJ6UZja7gU9QLHwRpIPBN89SKEo= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca h1:3fECS8atRjByijiI8yYiuwLwQ2ZxXobW7ua/8GRB3pI= github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus 
v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= @@ -312,11 +377,14 @@ github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= @@ -324,12 +392,20 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/struCoder/pidusage v0.1.2/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI= +github.com/swaggo/cli v1.20.0/go.mod h1:7jzoQluD0EWMc0rxx6kkPoRNfYNHkNJI/NokjEwJiwM= +github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14/go.mod h1:gxQT6pBGRuIGunNf/+tSOB5OHvguWi8Tbt82WOkf35E= +github.com/swaggo/gin-swagger v1.2.0/go.mod h1:qlH2+W7zXGZkczuL+r2nEBR2JTT+/lX05Nn6vPhc7OI= +github.com/swaggo/http-swagger v0.0.0-20200103000832-0e9263c4b516/go.mod h1:O1lAbCgAAX/KZ80LM/OXwtWFI/5TvZlwxSg8Cq08PV0= +github.com/swaggo/swag v1.5.1/go.mod h1:1Bl9F/ZBpVWh22nY0zmYyASPO1lI/zIwRDrpZU+tv8Y= +github.com/swaggo/swag v1.6.3/go.mod h1:wcc83tB4Mb2aNiL/HP4MFeQdpHUrca+Rp/DRNgWAUio= +github.com/swaggo/swag v1.6.4/go.mod h1:3LVbAPI0ekF7sEPuA4XcVsSeVLAxx3hAPD3+O6b1vL4= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d h1:4J9HCZVpvDmj2tiKGSTUnb3Ok/9CEQb9oqu9LHKQQpc= github.com/syndtr/goleveldb v0.0.0-20180815032940-ae2bd5eed72d/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0= github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ= @@ -343,44 +419,59 @@ github.com/uber/jaeger-client-go v2.15.0+incompatible h1:NP3qsSqNxh8VYr956ur1N/1 github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v1.5.0 h1:OHbgr8l656Ub3Fw5k9SWnBfIEwvoHQ+W2y+Aa9D1Uyo= github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go v1.1.2 h1:JON3E2/GPW2iDNGoSAusl1KDf5TRQ8k8q7Tp097pZGs= -github.com/ugorji/go v1.1.2/go.mod 
h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.5-pre/go.mod h1:FwP/aQVg39TXzItUBMwnWp9T9gPQnXw4Poh4/oBQZ/0= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43 h1:BasDe+IErOQKrMVXab7UayvSlIpiyGwRvuX3EKYY7UA= -github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA= +github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2tfCQDUqRd8fI= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/unrolled/render v0.0.0-20171102162132-65450fb6b2d3/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= -github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4QUw6lhHsVymzwSDJOZcE0s2X8S20= -github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yookoala/realpath v1.0.0 h1:7OA9pj4FZd+oZDsyvXWQvjn5oBdcHRTV44PpdMSuImQ= github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= 
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20190320044326-77d4b742cdbf h1:rmttwKPEgG/l4UscTDYtaJgeUsedKPKSyFfNQLI6q+I= -go.etcd.io/etcd v0.0.0-20190320044326-77d4b742cdbf/go.mod h1:KSGwdbiFchh5KIC9My2+ZVl5/3ANcwohw50dpPwa2cw= +go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 h1:lWF4f9Nypl1ZqSb4gLeh/DGvBYVaUYHuiB93teOmwgc= +go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM= +go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4= +go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= +go.uber.org/multierr v1.4.0/go.mod 
h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190909091759-094676da4a83 h1:mgAKeshyNqWKdENOnQsg+8dRTwZFIwFaO3HNl52sweA= -golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876 h1:sKJQZMuxjOAR/Uo2LBfU90onWEf1dF4C+0hPJCc9Mpc= +golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -389,7 +480,6 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8 golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -398,16 +488,19 @@ golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile 
v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -415,11 +508,14 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190909003024-a7b16738d86b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191011234655-491137f69257 h1:ry8e2D+cwaV6hk7lb3aRTjjZo24shrbK0e11QEOkTIg= -golang.org/x/net v0.0.0-20191011234655-491137f69257/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= @@ -433,8 +529,10 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -443,10 +541,15 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190909082730-f460065e899a h1:mIzbOulag9/gXacgxKlFVwpCOWSfBT3/pDyyCwGA9as= -golang.org/x/sys v0.0.0-20190909082730-f460065e899a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200103143344-a1369afcdac7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1 h1:gZpLHxUX5BdYLA08Lj4YCJNN/jk7KtquiArPoeX0WvA= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -456,11 +559,10 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190130214255-bb1329dc71a0/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -468,16 +570,27 @@ 
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606050223-4d9ae51c2468/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a h1:TwMENskLwU2NnWBzrJGEWHqSiGUkO/B4rfyhwqDxDYQ= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042 h1:BKiPVwWbEdmAh+5CBwk13CYeVJQRDJpDnKgDyMOGz9M= +golang.org/x/tools v0.0.0-20200107184032-11e9d9cc0042/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -491,10 +604,7 @@ google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190108161440-ae2f86662275/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -502,38 +612,48 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9 h1:6XzpBoANz1NqMNfDXzc2QmHmbb1vyMsvRfoP5rM+K1I= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod 
h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/go-playground/validator.v9 v9.31.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= 
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -541,6 +661,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/yaml 
v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4 h1:VO9oZbbkvTwqLimlQt15QNdOOBArT2dw/bvzsMZBiqQ= sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67 h1:e1sMhtVq9AfcEy8AXNb8eSg6gbzfdpYhoNqnPJa+GzI= diff --git a/main.go b/main.go index 8ae652f86..103699614 100644 --- a/main.go +++ b/main.go @@ -47,7 +47,6 @@ func main() { cmd.AddFlags(rootCmd) cmd.SetDefaultContext(ctx) rootCmd.AddCommand( - cmd.NewVersionCommand(), cmd.NewValidateCommand(), cmd.NewBackupCommand(), cmd.NewRestoreCommand(), diff --git a/pkg/backup/client.go b/pkg/backup/client.go index 5eb367843..2d7d8b2e7 100644 --- a/pkg/backup/client.go +++ b/pkg/backup/client.go @@ -16,11 +16,14 @@ import ( "github.com/pingcap/log" "github.com/pingcap/parser/model" pd "github.com/pingcap/pd/client" + "github.com/pingcap/tidb-tools/pkg/filter" "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/store/tikv" + "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/ranger" "go.uber.org/zap" @@ -66,44 +69,40 @@ func NewBackupClient(ctx context.Context, mgr ClientMgr) (*Client, error) { } // GetTS returns the latest timestamp. 
-func (bc *Client) GetTS(ctx context.Context, timeAgo string) (uint64, error) { +func (bc *Client) GetTS(ctx context.Context, duration time.Duration) (uint64, error) { p, l, err := bc.mgr.GetPDClient().GetTS(ctx) if err != nil { return 0, errors.Trace(err) } + backupTS := oracle.ComposeTS(p, l) - if timeAgo != "" { - duration, err := time.ParseDuration(timeAgo) - if err != nil { - return 0, errors.Trace(err) - } - t := duration.Nanoseconds() / int64(time.Millisecond) - log.Info("backup time ago", zap.Int64("MillisecondsAgo", t)) - - // check backup time do not exceed GCSafePoint - safePoint, err := GetGCSafePoint(ctx, bc.mgr.GetPDClient()) - if err != nil { - return 0, errors.Trace(err) - } - if p-t < safePoint.Physical { - return 0, errors.New("given backup time exceed GCSafePoint") + switch { + case duration < 0: + return 0, errors.New("negative timeago is not allowed") + case duration > 0: + log.Info("backup time ago", zap.Duration("timeago", duration)) + + backupTime := oracle.GetTimeFromTS(backupTS) + backupAgo := backupTime.Add(-duration) + if backupTS < oracle.ComposeTS(oracle.GetPhysical(backupAgo), l) { + return 0, errors.New("backup ts overflow please choose a smaller timeago") } - p -= t + backupTS = oracle.ComposeTS(oracle.GetPhysical(backupAgo), l) } - ts := utils.Timestamp{ - Physical: p, - Logical: l, + // check backup time do not exceed GCSafePoint + err = CheckGCSafepoint(ctx, bc.mgr.GetPDClient(), backupTS) + if err != nil { + return 0, errors.Trace(err) } - backupTS := utils.EncodeTs(ts) log.Info("backup encode timestamp", zap.Uint64("BackupTS", backupTS)) return backupTS, nil } // SetStorage set ExternalStorage for client -func (bc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend) error { +func (bc *Client) SetStorage(ctx context.Context, backend *backup.StorageBackend, sendCreds bool) error { var err error - bc.storage, err = storage.Create(ctx, backend) + bc.storage, err = storage.Create(ctx, backend, sendCreds) if err 
!= nil { return err } @@ -172,63 +171,27 @@ func appendRanges(tbl *model.TableInfo, tblID int64) ([]kv.KeyRange, error) { func BuildBackupRangeAndSchema( dom *domain.Domain, storage kv.Storage, + tableFilter *filter.Filter, backupTS uint64, - dbName, tableName string, ) ([]Range, *Schemas, error) { - SystemDatabases := [3]string{ - "information_schema", - "performance_schema", - "mysql", - } - info, err := dom.GetSnapshotInfoSchema(backupTS) if err != nil { return nil, nil, errors.Trace(err) } - var dbInfos []*model.DBInfo - var cTableName model.CIStr - switch { - case len(dbName) == 0 && len(tableName) != 0: - return nil, nil, errors.New("no database is not specified") - case len(dbName) != 0 && len(tableName) == 0: - // backup database - cDBName := model.NewCIStr(dbName) - dbInfo, exist := info.SchemaByName(cDBName) - if !exist { - return nil, nil, errors.Errorf("schema %s not found", dbName) - } - dbInfos = append(dbInfos, dbInfo) - case len(dbName) != 0 && len(tableName) != 0: - // backup table - cTableName = model.NewCIStr(tableName) - cDBName := model.NewCIStr(dbName) - dbInfo, exist := info.SchemaByName(cDBName) - if !exist { - return nil, nil, errors.Errorf("schema %s not found", dbName) - } - dbInfos = append(dbInfos, dbInfo) - case len(dbName) == 0 && len(tableName) == 0: - // backup full - dbInfos = info.AllSchemas() - } ranges := make([]Range, 0) backupSchemas := newBackupSchemas() -LoadDb: - for _, dbInfo := range dbInfos { + for _, dbInfo := range info.AllSchemas() { // skip system databases - for _, sysDbName := range SystemDatabases { - if sysDbName == dbInfo.Name.L { - continue LoadDb - } - } - dbData, err := json.Marshal(dbInfo) - if err != nil { - return nil, nil, errors.Trace(err) + if util.IsMemOrSysDB(dbInfo.Name.L) { + continue } - idAlloc := autoid.NewAllocator(storage, dbInfo.ID, false) + + var dbData []byte + idAlloc := autoid.NewAllocator(storage, dbInfo.ID, false, autoid.RowIDAllocType) + for _, tableInfo := range dbInfo.Tables { - if 
len(cTableName.L) != 0 && cTableName.L != tableInfo.Name.L { + if !tableFilter.Match(&filter.Table{Schema: dbInfo.Name.L, Name: tableInfo.Name.L}) { // Skip tables other than the given table. continue } @@ -242,6 +205,12 @@ LoadDb: zap.Stringer("table", tableInfo.Name), zap.Int64("AutoIncID", globalAutoID)) + if dbData == nil { + dbData, err = json.Marshal(dbInfo) + if err != nil { + return nil, nil, errors.Trace(err) + } + } tableData, err := json.Marshal(tableInfo) if err != nil { return nil, nil, errors.Trace(err) @@ -266,11 +235,8 @@ LoadDb: } } - if len(cTableName.L) != 0 { - // Must find the given table. - if backupSchemas.Len() == 0 { - return nil, nil, errors.Errorf("table %s not found", cTableName) - } + if backupSchemas.Len() == 0 { + return nil, nil, errors.New("nothing to backup") } return ranges, backupSchemas, nil } @@ -281,7 +247,7 @@ func (bc *Client) BackupRanges( ranges []Range, lastBackupTS uint64, backupTS uint64, - rate uint64, + rateLimit uint64, concurrency uint32, updateCh chan<- struct{}, ) error { @@ -297,7 +263,7 @@ func (bc *Client) BackupRanges( go func() { for _, r := range ranges { err := bc.backupRange( - ctx, r.StartKey, r.EndKey, lastBackupTS, backupTS, rate, concurrency, updateCh) + ctx, r.StartKey, r.EndKey, lastBackupTS, backupTS, rateLimit, concurrency, updateCh) if err != nil { errCh <- err return @@ -342,7 +308,7 @@ func (bc *Client) backupRange( startKey, endKey []byte, lastBackupTS uint64, backupTS uint64, - rateMBs uint64, + rateLimit uint64, concurrency uint32, updateCh chan<- struct{}, ) (err error) { @@ -357,12 +323,10 @@ func (bc *Client) backupRange( summary.CollectSuccessUnit(key, elapsed) } }() - // The unit of rate limit in protocol is bytes per second. 
- rateLimit := rateMBs * 1024 * 1024 log.Info("backup started", zap.Binary("StartKey", startKey), zap.Binary("EndKey", endKey), - zap.Uint64("RateLimit", rateMBs), + zap.Uint64("RateLimit", rateLimit), zap.Uint32("Concurrency", concurrency)) ctx, cancel := context.WithCancel(ctx) defer cancel() diff --git a/pkg/backup/client_test.go b/pkg/backup/client_test.go index 6971026d5..e3ad8130b 100644 --- a/pkg/backup/client_test.go +++ b/pkg/backup/client_test.go @@ -10,11 +10,11 @@ import ( "github.com/pingcap/parser/model" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/br/pkg/conn" - "github.com/pingcap/br/pkg/utils" ) type testBackup struct { @@ -50,49 +50,45 @@ func (r *testBackup) TestGetTS(c *C) { deviation = 100 ) - // timeago not valid - timeAgo := "invalid" - _, err = r.backupClient.GetTS(r.ctx, timeAgo) - c.Assert(err, ErrorMatches, "time: invalid duration invalid") - // timeago not work - timeAgo = "" expectedDuration := 0 currentTs := time.Now().UnixNano() / int64(time.Millisecond) - ts, err := r.backupClient.GetTS(r.ctx, timeAgo) + ts, err := r.backupClient.GetTS(r.ctx, 0) c.Assert(err, IsNil) - pdTs := utils.DecodeTs(ts).Physical + pdTs := oracle.ExtractPhysical(ts) duration := int(currentTs - pdTs) c.Assert(duration, Greater, expectedDuration-deviation) c.Assert(duration, Less, expectedDuration+deviation) // timeago = "1.5m" - timeAgo = "1.5m" expectedDuration = 90000 currentTs = time.Now().UnixNano() / int64(time.Millisecond) - ts, err = r.backupClient.GetTS(r.ctx, timeAgo) + ts, err = r.backupClient.GetTS(r.ctx, 90*time.Second) c.Assert(err, IsNil) - pdTs = utils.DecodeTs(ts).Physical + pdTs = oracle.ExtractPhysical(ts) duration = int(currentTs - pdTs) c.Assert(duration, Greater, expectedDuration-deviation) c.Assert(duration, Less, expectedDuration+deviation) // timeago = "-1m" - 
timeAgo = "-1m" - expectedDuration = -60000 - currentTs = time.Now().UnixNano() / int64(time.Millisecond) - ts, err = r.backupClient.GetTS(r.ctx, timeAgo) - c.Assert(err, IsNil) - pdTs = utils.DecodeTs(ts).Physical - duration = int(currentTs - pdTs) - c.Assert(duration, Greater, expectedDuration-deviation) - c.Assert(duration, Less, expectedDuration+deviation) + _, err = r.backupClient.GetTS(r.ctx, -time.Minute) + c.Assert(err, ErrorMatches, "negative timeago is not allowed") + + // timeago = "1000000h" overflows + _, err = r.backupClient.GetTS(r.ctx, 1000000*time.Hour) + c.Assert(err, ErrorMatches, "backup ts overflow.*") - // timeago = "1000000h" exceed GCSafePoint - // because GCSafePoint in mockPDClient is 0 - timeAgo = "1000000h" - _, err = r.backupClient.GetTS(r.ctx, timeAgo) - c.Assert(err, ErrorMatches, "given backup time exceed GCSafePoint") + // timeago = "10h" exceed GCSafePoint + p, l, err := r.backupClient.mgr.GetPDClient().GetTS(r.ctx) + c.Assert(err, IsNil) + now := oracle.ComposeTS(p, l) + _, err = r.backupClient.mgr.GetPDClient().UpdateGCSafePoint(r.ctx, now) + c.Assert(err, IsNil) + _, err = r.backupClient.GetTS(r.ctx, 10*time.Hour) + // mocktikv pdClient.UpdateGCSafePoint return 0 forever + // so this error won't happen + // c.Assert(err, ErrorMatches, "GC safepoint [0-9]+ exceed TS [0-9]+") + c.Assert(err, IsNil) } func (r *testBackup) TestBuildTableRange(c *C) { diff --git a/pkg/backup/safe_point.go b/pkg/backup/safe_point.go index bc24a01ba..bb73bc7d9 100644 --- a/pkg/backup/safe_point.go +++ b/pkg/backup/safe_point.go @@ -7,32 +7,29 @@ import ( "github.com/pingcap/log" pd "github.com/pingcap/pd/client" "go.uber.org/zap" - - "github.com/pingcap/br/pkg/utils" ) -// GetGCSafePoint returns the current gc safe point. +// getGCSafePoint returns the current gc safe point. // TODO: Some cluster may not enable distributed GC. 
-func GetGCSafePoint(ctx context.Context, pdClient pd.Client) (utils.Timestamp, error) { +func getGCSafePoint(ctx context.Context, pdClient pd.Client) (uint64, error) { safePoint, err := pdClient.UpdateGCSafePoint(ctx, 0) if err != nil { - return utils.Timestamp{}, err + return 0, err } - return utils.DecodeTs(safePoint), nil + return safePoint, nil } // CheckGCSafepoint checks whether the ts is older than GC safepoint. // Note: It ignores errors other than exceed GC safepoint. func CheckGCSafepoint(ctx context.Context, pdClient pd.Client, ts uint64) error { // TODO: use PDClient.GetGCSafePoint instead once PD client exports it. - safePoint, err := GetGCSafePoint(ctx, pdClient) + safePoint, err := getGCSafePoint(ctx, pdClient) if err != nil { log.Warn("fail to get GC safe point", zap.Error(err)) return nil } - safePointTS := utils.EncodeTs(safePoint) - if ts <= safePointTS { - return errors.Errorf("GC safepoint %d exceed TS %d", safePointTS, ts) + if ts <= safePoint { + return errors.Errorf("GC safepoint %d exceed TS %d", safePoint, ts) } return nil } diff --git a/pkg/backup/schema_test.go b/pkg/backup/schema_test.go index 3d10fd967..f657310bf 100644 --- a/pkg/backup/schema_test.go +++ b/pkg/backup/schema_test.go @@ -5,6 +5,7 @@ import ( "math" . "github.com/pingcap/check" + "github.com/pingcap/tidb-tools/pkg/filter" "github.com/pingcap/tidb/util/testkit" "github.com/pingcap/tidb/util/testleak" @@ -34,28 +35,32 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { tk := testkit.NewTestKit(c, s.mock.Storage) // Table t1 is not exist. + testFilter, err := filter.New(false, &filter.Rules{ + DoTables: []*filter.Table{{Schema: "test", Name: "t1"}}, + }) + c.Assert(err, IsNil) _, backupSchemas, err := BuildBackupRangeAndSchema( - s.mock.Domain, s.mock.Storage, math.MaxUint64, "test", "t1") + s.mock.Domain, s.mock.Storage, testFilter, math.MaxUint64) c.Assert(err, NotNil) c.Assert(backupSchemas, IsNil) // Database is not exist. 
+ fooFilter, err := filter.New(false, &filter.Rules{ + DoTables: []*filter.Table{{Schema: "foo", Name: "t1"}}, + }) + c.Assert(err, IsNil) _, backupSchemas, err = BuildBackupRangeAndSchema( - s.mock.Domain, s.mock.Storage, math.MaxUint64, "foo", "t1") + s.mock.Domain, s.mock.Storage, fooFilter, math.MaxUint64) c.Assert(err, NotNil) c.Assert(backupSchemas, IsNil) // Empty databse. - _, backupSchemas, err = BuildBackupRangeAndSchema( - s.mock.Domain, s.mock.Storage, math.MaxUint64, "", "") - c.Assert(err, IsNil) - c.Assert(backupSchemas, NotNil) - c.Assert(backupSchemas.Len(), Equals, 0) - updateCh := make(chan struct{}, 2) - backupSchemas.Start(context.Background(), s.mock.Storage, math.MaxUint64, 1, updateCh) - schemas, err := backupSchemas.finishTableChecksum() + noFilter, err := filter.New(false, &filter.Rules{}) c.Assert(err, IsNil) - c.Assert(len(schemas), Equals, 0) + _, backupSchemas, err = BuildBackupRangeAndSchema( + s.mock.Domain, s.mock.Storage, noFilter, math.MaxUint64) + c.Assert(err, NotNil) + c.Assert(backupSchemas, IsNil) tk.MustExec("use test") tk.MustExec("drop table if exists t1;") @@ -63,11 +68,12 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { tk.MustExec("insert into t1 values (10);") _, backupSchemas, err = BuildBackupRangeAndSchema( - s.mock.Domain, s.mock.Storage, math.MaxUint64, "test", "t1") + s.mock.Domain, s.mock.Storage, testFilter, math.MaxUint64) c.Assert(err, IsNil) c.Assert(backupSchemas.Len(), Equals, 1) + updateCh := make(chan struct{}, 2) backupSchemas.Start(context.Background(), s.mock.Storage, math.MaxUint64, 1, updateCh) - schemas, err = backupSchemas.finishTableChecksum() + schemas, err := backupSchemas.finishTableChecksum() <-updateCh c.Assert(err, IsNil) c.Assert(len(schemas), Equals, 1) @@ -82,7 +88,7 @@ func (s *testBackupSchemaSuite) TestBuildBackupRangeAndSchema(c *C) { tk.MustExec("insert into t2 values (11);") _, backupSchemas, err = BuildBackupRangeAndSchema( - s.mock.Domain, s.mock.Storage, 
math.MaxUint64, "", "") + s.mock.Domain, s.mock.Storage, noFilter, math.MaxUint64) c.Assert(err, IsNil) c.Assert(backupSchemas.Len(), Equals, 2) backupSchemas.Start(context.Background(), s.mock.Storage, math.MaxUint64, 2, updateCh) diff --git a/pkg/conn/conn.go b/pkg/conn/conn.go index 2736cc3d1..892a56d8c 100644 --- a/pkg/conn/conn.go +++ b/pkg/conn/conn.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" "google.golang.org/grpc" + "google.golang.org/grpc/backoff" "google.golang.org/grpc/keepalive" ) @@ -222,7 +223,15 @@ func (mgr *Mgr) getGrpcConnLocked(ctx context.Context, storeID uint64) (*grpc.Cl ctx, store.GetAddress(), opt, - grpc.WithBackoffMaxDelay(time.Second*3), + grpc.WithConnectParams(grpc.ConnectParams{ + Backoff: backoff.Config{ + BaseDelay: time.Second, // Default was 1s. + Multiplier: 1.6, // Default + Jitter: 0.2, // Default + MaxDelay: 3 * time.Second, // Default was 120s. + }, + MinConnectTimeout: 5 * time.Second, + }), grpc.WithKeepaliveParams(keepalive.ClientParameters{ Time: time.Duration(keepAlive) * time.Second, Timeout: time.Duration(keepAliveTimeout) * time.Second, diff --git a/pkg/restore/backoff.go b/pkg/restore/backoff.go new file mode 100644 index 000000000..dae14e109 --- /dev/null +++ b/pkg/restore/backoff.go @@ -0,0 +1,117 @@ +package restore + +import ( + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/log" + "go.uber.org/zap" + + "github.com/pingcap/br/pkg/utils" +) + +var ( + errNotLeader = errors.NewNoStackError("not leader") + errEpochNotMatch = errors.NewNoStackError("epoch not match") + errKeyNotInRegion = errors.NewNoStackError("key not in region") + errRegionNotFound = errors.NewNoStackError("region not found") + errResp = errors.NewNoStackError("response error") + errRewriteRuleNotFound = errors.NewNoStackError("rewrite rule not found") + errRangeIsEmpty = errors.NewNoStackError("range is empty") + errGrpc = errors.NewNoStackError("gRPC error") + + // TODO: add `error` field to 
`DownloadResponse` for distinguish the errors of gRPC + // and the errors of request + errBadFormat = errors.NewNoStackError("bad format") + errWrongKeyPrefix = errors.NewNoStackError("wrong key prefix") + errFileCorrupted = errors.NewNoStackError("file corrupted") + errCannotRead = errors.NewNoStackError("cannot read externel storage") +) + +const ( + importSSTRetryTimes = 16 + importSSTWaitInterval = 10 * time.Millisecond + importSSTMaxWaitInterval = 1 * time.Second + + downloadSSTRetryTimes = 8 + downloadSSTWaitInterval = 10 * time.Millisecond + downloadSSTMaxWaitInterval = 1 * time.Second + + resetTsRetryTime = 16 + resetTSWaitInterval = 50 * time.Millisecond + resetTSMaxWaitInterval = 500 * time.Millisecond +) + +type importerBackoffer struct { + attempt int + delayTime time.Duration + maxDelayTime time.Duration +} + +func newImportSSTBackoffer() utils.Backoffer { + return &importerBackoffer{ + attempt: importSSTRetryTimes, + delayTime: importSSTWaitInterval, + maxDelayTime: importSSTMaxWaitInterval, + } +} + +func newDownloadSSTBackoffer() utils.Backoffer { + return &importerBackoffer{ + attempt: downloadSSTRetryTimes, + delayTime: downloadSSTWaitInterval, + maxDelayTime: downloadSSTMaxWaitInterval, + } +} + +func (bo *importerBackoffer) NextBackoff(err error) time.Duration { + switch errors.Cause(err) { + case errResp, errGrpc, errEpochNotMatch, errNotLeader: + bo.delayTime = 2 * bo.delayTime + bo.attempt-- + case errRangeIsEmpty, errRewriteRuleNotFound: + // Excepted error, finish the operation + bo.delayTime = 0 + bo.attempt = 0 + default: + // Unexcepted error + bo.delayTime = 0 + bo.attempt = 0 + log.Warn("unexcepted error, stop to retry", zap.Error(err)) + } + if bo.delayTime > bo.maxDelayTime { + return bo.maxDelayTime + } + return bo.delayTime +} + +func (bo *importerBackoffer) Attempt() int { + return bo.attempt +} + +type resetTSBackoffer struct { + attempt int + delayTime time.Duration + maxDelayTime time.Duration +} + +func newResetTSBackoffer() 
utils.Backoffer { + return &resetTSBackoffer{ + attempt: resetTsRetryTime, + delayTime: resetTSWaitInterval, + maxDelayTime: resetTSMaxWaitInterval, + } +} + +func (bo *resetTSBackoffer) NextBackoff(err error) time.Duration { + bo.delayTime = 2 * bo.delayTime + bo.attempt-- + if bo.delayTime > bo.maxDelayTime { + return bo.maxDelayTime + } + return bo.delayTime +} + +func (bo *resetTSBackoffer) Attempt() int { + return bo.attempt +} diff --git a/pkg/restore/backoff_test.go b/pkg/restore/backoff_test.go new file mode 100644 index 000000000..537f0980c --- /dev/null +++ b/pkg/restore/backoff_test.go @@ -0,0 +1,58 @@ +package restore + +import ( + "context" + "time" + + . "github.com/pingcap/check" + "github.com/pingcap/tidb/util/testleak" + + "github.com/pingcap/br/pkg/utils" +) + +var _ = Suite(&testBackofferSuite{}) + +type testBackofferSuite struct { + mock *utils.MockCluster +} + +func (s *testBackofferSuite) SetUpSuite(c *C) { + var err error + s.mock, err = utils.NewMockCluster() + c.Assert(err, IsNil) +} + +func (s *testBackofferSuite) TearDownSuite(c *C) { + testleak.AfterTest(c)() +} + +func (s *testBackofferSuite) TestImporterBackoffer(c *C) { + var counter int + err := utils.WithRetry(context.Background(), func() error { + defer func() { counter++ }() + switch counter { + case 0: + return errGrpc + case 1: + return errResp + case 2: + return errRangeIsEmpty + } + return nil + }, newImportSSTBackoffer()) + c.Assert(counter, Equals, 3) + c.Assert(err, Equals, errRangeIsEmpty) + + counter = 0 + backoffer := importerBackoffer{ + attempt: 10, + delayTime: time.Nanosecond, + maxDelayTime: time.Nanosecond, + } + err = utils.WithRetry(context.Background(), func() error { + defer func() { counter++ }() + return errResp + }, &backoffer) + c.Assert(counter, Equals, 10) + c.Assert(err, Equals, errResp) +} diff --git a/pkg/restore/client.go b/pkg/restore/client.go index 2fbe12d1d..0e414572e 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -2,7 +2,6 @@ 
package restore import ( "context" - "fmt" "math" "sync" "time" @@ -13,11 +12,12 @@ import ( "github.com/pingcap/log" "github.com/pingcap/parser/model" pd "github.com/pingcap/pd/client" - restore_util "github.com/pingcap/tidb-tools/pkg/restore-util" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv/oracle" "go.uber.org/zap" "google.golang.org/grpc" + "google.golang.org/grpc/backoff" "google.golang.org/grpc/keepalive" "github.com/pingcap/br/pkg/checksum" @@ -25,15 +25,9 @@ import ( "github.com/pingcap/br/pkg/utils" ) -const ( - resetTsRetryTime = 16 - resetTSWaitInterval = 50 * time.Millisecond - resetTSMaxWaitInterval = 500 * time.Millisecond - - // defaultChecksumConcurrency is the default number of the concurrent - // checksum tasks. - defaultChecksumConcurrency = 64 -) +// defaultChecksumConcurrency is the default number of the concurrent +// checksum tasks. +const defaultChecksumConcurrency = 64 // Client sends requests to restore files type Client struct { @@ -106,7 +100,7 @@ func (rc *Client) InitBackupMeta(backupMeta *backup.BackupMeta, backend *backup. 
rc.databases = databases rc.backupMeta = backupMeta - metaClient := restore_util.NewClient(rc.pdClient) + metaClient := NewSplitClient(rc.pdClient) importClient := NewImportClient(metaClient) rc.fileImporter = NewFileImporter(rc.ctx, metaClient, importClient, backend, rc.rateLimit) return nil @@ -128,11 +122,7 @@ func (rc *Client) GetTS(ctx context.Context) (uint64, error) { if err != nil { return 0, errors.Trace(err) } - ts := utils.Timestamp{ - Physical: p, - Logical: l, - } - restoreTS := utils.EncodeTs(ts) + restoreTS := oracle.ComposeTS(p, l) return restoreTS, nil } @@ -141,13 +131,10 @@ func (rc *Client) ResetTS(pdAddrs []string) error { restoreTS := rc.backupMeta.GetEndVersion() log.Info("reset pd timestamp", zap.Uint64("ts", restoreTS)) i := 0 - return withRetry(func() error { + return utils.WithRetry(rc.ctx, func() error { idx := i % len(pdAddrs) return utils.ResetTS(pdAddrs[idx], restoreTS) - }, func(e error) bool { - i++ - return true - }, resetTsRetryTime, resetTSWaitInterval, resetTSMaxWaitInterval) + }, newResetTSBackoffer()) } // GetDatabases returns all databases. @@ -191,8 +178,8 @@ func (rc *Client) CreateTables( dom *domain.Domain, tables []*utils.Table, newTS uint64, -) (*restore_util.RewriteRules, []*model.TableInfo, error) { - rewriteRules := &restore_util.RewriteRules{ +) (*RewriteRules, []*model.TableInfo, error) { + rewriteRules := &RewriteRules{ Table: make([]*import_sstpb.RewriteRule, 0), Data: make([]*import_sstpb.RewriteRule, 0), } @@ -231,31 +218,28 @@ func (rc *Client) setSpeedLimit() error { return nil } -// RestoreTable tries to restore the data of a table. -func (rc *Client) RestoreTable( - table *utils.Table, - rewriteRules *restore_util.RewriteRules, +// RestoreFiles tries to restore the files. 
+func (rc *Client) RestoreFiles( + files []*backup.File, + rewriteRules *RewriteRules, updateCh chan<- struct{}, ) (err error) { start := time.Now() defer func() { elapsed := time.Since(start) - log.Info("restore table", - zap.Stringer("table", table.Schema.Name), zap.Duration("take", elapsed)) - key := fmt.Sprintf("%s.%s", table.Db.Name.String(), table.Schema.Name.String()) - if err != nil { - summary.CollectFailureUnit(key, err) + if err == nil { + log.Info("Restore Files", + zap.Int("files", len(files)), zap.Duration("take", elapsed)) + summary.CollectSuccessUnit("files", elapsed) } else { - summary.CollectSuccessUnit(key, elapsed) + summary.CollectFailureUnit("files", err) } }() - log.Debug("start to restore table", - zap.Stringer("table", table.Schema.Name), - zap.Stringer("db", table.Db.Name), - zap.Array("files", files(table.Files)), + log.Debug("start to restore files", + zap.Int("files", len(files)), ) - errCh := make(chan error, len(table.Files)) + errCh := make(chan error, len(files)) wg := new(sync.WaitGroup) defer close(errCh) err = rc.setSpeedLimit() @@ -263,7 +247,7 @@ func (rc *Client) RestoreTable( return err } - for _, file := range table.Files { + for _, file := range files { wg.Add(1) fileReplica := file rc.workerPool.Apply( @@ -277,99 +261,18 @@ func (rc *Client) RestoreTable( } }) } - for range table.Files { + for range files { err := <-errCh if err != nil { rc.cancel() wg.Wait() log.Error( - "restore table failed", - zap.Stringer("table", table.Schema.Name), - zap.Stringer("db", table.Db.Name), + "restore files failed", zap.Error(err), ) return err } } - log.Info( - "finish to restore table", - zap.Stringer("table", table.Schema.Name), - zap.Stringer("db", table.Db.Name), - ) - return nil -} - -// RestoreDatabase tries to restore the data of a database -func (rc *Client) RestoreDatabase( - db *utils.Database, - rewriteRules *restore_util.RewriteRules, - updateCh chan<- struct{}, -) (err error) { - start := time.Now() - defer func() { - 
elapsed := time.Since(start) - log.Info("Restore Database", zap.Stringer("db", db.Schema.Name), zap.Duration("take", elapsed)) - }() - errCh := make(chan error, len(db.Tables)) - wg := new(sync.WaitGroup) - defer close(errCh) - for _, table := range db.Tables { - wg.Add(1) - tblReplica := table - rc.tableWorkerPool.Apply(func() { - defer wg.Done() - select { - case <-rc.ctx.Done(): - errCh <- nil - case errCh <- rc.RestoreTable( - tblReplica, rewriteRules, updateCh): - } - }) - } - for range db.Tables { - err = <-errCh - if err != nil { - wg.Wait() - return err - } - } - return nil -} - -// RestoreAll tries to restore all the data of backup files. -func (rc *Client) RestoreAll( - rewriteRules *restore_util.RewriteRules, - updateCh chan<- struct{}, -) (err error) { - start := time.Now() - defer func() { - elapsed := time.Since(start) - log.Info("Restore All", zap.Duration("take", elapsed)) - }() - errCh := make(chan error, len(rc.databases)) - wg := new(sync.WaitGroup) - defer close(errCh) - for _, db := range rc.databases { - wg.Add(1) - dbReplica := db - rc.tableWorkerPool.Apply(func() { - defer wg.Done() - select { - case <-rc.ctx.Done(): - errCh <- nil - case errCh <- rc.RestoreDatabase( - dbReplica, rewriteRules, updateCh): - } - }) - } - - for range rc.databases { - err = <-errCh - if err != nil { - wg.Wait() - return err - } - } return nil } @@ -397,7 +300,15 @@ func (rc *Client) switchTiKVMode(ctx context.Context, mode import_sstpb.SwitchMo gctx, store.GetAddress(), opt, - grpc.WithBackoffMaxDelay(time.Second*3), + grpc.WithConnectParams(grpc.ConnectParams{ + Backoff: backoff.Config{ + BaseDelay: time.Second, // Default was 1s. + Multiplier: 1.6, // Default + Jitter: 0.2, // Default + MaxDelay: 3 * time.Second, // Default was 120s. 
+ }, + MinConnectTimeout: 5 * time.Second, + }), grpc.WithKeepaliveParams(keepalive.ClientParameters{ Time: time.Duration(keepAlive) * time.Second, Timeout: time.Duration(keepAliveTimeout) * time.Second, diff --git a/pkg/restore/db_test.go b/pkg/restore/db_test.go index 9583f7f8c..98341f510 100644 --- a/pkg/restore/db_test.go +++ b/pkg/restore/db_test.go @@ -64,7 +64,7 @@ func (s *testRestoreSchemaSuite) TestRestoreAutoIncID(c *C) { Db: dbInfo, } // Get the next AutoIncID - idAlloc := autoid.NewAllocator(s.mock.Storage, dbInfo.ID, false) + idAlloc := autoid.NewAllocator(s.mock.Storage, dbInfo.ID, false, autoid.RowIDAllocType) globalAutoID, err := idAlloc.NextGlobalAutoID(table.Schema.ID) c.Assert(err, IsNil, Commentf("Error allocate next auto id")) c.Assert(autoIncID, Equals, uint64(globalAutoID)) diff --git a/pkg/restore/import.go b/pkg/restore/import.go index 2ee7428f6..de35ecaea 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -2,6 +2,7 @@ package restore import ( "context" + "strings" "sync" "time" @@ -11,31 +12,15 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/log" - restore_util "github.com/pingcap/tidb-tools/pkg/restore-util" "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" "google.golang.org/grpc" "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/utils" ) -var ( - errNotLeader = errors.New("not leader") - errEpochNotMatch = errors.New("epoch not match") - errRewriteRuleNotFound = errors.New("rewrite rule not found") - errRangeIsEmpty = errors.New("range is empty") -) - -const ( - importScanResgionTime = 10 * time.Second - importFileRetryTimes = 16 - importFileWaitInterval = 10 * time.Millisecond - importFileMaxWaitInterval = 1 * time.Second - - downloadSSTRetryTimes = 8 - downloadSSTWaitInterval = 10 * time.Millisecond - downloadSSTMaxWaitInterval = 1 * time.Second -) +const importScanRegionTime = 10 * time.Second // ImporterClient is used to import 
a file to TiKV type ImporterClient interface { @@ -60,12 +45,12 @@ type ImporterClient interface { type importClient struct { mu sync.Mutex - metaClient restore_util.Client + metaClient SplitClient clients map[uint64]import_sstpb.ImportSSTClient } // NewImportClient returns a new ImporterClient -func NewImportClient(metaClient restore_util.Client) ImporterClient { +func NewImportClient(metaClient SplitClient) ImporterClient { return &importClient{ metaClient: metaClient, clients: make(map[uint64]import_sstpb.ImportSSTClient), @@ -133,7 +118,7 @@ func (ic *importClient) getImportClient( // FileImporter used to import a file to TiKV. type FileImporter struct { - metaClient restore_util.Client + metaClient SplitClient importClient ImporterClient backend *backup.StorageBackend rateLimit uint64 @@ -145,7 +130,7 @@ type FileImporter struct { // NewFileImporter returns a new file importClient. func NewFileImporter( ctx context.Context, - metaClient restore_util.Client, + metaClient SplitClient, importClient ImporterClient, backend *backup.StorageBackend, rateLimit uint64, @@ -163,7 +148,7 @@ func NewFileImporter( // Import tries to import a file. // All rules must contain encoded keys. 
-func (importer *FileImporter) Import(file *backup.File, rewriteRules *restore_util.RewriteRules) error { +func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRules) error { log.Debug("import file", zap.Stringer("file", file)) // Rewrite the start key and end key of file to scan regions startKey, endKey, err := rewriteFileKeys(file, rewriteRules) @@ -173,76 +158,68 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *restore_ut log.Debug("rewrite file keys", zap.Stringer("file", file), zap.Binary("startKey", startKey), - zap.Binary("endKey", endKey), - ) - err = withRetry(func() error { - ctx, cancel := context.WithTimeout(importer.ctx, importScanResgionTime) + zap.Binary("endKey", endKey)) + err = utils.WithRetry(importer.ctx, func() error { + ctx, cancel := context.WithTimeout(importer.ctx, importScanRegionTime) defer cancel() // Scan regions covered by the file range - regionInfos, err := importer.metaClient.ScanRegions(ctx, startKey, endKey, 0) - if err != nil { - return errors.Trace(err) + regionInfos, err1 := importer.metaClient.ScanRegions(ctx, startKey, endKey, 0) + if err1 != nil { + return errors.Trace(err1) } log.Debug("scan regions", zap.Stringer("file", file), zap.Int("count", len(regionInfos))) // Try to download and ingest the file in every region for _, regionInfo := range regionInfos { - var downloadMeta *import_sstpb.SSTMeta info := regionInfo // Try to download file. 
- err = withRetry(func() error { - var err error - var isEmpty bool - downloadMeta, isEmpty, err = importer.downloadSST(info, file, rewriteRules) - if err != nil { - if err != errRewriteRuleNotFound { - log.Warn("download file failed", - zap.Stringer("file", file), - zap.Stringer("region", info.Region), - zap.Binary("startKey", startKey), - zap.Binary("endKey", endKey), - zap.Error(err), - ) - } - return err - } - if isEmpty { - log.Info( - "file don't have any key in this region, skip it", - zap.Stringer("file", file), - zap.Stringer("region", info.Region), - ) - return errRangeIsEmpty - } - return nil - }, func(e error) bool { - // Scan regions may return some regions which cannot match any rewrite rule, - // like [t{tableID}, t{tableID}_r), those regions should be skipped - return e != errRewriteRuleNotFound && e != errRangeIsEmpty - }, downloadSSTRetryTimes, downloadSSTWaitInterval, downloadSSTMaxWaitInterval) - if err != nil { - if err == errRewriteRuleNotFound || err == errRangeIsEmpty { + var downloadMeta *import_sstpb.SSTMeta + err1 = utils.WithRetry(importer.ctx, func() error { + var e error + downloadMeta, e = importer.downloadSST(info, file, rewriteRules) + return e + }, newDownloadSSTBackoffer()) + if err1 != nil { + if err1 == errRewriteRuleNotFound || err1 == errRangeIsEmpty { // Skip this region continue } - return err + log.Error("download file failed", + zap.Stringer("file", file), + zap.Stringer("region", info.Region), + zap.Binary("startKey", startKey), + zap.Binary("endKey", endKey), + zap.Error(err1)) + return err1 + } + err1 = importer.ingestSST(downloadMeta, info) + // If error is `NotLeader`, update the region info and retry + for errors.Cause(err1) == errNotLeader { + log.Debug("ingest sst returns not leader error, retry it", + zap.Stringer("region", info.Region)) + var newInfo *RegionInfo + newInfo, err1 = importer.metaClient.GetRegion(importer.ctx, info.Region.GetStartKey()) + if err1 != nil { + break + } + if !checkRegionEpoch(newInfo, 
info) { + err1 = errEpochNotMatch + break + } + err1 = importer.ingestSST(downloadMeta, newInfo) } - err = importer.ingestSST(downloadMeta, info) - if err != nil { - log.Warn("ingest file failed", + if err1 != nil { + log.Error("ingest file failed", zap.Stringer("file", file), zap.Stringer("range", downloadMeta.GetRange()), zap.Stringer("region", info.Region), - zap.Error(err), - ) - return err + zap.Error(err1)) + return err1 } summary.CollectSuccessUnit(summary.TotalKV, file.TotalKvs) summary.CollectSuccessUnit(summary.TotalBytes, file.TotalBytes) } return nil - }, func(e error) bool { - return true - }, importFileRetryTimes, importFileWaitInterval, importFileMaxWaitInterval) + }, newImportSSTBackoffer()) return err } @@ -255,36 +232,28 @@ func (importer *FileImporter) setDownloadSpeedLimit(storeID uint64) error { } func (importer *FileImporter) downloadSST( - regionInfo *restore_util.RegionInfo, + regionInfo *RegionInfo, file *backup.File, - rewriteRules *restore_util.RewriteRules, -) (*import_sstpb.SSTMeta, bool, error) { + rewriteRules *RewriteRules, +) (*import_sstpb.SSTMeta, error) { id, err := uuid.New().MarshalBinary() if err != nil { - return nil, true, errors.Trace(err) + return nil, errors.Trace(err) } // Assume one region reflects to one rewrite rule _, key, err := codec.DecodeBytes(regionInfo.Region.GetStartKey(), []byte{}) if err != nil { - return nil, true, err + return nil, err } regionRule := matchNewPrefix(key, rewriteRules) if regionRule == nil { - log.Debug("cannot find rewrite rule, skip region", - zap.Stringer("region", regionInfo.Region), - zap.Array("tableRule", rules(rewriteRules.Table)), - zap.Array("dataRule", rules(rewriteRules.Data)), - zap.Binary("key", key), - ) - return nil, true, errRewriteRuleNotFound + return nil, errors.Trace(errRewriteRuleNotFound) } rule := import_sstpb.RewriteRule{ OldKeyPrefix: encodeKeyPrefix(regionRule.GetOldKeyPrefix()), NewKeyPrefix: encodeKeyPrefix(regionRule.GetNewKeyPrefix()), } sstMeta := 
getSSTMetaFromFile(id, file, regionInfo.Region, &rule) - sstMeta.RegionId = regionInfo.Region.GetId() - sstMeta.RegionEpoch = regionInfo.Region.GetRegionEpoch() req := &import_sstpb.DownloadRequest{ Sst: sstMeta, StorageBackend: importer.backend, @@ -299,20 +268,20 @@ func (importer *FileImporter) downloadSST( for _, peer := range regionInfo.Region.GetPeers() { resp, err = importer.importClient.DownloadSST(importer.ctx, peer.GetStoreId(), req) if err != nil { - return nil, true, err + return nil, extractDownloadSSTError(err) } if resp.GetIsEmpty() { - return &sstMeta, true, nil + return nil, errors.Trace(errRangeIsEmpty) } } sstMeta.Range.Start = truncateTS(resp.Range.GetStart()) sstMeta.Range.End = truncateTS(resp.Range.GetEnd()) - return &sstMeta, false, nil + return &sstMeta, nil } func (importer *FileImporter) ingestSST( sstMeta *import_sstpb.SSTMeta, - regionInfo *restore_util.RegionInfo, + regionInfo *RegionInfo, ) error { leader := regionInfo.Leader if leader == nil { @@ -330,17 +299,45 @@ func (importer *FileImporter) ingestSST( log.Debug("download SST", zap.Stringer("sstMeta", sstMeta)) resp, err := importer.importClient.IngestSST(importer.ctx, leader.GetStoreId(), req) if err != nil { - return err + if strings.Contains(err.Error(), "RegionNotFound") { + return errors.Trace(errRegionNotFound) + } + return errors.Trace(err) } respErr := resp.GetError() if respErr != nil { - if respErr.EpochNotMatch != nil { - return errEpochNotMatch + log.Debug("ingest sst resp error", zap.Stringer("error", respErr)) + if respErr.GetKeyNotInRegion() != nil { + return errors.Trace(errKeyNotInRegion) } - if respErr.NotLeader != nil { - return errNotLeader + if respErr.GetNotLeader() != nil { + return errors.Trace(errNotLeader) } - return errors.Errorf("ingest failed: %v", respErr) + return errors.Wrap(errResp, respErr.String()) } return nil } + +func checkRegionEpoch(new, old *RegionInfo) bool { + if new.Region.GetId() == old.Region.GetId() && + 
new.Region.GetRegionEpoch().GetVersion() == old.Region.GetRegionEpoch().GetVersion() && + new.Region.GetRegionEpoch().GetConfVer() == old.Region.GetRegionEpoch().GetConfVer() { + return true + } + return false +} + +func extractDownloadSSTError(e error) error { + err := errGrpc + switch { + case strings.Contains(e.Error(), "bad format"): + err = errBadFormat + case strings.Contains(e.Error(), "wrong prefix"): + err = errWrongKeyPrefix + case strings.Contains(e.Error(), "corrupted"): + err = errFileCorrupted + case strings.Contains(e.Error(), "Cannot read"): + err = errCannotRead + } + return errors.Trace(err) +} diff --git a/pkg/restore/range.go b/pkg/restore/range.go new file mode 100644 index 000000000..f3914539e --- /dev/null +++ b/pkg/restore/range.go @@ -0,0 +1,148 @@ +package restore + +import ( + "bytes" + "fmt" + + "github.com/google/btree" + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/import_sstpb" + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/log" + "github.com/pingcap/tidb/tablecodec" + "go.uber.org/zap" +) + +// Range represents a range of keys. +type Range struct { + StartKey []byte + EndKey []byte +} + +// String formats a range to a string +func (r *Range) String() string { + return fmt.Sprintf("[%x %x]", r.StartKey, r.EndKey) +} + +// Less compares a range with a btree.Item +func (r *Range) Less(than btree.Item) bool { + t := than.(*Range) + return len(r.EndKey) != 0 && bytes.Compare(r.EndKey, t.StartKey) <= 0 +} + +// contains returns if a key is included in the range. 
+func (r *Range) contains(key []byte) bool { + start, end := r.StartKey, r.EndKey + return bytes.Compare(key, start) >= 0 && + (len(end) == 0 || bytes.Compare(key, end) < 0) +} + +// sortRanges checks if the range overlapped and sort them +func sortRanges(ranges []Range, rewriteRules *RewriteRules) ([]Range, error) { + rangeTree := NewRangeTree() + for _, rg := range ranges { + if rewriteRules != nil { + startID := tablecodec.DecodeTableID(rg.StartKey) + endID := tablecodec.DecodeTableID(rg.EndKey) + var rule *import_sstpb.RewriteRule + if startID == endID { + rg.StartKey, rule = replacePrefix(rg.StartKey, rewriteRules) + if rule == nil { + log.Warn("cannot find rewrite rule", zap.Binary("key", rg.StartKey)) + } else { + log.Debug( + "rewrite start key", + zap.Binary("key", rg.StartKey), + zap.Stringer("rule", rule)) + } + rg.EndKey, rule = replacePrefix(rg.EndKey, rewriteRules) + if rule == nil { + log.Warn("cannot find rewrite rule", zap.Binary("key", rg.EndKey)) + } else { + log.Debug( + "rewrite end key", + zap.Binary("key", rg.EndKey), + zap.Stringer("rule", rule)) + } + } else { + log.Warn("table id does not match", + zap.Binary("startKey", rg.StartKey), + zap.Binary("endKey", rg.EndKey), + zap.Int64("startID", startID), + zap.Int64("endID", endID)) + return nil, errors.New("table id does not match") + } + } + if out := rangeTree.InsertRange(rg); out != nil { + return nil, errors.Errorf("ranges overlapped: %s, %s", out, rg) + } + } + sortedRanges := make([]Range, 0, len(ranges)) + rangeTree.Ascend(func(rg *Range) bool { + if rg == nil { + return false + } + sortedRanges = append(sortedRanges, *rg) + return true + }) + return sortedRanges, nil +} + +// RangeTree stores the ranges in an orderly manner. +// All the ranges it stored do not overlap. +type RangeTree struct { + tree *btree.BTree +} + +// NewRangeTree returns a new RangeTree. 
+func NewRangeTree() *RangeTree { + return &RangeTree{tree: btree.New(32)} +} + +// Find returns nil or a range in the range tree +func (rt *RangeTree) Find(key []byte) *Range { + var ret *Range + r := &Range{ + StartKey: key, + } + rt.tree.DescendLessOrEqual(r, func(i btree.Item) bool { + ret = i.(*Range) + return false + }) + if ret == nil || !ret.contains(key) { + return nil + } + return ret +} + +// InsertRange inserts ranges into the range tree. +// it returns true if all ranges inserted successfully. +// it returns false if there are some overlapped ranges. +func (rt *RangeTree) InsertRange(rg Range) btree.Item { + return rt.tree.ReplaceOrInsert(&rg) +} + +// RangeIterator allows callers of Ascend to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend function will immediately return. +type RangeIterator func(rg *Range) bool + +// Ascend calls the iterator for every value in the tree within [first, last], +// until the iterator returns false. +func (rt *RangeTree) Ascend(iterator RangeIterator) { + rt.tree.Ascend(func(i btree.Item) bool { + return iterator(i.(*Range)) + }) +} + +// RegionInfo includes a region and the leader of the region. +type RegionInfo struct { + Region *metapb.Region + Leader *metapb.Peer +} + +// RewriteRules contains rules for rewriting keys of tables. +type RewriteRules struct { + Table []*import_sstpb.RewriteRule + Data []*import_sstpb.RewriteRule +} diff --git a/pkg/restore/range_test.go b/pkg/restore/range_test.go new file mode 100644 index 000000000..a9edc5b82 --- /dev/null +++ b/pkg/restore/range_test.go @@ -0,0 +1,75 @@ +package restore + +import ( + "bytes" + + . 
"github.com/pingcap/check" + "github.com/pingcap/kvproto/pkg/import_sstpb" + "github.com/pingcap/tidb/tablecodec" +) + +type testRangeSuite struct{} + +var _ = Suite(&testRangeSuite{}) + +type rangeEquals struct { + *CheckerInfo +} + +var RangeEquals Checker = &rangeEquals{ + &CheckerInfo{Name: "RangeEquals", Params: []string{"obtained", "expected"}}, +} + +func (checker *rangeEquals) Check(params []interface{}, names []string) (result bool, error string) { + obtained := params[0].([]Range) + expected := params[1].([]Range) + if len(obtained) != len(expected) { + return false, "" + } + for i := range obtained { + if !bytes.Equal(obtained[i].StartKey, expected[i].StartKey) || + !bytes.Equal(obtained[i].EndKey, expected[i].EndKey) { + return false, "" + } + } + return true, "" +} + +func (s *testRangeSuite) TestSortRange(c *C) { + dataRules := []*import_sstpb.RewriteRule{ + {OldKeyPrefix: tablecodec.GenTableRecordPrefix(1), NewKeyPrefix: tablecodec.GenTableRecordPrefix(4)}, + {OldKeyPrefix: tablecodec.GenTableRecordPrefix(2), NewKeyPrefix: tablecodec.GenTableRecordPrefix(5)}, + } + rewriteRules := &RewriteRules{ + Table: make([]*import_sstpb.RewriteRule, 0), + Data: dataRules, + } + ranges1 := []Range{ + {append(tablecodec.GenTableRecordPrefix(1), []byte("aaa")...), + append(tablecodec.GenTableRecordPrefix(1), []byte("bbb")...)}, + } + rs1, err := sortRanges(ranges1, rewriteRules) + c.Assert(err, IsNil, Commentf("sort range1 failed: %v", err)) + c.Assert(rs1, RangeEquals, []Range{ + {append(tablecodec.GenTableRecordPrefix(4), []byte("aaa")...), + append(tablecodec.GenTableRecordPrefix(4), []byte("bbb")...)}, + }) + + ranges2 := []Range{ + {append(tablecodec.GenTableRecordPrefix(1), []byte("aaa")...), + append(tablecodec.GenTableRecordPrefix(2), []byte("bbb")...)}, + } + _, err = sortRanges(ranges2, rewriteRules) + c.Assert(err, ErrorMatches, ".*table id does not match.*") + + ranges3 := initRanges() + rewriteRules1 := initRewriteRules() + rs3, err := 
sortRanges(ranges3, rewriteRules1) + c.Assert(err, IsNil, Commentf("sort range1 failed: %v", err)) + c.Assert(rs3, RangeEquals, []Range{ + {[]byte("bbd"), []byte("bbf")}, + {[]byte("bbf"), []byte("bbj")}, + {[]byte("xxa"), []byte("xxe")}, + {[]byte("xxe"), []byte("xxz")}, + }) +} diff --git a/pkg/restore/split.go b/pkg/restore/split.go new file mode 100644 index 000000000..3248fdd0d --- /dev/null +++ b/pkg/restore/split.go @@ -0,0 +1,306 @@ +package restore + +import ( + "bytes" + "context" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/import_sstpb" + "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/pingcap/log" + "github.com/pingcap/tidb/util/codec" + "go.uber.org/zap" +) + +// Constants for split retry machinery. +const ( + SplitRetryTimes = 32 + SplitRetryInterval = 50 * time.Millisecond + SplitMaxRetryInterval = time.Second + + SplitCheckMaxRetryTimes = 64 + SplitCheckInterval = 8 * time.Millisecond + SplitMaxCheckInterval = time.Second + + ScatterWaitMaxRetryTimes = 64 + ScatterWaitInterval = 50 * time.Millisecond + ScatterMaxWaitInterval = time.Second + + ScatterWaitUpperInterval = 180 * time.Second +) + +// RegionSplitter is a executor of region split by rules. +type RegionSplitter struct { + client SplitClient +} + +// NewRegionSplitter returns a new RegionSplitter. +func NewRegionSplitter(client SplitClient) *RegionSplitter { + return &RegionSplitter{ + client: client, + } +} + +// OnSplitFunc is called before split a range. +type OnSplitFunc func(key [][]byte) + +// Split executes a region split. It will split regions by the rewrite rules, +// then it will split regions by the end key of each range. +// tableRules includes the prefix of a table, since some ranges may have +// a prefix with record sequence or index sequence. +// note: all ranges and rewrite rules must have raw key. 
+func (rs *RegionSplitter) Split( + ctx context.Context, + ranges []Range, + rewriteRules *RewriteRules, + onSplit OnSplitFunc, +) error { + if len(ranges) == 0 { + return nil + } + startTime := time.Now() + // Sort the range for getting the min and max key of the ranges + sortedRanges, err := sortRanges(ranges, rewriteRules) + if err != nil { + return errors.Trace(err) + } + minKey := codec.EncodeBytes([]byte{}, sortedRanges[0].StartKey) + maxKey := codec.EncodeBytes([]byte{}, sortedRanges[len(sortedRanges)-1].EndKey) + for _, rule := range rewriteRules.Table { + if bytes.Compare(minKey, rule.GetNewKeyPrefix()) > 0 { + minKey = rule.GetNewKeyPrefix() + } + if bytes.Compare(maxKey, rule.GetNewKeyPrefix()) < 0 { + maxKey = rule.GetNewKeyPrefix() + } + } + for _, rule := range rewriteRules.Data { + if bytes.Compare(minKey, rule.GetNewKeyPrefix()) > 0 { + minKey = rule.GetNewKeyPrefix() + } + if bytes.Compare(maxKey, rule.GetNewKeyPrefix()) < 0 { + maxKey = rule.GetNewKeyPrefix() + } + } + interval := SplitRetryInterval + scatterRegions := make([]*RegionInfo, 0) +SplitRegions: + for i := 0; i < SplitRetryTimes; i++ { + var regions []*RegionInfo + regions, err = rs.client.ScanRegions(ctx, minKey, maxKey, 0) + if err != nil { + return errors.Trace(err) + } + if len(regions) == 0 { + log.Warn("cannot scan any region") + return nil + } + splitKeyMap := getSplitKeys(rewriteRules, sortedRanges, regions) + regionMap := make(map[uint64]*RegionInfo) + for _, region := range regions { + regionMap[region.Region.GetId()] = region + } + for regionID, keys := range splitKeyMap { + var newRegions []*RegionInfo + newRegions, err = rs.splitAndScatterRegions(ctx, regionMap[regionID], keys) + if err != nil { + interval = 2 * interval + if interval > SplitMaxRetryInterval { + interval = SplitMaxRetryInterval + } + time.Sleep(interval) + if i > 3 { + log.Warn("splitting regions failed, retry it", zap.Error(err), zap.ByteStrings("keys", keys)) + } + continue SplitRegions + } + 
scatterRegions = append(scatterRegions, newRegions...) + onSplit(keys) + } + break + } + if err != nil { + return errors.Trace(err) + } + log.Info("splitting regions done, wait for scattering regions", + zap.Int("regions", len(scatterRegions)), zap.Duration("take", time.Since(startTime))) + startTime = time.Now() + scatterCount := 0 + for _, region := range scatterRegions { + rs.waitForScatterRegion(ctx, region) + if time.Since(startTime) > ScatterWaitUpperInterval { + break + } + scatterCount++ + } + if scatterCount == len(scatterRegions) { + log.Info("waiting for scattering regions done", + zap.Int("regions", len(scatterRegions)), zap.Duration("take", time.Since(startTime))) + } else { + log.Warn("waiting for scattering regions timeout", + zap.Int("scatterCount", scatterCount), + zap.Int("regions", len(scatterRegions)), + zap.Duration("take", time.Since(startTime))) + } + return nil +} + +func (rs *RegionSplitter) hasRegion(ctx context.Context, regionID uint64) (bool, error) { + regionInfo, err := rs.client.GetRegionByID(ctx, regionID) + if err != nil { + return false, err + } + return regionInfo != nil, nil +} + +func (rs *RegionSplitter) isScatterRegionFinished(ctx context.Context, regionID uint64) (bool, error) { + resp, err := rs.client.GetOperator(ctx, regionID) + if err != nil { + return false, err + } + // Heartbeat may not be sent to PD + if respErr := resp.GetHeader().GetError(); respErr != nil { + if respErr.GetType() == pdpb.ErrorType_REGION_NOT_FOUND { + return true, nil + } + return false, errors.Errorf("get operator error: %s", respErr.GetType()) + } + retryTimes := ctx.Value(retryTimes).(int) + if retryTimes > 3 { + log.Warn("get operator", zap.Uint64("regionID", regionID), zap.Stringer("resp", resp)) + } + // If the current operator of the region is not 'scatter-region', we could assume + // that 'scatter-operator' has finished or timeout + ok := string(resp.GetDesc()) != "scatter-region" || resp.GetStatus() != pdpb.OperatorStatus_RUNNING + return 
ok, nil +} + +func (rs *RegionSplitter) waitForSplit(ctx context.Context, regionID uint64) { + interval := SplitCheckInterval + for i := 0; i < SplitCheckMaxRetryTimes; i++ { + ok, err := rs.hasRegion(ctx, regionID) + if err != nil { + log.Warn("wait for split failed", zap.Error(err)) + return + } + if ok { + break + } + interval = 2 * interval + if interval > SplitMaxCheckInterval { + interval = SplitMaxCheckInterval + } + time.Sleep(interval) + } +} + +type retryTimeKey struct{} + +var retryTimes = new(retryTimeKey) + +func (rs *RegionSplitter) waitForScatterRegion(ctx context.Context, regionInfo *RegionInfo) { + interval := ScatterWaitInterval + regionID := regionInfo.Region.GetId() + for i := 0; i < ScatterWaitMaxRetryTimes; i++ { + ctx1 := context.WithValue(ctx, retryTimes, i) + ok, err := rs.isScatterRegionFinished(ctx1, regionID) + if err != nil { + log.Warn("scatter region failed: do not have the region", + zap.Stringer("region", regionInfo.Region)) + return + } + if ok { + break + } + interval = 2 * interval + if interval > ScatterMaxWaitInterval { + interval = ScatterMaxWaitInterval + } + time.Sleep(interval) + } +} + +func (rs *RegionSplitter) splitAndScatterRegions( + ctx context.Context, regionInfo *RegionInfo, keys [][]byte, +) ([]*RegionInfo, error) { + newRegions, err := rs.client.BatchSplitRegions(ctx, regionInfo, keys) + if err != nil { + return nil, err + } + for _, region := range newRegions { + // Wait for a while until the regions successfully splits. 
+ rs.waitForSplit(ctx, region.Region.Id) + if err = rs.client.ScatterRegion(ctx, region); err != nil { + log.Warn("scatter region failed", zap.Stringer("region", region.Region), zap.Error(err)) + } + } + return newRegions, nil +} + +// getSplitKeys checks if the regions should be split by the new prefix of the rewrites rule and the end key of +// the ranges, groups the split keys by region id +func getSplitKeys(rewriteRules *RewriteRules, ranges []Range, regions []*RegionInfo) map[uint64][][]byte { + splitKeyMap := make(map[uint64][][]byte) + checkKeys := make([][]byte, 0) + for _, rule := range rewriteRules.Table { + checkKeys = append(checkKeys, rule.GetNewKeyPrefix()) + } + for _, rule := range rewriteRules.Data { + checkKeys = append(checkKeys, rule.GetNewKeyPrefix()) + } + for _, rg := range ranges { + checkKeys = append(checkKeys, rg.EndKey) + } + for _, key := range checkKeys { + if region := needSplit(key, regions); region != nil { + splitKeys, ok := splitKeyMap[region.Region.GetId()] + if !ok { + splitKeys = make([][]byte, 0, 1) + } + splitKeyMap[region.Region.GetId()] = append(splitKeys, key) + log.Debug("get key for split region", zap.Binary("key", key), zap.Stringer("region", region.Region)) + } + } + return splitKeyMap +} + +// needSplit checks whether a key is necessary to split, if true returns the split region +func needSplit(splitKey []byte, regions []*RegionInfo) *RegionInfo { + // If splitKey is the max key. 
+ if len(splitKey) == 0 { + return nil + } + splitKey = codec.EncodeBytes([]byte{}, splitKey) + for _, region := range regions { + // If splitKey is the boundary of the region + if bytes.Equal(splitKey, region.Region.GetStartKey()) { + return nil + } + // If splitKey is in a region + if bytes.Compare(splitKey, region.Region.GetStartKey()) > 0 && beforeEnd(splitKey, region.Region.GetEndKey()) { + return region + } + } + return nil +} + +func beforeEnd(key []byte, end []byte) bool { + return bytes.Compare(key, end) < 0 || len(end) == 0 +} + +func replacePrefix(s []byte, rewriteRules *RewriteRules) ([]byte, *import_sstpb.RewriteRule) { + // We should search the dataRules firstly. + for _, rule := range rewriteRules.Data { + if bytes.HasPrefix(s, rule.GetOldKeyPrefix()) { + return append(append([]byte{}, rule.GetNewKeyPrefix()...), s[len(rule.GetOldKeyPrefix()):]...), rule + } + } + for _, rule := range rewriteRules.Table { + if bytes.HasPrefix(s, rule.GetOldKeyPrefix()) { + return append(append([]byte{}, rule.GetNewKeyPrefix()...), s[len(rule.GetOldKeyPrefix()):]...), rule + } + } + + return s, nil +} diff --git a/pkg/restore/split_client.go b/pkg/restore/split_client.go new file mode 100644 index 000000000..8a618a191 --- /dev/null +++ b/pkg/restore/split_client.go @@ -0,0 +1,353 @@ +package restore + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "path" + "strconv" + "strings" + "sync" + + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/pingcap/kvproto/pkg/tikvpb" + pd "github.com/pingcap/pd/client" + "github.com/pingcap/pd/server/schedule/placement" + "google.golang.org/grpc" +) + +// SplitClient is an external client used by RegionSplitter. +type SplitClient interface { + // GetStore gets a store by a store id. 
+ GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) + // GetRegion gets a region which includes a specified key. + GetRegion(ctx context.Context, key []byte) (*RegionInfo, error) + // GetRegionByID gets a region by a region id. + GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error) + // SplitRegion splits a region from a key, if key is not included in the region, it will return nil. + // note: the key should not be encoded + SplitRegion(ctx context.Context, regionInfo *RegionInfo, key []byte) (*RegionInfo, error) + // BatchSplitRegions splits a region from a batch of keys. + // note: the keys should not be encoded + BatchSplitRegions(ctx context.Context, regionInfo *RegionInfo, keys [][]byte) ([]*RegionInfo, error) + // ScatterRegion scatters a specified region. + ScatterRegion(ctx context.Context, regionInfo *RegionInfo) error + // GetOperator gets the status of operator of the specified region. + GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) + // ScanRegion gets a list of regions, starts from the region that contains key. + // Limit limits the maximum number of regions returned. + ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error) + // GetPlacementRule loads a placement rule from PD. + GetPlacementRule(ctx context.Context, groupID, ruleID string) (placement.Rule, error) + // SetPlacementRule insert or update a placement rule to PD. + SetPlacementRule(ctx context.Context, rule placement.Rule) error + // DeletePlacementRule removes a placement rule from PD. + DeletePlacementRule(ctx context.Context, groupID, ruleID string) error + // SetStoreLabel add or update specified label of stores. If labelValue + // is empty, it clears the label. + SetStoresLabel(ctx context.Context, stores []uint64, labelKey, labelValue string) error +} + +// pdClient is a wrapper of pd client, can be used by RegionSplitter. 
+type pdClient struct { + mu sync.Mutex + client pd.Client + storeCache map[uint64]*metapb.Store +} + +// NewSplitClient returns a client used by RegionSplitter. +func NewSplitClient(client pd.Client) SplitClient { + return &pdClient{ + client: client, + storeCache: make(map[uint64]*metapb.Store), + } +} + +func (c *pdClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { + c.mu.Lock() + defer c.mu.Unlock() + store, ok := c.storeCache[storeID] + if ok { + return store, nil + } + store, err := c.client.GetStore(ctx, storeID) + if err != nil { + return nil, err + } + c.storeCache[storeID] = store + return store, nil + +} + +func (c *pdClient) GetRegion(ctx context.Context, key []byte) (*RegionInfo, error) { + region, leader, err := c.client.GetRegion(ctx, key) + if err != nil { + return nil, err + } + if region == nil { + return nil, nil + } + return &RegionInfo{ + Region: region, + Leader: leader, + }, nil +} + +func (c *pdClient) GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error) { + region, leader, err := c.client.GetRegionByID(ctx, regionID) + if err != nil { + return nil, err + } + if region == nil { + return nil, nil + } + return &RegionInfo{ + Region: region, + Leader: leader, + }, nil +} + +func (c *pdClient) SplitRegion(ctx context.Context, regionInfo *RegionInfo, key []byte) (*RegionInfo, error) { + var peer *metapb.Peer + if regionInfo.Leader != nil { + peer = regionInfo.Leader + } else { + if len(regionInfo.Region.Peers) == 0 { + return nil, errors.New("region does not have peer") + } + peer = regionInfo.Region.Peers[0] + } + storeID := peer.GetStoreId() + store, err := c.GetStore(ctx, storeID) + if err != nil { + return nil, err + } + conn, err := grpc.Dial(store.GetAddress(), grpc.WithInsecure()) + if err != nil { + return nil, err + } + defer conn.Close() + + client := tikvpb.NewTikvClient(conn) + resp, err := client.SplitRegion(ctx, &kvrpcpb.SplitRegionRequest{ + Context: &kvrpcpb.Context{ + RegionId: 
regionInfo.Region.Id, + RegionEpoch: regionInfo.Region.RegionEpoch, + Peer: peer, + }, + SplitKey: key, + }) + if err != nil { + return nil, err + } + if resp.RegionError != nil { + return nil, errors.Errorf("split region failed: region=%v, key=%x, err=%v", regionInfo.Region, key, resp.RegionError) + } + + // BUG: Left is deprecated, it may be nil even if split is succeed! + // Assume the new region is the left one. + newRegion := resp.GetLeft() + if newRegion == nil { + regions := resp.GetRegions() + for _, r := range regions { + if bytes.Equal(r.GetStartKey(), regionInfo.Region.GetStartKey()) { + newRegion = r + break + } + } + } + if newRegion == nil { + return nil, errors.New("split region failed: new region is nil") + } + var leader *metapb.Peer + // Assume the leaders will be at the same store. + if regionInfo.Leader != nil { + for _, p := range newRegion.GetPeers() { + if p.GetStoreId() == regionInfo.Leader.GetStoreId() { + leader = p + break + } + } + } + return &RegionInfo{ + Region: newRegion, + Leader: leader, + }, nil +} + +func (c *pdClient) BatchSplitRegions( + ctx context.Context, regionInfo *RegionInfo, keys [][]byte, +) ([]*RegionInfo, error) { + var peer *metapb.Peer + if regionInfo.Leader != nil { + peer = regionInfo.Leader + } else { + if len(regionInfo.Region.Peers) == 0 { + return nil, errors.New("region does not have peer") + } + peer = regionInfo.Region.Peers[0] + } + + storeID := peer.GetStoreId() + store, err := c.GetStore(ctx, storeID) + if err != nil { + return nil, err + } + conn, err := grpc.Dial(store.GetAddress(), grpc.WithInsecure()) + if err != nil { + return nil, err + } + defer conn.Close() + client := tikvpb.NewTikvClient(conn) + resp, err := client.SplitRegion(ctx, &kvrpcpb.SplitRegionRequest{ + Context: &kvrpcpb.Context{ + RegionId: regionInfo.Region.Id, + RegionEpoch: regionInfo.Region.RegionEpoch, + Peer: peer, + }, + SplitKeys: keys, + }) + if err != nil { + return nil, err + } + if resp.RegionError != nil { + return nil, 
errors.Errorf("split region failed: region=%v, err=%v", regionInfo.Region, resp.RegionError) + } + + regions := resp.GetRegions() + newRegionInfos := make([]*RegionInfo, 0, len(regions)) + for _, region := range regions { + // Skip the original region + if region.GetId() == regionInfo.Region.GetId() { + continue + } + var leader *metapb.Peer + // Assume the leaders will be at the same store. + if regionInfo.Leader != nil { + for _, p := range region.GetPeers() { + if p.GetStoreId() == regionInfo.Leader.GetStoreId() { + leader = p + break + } + } + } + newRegionInfos = append(newRegionInfos, &RegionInfo{ + Region: region, + Leader: leader, + }) + } + return newRegionInfos, nil +} + +func (c *pdClient) ScatterRegion(ctx context.Context, regionInfo *RegionInfo) error { + return c.client.ScatterRegion(ctx, regionInfo.Region.GetId()) +} + +func (c *pdClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) { + return c.client.GetOperator(ctx, regionID) +} + +func (c *pdClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error) { + regions, leaders, err := c.client.ScanRegions(ctx, key, endKey, limit) + if err != nil { + return nil, err + } + regionInfos := make([]*RegionInfo, 0, len(regions)) + for i := range regions { + regionInfos = append(regionInfos, &RegionInfo{ + Region: regions[i], + Leader: leaders[i], + }) + } + return regionInfos, nil +} + +func (c *pdClient) GetPlacementRule(ctx context.Context, groupID, ruleID string) (placement.Rule, error) { + var rule placement.Rule + addr := c.getPDAPIAddr() + if addr == "" { + return rule, errors.New("failed to add stores labels: no leader") + } + req, _ := http.NewRequestWithContext(ctx, "GET", addr+path.Join("/pd/api/v1/config/rule", groupID, ruleID), nil) + res, err := http.DefaultClient.Do(req) + if err != nil { + return rule, errors.WithStack(err) + } + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return rule, errors.WithStack(err) + 
} + res.Body.Close() + err = json.Unmarshal(b, &rule) + if err != nil { + return rule, errors.WithStack(err) + } + return rule, nil +} + +func (c *pdClient) SetPlacementRule(ctx context.Context, rule placement.Rule) error { + addr := c.getPDAPIAddr() + if addr == "" { + return errors.New("failed to add stores labels: no leader") + } + m, _ := json.Marshal(rule) + req, _ := http.NewRequestWithContext(ctx, "POST", addr+path.Join("/pd/api/v1/config/rule"), bytes.NewReader(m)) + res, err := http.DefaultClient.Do(req) + if err != nil { + return errors.WithStack(err) + } + return errors.Trace(res.Body.Close()) +} + +func (c *pdClient) DeletePlacementRule(ctx context.Context, groupID, ruleID string) error { + addr := c.getPDAPIAddr() + if addr == "" { + return errors.New("failed to add stores labels: no leader") + } + req, _ := http.NewRequestWithContext(ctx, "DELETE", addr+path.Join("/pd/api/v1/config/rule", groupID, ruleID), nil) + res, err := http.DefaultClient.Do(req) + if err != nil { + return errors.WithStack(err) + } + return errors.Trace(res.Body.Close()) +} + +func (c *pdClient) SetStoresLabel( + ctx context.Context, stores []uint64, labelKey, labelValue string, +) error { + b := []byte(fmt.Sprintf(`{"%s": "%s"}`, labelKey, labelValue)) + addr := c.getPDAPIAddr() + if addr == "" { + return errors.New("failed to add stores labels: no leader") + } + for _, id := range stores { + req, _ := http.NewRequestWithContext( + ctx, "POST", + addr+path.Join("/pd/api/v1/store", strconv.FormatUint(id, 10), "label"), + bytes.NewReader(b), + ) + res, err := http.DefaultClient.Do(req) + if err != nil { + return errors.WithStack(err) + } + err = res.Body.Close() + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +func (c *pdClient) getPDAPIAddr() string { + addr := c.client.GetLeaderAddr() + if addr != "" && !strings.HasPrefix(addr, "http") { + addr = "http://" + addr + } + return strings.TrimRight(addr, "/") +} diff --git a/pkg/restore/split_test.go 
b/pkg/restore/split_test.go new file mode 100644 index 000000000..509c4cfa0 --- /dev/null +++ b/pkg/restore/split_test.go @@ -0,0 +1,301 @@ +package restore + +import ( + "bytes" + "context" + "sync" + + . "github.com/pingcap/check" + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/import_sstpb" + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/pingcap/pd/server/schedule/placement" + "github.com/pingcap/tidb/util/codec" +) + +type testClient struct { + mu sync.RWMutex + stores map[uint64]*metapb.Store + regions map[uint64]*RegionInfo + nextRegionID uint64 +} + +func newTestClient(stores map[uint64]*metapb.Store, regions map[uint64]*RegionInfo, nextRegionID uint64) *testClient { + return &testClient{ + stores: stores, + regions: regions, + nextRegionID: nextRegionID, + } +} + +func (c *testClient) GetAllRegions() map[uint64]*RegionInfo { + c.mu.RLock() + defer c.mu.RUnlock() + return c.regions +} + +func (c *testClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { + c.mu.RLock() + defer c.mu.RUnlock() + store, ok := c.stores[storeID] + if !ok { + return nil, errors.Errorf("store not found") + } + return store, nil +} + +func (c *testClient) GetRegion(ctx context.Context, key []byte) (*RegionInfo, error) { + c.mu.RLock() + defer c.mu.RUnlock() + for _, region := range c.regions { + if bytes.Compare(key, region.Region.StartKey) >= 0 && + (len(region.Region.EndKey) == 0 || bytes.Compare(key, region.Region.EndKey) < 0) { + return region, nil + } + } + return nil, errors.Errorf("region not found: key=%s", string(key)) +} + +func (c *testClient) GetRegionByID(ctx context.Context, regionID uint64) (*RegionInfo, error) { + c.mu.RLock() + defer c.mu.RUnlock() + region, ok := c.regions[regionID] + if !ok { + return nil, errors.Errorf("region not found: id=%d", regionID) + } + return region, nil +} + +func (c *testClient) SplitRegion(ctx context.Context, regionInfo *RegionInfo, key []byte) 
(*RegionInfo, error) { + c.mu.Lock() + defer c.mu.Unlock() + var target *RegionInfo + splitKey := codec.EncodeBytes([]byte{}, key) + for _, region := range c.regions { + if bytes.Compare(splitKey, region.Region.StartKey) >= 0 && + (len(region.Region.EndKey) == 0 || bytes.Compare(splitKey, region.Region.EndKey) < 0) { + target = region + } + } + if target == nil { + return nil, errors.Errorf("region not found: key=%s", string(key)) + } + newRegion := &RegionInfo{ + Region: &metapb.Region{ + Peers: target.Region.Peers, + Id: c.nextRegionID, + StartKey: target.Region.StartKey, + EndKey: splitKey, + }, + } + c.regions[c.nextRegionID] = newRegion + c.nextRegionID++ + target.Region.StartKey = splitKey + c.regions[target.Region.Id] = target + return newRegion, nil +} + +func (c *testClient) BatchSplitRegions( + ctx context.Context, regionInfo *RegionInfo, keys [][]byte, +) ([]*RegionInfo, error) { + c.mu.Lock() + defer c.mu.Unlock() + newRegions := make([]*RegionInfo, 0) + for _, key := range keys { + var target *RegionInfo + splitKey := codec.EncodeBytes([]byte{}, key) + for _, region := range c.regions { + if bytes.Compare(splitKey, region.Region.GetStartKey()) > 0 && + beforeEnd(splitKey, region.Region.GetEndKey()) { + target = region + } + } + if target == nil { + continue + } + newRegion := &RegionInfo{ + Region: &metapb.Region{ + Peers: target.Region.Peers, + Id: c.nextRegionID, + StartKey: target.Region.StartKey, + EndKey: splitKey, + }, + } + c.regions[c.nextRegionID] = newRegion + c.nextRegionID++ + target.Region.StartKey = splitKey + c.regions[target.Region.Id] = target + newRegions = append(newRegions, newRegion) + } + return newRegions, nil +} + +func (c *testClient) ScatterRegion(ctx context.Context, regionInfo *RegionInfo) error { + return nil +} + +func (c *testClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) { + return &pdpb.GetOperatorResponse{ + Header: new(pdpb.ResponseHeader), + }, nil +} + +func (c 
*testClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*RegionInfo, error) { + regions := make([]*RegionInfo, 0) + for _, region := range c.regions { + if limit > 0 && len(regions) >= limit { + break + } + if (len(region.Region.GetEndKey()) != 0 && bytes.Compare(region.Region.GetEndKey(), key) <= 0) || + bytes.Compare(region.Region.GetStartKey(), endKey) > 0 { + continue + } + regions = append(regions, region) + } + return regions, nil +} + +func (c *testClient) GetPlacementRule(ctx context.Context, groupID, ruleID string) (r placement.Rule, err error) { + return +} + +func (c *testClient) SetPlacementRule(ctx context.Context, rule placement.Rule) error { + return nil +} + +func (c *testClient) DeletePlacementRule(ctx context.Context, groupID, ruleID string) error { + return nil +} + +func (c *testClient) SetStoresLabel(ctx context.Context, stores []uint64, labelKey, labelValue string) error { + return nil +} + +// region: [, aay), [aay, bba), [bba, bbh), [bbh, cca), [cca, ) +// range: [aaa, aae), [aae, aaz), [ccd, ccf), [ccf, ccj) +// rewrite rules: aa -> xx, cc -> bb +// expected regions after split: +// [, aay), [aay, bb), [bb, bba), [bba, bbf), [bbf, bbh), [bbh, bbj), +// [bbj, cca), [cca, xx), [xx, xxe), [xxe, xxz), [xxz, ) +func (s *testRestoreUtilSuite) TestSplit(c *C) { + client := initTestClient() + ranges := initRanges() + rewriteRules := initRewriteRules() + regionSplitter := NewRegionSplitter(client) + + ctx := context.Background() + err := regionSplitter.Split(ctx, ranges, rewriteRules, func(key [][]byte) {}) + if err != nil { + c.Assert(err, IsNil, Commentf("split regions failed: %v", err)) + } + regions := client.GetAllRegions() + if !validateRegions(regions) { + for _, region := range regions { + c.Logf("region: %v\n", region.Region) + } + c.Log("get wrong result") + c.Fail() + } +} + +// region: [, aay), [aay, bba), [bba, bbh), [bbh, cca), [cca, ) +func initTestClient() *testClient { + peers := make([]*metapb.Peer, 1) + 
peers[0] = &metapb.Peer{ + Id: 1, + StoreId: 1, + } + keys := [6]string{"", "aay", "bba", "bbh", "cca", ""} + regions := make(map[uint64]*RegionInfo) + for i := uint64(1); i < 6; i++ { + startKey := []byte(keys[i-1]) + if len(startKey) != 0 { + startKey = codec.EncodeBytes([]byte{}, startKey) + } + endKey := []byte(keys[i]) + if len(endKey) != 0 { + endKey = codec.EncodeBytes([]byte{}, endKey) + } + regions[i] = &RegionInfo{ + Region: &metapb.Region{ + Id: i, + Peers: peers, + StartKey: startKey, + EndKey: endKey, + }, + } + } + stores := make(map[uint64]*metapb.Store) + stores[1] = &metapb.Store{ + Id: 1, + } + return newTestClient(stores, regions, 6) +} + +// range: [aaa, aae), [aae, aaz), [ccd, ccf), [ccf, ccj) +func initRanges() []Range { + var ranges [4]Range + ranges[0] = Range{ + StartKey: []byte("aaa"), + EndKey: []byte("aae"), + } + ranges[1] = Range{ + StartKey: []byte("aae"), + EndKey: []byte("aaz"), + } + ranges[2] = Range{ + StartKey: []byte("ccd"), + EndKey: []byte("ccf"), + } + ranges[3] = Range{ + StartKey: []byte("ccf"), + EndKey: []byte("ccj"), + } + return ranges[:] +} + +func initRewriteRules() *RewriteRules { + var rules [2]*import_sstpb.RewriteRule + rules[0] = &import_sstpb.RewriteRule{ + OldKeyPrefix: []byte("aa"), + NewKeyPrefix: []byte("xx"), + } + rules[1] = &import_sstpb.RewriteRule{ + OldKeyPrefix: []byte("cc"), + NewKeyPrefix: []byte("bb"), + } + return &RewriteRules{ + Table: rules[:], + Data: rules[:], + } +} + +// expected regions after split: +// [, aay), [aay, bb), [bb, bba), [bba, bbf), [bbf, bbh), [bbh, bbj), +// [bbj, cca), [cca, xx), [xx, xxe), [xxe, xxz), [xxz, ) +func validateRegions(regions map[uint64]*RegionInfo) bool { + keys := [12]string{"", "aay", "bb", "bba", "bbf", "bbh", "bbj", "cca", "xx", "xxe", "xxz", ""} + if len(regions) != 11 { + return false + } +FindRegion: + for i := 1; i < 12; i++ { + for _, region := range regions { + startKey := []byte(keys[i-1]) + if len(startKey) != 0 { + startKey = 
codec.EncodeBytes([]byte{}, startKey) + } + endKey := []byte(keys[i]) + if len(endKey) != 0 { + endKey = codec.EncodeBytes([]byte{}, endKey) + } + if bytes.Equal(region.Region.GetStartKey(), startKey) && + bytes.Equal(region.Region.GetEndKey(), endKey) { + continue FindRegion + } + } + return false + } + return true +} diff --git a/pkg/restore/util.go b/pkg/restore/util.go index 126e864fd..63ee92969 100644 --- a/pkg/restore/util.go +++ b/pkg/restore/util.go @@ -13,35 +13,16 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/log" "github.com/pingcap/parser/model" - restore_util "github.com/pingcap/tidb-tools/pkg/restore-util" + "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" "go.uber.org/zap" - "go.uber.org/zap/zapcore" "github.com/pingcap/br/pkg/summary" ) var recordPrefixSep = []byte("_r") -type files []*backup.File - -func (fs files) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range fs { - arr.AppendString(fs[i].String()) - } - return nil -} - -type rules []*import_sstpb.RewriteRule - -func (rs rules) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range rs { - arr.AppendString(rs[i].String()) - } - return nil -} - // idAllocator always returns a specified ID type idAllocator struct { id int64 @@ -51,7 +32,7 @@ func newIDAllocator(id int64) *idAllocator { return &idAllocator{id: id} } -func (alloc *idAllocator) Alloc(tableID int64, n uint64) (min int64, max int64, err error) { +func (alloc *idAllocator) Alloc(tableID int64, n uint64, increment, offset int64) (min int64, max int64, err error) { return alloc.id, alloc.id, nil } @@ -71,12 +52,16 @@ func (alloc *idAllocator) NextGlobalAutoID(tableID int64) (int64, error) { return alloc.id, nil } +func (alloc *idAllocator) GetType() autoid.AllocatorType { + return autoid.RowIDAllocType +} + // GetRewriteRules returns the rewrite rule of the new table and the old table. 
func GetRewriteRules( newTable *model.TableInfo, oldTable *model.TableInfo, newTimeStamp uint64, -) *restore_util.RewriteRules { +) *RewriteRules { tableIDs := make(map[int64]int64) tableIDs[oldTable.ID] = newTable.ID if oldTable.Partition != nil { @@ -119,7 +104,7 @@ func GetRewriteRules( } } - return &restore_util.RewriteRules{ + return &RewriteRules{ Table: tableRules, Data: dataRules, } @@ -159,46 +144,17 @@ func getSSTMetaFromFile( Start: rangeStart, End: rangeEnd, }, + RegionId: region.GetId(), + RegionEpoch: region.GetRegionEpoch(), } } -type retryableFunc func() error -type continueFunc func(error) bool - -func withRetry( - retryableFunc retryableFunc, - continueFunc continueFunc, - attempts uint, - delayTime time.Duration, - maxDelayTime time.Duration, -) error { - var lastErr error - for i := uint(0); i < attempts; i++ { - err := retryableFunc() - if err != nil { - lastErr = err - // If this is the last attempt, do not wait - if !continueFunc(err) || i == attempts-1 { - break - } - delayTime = 2 * delayTime - if delayTime > maxDelayTime { - delayTime = maxDelayTime - } - time.Sleep(delayTime) - } else { - return nil - } - } - return lastErr -} - // ValidateFileRanges checks and returns the ranges of the files. 
func ValidateFileRanges( files []*backup.File, - rewriteRules *restore_util.RewriteRules, -) ([]restore_util.Range, error) { - ranges := make([]restore_util.Range, 0, len(files)) + rewriteRules *RewriteRules, +) ([]Range, error) { + ranges := make([]Range, 0, len(files)) fileAppended := make(map[string]bool) for _, file := range files { @@ -217,7 +173,7 @@ func ValidateFileRanges( zap.Stringer("file", file)) return nil, errors.New("table ids dont match") } - ranges = append(ranges, restore_util.Range{ + ranges = append(ranges, Range{ StartKey: file.GetStartKey(), EndKey: file.GetEndKey(), }) @@ -228,7 +184,7 @@ func ValidateFileRanges( } // ValidateFileRewriteRule uses rewrite rules to validate the ranges of a file -func ValidateFileRewriteRule(file *backup.File, rewriteRules *restore_util.RewriteRules) error { +func ValidateFileRewriteRule(file *backup.File, rewriteRules *RewriteRules) error { // Check if the start key has a matched rewrite key _, startRule := rewriteRawKey(file.GetStartKey(), rewriteRules) if rewriteRules != nil && startRule == nil { @@ -269,7 +225,7 @@ func ValidateFileRewriteRule(file *backup.File, rewriteRules *restore_util.Rewri } // Rewrites a raw key and returns a encoded key -func rewriteRawKey(key []byte, rewriteRules *restore_util.RewriteRules) ([]byte, *import_sstpb.RewriteRule) { +func rewriteRawKey(key []byte, rewriteRules *RewriteRules) ([]byte, *import_sstpb.RewriteRule) { if rewriteRules == nil { return codec.EncodeBytes([]byte{}, key), nil } @@ -281,7 +237,7 @@ func rewriteRawKey(key []byte, rewriteRules *restore_util.RewriteRules) ([]byte, return nil, nil } -func matchOldPrefix(key []byte, rewriteRules *restore_util.RewriteRules) *import_sstpb.RewriteRule { +func matchOldPrefix(key []byte, rewriteRules *RewriteRules) *import_sstpb.RewriteRule { for _, rule := range rewriteRules.Data { if bytes.HasPrefix(key, rule.GetOldKeyPrefix()) { return rule @@ -295,7 +251,7 @@ func matchOldPrefix(key []byte, rewriteRules 
*restore_util.RewriteRules) *import return nil } -func matchNewPrefix(key []byte, rewriteRules *restore_util.RewriteRules) *import_sstpb.RewriteRule { +func matchNewPrefix(key []byte, rewriteRules *RewriteRules) *import_sstpb.RewriteRule { for _, rule := range rewriteRules.Data { if bytes.HasPrefix(key, rule.GetNewKeyPrefix()) { return rule @@ -319,8 +275,8 @@ func truncateTS(key []byte) []byte { func SplitRanges( ctx context.Context, client *Client, - ranges []restore_util.Range, - rewriteRules *restore_util.RewriteRules, + ranges []Range, + rewriteRules *RewriteRules, updateCh chan<- struct{}, ) error { start := time.Now() @@ -328,7 +284,7 @@ func SplitRanges( elapsed := time.Since(start) summary.CollectDuration("split region", elapsed) }() - splitter := restore_util.NewRegionSplitter(restore_util.NewClient(client.GetPDClient())) + splitter := NewRegionSplitter(NewSplitClient(client.GetPDClient())) return splitter.Split(ctx, ranges, rewriteRules, func(keys [][]byte) { for range keys { updateCh <- struct{}{} @@ -336,7 +292,7 @@ func SplitRanges( }) } -func rewriteFileKeys(file *backup.File, rewriteRules *restore_util.RewriteRules) (startKey, endKey []byte, err error) { +func rewriteFileKeys(file *backup.File, rewriteRules *RewriteRules) (startKey, endKey []byte, err error) { startID := tablecodec.DecodeTableID(file.GetStartKey()) endID := tablecodec.DecodeTableID(file.GetEndKey()) var rule *import_sstpb.RewriteRule diff --git a/pkg/restore/util_test.go b/pkg/restore/util_test.go index 5da5c9ab7..bc4da9168 100644 --- a/pkg/restore/util_test.go +++ b/pkg/restore/util_test.go @@ -5,7 +5,6 @@ import ( "github.com/pingcap/kvproto/pkg/backup" "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" - restore_util "github.com/pingcap/tidb-tools/pkg/restore-util" "github.com/pingcap/tidb/tablecodec" ) @@ -34,7 +33,7 @@ func (s *testRestoreUtilSuite) TestGetSSTMetaFromFile(c *C) { } func (s *testRestoreUtilSuite) TestValidateFileRanges(c *C) { 
- rules := &restore_util.RewriteRules{ + rules := &RewriteRules{ Table: []*import_sstpb.RewriteRule{&import_sstpb.RewriteRule{ OldKeyPrefix: []byte(tablecodec.EncodeTablePrefix(1)), NewKeyPrefix: []byte(tablecodec.EncodeTablePrefix(2)), diff --git a/pkg/storage/flags.go b/pkg/storage/flags.go index 51fd98af1..2340467ba 100644 --- a/pkg/storage/flags.go +++ b/pkg/storage/flags.go @@ -1,55 +1,19 @@ package storage import ( - "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/backup" "github.com/spf13/pflag" ) -const ( - // flagSendCredentialOption specify whether to send credentials to tikv - flagSendCredentialOption = "send-credentials-to-tikv" -) - -var ( - sendCredential bool -) - // DefineFlags adds flags to the flag set corresponding to all backend options. func DefineFlags(flags *pflag.FlagSet) { - flags.BoolP(flagSendCredentialOption, "c", true, - "Whether send credentials to tikv") defineS3Flags(flags) defineGCSFlags(flags) } -// GetBackendOptionsFromFlags obtains the backend options from the flag set. -func GetBackendOptionsFromFlags(flags *pflag.FlagSet) (options BackendOptions, err error) { - sendCredential, err = flags.GetBool(flagSendCredentialOption) - if err != nil { - err = errors.Trace(err) - return - } - - if options.S3, err = getBackendOptionsFromS3Flags(flags); err != nil { - return - } - if options.GCS, err = getBackendOptionsFromGCSFlags(flags); err != nil { - return - } - return -} - -// ParseBackendFromFlags is a convenient function to consecutively call -// GetBackendOptionsFromFlags and ParseBackend. -func ParseBackendFromFlags(flags *pflag.FlagSet, storageFlag string) (*backup.StorageBackend, error) { - u, err := flags.GetString(storageFlag) - if err != nil { - return nil, errors.Trace(err) - } - opts, err := GetBackendOptionsFromFlags(flags) - if err != nil { - return nil, err +// ParseFromFlags obtains the backend options from the flag set. 
+func (options *BackendOptions) ParseFromFlags(flags *pflag.FlagSet) error { + if err := options.S3.parseFromFlags(flags); err != nil { + return err } - return ParseBackend(u, &opts) + return options.GCS.parseFromFlags(flags) } diff --git a/pkg/storage/gcs.go b/pkg/storage/gcs.go index a0df5b03e..2eb310c3a 100644 --- a/pkg/storage/gcs.go +++ b/pkg/storage/gcs.go @@ -70,31 +70,28 @@ https://console.cloud.google.com/apis/credentials.`) _ = flags.MarkHidden(gcsCredentialsFile) } -func getBackendOptionsFromGCSFlags(flags *pflag.FlagSet) (options GCSBackendOptions, err error) { +func (options *GCSBackendOptions) parseFromFlags(flags *pflag.FlagSet) error { + var err error options.Endpoint, err = flags.GetString(gcsEndpointOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.StorageClass, err = flags.GetString(gcsStorageClassOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.PredefinedACL, err = flags.GetString(gcsPredefinedACL) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.CredentialsFile, err = flags.GetString(gcsCredentialsFile) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } - return + return nil } type gcsStorage struct { @@ -142,11 +139,16 @@ func (s *gcsStorage) FileExists(ctx context.Context, name string) (bool, error) return true, nil } -func newGCSStorage(ctx context.Context, gcs *backup.GCS) (*gcsStorage, error) { - return newGCSStorageWithHTTPClient(ctx, gcs, nil) +func newGCSStorage(ctx context.Context, gcs *backup.GCS, sendCredential bool) (*gcsStorage, error) { + return newGCSStorageWithHTTPClient(ctx, gcs, nil, sendCredential) } -func newGCSStorageWithHTTPClient(ctx context.Context, gcs *backup.GCS, hclient *http.Client) (*gcsStorage, error) { +func newGCSStorageWithHTTPClient( // revive:disable-line:flag-parameter + ctx context.Context, + gcs *backup.GCS, + hclient *http.Client, + sendCredential 
bool, +) (*gcsStorage, error) { var clientOps []option.ClientOption if gcs.CredentialsBlob == "" { creds, err := google.FindDefaultCredentials(ctx, storage.ScopeReadWrite) diff --git a/pkg/storage/gcs_test.go b/pkg/storage/gcs_test.go index da990cfe7..10bb44371 100644 --- a/pkg/storage/gcs_test.go +++ b/pkg/storage/gcs_test.go @@ -28,7 +28,7 @@ func (r *testStorageSuite) TestGCS(c *C) { PredefinedAcl: "private", CredentialsBlob: "Fake Credentials", } - stg, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + stg, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), false) c.Assert(err, IsNil) err = stg.Write(ctx, "key", []byte("data")) @@ -66,7 +66,6 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { server.CreateBucket(bucketName) { - sendCredential = true gcs := &backup.GCS{ Bucket: bucketName, Prefix: "a/b/", @@ -74,13 +73,12 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { PredefinedAcl: "private", CredentialsBlob: "FakeCredentials", } - _, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + _, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), true) c.Assert(err, IsNil) c.Assert(gcs.CredentialsBlob, Equals, "FakeCredentials") } { - sendCredential = false gcs := &backup.GCS{ Bucket: bucketName, Prefix: "a/b/", @@ -88,7 +86,7 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { PredefinedAcl: "private", CredentialsBlob: "FakeCredentials", } - _, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + _, err := newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), false) c.Assert(err, IsNil) c.Assert(gcs.CredentialsBlob, Equals, "") } @@ -106,7 +104,6 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { defer os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS") c.Assert(err, IsNil) - sendCredential = true gcs := &backup.GCS{ Bucket: bucketName, Prefix: "a/b/", @@ -114,7 +111,7 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { PredefinedAcl: "private", CredentialsBlob: 
"", } - _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), true) c.Assert(err, IsNil) c.Assert(gcs.CredentialsBlob, Equals, `{"type": "service_account"}`) } @@ -132,7 +129,6 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { defer os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS") c.Assert(err, IsNil) - sendCredential = false gcs := &backup.GCS{ Bucket: bucketName, Prefix: "a/b/", @@ -140,13 +136,12 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { PredefinedAcl: "private", CredentialsBlob: "", } - _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), false) c.Assert(err, IsNil) c.Assert(gcs.CredentialsBlob, Equals, "") } { - sendCredential = true os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS") gcs := &backup.GCS{ Bucket: bucketName, @@ -155,7 +150,7 @@ func (r *testStorageSuite) TestNewGCSStorage(c *C) { PredefinedAcl: "private", CredentialsBlob: "", } - _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient()) + _, err = newGCSStorageWithHTTPClient(ctx, gcs, server.HTTPClient(), true) c.Assert(err, NotNil) } } diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go index 5db54556c..8e04769b5 100644 --- a/pkg/storage/s3.go +++ b/pkg/storage/s3.go @@ -117,44 +117,41 @@ func defineS3Flags(flags *pflag.FlagSet) { _ = flags.MarkHidden(s3ProviderOption) } -func getBackendOptionsFromS3Flags(flags *pflag.FlagSet) (options S3BackendOptions, err error) { +func (options *S3BackendOptions) parseFromFlags(flags *pflag.FlagSet) error { + var err error options.Endpoint, err = flags.GetString(s3EndpointOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.Region, err = flags.GetString(s3RegionOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.SSE, err = flags.GetString(s3SSEOption) if err != nil { - err = 
errors.Trace(err) - return + return errors.Trace(err) } options.ACL, err = flags.GetString(s3ACLOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.StorageClass, err = flags.GetString(s3StorageClassOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } options.ForcePathStyle = true options.Provider, err = flags.GetString(s3ProviderOption) if err != nil { - err = errors.Trace(err) - return + return errors.Trace(err) } - - return options, err + return nil } // newS3Storage initialize a new s3 storage for metadata -func newS3Storage(backend *backup.S3) (*S3Storage, error) { +func newS3Storage( // revive:disable-line:flag-parameter + backend *backup.S3, + sendCredential bool, +) (*S3Storage, error) { qs := *backend awsConfig := aws.NewConfig(). WithMaxRetries(maxRetries). diff --git a/pkg/storage/s3_test.go b/pkg/storage/s3_test.go index 92a5a8737..3eaf1c206 100644 --- a/pkg/storage/s3_test.go +++ b/pkg/storage/s3_test.go @@ -236,7 +236,7 @@ func (r *testStorageSuite) TestS3Storage(c *C) { testFn := func(test *testcase, c *C) { c.Log(test.name) ctx := aws.BackgroundContext() - sendCredential = test.sendCredential + sendCredential := test.sendCredential if test.hackCheck { checkS3Bucket = func(svc *s3.S3, bucket string) error { return nil } } @@ -245,7 +245,7 @@ func (r *testStorageSuite) TestS3Storage(c *C) { S3: test.s3, }, } - _, err := Create(ctx, s3) + _, err := Create(ctx, s3, sendCredential) if test.errReturn { c.Assert(err, NotNil) return diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 173638bdd..f9ae368ae 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -18,7 +18,7 @@ type ExternalStorage interface { } // Create creates ExternalStorage -func Create(ctx context.Context, backend *backup.StorageBackend) (ExternalStorage, error) { +func Create(ctx context.Context, backend *backup.StorageBackend, sendCreds bool) (ExternalStorage, error) { switch backend 
:= backend.Backend.(type) { case *backup.StorageBackend_Local: return newLocalStorage(backend.Local.Path) @@ -26,14 +26,14 @@ func Create(ctx context.Context, backend *backup.StorageBackend) (ExternalStorag if backend.S3 == nil { return nil, errors.New("s3 config not found") } - return newS3Storage(backend.S3) + return newS3Storage(backend.S3, sendCreds) case *backup.StorageBackend_Noop: return newNoopStorage(), nil case *backup.StorageBackend_Gcs: if backend.Gcs == nil { return nil, errors.New("GCS config not found") } - return newGCSStorage(ctx, backend.Gcs) + return newGCSStorage(ctx, backend.Gcs, sendCreds) default: return nil, errors.Errorf("storage %T is not supported yet", backend) } diff --git a/pkg/task/backup.go b/pkg/task/backup.go new file mode 100644 index 000000000..b9613cd56 --- /dev/null +++ b/pkg/task/backup.go @@ -0,0 +1,157 @@ +package task + +import ( + "context" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/log" + "github.com/pingcap/tidb-tools/pkg/filter" + "github.com/spf13/pflag" + + "github.com/pingcap/br/pkg/backup" + "github.com/pingcap/br/pkg/storage" + "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/utils" +) + +const ( + flagBackupTimeago = "timeago" + flagLastBackupTS = "lastbackupts" +) + +// BackupConfig is the configuration specific for backup tasks. +type BackupConfig struct { + Config + + TimeAgo time.Duration `json:"time-ago" toml:"time-ago"` + LastBackupTS uint64 `json:"last-backup-ts" toml:"last-backup-ts"` +} + +// DefineBackupFlags defines common flags for the backup command. +func DefineBackupFlags(flags *pflag.FlagSet) { + flags.Duration( + flagBackupTimeago, 0, + "The history version of the backup task, e.g. 1m, 1h. Do not exceed GCSafePoint") + + flags.Uint64(flagLastBackupTS, 0, "the last time backup ts") + _ = flags.MarkHidden(flagLastBackupTS) +} + +// ParseFromFlags parses the backup-related flags from the flag set. 
+func (cfg *BackupConfig) ParseFromFlags(flags *pflag.FlagSet) error { + timeAgo, err := flags.GetDuration(flagBackupTimeago) + if err != nil { + return errors.Trace(err) + } + if timeAgo < 0 { + return errors.New("negative timeago is not allowed") + } + cfg.TimeAgo = timeAgo + cfg.LastBackupTS, err = flags.GetUint64(flagLastBackupTS) + if err != nil { + return errors.Trace(err) + } + if err = cfg.Config.ParseFromFlags(flags); err != nil { + return errors.Trace(err) + } + return nil +} + +// RunBackup starts a backup task inside the current goroutine. +func RunBackup(c context.Context, cmdName string, cfg *BackupConfig) error { + ctx, cancel := context.WithCancel(c) + defer cancel() + + u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions) + if err != nil { + return err + } + tableFilter, err := filter.New(cfg.CaseSensitive, &cfg.Filter) + if err != nil { + return err + } + mgr, err := newMgr(ctx, cfg.PD) + if err != nil { + return err + } + defer mgr.Close() + + client, err := backup.NewBackupClient(ctx, mgr) + if err != nil { + return err + } + if err = client.SetStorage(ctx, u, cfg.SendCreds); err != nil { + return err + } + + backupTS, err := client.GetTS(ctx, cfg.TimeAgo) + if err != nil { + return err + } + + defer summary.Summary(cmdName) + + ranges, backupSchemas, err := backup.BuildBackupRangeAndSchema( + mgr.GetDomain(), mgr.GetTiKV(), tableFilter, backupTS) + if err != nil { + return err + } + + // The number of regions need to backup + approximateRegions := 0 + for _, r := range ranges { + var regionCount int + regionCount, err = mgr.GetRegionCount(ctx, r.StartKey, r.EndKey) + if err != nil { + return err + } + approximateRegions += regionCount + } + + summary.CollectInt("backup total regions", approximateRegions) + + // Backup + // Redirect to log if there is no log file to avoid unreadable output. 
+ updateCh := utils.StartProgress( + ctx, cmdName, int64(approximateRegions), !cfg.LogProgress) + err = client.BackupRanges( + ctx, ranges, cfg.LastBackupTS, backupTS, cfg.RateLimit, cfg.Concurrency, updateCh) + if err != nil { + return err + } + // Backup has finished + close(updateCh) + + // Checksum + backupSchemasConcurrency := backup.DefaultSchemaConcurrency + if backupSchemas.Len() < backupSchemasConcurrency { + backupSchemasConcurrency = backupSchemas.Len() + } + updateCh = utils.StartProgress( + ctx, "Checksum", int64(backupSchemas.Len()), !cfg.LogProgress) + backupSchemas.SetSkipChecksum(!cfg.Checksum) + backupSchemas.Start( + ctx, mgr.GetTiKV(), backupTS, uint(backupSchemasConcurrency), updateCh) + + err = client.CompleteMeta(backupSchemas) + if err != nil { + return err + } + + valid, err := client.FastChecksum() + if err != nil { + return err + } + if !valid { + log.Error("backup FastChecksum mismatch!") + } + // Checksum has finished + close(updateCh) + + err = client.SaveBackupMeta(ctx) + if err != nil { + return err + } + return nil +} diff --git a/pkg/task/common.go b/pkg/task/common.go new file mode 100644 index 000000000..2433d94b9 --- /dev/null +++ b/pkg/task/common.go @@ -0,0 +1,236 @@ +package task + +import ( + "context" + "fmt" + "regexp" + "strings" + + "github.com/gogo/protobuf/proto" + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/backup" + "github.com/pingcap/tidb-tools/pkg/filter" + "github.com/pingcap/tidb/store/tikv" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/storage" + "github.com/pingcap/br/pkg/utils" +) + +const ( + // flagSendCreds specify whether to send credentials to tikv + flagSendCreds = "send-credentials-to-tikv" + // flagStorage is the name of storage flag. + flagStorage = "storage" + // flagPD is the name of PD url flag. + flagPD = "pd" + // flagCA is the name of TLS CA flag. 
+ flagCA = "ca" + // flagCert is the name of TLS cert flag. + flagCert = "cert" + // flagKey is the name of TLS key flag. + flagKey = "key" + + flagDatabase = "db" + flagTable = "table" + + flagRateLimit = "ratelimit" + flagRateLimitUnit = "ratelimit-unit" + flagConcurrency = "concurrency" + flagChecksum = "checksum" +) + +// TLSConfig is the common configuration for TLS connection. +type TLSConfig struct { + CA string `json:"ca" toml:"ca"` + Cert string `json:"cert" toml:"cert"` + Key string `json:"key" toml:"key"` +} + +// Config is the common configuration for all BRIE tasks. +type Config struct { + storage.BackendOptions + + Storage string `json:"storage" toml:"storage"` + PD []string `json:"pd" toml:"pd"` + TLS TLSConfig `json:"tls" toml:"tls"` + RateLimit uint64 `json:"rate-limit" toml:"rate-limit"` + Concurrency uint32 `json:"concurrency" toml:"concurrency"` + Checksum bool `json:"checksum" toml:"checksum"` + SendCreds bool `json:"send-credentials-to-tikv" toml:"send-credentials-to-tikv"` + // LogProgress is true means the progress bar is printed to the log instead of stdout. + LogProgress bool `json:"log-progress" toml:"log-progress"` + + CaseSensitive bool `json:"case-sensitive" toml:"case-sensitive"` + Filter filter.Rules `json:"black-white-list" toml:"black-white-list"` +} + +// DefineCommonFlags defines the flags common to all BRIE commands. 
+func DefineCommonFlags(flags *pflag.FlagSet) { + flags.BoolP(flagSendCreds, "c", true, "Whether send credentials to tikv") + flags.StringP(flagStorage, "s", "", `specify the url where backup storage, eg, "local:///path/to/save"`) + flags.StringSliceP(flagPD, "u", []string{"127.0.0.1:2379"}, "PD address") + flags.String(flagCA, "", "CA certificate path for TLS connection") + flags.String(flagCert, "", "Certificate path for TLS connection") + flags.String(flagKey, "", "Private key path for TLS connection") + + flags.Uint64(flagRateLimit, 0, "The rate limit of the task, MB/s per node") + flags.Uint32(flagConcurrency, 4, "The size of thread pool on each node that executes the task") + flags.Bool(flagChecksum, true, "Run checksum at end of task") + + flags.Uint64(flagRateLimitUnit, utils.MB, "The unit of rate limit") + _ = flags.MarkHidden(flagRateLimitUnit) + + storage.DefineFlags(flags) +} + +// DefineDatabaseFlags defines the required --db flag. +func DefineDatabaseFlags(command *cobra.Command) { + command.Flags().String(flagDatabase, "", "database name") + _ = command.MarkFlagRequired(flagDatabase) +} + +// DefineTableFlags defines the required --db and --table flags. +func DefineTableFlags(command *cobra.Command) { + DefineDatabaseFlags(command) + command.Flags().StringP(flagTable, "t", "", "table name") + _ = command.MarkFlagRequired(flagTable) +} + +// ParseFromFlags parses the TLS config from the flag set. +func (tls *TLSConfig) ParseFromFlags(flags *pflag.FlagSet) error { + var err error + tls.CA, err = flags.GetString(flagCA) + if err != nil { + return errors.Trace(err) + } + tls.Cert, err = flags.GetString(flagCert) + if err != nil { + return errors.Trace(err) + } + tls.Key, err = flags.GetString(flagKey) + if err != nil { + return errors.Trace(err) + } + return nil +} + +// ParseFromFlags parses the config from the flag set. 
+func (cfg *Config) ParseFromFlags(flags *pflag.FlagSet) error { + var err error + cfg.Storage, err = flags.GetString(flagStorage) + if err != nil { + return errors.Trace(err) + } + cfg.SendCreds, err = flags.GetBool(flagSendCreds) + if err != nil { + return errors.Trace(err) + } + cfg.PD, err = flags.GetStringSlice(flagPD) + if err != nil { + return errors.Trace(err) + } + if len(cfg.PD) == 0 { + return errors.New("must provide at least one PD server address") + } + cfg.Concurrency, err = flags.GetUint32(flagConcurrency) + if err != nil { + return errors.Trace(err) + } + cfg.Checksum, err = flags.GetBool(flagChecksum) + if err != nil { + return errors.Trace(err) + } + + var rateLimit, rateLimitUnit uint64 + rateLimit, err = flags.GetUint64(flagRateLimit) + if err != nil { + return errors.Trace(err) + } + rateLimitUnit, err = flags.GetUint64(flagRateLimitUnit) + if err != nil { + return errors.Trace(err) + } + cfg.RateLimit = rateLimit * rateLimitUnit + + if dbFlag := flags.Lookup(flagDatabase); dbFlag != nil { + db := escapeFilterName(dbFlag.Value.String()) + if len(db) == 0 { + return errors.New("empty database name is not allowed") + } + if tblFlag := flags.Lookup(flagTable); tblFlag != nil { + tbl := escapeFilterName(tblFlag.Value.String()) + if len(tbl) == 0 { + return errors.New("empty table name is not allowed") + } + cfg.Filter.DoTables = []*filter.Table{{Schema: db, Name: tbl}} + } else { + cfg.Filter.DoDBs = []string{db} + } + } + + if err := cfg.BackendOptions.ParseFromFlags(flags); err != nil { + return err + } + return cfg.TLS.ParseFromFlags(flags) +} + +// newMgr creates a new mgr at the given PD address. +func newMgr(ctx context.Context, pds []string) (*conn.Mgr, error) { + pdAddress := strings.Join(pds, ",") + if len(pdAddress) == 0 { + return nil, errors.New("pd address can not be empty") + } + + // Disable GC because TiDB enables GC already. 
+ store, err := tikv.Driver{}.Open(fmt.Sprintf("tikv://%s?disableGC=true", pdAddress)) + if err != nil { + return nil, err + } + return conn.NewMgr(ctx, pdAddress, store.(tikv.Storage)) +} + +// GetStorage gets the storage backend from the config. +func GetStorage( + ctx context.Context, + cfg *Config, +) (*backup.StorageBackend, storage.ExternalStorage, error) { + u, err := storage.ParseBackend(cfg.Storage, &cfg.BackendOptions) + if err != nil { + return nil, nil, err + } + s, err := storage.Create(ctx, u, cfg.SendCreds) + if err != nil { + return nil, nil, errors.Annotate(err, "create storage failed") + } + return u, s, nil +} + +// ReadBackupMeta reads the backupmeta file from the storage. +func ReadBackupMeta( + ctx context.Context, + cfg *Config, +) (*backup.StorageBackend, storage.ExternalStorage, *backup.BackupMeta, error) { + u, s, err := GetStorage(ctx, cfg) + if err != nil { + return nil, nil, nil, err + } + metaData, err := s.Read(ctx, utils.MetaFile) + if err != nil { + return nil, nil, nil, errors.Annotate(err, "load backupmeta failed") + } + backupMeta := &backup.BackupMeta{} + if err = proto.Unmarshal(metaData, backupMeta); err != nil { + return nil, nil, nil, errors.Annotate(err, "parse backupmeta failed") + } + return u, s, backupMeta, nil +} + +func escapeFilterName(name string) string { + if !strings.HasPrefix(name, "~") { + return name + } + return "~^" + regexp.QuoteMeta(name) + "$" +} diff --git a/pkg/task/restore.go b/pkg/task/restore.go new file mode 100644 index 000000000..a56a1d6da --- /dev/null +++ b/pkg/task/restore.go @@ -0,0 +1,254 @@ +package task + +import ( + "context" + + "github.com/pingcap/errors" + "github.com/pingcap/kvproto/pkg/backup" + "github.com/pingcap/log" + "github.com/pingcap/tidb-tools/pkg/filter" + "github.com/spf13/pflag" + "go.uber.org/zap" + + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/restore" + "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/utils" +) + +const ( + flagOnline = 
"online" +) + +var schedulers = map[string]struct{}{ + "balance-leader-scheduler": {}, + "balance-hot-region-scheduler": {}, + "balance-region-scheduler": {}, + + "shuffle-leader-scheduler": {}, + "shuffle-region-scheduler": {}, + "shuffle-hot-region-scheduler": {}, +} + +// RestoreConfig is the configuration specific for restore tasks. +type RestoreConfig struct { + Config + + Online bool `json:"online" toml:"online"` +} + +// DefineRestoreFlags defines common flags for the restore command. +func DefineRestoreFlags(flags *pflag.FlagSet) { + flags.Bool("online", false, "Whether online when restore") + // TODO remove hidden flag if it's stable + _ = flags.MarkHidden("online") +} + +// ParseFromFlags parses the restore-related flags from the flag set. +func (cfg *RestoreConfig) ParseFromFlags(flags *pflag.FlagSet) error { + var err error + cfg.Online, err = flags.GetBool(flagOnline) + if err != nil { + return errors.Trace(err) + } + return cfg.Config.ParseFromFlags(flags) +} + +// RunRestore starts a restore task inside the current goroutine. 
+func RunRestore(c context.Context, cmdName string, cfg *RestoreConfig) error { + ctx, cancel := context.WithCancel(c) + defer cancel() + + mgr, err := newMgr(ctx, cfg.PD) + if err != nil { + return err + } + defer mgr.Close() + + client, err := restore.NewRestoreClient(ctx, mgr.GetPDClient(), mgr.GetTiKV()) + if err != nil { + return err + } + defer client.Close() + + client.SetRateLimit(cfg.RateLimit) + client.SetConcurrency(uint(cfg.Concurrency)) + if cfg.Online { + client.EnableOnline() + } + + defer summary.Summary(cmdName) + + u, _, backupMeta, err := ReadBackupMeta(ctx, &cfg.Config) + if err != nil { + return err + } + if err = client.InitBackupMeta(backupMeta, u); err != nil { + return err + } + + files, tables, err := filterRestoreFiles(client, cfg) + if err != nil { + return err + } + if len(files) == 0 { + return errors.New("all files are filtered out from the backup archive, nothing to restore") + } + summary.CollectInt("restore files", len(files)) + + var newTS uint64 + if client.IsIncremental() { + newTS, err = client.GetTS(ctx) + if err != nil { + return err + } + } + rewriteRules, newTables, err := client.CreateTables(mgr.GetDomain(), tables, newTS) + if err != nil { + return err + } + + ranges, err := restore.ValidateFileRanges(files, rewriteRules) + if err != nil { + return err + } + summary.CollectInt("restore ranges", len(ranges)) + + // Redirect to log if there is no log file to avoid unreadable output. 
+ updateCh := utils.StartProgress( + ctx, + cmdName, + // Split/Scatter + Download/Ingest + int64(len(ranges)+len(files)), + !cfg.LogProgress) + + err = restore.SplitRanges(ctx, client, ranges, rewriteRules, updateCh) + if err != nil { + log.Error("split regions failed", zap.Error(err)) + return err + } + + if !client.IsIncremental() { + if err = client.ResetTS(cfg.PD); err != nil { + log.Error("reset pd TS failed", zap.Error(err)) + return err + } + } + + removedSchedulers, err := restorePreWork(ctx, client, mgr) + if err != nil { + return err + } + err = client.RestoreFiles(files, rewriteRules, updateCh) + // always run the post-work even on error, so we don't stuck in the import mode or emptied schedulers + postErr := restorePostWork(ctx, client, mgr, removedSchedulers) + + if err != nil { + return err + } + if postErr != nil { + return postErr + } + + // Restore has finished. + close(updateCh) + + // Checksum + updateCh = utils.StartProgress( + ctx, "Checksum", int64(len(newTables)), !cfg.LogProgress) + err = client.ValidateChecksum( + ctx, mgr.GetTiKV().GetClient(), tables, newTables, updateCh) + if err != nil { + return err + } + close(updateCh) + + return nil +} + +func filterRestoreFiles( + client *restore.Client, + cfg *RestoreConfig, +) (files []*backup.File, tables []*utils.Table, err error) { + tableFilter, err := filter.New(cfg.CaseSensitive, &cfg.Filter) + if err != nil { + return nil, nil, err + } + + for _, db := range client.GetDatabases() { + createdDatabase := false + for _, table := range db.Tables { + if !tableFilter.Match(&filter.Table{Schema: db.Schema.Name.O, Name: table.Schema.Name.O}) { + continue + } + + if !createdDatabase { + if err = client.CreateDatabase(db.Schema); err != nil { + return nil, nil, err + } + createdDatabase = true + } + + files = append(files, table.Files...) 
+ tables = append(tables, table) + } + } + + return +} + +// restorePreWork executes some prepare work before restore +func restorePreWork(ctx context.Context, client *restore.Client, mgr *conn.Mgr) ([]string, error) { + if client.IsOnline() { + return nil, nil + } + + if err := client.SwitchToImportMode(ctx); err != nil { + return nil, err + } + + existSchedulers, err := mgr.ListSchedulers(ctx) + if err != nil { + return nil, errors.Trace(err) + } + needRemoveSchedulers := make([]string, 0, len(existSchedulers)) + for _, s := range existSchedulers { + if _, ok := schedulers[s]; ok { + needRemoveSchedulers = append(needRemoveSchedulers, s) + } + } + return removePDLeaderScheduler(ctx, mgr, needRemoveSchedulers) +} + +func removePDLeaderScheduler(ctx context.Context, mgr *conn.Mgr, existSchedulers []string) ([]string, error) { + removedSchedulers := make([]string, 0, len(existSchedulers)) + for _, scheduler := range existSchedulers { + err := mgr.RemoveScheduler(ctx, scheduler) + if err != nil { + return nil, err + } + removedSchedulers = append(removedSchedulers, scheduler) + } + return removedSchedulers, nil +} + +// restorePostWork executes some post work after restore +func restorePostWork(ctx context.Context, client *restore.Client, mgr *conn.Mgr, removedSchedulers []string) error { + if client.IsOnline() { + return nil + } + if err := client.SwitchToNormalMode(ctx); err != nil { + return err + } + return addPDLeaderScheduler(ctx, mgr, removedSchedulers) +} + +func addPDLeaderScheduler(ctx context.Context, mgr *conn.Mgr, removedSchedulers []string) error { + for _, scheduler := range removedSchedulers { + err := mgr.AddScheduler(ctx, scheduler) + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/utils/retry.go b/pkg/utils/retry.go new file mode 100644 index 000000000..a8f446764 --- /dev/null +++ b/pkg/utils/retry.go @@ -0,0 +1,40 @@ +package utils + +import ( + "context" + "time" +) + +// RetryableFunc presents a retryable opreation +type 
RetryableFunc func() error
+
+// Backoffer implements a backoff policy for retrying operations
+type Backoffer interface {
+	// NextBackoff returns a duration to wait before retrying again
+	NextBackoff(err error) time.Duration
+	// Attempt returns the remaining number of attempts
+	Attempt() int
+}
+
+// WithRetry retries a given operation with a backoff policy
+func WithRetry(
+	ctx context.Context,
+	retryableFunc RetryableFunc,
+	backoffer Backoffer,
+) error {
+	var lastErr error
+	for backoffer.Attempt() > 0 {
+		err := retryableFunc()
+		if err != nil {
+			lastErr = err
+			select {
+			case <-ctx.Done():
+				return lastErr
+			case <-time.After(backoffer.NextBackoff(err)):
+			}
+		} else {
+			return nil
+		}
+	}
+	return lastErr
+}
diff --git a/pkg/utils/tso.go b/pkg/utils/tso.go
index 44c23fc48..a4ca5f5b5 100644
--- a/pkg/utils/tso.go
+++ b/pkg/utils/tso.go
@@ -8,36 +8,12 @@ import (
 	"strings"
 
 	"github.com/pingcap/errors"
-	"github.com/pingcap/tidb/store/tikv/oracle"
 )
 
 const (
 	resetTSURL = "/pd/api/v1/admin/reset-ts"
 )
 
-// Timestamp is composed by a physical unix timestamp and a logical timestamp.
-type Timestamp struct {
-	Physical int64
-	Logical  int64
-}
-
-const physicalShiftBits = 18
-
-// DecodeTs decodes Timestamp from a uint64
-func DecodeTs(ts uint64) Timestamp {
-	physical := oracle.ExtractPhysical(ts)
-	logical := ts - (uint64(physical) << physicalShiftBits)
-	return Timestamp{
-		Physical: physical,
-		Logical:  int64(logical),
-	}
-}
-
-// EncodeTs encodes Timestamp into a uint64
-func EncodeTs(tp Timestamp) uint64 {
-	return uint64((tp.Physical << physicalShiftBits) + tp.Logical)
-}
-
 // ResetTS resets the timestamp of PD to a bigger value
 func ResetTS(pdAddr string, ts uint64) error {
 	req, err := json.Marshal(struct {
diff --git a/pkg/utils/tso_test.go b/pkg/utils/tso_test.go
deleted file mode 100644
index 3e6ecd9e5..000000000
--- a/pkg/utils/tso_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package utils
-
-import (
-	"math/rand"
-	"time"
-
-	. 
"github.com/pingcap/check" -) - -type testTsoSuite struct{} - -var _ = Suite(&testTsoSuite{}) - -func (r *testTsoSuite) TestTimestampEncodeDecode(c *C) { - rand.Seed(time.Now().UnixNano()) - for i := 0; i < 10; i++ { - ts := rand.Uint64() - tp := DecodeTs(ts) - ts1 := EncodeTs(tp) - c.Assert(ts, DeepEquals, ts1) - } -} diff --git a/pkg/utils/unit.go b/pkg/utils/unit.go index 5f8009878..a12dcb6c2 100644 --- a/pkg/utils/unit.go +++ b/pkg/utils/unit.go @@ -2,7 +2,7 @@ package utils // unit of storage const ( - B = 1 << (iota * 10) + B = uint64(1) << (iota * 10) KB MB GB diff --git a/pkg/utils/unit_test.go b/pkg/utils/unit_test.go new file mode 100644 index 000000000..5b3c00530 --- /dev/null +++ b/pkg/utils/unit_test.go @@ -0,0 +1,17 @@ +package utils + +import ( + . "github.com/pingcap/check" +) + +type testUnitSuite struct{} + +var _ = Suite(&testUnitSuite{}) + +func (r *testUnitSuite) TestLoadBackupMeta(c *C) { + c.Assert(B, Equals, uint64(1)) + c.Assert(KB, Equals, uint64(1024)) + c.Assert(MB, Equals, uint64(1024*1024)) + c.Assert(GB, Equals, uint64(1024*1024*1024)) + c.Assert(TB, Equals, uint64(1024*1024*1024*1024)) +} diff --git a/pkg/utils/version.go b/pkg/utils/version.go index bed19ffa9..13a3c7a92 100644 --- a/pkg/utils/version.go +++ b/pkg/utils/version.go @@ -1,7 +1,9 @@ package utils import ( + "bytes" "fmt" + "runtime" "github.com/pingcap/log" "github.com/pingcap/tidb/util/israce" @@ -16,25 +18,30 @@ var ( BRBuildTS = "None" BRGitHash = "None" BRGitBranch = "None" + goVersion = runtime.Version() ) -// LogBRInfo prints the BR version information. +// LogBRInfo logs version information about BR. 
func LogBRInfo() { log.Info("Welcome to Backup & Restore (BR)") log.Info("BR", zap.String("release-version", BRReleaseVersion)) log.Info("BR", zap.String("git-hash", BRGitHash)) log.Info("BR", zap.String("git-branch", BRGitBranch)) + log.Info("BR", zap.String("go-version", goVersion)) log.Info("BR", zap.String("utc-build-time", BRBuildTS)) log.Info("BR", zap.Bool("race-enabled", israce.RaceEnabled)) } -// PrintBRInfo prints the BR version information without log info. -func PrintBRInfo() { - fmt.Println("Release Version:", BRReleaseVersion) - fmt.Println("Git Commit Hash:", BRGitHash) - fmt.Println("Git Branch:", BRGitBranch) - fmt.Println("UTC Build Time: ", BRBuildTS) - fmt.Println("Race Enabled: ", israce.RaceEnabled) +// BRInfo returns version information about BR. +func BRInfo() string { + buf := bytes.Buffer{} + fmt.Fprintf(&buf, "Release Version: %s\n", BRReleaseVersion) + fmt.Fprintf(&buf, "Git Commit Hash: %s\n", BRGitHash) + fmt.Fprintf(&buf, "Git Branch: %s\n", BRGitBranch) + fmt.Fprintf(&buf, "Go Version: %s\n", goVersion) + fmt.Fprintf(&buf, "UTC Build Time: %s\n", BRBuildTS) + fmt.Fprintf(&buf, "Race Enabled: %t", israce.RaceEnabled) + return buf.String() } // LogArguments prints origin command arguments diff --git a/tests/br_debug_meta/run.sh b/tests/br_debug_meta/run.sh index 679602deb..8dc3ef5a3 100644 --- a/tests/br_debug_meta/run.sh +++ b/tests/br_debug_meta/run.sh @@ -15,28 +15,33 @@ set -eu DB="$TEST_NAME" +TABLE="usertable1" run_sql "CREATE DATABASE $DB;" -run_sql "CREATE TABLE $DB.usertable1 ( \ +run_sql "CREATE TABLE $DB.$TABLE( \ YCSB_KEY varchar(64) NOT NULL, \ FIELD0 varchar(1) DEFAULT NULL, \ PRIMARY KEY (YCSB_KEY) \ ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" -run_sql "INSERT INTO $DB.usertable1 VALUES (\"a\", \"b\");" -run_sql "INSERT INTO $DB.usertable1 VALUES (\"aa\", \"b\");" +run_sql "INSERT INTO $DB.$TABLE VALUES (\"a\", \"b\");" +run_sql "INSERT INTO $DB.$TABLE VALUES (\"aa\", \"b\");" + +row_count_ori=$(run_sql 
"SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') # backup table echo "backup start..." run_br --pd $PD_ADDR backup table --db $DB --table usertable1 -s "local://$TEST_DIR/$DB" --ratelimit 5 --concurrency 4 +run_sql "DROP DATABASE $DB;" + # Test validate decode run_br validate decode -s "local://$TEST_DIR/$DB" # should generate backupmeta.json if [ ! -f "$TEST_DIR/$DB/backupmeta.json" ]; then - echo "TEST: [$TEST_NAME] failed!" + echo "TEST: [$TEST_NAME] decode failed!" exit 1 fi @@ -45,18 +50,22 @@ run_br validate encode -s "local://$TEST_DIR/$DB" # should generate backupmeta_from_json if [ ! -f "$TEST_DIR/$DB/backupmeta_from_json" ]; then - echo "TEST: [$TEST_NAME] failed!" + echo "TEST: [$TEST_NAME] encode failed!" exit 1 fi -DIFF=$(diff $TEST_DIR/$DB/backupmeta_from_json $TEST_DIR/$DB/backupmeta) -if [ "$DIFF" != "" ] -then - echo "TEST: [$TEST_NAME] failed!" +# replace backupmeta +mv "$TEST_DIR/$DB/backupmeta_from_json" "$TEST_DIR/$DB/backupmeta" + +# restore table +echo "restore start..." 
+run_br --pd $PD_ADDR restore table --db $DB --table usertable1 -s "local://$TEST_DIR/$DB" + +row_count_new=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') + +if [ "${row_count_ori}" != "${row_count_new}" ];then + echo "TEST: [$TEST_NAME] failed!, row count not equal after restore" exit 1 fi run_sql "DROP DATABASE $DB;" - -# Test version -run_br version \ No newline at end of file diff --git a/tests/br_full_ddl/run.sh b/tests/br_full_ddl/run.sh index 3db1ecd60..1e40415d7 100755 --- a/tests/br_full_ddl/run.sh +++ b/tests/br_full_ddl/run.sh @@ -28,7 +28,7 @@ for i in $(seq $DDL_COUNT); do run_sql "USE $DB; ALTER TABLE $TABLE ADD INDEX (FIELD$i);" done -for i in $(sql $DDL_COUNT); do +for i in $(seq $DDL_COUNT); do if (( RANDOM % 2 )); then run_sql "USE $DB; ALTER TABLE $TABLE DROP INDEX FIELD$i;" fi diff --git a/tests/br_other/run.sh b/tests/br_other/run.sh index 579c630df..e25dd2eae 100644 --- a/tests/br_other/run.sh +++ b/tests/br_other/run.sh @@ -55,4 +55,5 @@ fi run_sql "DROP DATABASE $DB;" # Test version -run_br version +run_br --version +run_br -V diff --git a/tests/br_z_gc_safepoint/gc.go b/tests/br_z_gc_safepoint/gc.go new file mode 100644 index 000000000..a18367259 --- /dev/null +++ b/tests/br_z_gc_safepoint/gc.go @@ -0,0 +1,64 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +// Test backup with exceeding GC safe point. 
+ +package main + +import ( + "context" + "flag" + "time" + + "github.com/pingcap/log" + pd "github.com/pingcap/pd/client" + "github.com/pingcap/tidb/store/tikv/oracle" + "go.uber.org/zap" +) + +var ( + pdAddr = flag.String("pd", "", "PD address") + gcOffset = flag.Duration("gc-offset", time.Second*10, + "Set GC safe point to current time - gc-offset, default: 10s") +) + +func main() { + flag.Parse() + if *pdAddr == "" { + log.Fatal("pd address is empty") + } + if *gcOffset == time.Duration(0) { + log.Fatal("zero gc-offset is not allowed") + } + + timeout := time.Second * 10 + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + pdclient, err := pd.NewClientWithContext(ctx, []string{*pdAddr}, pd.SecurityOption{}) + if err != nil { + log.Fatal("create pd client failed", zap.Error(err)) + } + p, l, err := pdclient.GetTS(ctx) + if err != nil { + log.Fatal("get ts failed", zap.Error(err)) + } + now := oracle.ComposeTS(p, l) + nowMinusOffset := oracle.GetTimeFromTS(now).Add(-*gcOffset) + newSP := oracle.ComposeTS(oracle.GetPhysical(nowMinusOffset), 0) + _, err = pdclient.UpdateGCSafePoint(ctx, newSP) + if err != nil { + log.Fatal("create pd client failed", zap.Error(err)) + } + + log.Info("update GC safe point", zap.Uint64("SP", newSP), zap.Uint64("now", now)) +} diff --git a/tests/br_z_gc_safepoint/run.sh b/tests/br_z_gc_safepoint/run.sh new file mode 100755 index 000000000..916ca1fa8 --- /dev/null +++ b/tests/br_z_gc_safepoint/run.sh @@ -0,0 +1,46 @@ +#!/bin/sh +# +# Copyright 2019 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +# Test whether BR fails fast when backup ts exceeds GC safe point. +# It is call br_*z*_gc_safepoint, because it brings lots of write and +# slows down other tests to changing GC safe point. Adding a z prefix to run +# the test last. + +set -eu + +DB="$TEST_NAME" +TABLE="usertable" + +run_sql "CREATE DATABASE $DB;" + +go-ycsb load mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB + +row_count_ori=$(run_sql "SELECT COUNT(*) FROM $DB.$TABLE;" | awk '/COUNT/{print $2}') + +# Update GC safepoint to now + 5s after 10s seconds. +sleep 10 && bin/gc -pd $PD_ADDR -gc-offset "5s" & + +# Set ratelimit to 1 bytes/second, we assume it can not finish within 10s, +# so it will trigger exceed GC safe point error. +backup_gc_fail=0 +echo "backup start (expect fail)..." +run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --ratelimit 1 --ratelimit-unit 1 || backup_gc_fail=1 + +if [ "$backup_gc_fail" -ne "1" ];then + echo "TEST: [$TEST_NAME] failed!" + exit 1 +fi + +run_sql "DROP TABLE $DB.$TABLE;" diff --git a/tests/br_z_gc_safepoint/workload b/tests/br_z_gc_safepoint/workload new file mode 100644 index 000000000..448ca3c1a --- /dev/null +++ b/tests/br_z_gc_safepoint/workload @@ -0,0 +1,12 @@ +recordcount=1000 +operationcount=0 +workload=core + +readallfields=true + +readproportion=0 +updateproportion=0 +scanproportion=0 +insertproportion=0 + +requestdistribution=uniform \ No newline at end of file