*: enable revive (#35970)
ref #35345
hawkingrei authored Jul 15, 2022
1 parent 405f7a0 commit 911e7cc
Showing 167 changed files with 1,204 additions and 800 deletions.
8 changes: 8 additions & 0 deletions DEPS.bzl
@@ -2139,6 +2139,14 @@ def go_deps():
sum = "h1:oacPXPKHJg0hcngVVrdtTnfGJiS+PtwoQwTBZGFlV4k=",
version = "v3.3.0",
)
go_repository(
name = "com_github_mgechev_dots",
build_file_proto_mode = "disable",
importpath = "github.com/mgechev/dots",
sum = "h1:zpIH83+oKzcpryru8ceC6BxnoG8TBrhgAvRg8obzup0=",
version = "v0.0.0-20210922191527-e955255bf517",
)

go_repository(
name = "com_github_mgechev_revive",
build_file_proto_mode = "disable",
3 changes: 2 additions & 1 deletion br/pkg/lightning/log/log.go
@@ -47,6 +47,7 @@ type Config struct {
FileMaxBackups int `toml:"max-backups" json:"max-backups"`
}

// Adjust adjusts some fields in the config to a proper value.
func (cfg *Config) Adjust() {
if len(cfg.Level) == 0 {
cfg.Level = defaultLogLevel
@@ -75,7 +76,7 @@ var (
)

// InitLogger initializes Lightning's and also the TiDB library's loggers.
func InitLogger(cfg *Config, tidbLoglevel string) error {
func InitLogger(cfg *Config, _ string) error {
tidbLogCfg := logutil.LogConfig{}
// Disable annoying TiDB Log.
// TODO: some error logs outputs randomly, we need to fix them in TiDB.
6 changes: 6 additions & 0 deletions br/pkg/lightning/worker/worker.go
@@ -21,17 +21,20 @@ import (
"github.com/pingcap/tidb/br/pkg/lightning/metric"
)

// Pool is the worker pool.
type Pool struct {
limit int
workers chan *Worker
name string
metrics *metric.Metrics
}

// Worker is the worker struct.
type Worker struct {
ID int64
}

// NewPool creates a new worker pool.
func NewPool(ctx context.Context, limit int, name string) *Pool {
workers := make(chan *Worker, limit)
for i := 0; i < limit; i++ {
@@ -50,6 +53,7 @@ func NewPool(ctx context.Context, limit int, name string) *Pool {
}
}

// Apply gets a worker from the pool.
func (pool *Pool) Apply() *Worker {
start := time.Now()
worker := <-pool.workers
@@ -60,6 +64,7 @@ func (pool *Pool) Apply() *Worker {
return worker
}

// Recycle puts a worker back to the pool.
func (pool *Pool) Recycle(worker *Worker) {
if worker == nil {
panic("invalid restore worker")
@@ -70,6 +75,7 @@ func (pool *Pool) Recycle(worker *Worker) {
}
}

// HasWorker returns whether the pool has worker.
func (pool *Pool) HasWorker() bool {
return len(pool.workers) > 0
}
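The worker pool API documented by the new comments above is small, so a usage sketch may help. The snippet below is not part of the commit; the pool size and name are illustrative values, and only the exported functions shown in the hunks are used.

```go
package main

import (
	"context"

	"github.com/pingcap/tidb/br/pkg/lightning/worker"
)

func main() {
	ctx := context.Background()
	// Illustrative values; the limit and pool name are not taken from the commit.
	pool := worker.NewPool(ctx, 4, "example-pool")

	w := pool.Apply()     // blocks until a worker is available
	defer pool.Recycle(w) // return the worker so it can be reused

	_ = pool.HasWorker() // non-blocking check for spare capacity
	_ = w.ID             // each worker carries a numeric ID
}
```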
3 changes: 2 additions & 1 deletion br/pkg/logutil/logging.go
@@ -105,6 +105,7 @@ func (t zapStreamBackupTaskInfo) MarshalLogObject(enc zapcore.ObjectEncoder) err
return nil
}

// StreamBackupTaskInfo makes the zap fields for a stream backup task info.
func StreamBackupTaskInfo(t *backuppb.StreamBackupTaskInfo) zap.Field {
return zap.Object("streamTaskInfo", zapStreamBackupTaskInfo{t})
}
@@ -271,7 +272,7 @@ func Redact(field zap.Field) zap.Field {
return field
}

// StringifyRanges wrappes the key range into a stringer.
// StringifyKeys wraps the key range into a stringer.
type StringifyKeys []kv.KeyRange

func (kr StringifyKeys) String() string {
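The comment fix above names StringifyKeys, which wraps a slice of key ranges in a stringer. The hedged sketch below shows how such a stringer is typically logged; it assumes kv here is github.com/pingcap/tidb/kv and is not taken from the commit.

```go
package main

import (
	"github.com/pingcap/tidb/br/pkg/logutil"
	"github.com/pingcap/tidb/kv"
	"go.uber.org/zap"
)

func main() {
	// Assumed: kv.KeyRange and kv.Key come from github.com/pingcap/tidb/kv.
	ranges := []kv.KeyRange{{StartKey: kv.Key("a"), EndKey: kv.Key("z")}}
	// StringifyKeys implements fmt.Stringer via the String method in the hunk,
	// so the ranges are only formatted if the log entry is actually emitted.
	zap.L().Info("backing up ranges", zap.Stringer("keys", logutil.StringifyKeys(ranges)))
}
```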
1 change: 1 addition & 0 deletions br/pkg/membuf/buffer.go
@@ -115,6 +115,7 @@ func (p *Pool) NewBuffer() *Buffer {
return &Buffer{pool: p, bufs: make([][]byte, 0, 128), curBufIdx: -1}
}

// Destroy frees all buffers.
func (p *Pool) Destroy() {
close(p.blockCache)
for b := range p.blockCache {
16 changes: 8 additions & 8 deletions br/pkg/metautil/metafile.go
@@ -55,10 +55,12 @@ const (
MetaV2
)

// CreateMetaFileName is the name of meta file.
func CreateMetaFileName(ts uint64) string {
return fmt.Sprintf("%s_%d", MetaFile, ts)
}

// Encrypt encrypts the content according to CipherInfo.
func Encrypt(content []byte, cipher *backuppb.CipherInfo) (encryptedContent, iv []byte, err error) {
if len(content) == 0 || cipher == nil {
return content, iv, nil
@@ -83,6 +85,7 @@ func Encrypt(content []byte, cipher *backuppb.CipherInfo) (encryptedContent, iv
}
}

// Decrypt decrypts the content according to CipherInfo and IV.
func Decrypt(content []byte, cipher *backuppb.CipherInfo, iv []byte) ([]byte, error) {
if len(content) == 0 || cipher == nil {
return content, nil
@@ -222,7 +225,7 @@ func (reader *MetaReader) readDataFiles(ctx context.Context, output func(*backup
}

// ArchiveSize return the size of Archive data
func (reader *MetaReader) ArchiveSize(ctx context.Context, files []*backuppb.File) uint64 {
func (*MetaReader) ArchiveSize(_ context.Context, files []*backuppb.File) uint64 {
total := uint64(0)
for _, file := range files {
total += file.Size_
@@ -424,9 +427,7 @@ func (op AppendOp) name() string {
}

// appends item to MetaFile
func (op AppendOp) appendFile(a *backuppb.MetaFile, b interface{}) (int, int) {
size := 0
itemCount := 0
func (op AppendOp) appendFile(a *backuppb.MetaFile, b interface{}) (size int, itemCount int) {
switch op {
case AppendMetaFile:
a.MetaFiles = append(a.MetaFiles, b.(*backuppb.File))
@@ -449,7 +450,6 @@ func (op AppendOp) appendFile(a *backuppb.MetaFile, b interface{}) (int, int) {
itemCount++
size += len(b.([]byte))
}

return size, itemCount
}

@@ -554,7 +554,7 @@ func (writer *MetaWriter) Update(f func(m *backuppb.BackupMeta)) {
}

// Send sends the item to buffer.
func (writer *MetaWriter) Send(m interface{}, op AppendOp) error {
func (writer *MetaWriter) Send(m interface{}, _ AppendOp) error {
select {
case writer.metasCh <- m:
// receive an error from StartWriteMetasAsync
@@ -665,7 +665,7 @@ func (writer *MetaWriter) FlushBackupMeta(ctx context.Context) error {

// fillMetasV1 keep the compatibility for old version.
// for MetaV1, just put in backupMeta
func (writer *MetaWriter) fillMetasV1(ctx context.Context, op AppendOp) {
func (writer *MetaWriter) fillMetasV1(_ context.Context, op AppendOp) {
switch op {
case AppendDataFile:
writer.backupMeta.Files = writer.metafiles.root.DataFiles
@@ -717,7 +717,7 @@ func (writer *MetaWriter) flushMetasV2(ctx context.Context, op AppendOp) error {
name := op.name()
writer.metafileSizes[name] += writer.metafiles.size
// Flush metafiles to external storage.
writer.metafileSeqNum["metafiles"] += 1
writer.metafileSeqNum["metafiles"]++
fname := fmt.Sprintf("backupmeta.%s.%09d", name, writer.metafileSeqNum["metafiles"])

encyptedContent, iv, err := Encrypt(content, writer.cipher)
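The hunks above add doc comments to the Encrypt and Decrypt helpers. The round-trip sketch below is illustrative only: the backuppb/encryptionpb import paths, the CipherInfo field names, and the AES-128-CTR enum value are assumptions about kvproto, not part of this commit.

```go
package main

import (
	"fmt"

	backuppb "github.com/pingcap/kvproto/pkg/brpb"
	"github.com/pingcap/kvproto/pkg/encryptionpb"

	"github.com/pingcap/tidb/br/pkg/metautil"
)

func main() {
	// Assumed cipher configuration; a 16-byte key matches AES-128-CTR.
	cipher := &backuppb.CipherInfo{
		CipherType: encryptionpb.EncryptionMethod_AES128_CTR,
		CipherKey:  []byte("0123456789abcdef"),
	}

	// Encrypt returns the ciphertext plus the generated IV; Decrypt needs both.
	enc, iv, err := metautil.Encrypt([]byte("backupmeta payload"), cipher)
	if err != nil {
		panic(err)
	}
	dec, err := metautil.Decrypt(enc, cipher, iv)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(dec)) // prints the original payload
}
```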
2 changes: 1 addition & 1 deletion br/pkg/mock/mockid/mockid.go
@@ -32,6 +32,6 @@ func (alloc *IDAllocator) Alloc() (uint64, error) {
}

// Rebase implements the IDAllocator interface.
func (alloc *IDAllocator) Rebase() error {
func (*IDAllocator) Rebase() error {
return nil
}
3 changes: 2 additions & 1 deletion br/pkg/pdutil/pd.go
@@ -631,7 +631,7 @@ func (p *PdController) RemoveSchedulers(ctx context.Context) (undo UndoFunc, err
}

// RemoveSchedulersWithOrigin pause and remove br related schedule configs and return the origin and modified configs
func (p *PdController) RemoveSchedulersWithOrigin(ctx context.Context) (ClusterConfig, ClusterConfig, error) {
func (p *PdController) RemoveSchedulersWithOrigin(ctx context.Context) (origin ClusterConfig, modified ClusterConfig, err error) {
if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil {
span1 := span.Tracer().StartSpan("PdController.RemoveSchedulers", opentracing.ChildOf(span.Context()))
defer span1.Finish()
@@ -765,6 +765,7 @@ func (p *PdController) CreateOrUpdateRegionLabelRule(ctx context.Context, rule L
return errors.Trace(lastErr)
}

// DeleteRegionLabelRule deletes a region label rule.
func (p *PdController) DeleteRegionLabelRule(ctx context.Context, ruleID string) error {
var lastErr error
for i, addr := range p.addrs {