diff --git a/LICENSE.md b/LICENSE.md index c4f7e3384..79fef42aa 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -226,9 +226,9 @@ Copyright 2015 Steve Francia. Apache 2 license (https://github.com/spf13/cobra/blob/master/LICENSE.txt) Etcd. -https://github.com/coreos/etcd. +https://github.com/etcd-io/etcd. Copyright 2017 The etcd Authors. -Apache 2 license (https://github.com/coreos/etcd/blob/master/LICENSE) +Apache 2 license (https://github.com/etcd-io/etcd/blob/master/LICENSE) Cron. https://github.com/robfig/cron. diff --git a/README.md b/README.md index 4f28172b2..98e10ccca 100644 --- a/README.md +++ b/README.md @@ -29,4 +29,4 @@ Etcd-backup-restore is collection of components to backup and restore the [etcd] * [Tests](doc/development/tests.md) * [Adding support for a new object store provider](doc/development/new_cp_support.md) -[etcd]: https://github.com/coreos/etcd +[etcd]: https://github.com/etcd-io/etcd diff --git a/cmd/initializer.go b/cmd/initializer.go index a3485c909..0a3d90bfb 100644 --- a/cmd/initializer.go +++ b/cmd/initializer.go @@ -18,12 +18,12 @@ import ( "context" "fmt" - "github.com/coreos/etcd/pkg/types" "github.com/gardener/etcd-backup-restore/pkg/initializer" "github.com/gardener/etcd-backup-restore/pkg/initializer/validator" "github.com/gardener/etcd-backup-restore/pkg/snapshot/restorer" "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "go.etcd.io/etcd/pkg/types" ) // NewInitializeCommand returns the command to initialize etcd by validating the data diff --git a/cmd/restore.go b/cmd/restore.go index b2f81c809..2ab42b1f7 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -18,12 +18,12 @@ import ( "context" "fmt" - "github.com/coreos/etcd/pkg/types" "github.com/gardener/etcd-backup-restore/pkg/miscellaneous" "github.com/gardener/etcd-backup-restore/pkg/snapshot/restorer" "github.com/gardener/etcd-backup-restore/pkg/snapstore" "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "go.etcd.io/etcd/pkg/types" ) // NewRestoreCommand returns the command to restore diff --git a/doc/proposals/design.md b/doc/proposals/design.md index 63da85694..ca5151f97 100644 --- a/doc/proposals/design.md +++ b/doc/proposals/design.md @@ -106,4 +106,4 @@ Sidecar container has two components We want to develop incremental/continuous etcd backups (write watch logs in between full backups), to ensure our backups are fresh enough to avoid discrepancies between etcd backup and external/physical world. 
-[etcd]: https://github.com/coreos/etcd +[etcd]: https://github.com/etcd-io/etcd diff --git a/go.mod b/go.mod index ad14529b5..3076e7ef8 100644 --- a/go.mod +++ b/go.mod @@ -11,8 +11,6 @@ require ( github.com/aliyun/aliyun-oss-go-sdk v2.0.3+incompatible github.com/aws/aws-sdk-go v1.32.6 github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect - github.com/coreos/bbolt v1.3.3 - github.com/coreos/etcd v3.3.22+incompatible github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect github.com/ghodss/yaml v1.0.0 @@ -30,10 +28,11 @@ require ( github.com/spf13/cobra v1.0.0 github.com/spf13/pflag v1.0.5 github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966 // indirect + go.etcd.io/bbolt v1.3.3 + go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 go.opencensus.io v0.22.1 // indirect golang.org/x/net v0.0.0-20200707034311-ab3426394381 // indirect golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 // indirect - golang.org/x/text v0.3.3 // indirect golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 // indirect google.golang.org/api v0.14.0 helm.sh/helm/v3 v3.2.4 @@ -47,8 +46,8 @@ replace ( // Ref: https://github.com/Azure/go-autorest/issues/414 github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.3.2+incompatible // Etcd issue Ref: https://github.com/etcd-io/etcd/issues/12068 - github.com/coreos/bbolt => github.com/coreos/bbolt v1.3.3 - github.com/coreos/etcd => github.com/coreos/etcd v3.3.22+incompatible + // Etcd 3.4.x vendoring issue Ref: https://github.com/etcd-io/etcd/issues/11154#issuecomment-677940701 + github.com/coreos/etcd => go.etcd.io/etcd v0.5.0-alpha.5.0.20200824191128-ae9734ed278b // ae9734ed278b is the SHA for git tag v3.4.13 github.com/docker/docker => github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309 // Ref: https://github.com/etcd-io/etcd/issues/11992 github.com/golang/protobuf => github.com/golang/protobuf v1.3.5 diff --git a/go.sum b/go.sum index 3e714b9c0..c647f3055 100644 --- a/go.sum +++ b/go.sum @@ -119,6 +119,7 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= @@ -133,10 +134,7 @@ github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= -github.com/coreos/bbolt v1.3.3/go.mod 
h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.22+incompatible h1:AnRMUyVdVvh1k7lHe61YEd227+CLoNogQuAypztGSK4= -github.com/coreos/etcd v3.3.22+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= @@ -672,7 +670,10 @@ github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wK go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200824191128-ae9734ed278b h1:3kC4J3eQF6p1UEfQTkC67eEeb3rTk+shQqdX6tFyq9Q= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200824191128-ae9734ed278b/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -721,6 +722,7 @@ golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMx golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac h1:8R1esu+8QioDxo4E4mX6bFztO+dMTM49DNAaWfO5OeY= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -834,6 +836,7 @@ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -926,6 +929,7 @@ helm.sh/helm/v3 v3.2.4/go.mod h1:ZaXz/vzktgwjyGGFbUWtIQkscfE7WYoRGP2szqAFHR0= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.18.0 h1:lwYk8Vt7rsVTwjRU6pzEsa9YNhThbmbocQlKvNBB4EQ= @@ -952,9 +956,6 @@ k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDN k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kubectl v0.18.0 h1:hu52Ndq/d099YW+3sS3VARxFz61Wheiq8K9S7oa82Dk= k8s.io/kubectl v0.18.0/go.mod h1:LOkWx9Z5DXMEg5KtOjHhRiC1fqJPLyCr3KtQgEolCkU= -k8s.io/kubectl v0.18.5 h1:htctXnWqcF1VBkuzbWINqnwx/rM7byH9o2ZuHntlbJo= -k8s.io/kubectl v0.18.6 h1:IFPNuLPkZ59vSGQzynXY8XGz9yuOSRpkJupnobdYvO4= -k8s.io/kubelet v0.16.8/go.mod h1:mzDpnryQg2dlB6V3/WAgb1baIamiICtWpXMFrPOFh6I= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/metrics v0.18.0/go.mod h1:8aYTW18koXqjLVKL7Ds05RPMX9ipJZI3mywYvBOxXd4= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= diff --git a/pkg/defragmentor/defragmentor_suite_test.go b/pkg/defragmentor/defragmentor_suite_test.go index d7c69bd14..5887a5e7a 100644 --- a/pkg/defragmentor/defragmentor_suite_test.go +++ b/pkg/defragmentor/defragmentor_suite_test.go @@ -20,9 +20,9 @@ import ( "testing" "time" - "github.com/coreos/etcd/embed" "github.com/gardener/etcd-backup-restore/test/utils" "github.com/sirupsen/logrus" + "go.etcd.io/etcd/embed" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" diff --git a/pkg/etcdutil/etcdutil.go b/pkg/etcdutil/etcdutil.go index cb7cb877f..ce45f6a9f 100644 --- a/pkg/etcdutil/etcdutil.go +++ b/pkg/etcdutil/etcdutil.go @@ -18,8 +18,8 @@ import ( "context" "crypto/tls" - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/pkg/transport" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/pkg/transport" ) // GetTLSClientForEtcd creates an etcd client using the TLS config params. @@ -38,7 +38,7 @@ func GetTLSClientForEtcd(tlsConfig *EtcdConnectionConfig) (*clientv3.Client, err } if tlsConfig.CaFile != "" { - tlsinfo.CAFile = tlsConfig.CaFile + tlsinfo.TrustedCAFile = tlsConfig.CaFile cfgtls = &tlsinfo } diff --git a/pkg/initializer/initializer.go b/pkg/initializer/initializer.go index 3b861b0f6..459ea6763 100644 --- a/pkg/initializer/initializer.go +++ b/pkg/initializer/initializer.go @@ -27,6 +27,7 @@ import ( "github.com/gardener/etcd-backup-restore/pkg/snapstore" "github.com/prometheus/client_golang/prometheus" "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // Initialize has the following steps: @@ -68,7 +69,7 @@ func (e *EtcdInitializer) Initialize(mode validator.Mode, failBelowRevision int6 //NewInitializer creates an etcd initializer object. 
func NewInitializer(options *restorer.RestoreOptions, snapstoreConfig *snapstore.Config, logger *logrus.Logger) *EtcdInitializer { - + zapLogger, _ := zap.NewProduction() etcdInit := &EtcdInitializer{ Config: &Config{ SnapstoreConfig: snapstoreConfig, @@ -79,7 +80,8 @@ func NewInitializer(options *restorer.RestoreOptions, snapstoreConfig *snapstore DataDir: options.Config.RestoreDataDir, SnapstoreConfig: snapstoreConfig, }, - Logger: logger, + Logger: logger, + ZapLogger: zapLogger, }, Logger: logger, } diff --git a/pkg/initializer/validator/datavalidator.go b/pkg/initializer/validator/datavalidator.go index 62c284d3f..48d0097e9 100644 --- a/pkg/initializer/validator/datavalidator.go +++ b/pkg/initializer/validator/datavalidator.go @@ -23,14 +23,15 @@ import ( "os" "path/filepath" - bolt "github.com/coreos/bbolt" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" - "github.com/coreos/etcd/wal" - "github.com/coreos/etcd/wal/walpb" "github.com/gardener/etcd-backup-restore/pkg/miscellaneous" "github.com/gardener/etcd-backup-restore/pkg/snapstore" "github.com/sirupsen/logrus" + bolt "go.etcd.io/bbolt" + "go.etcd.io/etcd/etcdserver/api/snap" + "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/wal" + "go.etcd.io/etcd/wal/walpb" + "go.uber.org/zap" ) var ( @@ -129,7 +130,7 @@ func (d *DataValidator) checkForDataCorruption() error { walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term } d.Logger.Info("Verifying WAL directory...") - if err := verifyWALDir(d.walDir(), walsnap); err != nil { + if err := verifyWALDir(d.ZapLogger, d.walDir(), walsnap); err != nil { return fmt.Errorf("Invalid wal files: %v", err) } d.Logger.Info("Verifying DB file...") @@ -167,22 +168,22 @@ func directoryExist(dir string) (bool, error) { } func (d *DataValidator) verifySnapDir() (*raftpb.Snapshot, error) { - ssr := snap.New(d.snapDir()) + ssr := snap.New(d.ZapLogger, d.snapDir()) return ssr.Load() } -func verifyWALDir(waldir string, snap walpb.Snapshot) error { +func verifyWALDir(logger *zap.Logger, waldir string, snap walpb.Snapshot) error { var err error repaired := false for { - if err = wal.Verify(waldir, snap); err != nil { + if err = wal.Verify(logger, waldir, snap); err != nil { // we can only repair ErrUnexpectedEOF and we never repair twice. if repaired || err != io.ErrUnexpectedEOF { fmt.Printf("read wal error (%v) and cannot be repaired.\n", err) return err } - if !wal.Repair(waldir) { + if !wal.Repair(logger, waldir) { fmt.Printf("WAL error (%v) cannot be repaired.\n", err) return err } diff --git a/pkg/initializer/validator/datavalidator_test.go b/pkg/initializer/validator/datavalidator_test.go index c011d5e69..5b15d0922 100644 --- a/pkg/initializer/validator/datavalidator_test.go +++ b/pkg/initializer/validator/datavalidator_test.go @@ -21,6 +21,7 @@ import ( "github.com/gardener/etcd-backup-restore/pkg/snapstore" "github.com/gardener/etcd-backup-restore/test/utils" + "go.uber.org/zap" . "github.com/gardener/etcd-backup-restore/pkg/initializer/validator" . 
"github.com/onsi/ginkgo" @@ -44,12 +45,14 @@ var _ = Describe("Running Datavalidator", func() { Provider: "Local", } + zapLogger, _ := zap.NewProduction() validator = &DataValidator{ Config: &Config{ DataDir: restoreDataDir, SnapstoreConfig: snapstoreConfig, }, - Logger: logger.Logger, + Logger: logger.Logger, + ZapLogger: zapLogger, } }) diff --git a/pkg/initializer/validator/types.go b/pkg/initializer/validator/types.go index 05d87edee..eab1ac906 100644 --- a/pkg/initializer/validator/types.go +++ b/pkg/initializer/validator/types.go @@ -17,6 +17,7 @@ package validator import ( "github.com/gardener/etcd-backup-restore/pkg/snapstore" "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // DataDirStatus represents the status of the etcd data directory. @@ -61,8 +62,9 @@ type Config struct { // DataValidator contains implements Validator interface to perform data validation. type DataValidator struct { - Config *Config - Logger *logrus.Logger + Config *Config + Logger *logrus.Logger + ZapLogger *zap.Logger } // Validator is the interface for data validation actions. diff --git a/pkg/initializer/validator/validator_suite_test.go b/pkg/initializer/validator/validator_suite_test.go index 19ed6d585..9854056bb 100644 --- a/pkg/initializer/validator/validator_suite_test.go +++ b/pkg/initializer/validator/validator_suite_test.go @@ -10,12 +10,12 @@ import ( "testing" "time" - "github.com/coreos/etcd/embed" "github.com/gardener/etcd-backup-restore/pkg/etcdutil" "github.com/gardener/etcd-backup-restore/pkg/snapshot/snapshotter" "github.com/gardener/etcd-backup-restore/pkg/snapstore" "github.com/gardener/etcd-backup-restore/test/utils" "github.com/sirupsen/logrus" + "go.etcd.io/etcd/embed" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" diff --git a/pkg/server/backuprestoreserver.go b/pkg/server/backuprestoreserver.go index 5938a41bb..1caeee4b8 100644 --- a/pkg/server/backuprestoreserver.go +++ b/pkg/server/backuprestoreserver.go @@ -25,7 +25,6 @@ import ( "github.com/gardener/etcd-backup-restore/pkg/metrics" "github.com/prometheus/client_golang/prometheus" - "github.com/coreos/etcd/pkg/types" "github.com/gardener/etcd-backup-restore/pkg/defragmentor" "github.com/gardener/etcd-backup-restore/pkg/etcdutil" "github.com/gardener/etcd-backup-restore/pkg/initializer" @@ -34,6 +33,7 @@ import ( "github.com/gardener/etcd-backup-restore/pkg/snapstore" cron "github.com/robfig/cron/v3" "github.com/sirupsen/logrus" + "go.etcd.io/etcd/pkg/types" ) // BackupRestoreServer holds the details for backup-restore server. diff --git a/pkg/snapshot/restorer/init.go b/pkg/snapshot/restorer/init.go index 97e4a8d93..c964fa913 100644 --- a/pkg/snapshot/restorer/init.go +++ b/pkg/snapshot/restorer/init.go @@ -18,8 +18,8 @@ import ( "fmt" "path" - "github.com/coreos/etcd/pkg/types" flag "github.com/spf13/pflag" + "go.etcd.io/etcd/pkg/types" ) // NewRestorationConfig returns the restoration config. 
diff --git a/pkg/snapshot/restorer/restorer.go b/pkg/snapshot/restorer/restorer.go index ee7fc9932..893c39005 100644 --- a/pkg/snapshot/restorer/restorer.go +++ b/pkg/snapshot/restorer/restorer.go @@ -31,31 +31,35 @@ import ( "sync" "time" - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/embed" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" - "github.com/coreos/etcd/pkg/fileutil" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" - "github.com/coreos/etcd/store" - "github.com/coreos/etcd/wal" - "github.com/coreos/etcd/wal/walpb" "github.com/gardener/etcd-backup-restore/pkg/snapstore" "github.com/sirupsen/logrus" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/embed" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api/membership" + "go.etcd.io/etcd/etcdserver/api/snap" + store "go.etcd.io/etcd/etcdserver/api/v2store" + "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/lease" + "go.etcd.io/etcd/mvcc" + "go.etcd.io/etcd/mvcc/backend" + "go.etcd.io/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/pkg/fileutil" + "go.etcd.io/etcd/pkg/traceutil" + "go.etcd.io/etcd/raft" + "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/wal" + "go.etcd.io/etcd/wal/walpb" + "go.uber.org/zap" ) // NewRestorer returns the restorer object. func NewRestorer(store snapstore.SnapStore, logger *logrus.Entry) *Restorer { + zapLogger, _ := zap.NewProduction() return &Restorer{ - logger: logger.WithField("actor", "restorer"), - store: store, + logger: logger.WithField("actor", "restorer"), + zapLogger: zapLogger, + store: store, } } @@ -106,7 +110,7 @@ func (r *Restorer) restoreFromBaseSnapshot(ro RestoreOptions) error { return err } - cl, err := membership.NewClusterFromURLsMap(ro.Config.InitialClusterToken, ro.ClusterURLs) + cl, err := membership.NewClusterFromURLsMap(r.zapLogger, ro.Config.InitialClusterToken, ro.ClusterURLs) if err != nil { return err } @@ -118,14 +122,14 @@ func (r *Restorer) restoreFromBaseSnapshot(ro RestoreOptions) error { walDir := filepath.Join(memberDir, "wal") snapdir := filepath.Join(memberDir, "snap") - if err = makeDB(snapdir, ro.BaseSnapshot, len(cl.Members()), r.store, false); err != nil { + if err = makeDB(r.zapLogger, snapdir, ro.BaseSnapshot, len(cl.Members()), r.store, false); err != nil { return err } - return makeWALAndSnap(walDir, snapdir, cl, ro.Config.Name) + return makeWALAndSnap(r.zapLogger, walDir, snapdir, cl, ro.Config.Name) } // makeDB copies the database snapshot to the snapshot directory. 
-func makeDB(snapdir string, snap snapstore.Snapshot, commit int, ss snapstore.SnapStore, skipHashCheck bool) error { +func makeDB(logger *zap.Logger, snapdir string, snap snapstore.Snapshot, commit int, ss snapstore.SnapStore, skipHashCheck bool) error { rc, err := ss.Fetch(snap) if err != nil { return err @@ -193,10 +197,11 @@ func makeDB(snapdir string, snap snapstore.Snapshot, commit int, ss snapstore.Sn // update consistentIndex so applies go through on etcdserver despite // having a new raft instance be := backend.NewDefaultBackend(dbPath) - // a lessor never timeouts leases - lessor := lease.NewLessor(be, math.MaxInt64) - s := mvcc.NewStore(be, lessor, (*initIndex)(&commit)) - txn := s.Write() + // a lessor that never times out leases + lessor := lease.NewLessor(logger, be, lease.LessorConfig{MinLeaseTTL: math.MaxInt64}) + s := mvcc.NewStore(logger, be, lessor, (*initIndex)(&commit), mvcc.StoreConfig{}) + trace := traceutil.New("write", logger) + txn := s.Write(trace) btx := be.BatchTx() del := func(k, v []byte) error { txn.DeleteRange(k, nil) @@ -215,7 +220,7 @@ func makeDB(snapdir string, snap snapstore.Snapshot, commit int, ss snapstore.Sn return nil } -func makeWALAndSnap(waldir, snapdir string, cl *membership.RaftCluster, restoreName string) error { +func makeWALAndSnap(logger *zap.Logger, waldir, snapdir string, cl *membership.RaftCluster, restoreName string) error { if err := fileutil.CreateDirAll(waldir); err != nil { return err } @@ -234,7 +239,7 @@ func makeWALAndSnap(waldir, snapdir string, cl *membership.RaftCluster, restoreN return err } - w, err := wal.Create(waldir, metadata) + w, err := wal.Create(logger, waldir, metadata) if err != nil { return err } @@ -290,11 +295,11 @@ func makeWALAndSnap(waldir, snapdir string, cl *membership.RaftCluster, restoreN Index: commit, Term: term, ConfState: raftpb.ConfState{ - Nodes: nodeIDs, + Voters: nodeIDs, }, }, } - snapshotter := snap.New(snapdir) + snapshotter := snap.New(logger, snapdir) if err := snapshotter.SaveSnap(raftSnap); err != nil { panic(err) } @@ -320,6 +325,7 @@ func startEmbeddedEtcd(logger *logrus.Entry, ro RestoreOptions) (*embed.Etcd, er cfg.ACUrls = []url.URL{*acurl} cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) cfg.QuotaBackendBytes = ro.Config.EmbeddedEtcdQuotaBytes + cfg.Logger = "zap" e, err := embed.StartEtcd(cfg) if err != nil { return nil, err diff --git a/pkg/snapshot/restorer/restorer_suite_test.go b/pkg/snapshot/restorer/restorer_suite_test.go index 0f481ea2b..d9795848d 100644 --- a/pkg/snapshot/restorer/restorer_suite_test.go +++ b/pkg/snapshot/restorer/restorer_suite_test.go @@ -24,7 +24,6 @@ import ( "github.com/gardener/etcd-backup-restore/pkg/wrappers" - "github.com/coreos/etcd/embed" "github.com/gardener/etcd-backup-restore/pkg/etcdutil" "github.com/gardener/etcd-backup-restore/pkg/snapshot/snapshotter" "github.com/gardener/etcd-backup-restore/pkg/snapstore" @@ -32,6 +31,7 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" "github.com/sirupsen/logrus" + "go.etcd.io/etcd/embed" ) const ( diff --git a/pkg/snapshot/restorer/restorer_test.go b/pkg/snapshot/restorer/restorer_test.go index 63678340e..77ac5152d 100644 --- a/pkg/snapshot/restorer/restorer_test.go +++ b/pkg/snapshot/restorer/restorer_test.go @@ -24,12 +24,12 @@ import ( "sync" "time" - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/pkg/types" "github.com/gardener/etcd-backup-restore/pkg/miscellaneous" "github.com/gardener/etcd-backup-restore/pkg/snapstore" "github.com/gardener/etcd-backup-restore/test/utils" "github.com/sirupsen/logrus" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/pkg/types" . "github.com/gardener/etcd-backup-restore/pkg/snapshot/restorer" . "github.com/onsi/ginkgo" diff --git a/pkg/snapshot/restorer/types.go b/pkg/snapshot/restorer/types.go index 5f242150b..aad8c3023 100755 --- a/pkg/snapshot/restorer/types.go +++ b/pkg/snapshot/restorer/types.go @@ -18,10 +18,11 @@ import ( "net/url" "time" - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/pkg/types" "github.com/gardener/etcd-backup-restore/pkg/snapstore" "github.com/sirupsen/logrus" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/pkg/types" + "go.uber.org/zap" ) const ( @@ -37,8 +38,9 @@ const ( // Restorer is a struct for etcd data directory restorer type Restorer struct { - logger *logrus.Entry - store snapstore.SnapStore + logger *logrus.Entry + zapLogger *zap.Logger + store snapstore.SnapStore } // RestoreOptions hold all snapshot restore related fields @@ -127,7 +129,7 @@ func DeepCopyURL(in *url.URL) *url.URL { *out = *in if in.User != nil { in, out := &in.User, &out.User - *out = new (url.Userinfo) + *out = new(url.Userinfo) *out = *in } return out @@ -177,4 +179,4 @@ func (in *RestorationConfig) DeepCopy() *RestorationConfig { out := new(RestorationConfig) in.DeepCopyInto(out) return out -} \ No newline at end of file +} diff --git a/pkg/snapshot/restorer/types_test.go b/pkg/snapshot/restorer/types_test.go index 38dd19668..a2fab22ee 100644 --- a/pkg/snapshot/restorer/types_test.go +++ b/pkg/snapshot/restorer/types_test.go @@ -19,56 +19,56 @@ import ( "net/url" "time" - "github.com/coreos/etcd/pkg/types" - "github.com/gardener/etcd-backup-restore/pkg/snapstore" . "github.com/gardener/etcd-backup-restore/pkg/snapshot/restorer" + "github.com/gardener/etcd-backup-restore/pkg/snapstore" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + "go.etcd.io/etcd/pkg/types" ) var _ = Describe("restorer types", func() { var ( makeRestorationConfig = func(s string, b bool, i int) *RestorationConfig { return &RestorationConfig{ - InitialCluster: s, - InitialClusterToken: s, - RestoreDataDir: s, - InitialAdvertisePeerURLs: []string{ s, s }, - Name: s, - SkipHashCheck: b, - MaxFetchers: uint(i), - EmbeddedEtcdQuotaBytes: int64(i), + InitialCluster: s, + InitialClusterToken: s, + RestoreDataDir: s, + InitialAdvertisePeerURLs: []string{s, s}, + Name: s, + SkipHashCheck: b, + MaxFetchers: uint(i), + EmbeddedEtcdQuotaBytes: int64(i), } } makeSnap = func(s string, i int, t time.Time, b bool) snapstore.Snapshot { return snapstore.Snapshot{ - Kind: s, + Kind: s, StartRevision: int64(i), - LastRevision: int64(i), - CreatedOn: t, - SnapDir: s, - SnapName: s, - IsChunk: b, + LastRevision: int64(i), + CreatedOn: t, + SnapDir: s, + SnapName: s, + IsChunk: b, } } makeSnapList = func(s string, i int, t time.Time, b bool) snapstore.SnapList { var s1, s2 = makeSnap(s, i, t, b), makeSnap(s, i, t, b) - return snapstore.SnapList{ &s1, &s2 } + return snapstore.SnapList{&s1, &s2} } makeURL = func(s string, b bool) url.URL { return url.URL{ - Scheme: s, - Opaque: s, - User: url.UserPassword(s, s), - Host: s, - Path: s, - RawPath: s, + Scheme: s, + Opaque: s, + User: url.UserPassword(s, s), + Host: s, + Path: s, + RawPath: s, ForceQuery: b, - Fragment: s, + Fragment: s, } } makeURLs = func(s string, b bool) types.URLs { - return types.URLs{ makeURL(s, b), makeURL(s, b) } + return types.URLs{makeURL(s, b), makeURL(s, b)} } makeURLsMap = func(s string, b bool) types.URLsMap { var out = types.URLsMap{} @@ -79,10 +79,10 @@ var _ = Describe("restorer types", func() { } makeRestoreOptions = func(s string, i int, t time.Time, b bool) *RestoreOptions { return &RestoreOptions{ - Config: makeRestorationConfig(s, b, i), - ClusterURLs: makeURLsMap(s, b), - PeerURLs: makeURLs(s, b), - BaseSnapshot: makeSnap(s, i, t, b), + Config: makeRestorationConfig(s, b, i), + ClusterURLs: makeURLsMap(s, b), + PeerURLs: makeURLs(s, b), + BaseSnapshot: makeSnap(s, i, t, b), DeltaSnapList: makeSnapList(s, i, t, b), } } @@ -127,7 +127,7 @@ var _ = Describe("restorer types", func() { Describe("SnapList", func() { var ( - now = time.Now() + now = time.Now() makeA = func() snapstore.SnapList { return makeSnapList("a", 1, now, false) } ) Describe("DeepCopySnapList", func() { @@ -179,7 +179,7 @@ var _ = Describe("restorer types", func() { Describe("RestoreOptions", func() { var ( - now = time.Now() + now = time.Now() makeA = func() *RestoreOptions { return makeRestoreOptions("a", 1, now, false) } makeB = func() *RestoreOptions { return makeRestoreOptions("b", 2, now.Add(-1*time.Hour), true) } ) @@ -215,4 +215,4 @@ var _ = Describe("restorer types", func() { }) }) -}) \ No newline at end of file +}) diff --git a/pkg/snapshot/snapshotter/snapshotter.go b/pkg/snapshot/snapshotter/snapshotter.go index 47817c534..65b3546a9 100644 --- a/pkg/snapshot/snapshotter/snapshotter.go +++ b/pkg/snapshot/snapshotter/snapshotter.go @@ -25,7 +25,6 @@ import ( "sync" "time" - "github.com/coreos/etcd/clientv3" "github.com/gardener/etcd-backup-restore/pkg/errors" "github.com/gardener/etcd-backup-restore/pkg/etcdutil" "github.com/gardener/etcd-backup-restore/pkg/metrics" @@ -34,6 +33,7 @@ import ( "github.com/prometheus/client_golang/prometheus" cron "github.com/robfig/cron/v3" "github.com/sirupsen/logrus" + "go.etcd.io/etcd/clientv3" ) // NewSnapshotter returns the snapshotter object. 
diff --git a/pkg/snapshot/snapshotter/snapshotter_suite_test.go b/pkg/snapshot/snapshotter/snapshotter_suite_test.go index e4363c9d2..230e03906 100644 --- a/pkg/snapshot/snapshotter/snapshotter_suite_test.go +++ b/pkg/snapshot/snapshotter/snapshotter_suite_test.go @@ -19,11 +19,11 @@ import ( "os" "testing" - "github.com/coreos/etcd/embed" "github.com/gardener/etcd-backup-restore/test/utils" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/sirupsen/logrus" + "go.etcd.io/etcd/embed" ) var ( diff --git a/pkg/snapshot/snapshotter/types.go b/pkg/snapshot/snapshotter/types.go index 947ee8ba2..1d7c65f35 100644 --- a/pkg/snapshot/snapshotter/types.go +++ b/pkg/snapshot/snapshotter/types.go @@ -21,11 +21,11 @@ import ( "github.com/gardener/etcd-backup-restore/pkg/wrappers" - "github.com/coreos/etcd/clientv3" "github.com/gardener/etcd-backup-restore/pkg/etcdutil" "github.com/gardener/etcd-backup-restore/pkg/snapstore" cron "github.com/robfig/cron/v3" "github.com/sirupsen/logrus" + "go.etcd.io/etcd/clientv3" ) const ( diff --git a/test/e2e/integration/cloud_backup_test.go b/test/e2e/integration/cloud_backup_test.go index 675038836..ab24ffa8c 100644 --- a/test/e2e/integration/cloud_backup_test.go +++ b/test/e2e/integration/cloud_backup_test.go @@ -30,6 +30,7 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func startEtcd() (*Cmd, *chan error) { @@ -209,12 +210,14 @@ var _ = Describe("CloudBackup", func() { It("valids non corrupt data directory", func() { dataDir := os.Getenv("ETCD_DATA_DIR") Expect(dataDir).ShouldNot(Equal(nil)) + zapLogger, _ := zap.NewProduction() dataValidator := validator.DataValidator{ - Logger: logger, Config: &validator.Config{ DataDir: dataDir, SnapstoreConfig: snapstoreConfig, }, + Logger: logger, + ZapLogger: zapLogger, } dataDirStatus, err := dataValidator.Validate(validator.Full, 0) Expect(err).ShouldNot(HaveOccurred()) @@ -241,12 +244,14 @@ var _ = Describe("CloudBackup", func() { Expect(err).ShouldNot(HaveOccurred()) fileWriter.Write([]byte("corrupt file..")) fileWriter.Flush() + zapLogger, _ := zap.NewProduction() dataValidator := validator.DataValidator{ - Logger: logger, Config: &validator.Config{ DataDir: dataDir, SnapstoreConfig: snapstoreConfig, }, + Logger: logger, + ZapLogger: zapLogger, } dataDirStatus, err := dataValidator.Validate(validator.Full, 0) Expect(err).ShouldNot(HaveOccurred()) diff --git a/test/utils/utils.go b/test/utils/utils.go index cafe4a2f9..40de38cb6 100644 --- a/test/utils/utils.go +++ b/test/utils/utils.go @@ -23,9 +23,9 @@ import ( "sync" "time" - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/embed" "github.com/sirupsen/logrus" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/embed" ) const ( @@ -44,7 +44,7 @@ func StartEmbeddedEtcd(ctx context.Context, etcdDir string, logger *logrus.Entry cfg.EnableV2 = false cfg.Debug = false cfg.GRPCKeepAliveTimeout = 0 - cfg.SnapCount = 10 + cfg.SnapshotCount = 10 DefaultListenPeerURLs := "http://localhost:0" DefaultListenClientURLs := "http://localhost:0" DefaultInitialAdvertisePeerURLs := "http://localhost:0" @@ -58,6 +58,7 @@ func StartEmbeddedEtcd(ctx context.Context, etcdDir string, logger *logrus.Entry cfg.APUrls = []url.URL{*apurl} cfg.ACUrls = []url.URL{*acurl} cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) + cfg.Logger = "zap" e, err := embed.StartEtcd(cfg) if err != nil { return nil, err diff --git a/vendor/github.com/coreos/etcd/auth/jwt.go b/vendor/github.com/coreos/etcd/auth/jwt.go 
deleted file mode 100644 index 99b2d6b5c..000000000 --- a/vendor/github.com/coreos/etcd/auth/jwt.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package auth - -import ( - "context" - "crypto/rsa" - "io/ioutil" - - jwt "github.com/dgrijalva/jwt-go" -) - -type tokenJWT struct { - signMethod string - signKey *rsa.PrivateKey - verifyKey *rsa.PublicKey -} - -func (t *tokenJWT) enable() {} -func (t *tokenJWT) disable() {} -func (t *tokenJWT) invalidateUser(string) {} -func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil } - -func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) { - // rev isn't used in JWT, it is only used in simple token - var ( - username string - revision uint64 - ) - - parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { - return t.verifyKey, nil - }) - - switch err.(type) { - case nil: - if !parsed.Valid { - plog.Warningf("invalid jwt token: %s", token) - return nil, false - } - - claims := parsed.Claims.(jwt.MapClaims) - - username = claims["username"].(string) - revision = uint64(claims["revision"].(float64)) - default: - plog.Warningf("failed to parse jwt token: %s", err) - return nil, false - } - - return &AuthInfo{Username: username, Revision: revision}, true -} - -func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) { - // Future work: let a jwt token include permission information would be useful for - // permission checking in proxy side. 
- tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod), - jwt.MapClaims{ - "username": username, - "revision": revision, - }) - - token, err := tk.SignedString(t.signKey) - if err != nil { - plog.Debugf("failed to sign jwt token: %s", err) - return "", err - } - - plog.Debugf("jwt token: %s", token) - - return token, err -} - -func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) { - for k, v := range opts { - switch k { - case "sign-method": - jwtSignMethod = v - case "pub-key": - jwtPubKeyPath = v - case "priv-key": - jwtPrivKeyPath = v - default: - plog.Errorf("unknown token specific option: %s", k) - return "", "", "", ErrInvalidAuthOpts - } - } - if len(jwtSignMethod) == 0 { - return "", "", "", ErrInvalidAuthOpts - } - return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil -} - -func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) { - jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts) - if err != nil { - return nil, ErrInvalidAuthOpts - } - - t := &tokenJWT{} - - t.signMethod = jwtSignMethod - - verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath) - if err != nil { - plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err) - return nil, err - } - t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes) - if err != nil { - plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err) - return nil, err - } - - signBytes, err := ioutil.ReadFile(jwtPrivKeyPath) - if err != nil { - plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err) - return nil, err - } - t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) - if err != nil { - plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err) - return nil, err - } - - return t, nil -} diff --git a/vendor/github.com/coreos/etcd/auth/metrics.go b/vendor/github.com/coreos/etcd/auth/metrics.go deleted file mode 100644 index fe0d28e22..000000000 --- a/vendor/github.com/coreos/etcd/auth/metrics.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package auth - -import ( - "github.com/prometheus/client_golang/prometheus" - "sync" -) - -var ( - currentAuthRevision = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "auth", - Name: "revision", - Help: "The current revision of auth store.", - }, - func() float64 { - reportCurrentAuthRevMu.RLock() - defer reportCurrentAuthRevMu.RUnlock() - return reportCurrentAuthRev() - }, - ) - // overridden by auth store initialization - reportCurrentAuthRevMu sync.RWMutex - reportCurrentAuthRev = func() float64 { return 0 } -) - -func init() { - prometheus.MustRegister(currentAuthRevision) -} diff --git a/vendor/github.com/coreos/etcd/clientv3/ctx.go b/vendor/github.com/coreos/etcd/clientv3/ctx.go deleted file mode 100644 index da8297b6c..000000000 --- a/vendor/github.com/coreos/etcd/clientv3/ctx.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2020 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "context" - "strings" - - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - "github.com/coreos/etcd/version" - "google.golang.org/grpc/metadata" -) - -// WithRequireLeader requires client requests to only succeed -// when the cluster has a leader. -func WithRequireLeader(ctx context.Context) context.Context { - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { // no outgoing metadata ctx key, create one - md = metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - return metadata.NewOutgoingContext(ctx, md) - } - copied := md.Copy() // avoid racey updates - // overwrite/add 'hasleader' key/value - metadataSet(copied, rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - return metadata.NewOutgoingContext(ctx, copied) -} - -// embeds client version -func withVersion(ctx context.Context) context.Context { - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { // no outgoing metadata ctx key, create one - md = metadata.Pairs(rpctypes.MetadataClientAPIVersionKey, version.APIVersion) - return metadata.NewOutgoingContext(ctx, md) - } - copied := md.Copy() // avoid racey updates - // overwrite/add version key/value - metadataSet(copied, rpctypes.MetadataClientAPIVersionKey, version.APIVersion) - return metadata.NewOutgoingContext(ctx, copied) -} - -func metadataGet(md metadata.MD, k string) []string { - k = strings.ToLower(k) - return md[k] -} - -func metadataSet(md metadata.MD, k string, vals ...string) { - if len(vals) == 0 { - return - } - k = strings.ToLower(k) - md[k] = vals -} diff --git a/vendor/github.com/coreos/etcd/compactor/revision.go b/vendor/github.com/coreos/etcd/compactor/revision.go deleted file mode 100644 index 927e41c97..000000000 --- a/vendor/github.com/coreos/etcd/compactor/revision.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compactor - -import ( - "context" - "sync" - "time" - - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" - - "github.com/jonboulle/clockwork" -) - -// Revision compacts the log by purging revisions older than -// the configured reivison number. Compaction happens every 5 minutes. -type Revision struct { - clock clockwork.Clock - retention int64 - - rg RevGetter - c Compactable - - ctx context.Context - cancel context.CancelFunc - - mu sync.Mutex - paused bool -} - -// NewRevision creates a new instance of Revisonal compactor that purges -// the log older than retention revisions from the current revision. -func NewRevision(retention int64, rg RevGetter, c Compactable) *Revision { - return newRevision(clockwork.NewRealClock(), retention, rg, c) -} - -func newRevision(clock clockwork.Clock, retention int64, rg RevGetter, c Compactable) *Revision { - t := &Revision{ - clock: clock, - retention: retention, - rg: rg, - c: c, - } - t.ctx, t.cancel = context.WithCancel(context.Background()) - return t -} - -const revInterval = 5 * time.Minute - -// Run runs revision-based compactor. -func (t *Revision) Run() { - prev := int64(0) - go func() { - for { - select { - case <-t.ctx.Done(): - return - case <-t.clock.After(revInterval): - t.mu.Lock() - p := t.paused - t.mu.Unlock() - if p { - continue - } - } - - rev := t.rg.Rev() - t.retention - if rev <= 0 || rev == prev { - continue - } - - plog.Noticef("Starting auto-compaction at revision %d (retention: %d revisions)", rev, t.retention) - _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev}) - if err == nil || err == mvcc.ErrCompacted { - prev = rev - plog.Noticef("Finished auto-compaction at revision %d", rev) - } else { - plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err) - plog.Noticef("Retry after %v", revInterval) - } - } - }() -} - -// Stop stops revision-based compactor. -func (t *Revision) Stop() { - t.cancel() -} - -// Pause pauses revision-based compactor. -func (t *Revision) Pause() { - t.mu.Lock() - defer t.mu.Unlock() - t.paused = true -} - -// Resume resumes revision-based compactor. -func (t *Revision) Resume() { - t.mu.Lock() - defer t.mu.Unlock() - t.paused = false -} diff --git a/vendor/github.com/coreos/etcd/embed/serve.go b/vendor/github.com/coreos/etcd/embed/serve.go deleted file mode 100644 index 62b8b5780..000000000 --- a/vendor/github.com/coreos/etcd/embed/serve.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package embed - -import ( - "context" - "io/ioutil" - defaultLog "log" - "net" - "net/http" - "strings" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3client" - "github.com/coreos/etcd/etcdserver/api/v3election" - "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" - v3electiongw "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw" - "github.com/coreos/etcd/etcdserver/api/v3lock" - "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" - v3lockgw "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw" - "github.com/coreos/etcd/etcdserver/api/v3rpc" - etcdservergw "github.com/coreos/etcd/etcdserver/etcdserverpb/gw" - "github.com/coreos/etcd/pkg/debugutil" - "github.com/coreos/etcd/pkg/transport" - - gw "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/soheilhy/cmux" - "github.com/tmc/grpc-websocket-proxy/wsproxy" - "golang.org/x/net/trace" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" -) - -type serveCtx struct { - l net.Listener - addr string - secure bool - insecure bool - - ctx context.Context - cancel context.CancelFunc - - userHandlers map[string]http.Handler - serviceRegister func(*grpc.Server) - serversC chan *servers -} - -type servers struct { - secure bool - grpc *grpc.Server - http *http.Server -} - -func newServeCtx() *serveCtx { - ctx, cancel := context.WithCancel(context.Background()) - return &serveCtx{ctx: ctx, cancel: cancel, userHandlers: make(map[string]http.Handler), - serversC: make(chan *servers, 2), // in case sctx.insecure,sctx.secure true - } -} - -// serve accepts incoming connections on the listener l, -// creating a new service goroutine for each. The service goroutines -// read requests and then call handler to reply to them. -func (sctx *serveCtx) serve( - s *etcdserver.EtcdServer, - tlsinfo *transport.TLSInfo, - handler http.Handler, - errHandler func(error), - gopts ...grpc.ServerOption) (err error) { - logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0) - <-s.ReadyNotify() - plog.Info("ready to serve client requests") - - m := cmux.New(sctx.l) - v3c := v3client.New(s) - servElection := v3election.NewElectionServer(v3c) - servLock := v3lock.NewLockServer(v3c) - - var gs *grpc.Server - defer func() { - if err != nil && gs != nil { - gs.Stop() - } - }() - - if sctx.insecure { - gs = v3rpc.Server(s, nil, gopts...) - v3electionpb.RegisterElectionServer(gs, servElection) - v3lockpb.RegisterLockServer(gs, servLock) - if sctx.serviceRegister != nil { - sctx.serviceRegister(gs) - } - grpcl := m.Match(cmux.HTTP2()) - go func() { errHandler(gs.Serve(grpcl)) }() - - var gwmux *gw.ServeMux - gwmux, err = sctx.registerGateway([]grpc.DialOption{grpc.WithInsecure()}) - if err != nil { - return err - } - - httpmux := sctx.createMux(gwmux, handler) - - srvhttp := &http.Server{ - Handler: wrapMux(httpmux), - ErrorLog: logger, // do not log user error - } - httpl := m.Match(cmux.HTTP1()) - go func() { errHandler(srvhttp.Serve(httpl)) }() - - sctx.serversC <- &servers{grpc: gs, http: srvhttp} - plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String()) - } - - if sctx.secure { - tlscfg, tlsErr := tlsinfo.ServerConfig() - if tlsErr != nil { - return tlsErr - } - gs = v3rpc.Server(s, tlscfg, gopts...) 
- v3electionpb.RegisterElectionServer(gs, servElection) - v3lockpb.RegisterLockServer(gs, servLock) - if sctx.serviceRegister != nil { - sctx.serviceRegister(gs) - } - handler = grpcHandlerFunc(gs, handler) - - dtls := tlscfg.Clone() - // trust local server - dtls.InsecureSkipVerify = true - creds := credentials.NewTLS(dtls) - opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)} - var gwmux *gw.ServeMux - gwmux, err = sctx.registerGateway(opts) - if err != nil { - return err - } - - var tlsl net.Listener - tlsl, err = transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo) - if err != nil { - return err - } - // TODO: add debug flag; enable logging when debug flag is set - httpmux := sctx.createMux(gwmux, handler) - - srv := &http.Server{ - Handler: wrapMux(httpmux), - TLSConfig: tlscfg, - ErrorLog: logger, // do not log user error - } - go func() { errHandler(srv.Serve(tlsl)) }() - - sctx.serversC <- &servers{secure: true, grpc: gs, http: srv} - plog.Infof("serving client requests on %s", sctx.l.Addr().String()) - } - - close(sctx.serversC) - return m.Serve() -} - -// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC -// connections or otherHandler otherwise. Given in gRPC docs. -func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler { - if otherHandler == nil { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - grpcServer.ServeHTTP(w, r) - }) - } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { - grpcServer.ServeHTTP(w, r) - } else { - otherHandler.ServeHTTP(w, r) - } - }) -} - -type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error - -func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) { - ctx := sctx.ctx - conn, err := grpc.DialContext(ctx, sctx.addr, opts...) 
- if err != nil { - return nil, err - } - gwmux := gw.NewServeMux() - - handlers := []registerHandlerFunc{ - etcdservergw.RegisterKVHandler, - etcdservergw.RegisterWatchHandler, - etcdservergw.RegisterLeaseHandler, - etcdservergw.RegisterClusterHandler, - etcdservergw.RegisterMaintenanceHandler, - etcdservergw.RegisterAuthHandler, - v3lockgw.RegisterLockHandler, - v3electiongw.RegisterElectionHandler, - } - for _, h := range handlers { - if err := h(ctx, gwmux, conn); err != nil { - return nil, err - } - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr) - } - }() - - return gwmux, nil -} - -func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux { - httpmux := http.NewServeMux() - for path, h := range sctx.userHandlers { - httpmux.Handle(path, h) - } - - httpmux.Handle( - "/v3beta/", - wsproxy.WebsocketProxy( - gwmux, - wsproxy.WithRequestMutator( - // Default to the POST method for streams - func(incoming *http.Request, outgoing *http.Request) *http.Request { - outgoing.Method = "POST" - return outgoing - }, - ), - ), - ) - if handler != nil { - httpmux.Handle("/", handler) - } - return httpmux -} - -// wraps HTTP multiplexer to mute requests to /v3alpha -// TODO: deprecate this in 3.4 release -func wrapMux(mux *http.ServeMux) http.Handler { return &v3alphaMutator{mux: mux} } - -type v3alphaMutator struct { - mux *http.ServeMux -} - -func (m *v3alphaMutator) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - if req != nil && req.URL != nil && strings.HasPrefix(req.URL.Path, "/v3alpha/") { - req.URL.Path = strings.Replace(req.URL.Path, "/v3alpha/", "/v3beta/", 1) - } - m.mux.ServeHTTP(rw, req) -} - -func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) { - if sctx.userHandlers[s] != nil { - plog.Warningf("path %s already registered by user handler", s) - return - } - sctx.userHandlers[s] = h -} - -func (sctx *serveCtx) registerPprof() { - for p, h := range debugutil.PProfHandlers() { - sctx.registerUserHandler(p, h) - } -} - -func (sctx *serveCtx) registerTrace() { - reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) } - sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf)) - evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) } - sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf)) -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go deleted file mode 100644 index 0a9213b01..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package etcdhttp - -import ( - "encoding/json" - "net/http" - - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/lease/leasehttp" - "github.com/coreos/etcd/rafthttp" -) - -const ( - peerMembersPrefix = "/members" -) - -// NewPeerHandler generates an http.Handler to handle etcd peer requests. -func NewPeerHandler(s etcdserver.ServerPeer) http.Handler { - return newPeerHandler(s.Cluster(), s.RaftHandler(), s.LeaseHandler()) -} - -func newPeerHandler(cluster api.Cluster, raftHandler http.Handler, leaseHandler http.Handler) http.Handler { - mh := &peerMembersHandler{ - cluster: cluster, - } - - mux := http.NewServeMux() - mux.HandleFunc("/", http.NotFound) - mux.Handle(rafthttp.RaftPrefix, raftHandler) - mux.Handle(rafthttp.RaftPrefix+"/", raftHandler) - mux.Handle(peerMembersPrefix, mh) - if leaseHandler != nil { - mux.Handle(leasehttp.LeasePrefix, leaseHandler) - mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler) - } - mux.HandleFunc(versionPath, versionHandler(cluster, serveVersion)) - return mux -} - -type peerMembersHandler struct { - cluster api.Cluster -} - -func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r, "GET") { - return - } - w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) - - if r.URL.Path != peerMembersPrefix { - http.Error(w, "bad path", http.StatusBadRequest) - return - } - ms := h.cluster.Members() - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(ms); err != nil { - plog.Warningf("failed to encode members response (%v)", err) - } -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go deleted file mode 100644 index f44862a46..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "sort" - "time" - - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/version" - "github.com/coreos/go-semver/semver" -) - -// isMemberBootstrapped tries to check if the given member has been bootstrapped -// in the given cluster. -func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool { - rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), timeout, false, rt) - if err != nil { - return false - } - id := cl.MemberByName(member).ID - m := rcl.Member(id) - if m == nil { - return false - } - if len(m.ClientURLs) > 0 { - return true - } - return false -} - -// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and -// attempts to construct a Cluster by accessing the members endpoint on one of -// these URLs. The first URL to provide a response is used. 
If no URLs provide -// a response, or a Cluster cannot be successfully created from a received -// response, an error is returned. -// Each request has a 10-second timeout. Because the upper limit of TTL is 5s, -// 10 second is enough for building connection and finishing request. -func GetClusterFromRemotePeers(urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) { - return getClusterFromRemotePeers(urls, 10*time.Second, true, rt) -} - -// If logerr is true, it prints out more error messages. -func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) { - cc := &http.Client{ - Transport: rt, - Timeout: timeout, - } - for _, u := range urls { - resp, err := cc.Get(u + "/members") - if err != nil { - if logerr { - plog.Warningf("could not get cluster response from %s: %v", u, err) - } - continue - } - b, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - if logerr { - plog.Warningf("could not read the body of cluster response: %v", err) - } - continue - } - var membs []*membership.Member - if err = json.Unmarshal(b, &membs); err != nil { - if logerr { - plog.Warningf("could not unmarshal cluster response: %v", err) - } - continue - } - id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID")) - if err != nil { - if logerr { - plog.Warningf("could not parse the cluster ID from cluster res: %v", err) - } - continue - } - - // check the length of membership members - // if the membership members are present then prepare and return raft cluster - // if membership members are not present then the raft cluster formed will be - // an invalid empty cluster hence return failed to get raft cluster member(s) from the given urls error - if len(membs) > 0 { - return membership.NewClusterFromMembers("", id, membs), nil - } - - return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.") - } - return nil, fmt.Errorf("could not retrieve cluster information from the given urls") -} - -// getRemotePeerURLs returns peer urls of remote members in the cluster. The -// returned list is sorted in ascending lexicographical order. -func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string { - us := make([]string, 0) - for _, m := range cl.Members() { - if m.Name == local { - continue - } - us = append(us, m.PeerURLs...) - } - sort.Strings(us) - return us -} - -// getVersions returns the versions of the members in the given cluster. -// The key of the returned map is the member's ID. The value of the returned map -// is the semver versions string, including server and cluster. -// If it fails to get the version of a member, the key will be nil. -func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions { - members := cl.Members() - vers := make(map[string]*version.Versions) - for _, m := range members { - if m.ID == local { - cv := "not_decided" - if cl.Version() != nil { - cv = cl.Version().String() - } - vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv} - continue - } - ver, err := getVersion(m, rt) - if err != nil { - plog.Warningf("cannot get the version of member %s (%v)", m.ID, err) - vers[m.ID.String()] = nil - } else { - vers[m.ID.String()] = ver - } - } - return vers -} - -// decideClusterVersion decides the cluster version based on the versions map. -// The returned version is the min server version in the map, or nil if the min -// version in unknown. 
-func decideClusterVersion(vers map[string]*version.Versions) *semver.Version { - var cv *semver.Version - lv := semver.Must(semver.NewVersion(version.Version)) - - for mid, ver := range vers { - if ver == nil { - return nil - } - v, err := semver.NewVersion(ver.Server) - if err != nil { - plog.Errorf("cannot understand the version of member %s (%v)", mid, err) - return nil - } - if lv.LessThan(*v) { - plog.Warningf("the local etcd version %s is not up-to-date", lv.String()) - plog.Warningf("member %s has a higher version %s", mid, ver.Server) - } - if cv == nil { - cv = v - } else if v.LessThan(*cv) { - cv = v - } - } - return cv -} - -// isCompatibleWithCluster return true if the local member has a compatible version with -// the current running cluster. -// The version is considered as compatible when at least one of the other members in the cluster has a -// cluster version in the range of [MinClusterVersion, Version] and no known members has a cluster version -// out of the range. -// We set this rule since when the local member joins, another member might be offline. -func isCompatibleWithCluster(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool { - vers := getVersions(cl, local, rt) - minV := semver.Must(semver.NewVersion(version.MinClusterVersion)) - maxV := semver.Must(semver.NewVersion(version.Version)) - maxV = &semver.Version{ - Major: maxV.Major, - Minor: maxV.Minor, - } - - return isCompatibleWithVers(vers, local, minV, maxV) -} - -func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool { - var ok bool - for id, v := range vers { - // ignore comparison with local version - if id == local.String() { - continue - } - if v == nil { - continue - } - clusterv, err := semver.NewVersion(v.Cluster) - if err != nil { - plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err) - continue - } - if clusterv.LessThan(*minV) { - plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String()) - return false - } - if maxV.LessThan(*clusterv) { - plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String()) - return false - } - ok = true - } - return ok -} - -// getVersion returns the Versions of the given member via its -// peerURLs. Returns the last error if it fails to get the version. 
-func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, error) { - cc := &http.Client{ - Transport: rt, - } - var ( - err error - resp *http.Response - ) - - for _, u := range m.PeerURLs { - resp, err = cc.Get(u + "/version") - if err != nil { - plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err) - continue - } - var b []byte - b, err = ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err) - continue - } - var vers version.Versions - if err = json.Unmarshal(b, &vers); err != nil { - plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err) - continue - } - return &vers, nil - } - return nil, err -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/corrupt.go b/vendor/github.com/coreos/etcd/etcdserver/corrupt.go deleted file mode 100644 index d998ec590..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/corrupt.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package etcdserver - -import ( - "context" - "fmt" - "time" - - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/pkg/types" -) - -// CheckInitialHashKV compares initial hash values with its peers -// before serving any peer/client traffic. Only mismatch when hashes -// are different at requested revision, with same compact revision. 
-func (s *EtcdServer) CheckInitialHashKV() error { - if !s.Cfg.InitialCorruptCheck { - return nil - } - - plog.Infof("%s starting initial corruption check with timeout %v...", s.ID(), s.Cfg.ReqTimeout()) - h, rev, crev, err := s.kv.HashByRev(0) - if err != nil { - return fmt.Errorf("%s failed to fetch hash (%v)", s.ID(), err) - } - peers := s.getPeerHashKVs(rev) - mismatch := 0 - for _, p := range peers { - if p.resp != nil { - peerID := types.ID(p.resp.Header.MemberId) - if h != p.resp.Hash { - if crev == p.resp.CompactRevision { - plog.Errorf("%s's hash %d != %s's hash %d (revision %d, peer revision %d, compact revision %d)", s.ID(), h, peerID, p.resp.Hash, rev, p.resp.Header.Revision, crev) - mismatch++ - } else { - plog.Warningf("%s cannot check hash of peer(%s): peer has a different compact revision %d (revision:%d)", s.ID(), peerID, p.resp.CompactRevision, rev) - } - } - continue - } - if p.err != nil { - switch p.err { - case rpctypes.ErrFutureRev: - plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: peer is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error()) - case rpctypes.ErrCompacted: - plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: local node is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error()) - } - } - } - if mismatch > 0 { - return fmt.Errorf("%s found data inconsistency with peers", s.ID()) - } - - plog.Infof("%s succeeded on initial corruption checking: no corruption", s.ID()) - return nil -} - -func (s *EtcdServer) monitorKVHash() { - t := s.Cfg.CorruptCheckTime - if t == 0 { - return - } - plog.Infof("enabled corruption checking with %s interval", t) - for { - select { - case <-s.stopping: - return - case <-time.After(t): - } - if !s.isLeader() { - continue - } - if err := s.checkHashKV(); err != nil { - plog.Debugf("check hash kv failed %v", err) - } - } -} - -func (s *EtcdServer) checkHashKV() error { - h, rev, crev, err := s.kv.HashByRev(0) - if err != nil { - plog.Fatalf("failed to hash kv store (%v)", err) - } - peers := s.getPeerHashKVs(rev) - - ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) - err = s.linearizableReadNotify(ctx) - cancel() - if err != nil { - return err - } - - h2, rev2, crev2, err := s.kv.HashByRev(0) - if err != nil { - plog.Warningf("failed to hash kv store (%v)", err) - return err - } - - alarmed := false - mismatch := func(id uint64) { - if alarmed { - return - } - alarmed = true - a := &pb.AlarmRequest{ - MemberID: uint64(id), - Action: pb.AlarmRequest_ACTIVATE, - Alarm: pb.AlarmType_CORRUPT, - } - s.goAttach(func() { - s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a}) - }) - } - - if h2 != h && rev2 == rev && crev == crev2 { - plog.Warningf("mismatched hashes %d and %d for revision %d", h, h2, rev) - mismatch(uint64(s.ID())) - } - - for _, p := range peers { - if p.resp == nil { - continue - } - id := p.resp.Header.MemberId - - // leader expects follower's latest revision less than or equal to leader's - if p.resp.Header.Revision > rev2 { - plog.Warningf( - "revision %d from member %v, expected at most %d", - p.resp.Header.Revision, - types.ID(id), - rev2) - mismatch(id) - } - - // leader expects follower's latest compact revision less than or equal to leader's - if p.resp.CompactRevision > crev2 { - plog.Warningf( - "compact revision %d from member %v, expected at most %d", - p.resp.CompactRevision, - types.ID(id), - crev2, - ) - mismatch(id) - } - - // follower's compact revision is leader's old one, then hashes must match - if p.resp.CompactRevision == crev 
&& p.resp.Hash != h { - plog.Warningf( - "hash %d at revision %d from member %v, expected hash %d", - p.resp.Hash, - rev, - types.ID(id), - h, - ) - mismatch(id) - } - } - return nil -} - -type peerHashKVResp struct { - resp *clientv3.HashKVResponse - err error - eps []string -} - -func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) { - // TODO: handle the case when "s.cluster.Members" have not - // been populated (e.g. no snapshot to load from disk) - mbs := s.cluster.Members() - pURLs := make([][]string, len(mbs)) - for _, m := range mbs { - if m.ID == s.ID() { - continue - } - pURLs = append(pURLs, m.PeerURLs) - } - - for _, purls := range pURLs { - if len(purls) == 0 { - continue - } - cli, cerr := clientv3.New(clientv3.Config{ - DialTimeout: s.Cfg.ReqTimeout(), - Endpoints: purls, - }) - if cerr != nil { - plog.Warningf("%s failed to create client to peer %q for hash checking (%q)", s.ID(), purls, cerr.Error()) - continue - } - - respsLen := len(resps) - for _, c := range cli.Endpoints() { - ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) - var resp *clientv3.HashKVResponse - resp, cerr = cli.HashKV(ctx, c, rev) - cancel() - if cerr == nil { - resps = append(resps, &peerHashKVResp{resp: resp}) - break - } - plog.Warningf("%s hash-kv error %q on peer %q with revision %d", s.ID(), cerr.Error(), c, rev) - } - cli.Close() - - if respsLen == len(resps) { - resps = append(resps, &peerHashKVResp{err: cerr, eps: purls}) - } - } - return resps -} - -type applierV3Corrupt struct { - applierV3 -} - -func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} } - -func (a *applierV3Corrupt) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { - return nil, ErrCorrupt -} - -func (a *applierV3Corrupt) Range(txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) { - return nil, ErrCorrupt -} - -func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - return nil, ErrCorrupt -} - -func (a *applierV3Corrupt) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { - return nil, ErrCorrupt -} - -func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) { - return nil, nil, ErrCorrupt -} - -func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - return nil, ErrCorrupt -} - -func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - return nil, ErrCorrupt -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go deleted file mode 100644 index ba5c5411d..000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go +++ /dev/null @@ -1,518 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package membership - -import ( - "bytes" - "context" - "crypto/sha1" - "encoding/binary" - "encoding/json" - "fmt" - "path" - "sort" - "strings" - "sync" - "time" - - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/netutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/store" - "github.com/coreos/etcd/version" - - "github.com/coreos/go-semver/semver" - "github.com/prometheus/client_golang/prometheus" -) - -// RaftCluster is a list of Members that belong to the same raft cluster -type RaftCluster struct { - id types.ID - token string - - store store.Store - be backend.Backend - - sync.Mutex // guards the fields below - version *semver.Version - members map[types.ID]*Member - // removed contains the ids of removed members in the cluster. - // removed id cannot be reused. - removed map[types.ID]bool -} - -func NewClusterFromURLsMap(token string, urlsmap types.URLsMap) (*RaftCluster, error) { - c := NewCluster(token) - for name, urls := range urlsmap { - m := NewMember(name, urls, token, nil) - if _, ok := c.members[m.ID]; ok { - return nil, fmt.Errorf("member exists with identical ID %v", m) - } - if uint64(m.ID) == raft.None { - return nil, fmt.Errorf("cannot use %x as member id", raft.None) - } - c.members[m.ID] = m - } - c.genID() - return c, nil -} - -func NewClusterFromMembers(token string, id types.ID, membs []*Member) *RaftCluster { - c := NewCluster(token) - c.id = id - for _, m := range membs { - c.members[m.ID] = m - } - return c -} - -func NewCluster(token string) *RaftCluster { - return &RaftCluster{ - token: token, - members: make(map[types.ID]*Member), - removed: make(map[types.ID]bool), - } -} - -func (c *RaftCluster) ID() types.ID { return c.id } - -func (c *RaftCluster) Members() []*Member { - c.Lock() - defer c.Unlock() - var ms MembersByID - for _, m := range c.members { - ms = append(ms, m.Clone()) - } - sort.Sort(ms) - return []*Member(ms) -} - -func (c *RaftCluster) Member(id types.ID) *Member { - c.Lock() - defer c.Unlock() - return c.members[id].Clone() -} - -// MemberByName returns a Member with the given name if exists. -// If more than one member has the given name, it will panic. -func (c *RaftCluster) MemberByName(name string) *Member { - c.Lock() - defer c.Unlock() - var memb *Member - for _, m := range c.members { - if m.Name == name { - if memb != nil { - plog.Panicf("two members with the given name %q exist", name) - } - memb = m - } - } - return memb.Clone() -} - -func (c *RaftCluster) MemberIDs() []types.ID { - c.Lock() - defer c.Unlock() - var ids []types.ID - for _, m := range c.members { - ids = append(ids, m.ID) - } - sort.Sort(types.IDSlice(ids)) - return ids -} - -func (c *RaftCluster) IsIDRemoved(id types.ID) bool { - c.Lock() - defer c.Unlock() - return c.removed[id] -} - -// PeerURLs returns a list of all peer addresses. -// The returned list is sorted in ascending lexicographical order. -func (c *RaftCluster) PeerURLs() []string { - c.Lock() - defer c.Unlock() - urls := make([]string, 0) - for _, p := range c.members { - urls = append(urls, p.PeerURLs...) - } - sort.Strings(urls) - return urls -} - -// ClientURLs returns a list of all client addresses. -// The returned list is sorted in ascending lexicographical order. -func (c *RaftCluster) ClientURLs() []string { - c.Lock() - defer c.Unlock() - urls := make([]string, 0) - for _, p := range c.members { - urls = append(urls, p.ClientURLs...) 
- } - sort.Strings(urls) - return urls -} - -func (c *RaftCluster) String() string { - c.Lock() - defer c.Unlock() - b := &bytes.Buffer{} - fmt.Fprintf(b, "{ClusterID:%s ", c.id) - var ms []string - for _, m := range c.members { - ms = append(ms, fmt.Sprintf("%+v", m)) - } - fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " ")) - var ids []string - for id := range c.removed { - ids = append(ids, id.String()) - } - fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " ")) - return b.String() -} - -func (c *RaftCluster) genID() { - mIDs := c.MemberIDs() - b := make([]byte, 8*len(mIDs)) - for i, id := range mIDs { - binary.BigEndian.PutUint64(b[8*i:], uint64(id)) - } - hash := sha1.Sum(b) - c.id = types.ID(binary.BigEndian.Uint64(hash[:8])) -} - -func (c *RaftCluster) SetID(id types.ID) { c.id = id } - -func (c *RaftCluster) SetStore(st store.Store) { c.store = st } - -func (c *RaftCluster) SetBackend(be backend.Backend) { - c.be = be - mustCreateBackendBuckets(c.be) -} - -func (c *RaftCluster) Recover(onSet func(*semver.Version)) { - c.Lock() - defer c.Unlock() - - c.members, c.removed = membersFromStore(c.store) - c.version = clusterVersionFromStore(c.store) - mustDetectDowngrade(c.version) - onSet(c.version) - - for _, m := range c.members { - plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.id) - } - if c.version != nil { - plog.Infof("set the cluster version to %v from store", version.Cluster(c.version.String())) - } -} - -// ValidateConfigurationChange takes a proposed ConfChange and -// ensures that it is still valid. -func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error { - members, removed := membersFromStore(c.store) - id := types.ID(cc.NodeID) - if removed[id] { - return ErrIDRemoved - } - switch cc.Type { - case raftpb.ConfChangeAddNode: - if members[id] != nil { - return ErrIDExists - } - urls := make(map[string]bool) - for _, m := range members { - for _, u := range m.PeerURLs { - urls[u] = true - } - } - m := new(Member) - if err := json.Unmarshal(cc.Context, m); err != nil { - plog.Panicf("unmarshal member should never fail: %v", err) - } - for _, u := range m.PeerURLs { - if urls[u] { - return ErrPeerURLexists - } - } - case raftpb.ConfChangeRemoveNode: - if members[id] == nil { - return ErrIDNotFound - } - case raftpb.ConfChangeUpdateNode: - if members[id] == nil { - return ErrIDNotFound - } - urls := make(map[string]bool) - for _, m := range members { - if m.ID == id { - continue - } - for _, u := range m.PeerURLs { - urls[u] = true - } - } - m := new(Member) - if err := json.Unmarshal(cc.Context, m); err != nil { - plog.Panicf("unmarshal member should never fail: %v", err) - } - for _, u := range m.PeerURLs { - if urls[u] { - return ErrPeerURLexists - } - } - default: - plog.Panicf("ConfChange type should be either AddNode, RemoveNode or UpdateNode") - } - return nil -} - -// AddMember adds a new Member into the cluster, and saves the given member's -// raftAttributes into the store. The given member should have empty attributes. -// A Member with a matching id must not exist. -func (c *RaftCluster) AddMember(m *Member) { - c.Lock() - defer c.Unlock() - if c.store != nil { - mustSaveMemberToStore(c.store, m) - } - if c.be != nil { - mustSaveMemberToBackend(c.be, m) - } - - c.members[m.ID] = m - - plog.Infof("added member %s %v to cluster %s", m.ID, m.PeerURLs, c.id) -} - -// RemoveMember removes a member from the store. -// The given id MUST exist, or the function panics. 
-func (c *RaftCluster) RemoveMember(id types.ID) { - c.Lock() - defer c.Unlock() - if c.store != nil { - mustDeleteMemberFromStore(c.store, id) - } - if c.be != nil { - mustDeleteMemberFromBackend(c.be, id) - } - - delete(c.members, id) - c.removed[id] = true - - plog.Infof("removed member %s from cluster %s", id, c.id) -} - -func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) { - c.Lock() - defer c.Unlock() - if m, ok := c.members[id]; ok { - m.Attributes = attr - if c.store != nil { - mustUpdateMemberAttrInStore(c.store, m) - } - if c.be != nil { - mustSaveMemberToBackend(c.be, m) - } - return - } - _, ok := c.removed[id] - if !ok { - plog.Panicf("error updating attributes of unknown member %s", id) - } - plog.Warningf("skipped updating attributes of removed member %s", id) -} - -func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) { - c.Lock() - defer c.Unlock() - - c.members[id].RaftAttributes = raftAttr - if c.store != nil { - mustUpdateMemberInStore(c.store, c.members[id]) - } - if c.be != nil { - mustSaveMemberToBackend(c.be, c.members[id]) - } - - plog.Noticef("updated member %s %v in cluster %s", id, raftAttr.PeerURLs, c.id) -} - -func (c *RaftCluster) Version() *semver.Version { - c.Lock() - defer c.Unlock() - if c.version == nil { - return nil - } - return semver.Must(semver.NewVersion(c.version.String())) -} - -func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*semver.Version)) { - c.Lock() - defer c.Unlock() - if c.version != nil { - plog.Noticef("updated the cluster version from %v to %v", version.Cluster(c.version.String()), version.Cluster(ver.String())) - } else { - plog.Noticef("set the initial cluster version to %v", version.Cluster(ver.String())) - } - oldVer := c.version - c.version = ver - mustDetectDowngrade(c.version) - if c.store != nil { - mustSaveClusterVersionToStore(c.store, ver) - } - if c.be != nil { - mustSaveClusterVersionToBackend(c.be, ver) - } - if oldVer != nil { - ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(oldVer.String())}).Set(0) - } - ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(ver.String())}).Set(1) - onSet(ver) -} - -func (c *RaftCluster) IsReadyToAddNewMember() bool { - nmembers := 1 - nstarted := 0 - - for _, member := range c.members { - if member.IsStarted() { - nstarted++ - } - nmembers++ - } - - if nstarted == 1 && nmembers == 2 { - // a case of adding a new node to 1-member cluster for restoring cluster data - // https://github.com/coreos/etcd/blob/master/Documentation/v2/admin_guide.md#restoring-the-cluster - - plog.Debugf("The number of started member is 1. 
This cluster can accept add member request.") - return true - } - - nquorum := nmembers/2 + 1 - if nstarted < nquorum { - plog.Warningf("Reject add member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum) - return false - } - - return true -} - -func (c *RaftCluster) IsReadyToRemoveMember(id uint64) bool { - nmembers := 0 - nstarted := 0 - - for _, member := range c.members { - if uint64(member.ID) == id { - continue - } - - if member.IsStarted() { - nstarted++ - } - nmembers++ - } - - nquorum := nmembers/2 + 1 - if nstarted < nquorum { - plog.Warningf("Reject remove member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum) - return false - } - - return true -} - -func membersFromStore(st store.Store) (map[types.ID]*Member, map[types.ID]bool) { - members := make(map[types.ID]*Member) - removed := make(map[types.ID]bool) - e, err := st.Get(StoreMembersPrefix, true, true) - if err != nil { - if isKeyNotFound(err) { - return members, removed - } - plog.Panicf("get storeMembers should never fail: %v", err) - } - for _, n := range e.Node.Nodes { - var m *Member - m, err = nodeToMember(n) - if err != nil { - plog.Panicf("nodeToMember should never fail: %v", err) - } - members[m.ID] = m - } - - e, err = st.Get(storeRemovedMembersPrefix, true, true) - if err != nil { - if isKeyNotFound(err) { - return members, removed - } - plog.Panicf("get storeRemovedMembers should never fail: %v", err) - } - for _, n := range e.Node.Nodes { - removed[MustParseMemberIDFromKey(n.Key)] = true - } - return members, removed -} - -func clusterVersionFromStore(st store.Store) *semver.Version { - e, err := st.Get(path.Join(storePrefix, "version"), false, false) - if err != nil { - if isKeyNotFound(err) { - return nil - } - plog.Panicf("unexpected error (%v) when getting cluster version from store", err) - } - return semver.Must(semver.NewVersion(*e.Node.Value)) -} - -// ValidateClusterAndAssignIDs validates the local cluster by matching the PeerURLs -// with the existing cluster. If the validation succeeds, it assigns the IDs -// from the existing cluster to the local cluster. -// If the validation fails, an error will be returned. 
-func ValidateClusterAndAssignIDs(local *RaftCluster, existing *RaftCluster) error { - ems := existing.Members() - lms := local.Members() - if len(ems) != len(lms) { - return fmt.Errorf("member count is unequal") - } - sort.Sort(MembersByPeerURLs(ems)) - sort.Sort(MembersByPeerURLs(lms)) - - ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) - defer cancel() - for i := range ems { - if ok, err := netutil.URLStringsEqual(ctx, ems[i].PeerURLs, lms[i].PeerURLs); !ok { - return fmt.Errorf("unmatched member while checking PeerURLs (%v)", err) - } - lms[i].ID = ems[i].ID - } - local.members = make(map[types.ID]*Member) - for _, m := range lms { - local.members[m.ID] = m - } - return nil -} - -func mustDetectDowngrade(cv *semver.Version) { - lv := semver.Must(semver.NewVersion(version.Version)) - // only keep major.minor version for comparison against cluster version - lv = &semver.Version{Major: lv.Major, Minor: lv.Minor} - if cv != nil && lv.LessThan(*cv) { - plog.Fatalf("cluster cannot be downgraded (current version: %s is lower than determined cluster version: %s).", version.Version, version.Cluster(cv.String())) - } -} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go deleted file mode 100644 index 0536de70e..000000000 --- a/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backend - -import ( - "bytes" - "math" - "sync" - - bolt "github.com/coreos/bbolt" -) - -// safeRangeBucket is a hack to avoid inadvertently reading duplicate keys; -// overwrites on a bucket should only fetch with limit=1, but safeRangeBucket -// is known to never overwrite any key so range is safe. -var safeRangeBucket = []byte("key") - -type ReadTx interface { - Lock() - Unlock() - - UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) - UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error -} - -type readTx struct { - // mu protects accesses to the txReadBuffer - mu sync.RWMutex - buf txReadBuffer - - // txmu protects accesses to buckets and tx on Range requests. 
- txmu sync.RWMutex - tx *bolt.Tx - buckets map[string]*bolt.Bucket -} - -func (rt *readTx) Lock() { rt.mu.RLock() } -func (rt *readTx) Unlock() { rt.mu.RUnlock() } - -func (rt *readTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { - if endKey == nil { - // forbid duplicates for single keys - limit = 1 - } - if limit <= 0 { - limit = math.MaxInt64 - } - if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) { - panic("do not use unsafeRange on non-keys bucket") - } - keys, vals := rt.buf.Range(bucketName, key, endKey, limit) - if int64(len(keys)) == limit { - return keys, vals - } - - // find/cache bucket - bn := string(bucketName) - rt.txmu.RLock() - bucket, ok := rt.buckets[bn] - rt.txmu.RUnlock() - if !ok { - rt.txmu.Lock() - bucket = rt.tx.Bucket(bucketName) - rt.buckets[bn] = bucket - rt.txmu.Unlock() - } - - // ignore missing bucket since may have been created in this batch - if bucket == nil { - return keys, vals - } - rt.txmu.Lock() - c := bucket.Cursor() - rt.txmu.Unlock() - - k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys))) - return append(k2, keys...), append(v2, vals...) -} - -func (rt *readTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error { - dups := make(map[string]struct{}) - getDups := func(k, v []byte) error { - dups[string(k)] = struct{}{} - return nil - } - visitNoDup := func(k, v []byte) error { - if _, ok := dups[string(k)]; ok { - return nil - } - return visitor(k, v) - } - if err := rt.buf.ForEach(bucketName, getDups); err != nil { - return err - } - rt.txmu.Lock() - err := unsafeForEach(rt.tx, bucketName, visitNoDup) - rt.txmu.Unlock() - if err != nil { - return err - } - return rt.buf.ForEach(bucketName, visitor) -} - -func (rt *readTx) reset() { - rt.buf.reset() - rt.buckets = make(map[string]*bolt.Bucket) - rt.tx = nil -} diff --git a/vendor/github.com/coreos/etcd/pkg/cors/cors.go b/vendor/github.com/coreos/etcd/pkg/cors/cors.go deleted file mode 100644 index 0c64f16a3..000000000 --- a/vendor/github.com/coreos/etcd/pkg/cors/cors.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cors handles cross-origin HTTP requests (CORS). 
-package cors - -import ( - "fmt" - "net/http" - "net/url" - "sort" - "strings" -) - -type CORSInfo map[string]bool - -// Set implements the flag.Value interface to allow users to define a list of CORS origins -func (ci *CORSInfo) Set(s string) error { - m := make(map[string]bool) - for _, v := range strings.Split(s, ",") { - v = strings.TrimSpace(v) - if v == "" { - continue - } - if v != "*" { - if _, err := url.Parse(v); err != nil { - return fmt.Errorf("Invalid CORS origin: %s", err) - } - } - m[v] = true - - } - *ci = CORSInfo(m) - return nil -} - -func (ci *CORSInfo) String() string { - o := make([]string, 0) - for k := range *ci { - o = append(o, k) - } - sort.StringSlice(o).Sort() - return strings.Join(o, ",") -} - -// OriginAllowed determines whether the server will allow a given CORS origin. -func (c CORSInfo) OriginAllowed(origin string) bool { - return c["*"] || c[origin] -} - -type CORSHandler struct { - Handler http.Handler - Info *CORSInfo -} - -// addHeader adds the correct cors headers given an origin -func (h *CORSHandler) addHeader(w http.ResponseWriter, origin string) { - w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE") - w.Header().Add("Access-Control-Allow-Origin", origin) - w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization") -} - -// ServeHTTP adds the correct CORS headers based on the origin and returns immediately -// with a 200 OK if the method is OPTIONS. -func (h *CORSHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // Write CORS header. - if h.Info.OriginAllowed("*") { - h.addHeader(w, "*") - } else if origin := req.Header.Get("Origin"); h.Info.OriginAllowed(origin) { - h.addHeader(w, origin) - } - - if req.Method == "OPTIONS" { - w.WriteHeader(http.StatusOK) - return - } - - h.Handler.ServeHTTP(w, req) -} diff --git a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go deleted file mode 100644 index 09f44e7c7..000000000 --- a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// borrowed from golang/net/context/ctxhttp/cancelreq.go - -// Package httputil provides HTTP utility functions. -package httputil - -import ( - "io" - "io/ioutil" - "net/http" -) - -// GracefulClose drains http.Response.Body until it hits EOF -// and closes it. This prevents TCP/TLS connections from closing, -// therefore available for reuse. -func GracefulClose(resp *http.Response) { - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() -} diff --git a/vendor/github.com/coreos/etcd/raft/progress.go b/vendor/github.com/coreos/etcd/raft/progress.go deleted file mode 100644 index ef3787db6..000000000 --- a/vendor/github.com/coreos/etcd/raft/progress.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import "fmt" - -const ( - ProgressStateProbe ProgressStateType = iota - ProgressStateReplicate - ProgressStateSnapshot -) - -type ProgressStateType uint64 - -var prstmap = [...]string{ - "ProgressStateProbe", - "ProgressStateReplicate", - "ProgressStateSnapshot", -} - -func (st ProgressStateType) String() string { return prstmap[uint64(st)] } - -// Progress represents a follower’s progress in the view of the leader. Leader maintains -// progresses of all followers, and sends entries to the follower based on its progress. -type Progress struct { - Match, Next uint64 - // State defines how the leader should interact with the follower. - // - // When in ProgressStateProbe, leader sends at most one replication message - // per heartbeat interval. It also probes actual progress of the follower. - // - // When in ProgressStateReplicate, leader optimistically increases next - // to the latest entry sent after sending replication message. This is - // an optimized state for fast replicating log entries to the follower. - // - // When in ProgressStateSnapshot, leader should have sent out snapshot - // before and stops sending any replication message. - State ProgressStateType - - // Paused is used in ProgressStateProbe. - // When Paused is true, raft should pause sending replication message to this peer. - Paused bool - // PendingSnapshot is used in ProgressStateSnapshot. - // If there is a pending snapshot, the pendingSnapshot will be set to the - // index of the snapshot. If pendingSnapshot is set, the replication process of - // this Progress will be paused. raft will not resend snapshot until the pending one - // is reported to be failed. - PendingSnapshot uint64 - - // RecentActive is true if the progress is recently active. Receiving any messages - // from the corresponding follower indicates the progress is active. - // RecentActive can be reset to false after an election timeout. - RecentActive bool - - // inflights is a sliding window for the inflight messages. - // Each inflight message contains one or more log entries. - // The max number of entries per message is defined in raft config as MaxSizePerMsg. - // Thus inflight effectively limits both the number of inflight messages - // and the bandwidth each Progress can use. - // When inflights is full, no more message should be sent. - // When a leader sends out a message, the index of the last - // entry should be added to inflights. The index MUST be added - // into inflights in order. - // When a leader receives a reply, the previous inflights should - // be freed by calling inflights.freeTo with the index of the last - // received entry. - ins *inflights - - // IsLearner is true if this progress is tracked for a learner. - IsLearner bool -} - -func (pr *Progress) resetState(state ProgressStateType) { - pr.Paused = false - pr.PendingSnapshot = 0 - pr.State = state - pr.ins.reset() -} - -func (pr *Progress) becomeProbe() { - // If the original state is ProgressStateSnapshot, progress knows that - // the pending snapshot has been sent to this peer successfully, then - // probes from pendingSnapshot + 1. 
- if pr.State == ProgressStateSnapshot { - pendingSnapshot := pr.PendingSnapshot - pr.resetState(ProgressStateProbe) - pr.Next = max(pr.Match+1, pendingSnapshot+1) - } else { - pr.resetState(ProgressStateProbe) - pr.Next = pr.Match + 1 - } -} - -func (pr *Progress) becomeReplicate() { - pr.resetState(ProgressStateReplicate) - pr.Next = pr.Match + 1 -} - -func (pr *Progress) becomeSnapshot(snapshoti uint64) { - pr.resetState(ProgressStateSnapshot) - pr.PendingSnapshot = snapshoti -} - -// maybeUpdate returns false if the given n index comes from an outdated message. -// Otherwise it updates the progress and returns true. -func (pr *Progress) maybeUpdate(n uint64) bool { - var updated bool - if pr.Match < n { - pr.Match = n - updated = true - pr.resume() - } - if pr.Next < n+1 { - pr.Next = n + 1 - } - return updated -} - -func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 } - -// maybeDecrTo returns false if the given to index comes from an out of order message. -// Otherwise it decreases the progress next index to min(rejected, last) and returns true. -func (pr *Progress) maybeDecrTo(rejected, last uint64) bool { - if pr.State == ProgressStateReplicate { - // the rejection must be stale if the progress has matched and "rejected" - // is smaller than "match". - if rejected <= pr.Match { - return false - } - // directly decrease next to match + 1 - pr.Next = pr.Match + 1 - return true - } - - // the rejection must be stale if "rejected" does not match next - 1 - if pr.Next-1 != rejected { - return false - } - - if pr.Next = min(rejected, last+1); pr.Next < 1 { - pr.Next = 1 - } - pr.resume() - return true -} - -func (pr *Progress) pause() { pr.Paused = true } -func (pr *Progress) resume() { pr.Paused = false } - -// IsPaused returns whether sending log entries to this node has been -// paused. A node may be paused because it has rejected recent -// MsgApps, is currently waiting for a snapshot, or has reached the -// MaxInflightMsgs limit. -func (pr *Progress) IsPaused() bool { - switch pr.State { - case ProgressStateProbe: - return pr.Paused - case ProgressStateReplicate: - return pr.ins.full() - case ProgressStateSnapshot: - return true - default: - panic("unexpected state") - } -} - -func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 } - -// needSnapshotAbort returns true if snapshot progress's Match -// is equal or higher than the pendingSnapshot. -func (pr *Progress) needSnapshotAbort() bool { - return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot -} - -func (pr *Progress) String() string { - return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot) -} - -type inflights struct { - // the starting index in the buffer - start int - // number of inflights in the buffer - count int - - // the size of the buffer - size int - - // buffer contains the index of the last entry - // inside one message. - buffer []uint64 -} - -func newInflights(size int) *inflights { - return &inflights{ - size: size, - } -} - -// add adds an inflight into inflights -func (in *inflights) add(inflight uint64) { - if in.full() { - panic("cannot add into a full inflights") - } - next := in.start + in.count - size := in.size - if next >= size { - next -= size - } - if next >= len(in.buffer) { - in.growBuf() - } - in.buffer[next] = inflight - in.count++ -} - -// grow the inflight buffer by doubling up to inflights.size. 
We grow on demand -// instead of preallocating to inflights.size to handle systems which have -// thousands of Raft groups per process. -func (in *inflights) growBuf() { - newSize := len(in.buffer) * 2 - if newSize == 0 { - newSize = 1 - } else if newSize > in.size { - newSize = in.size - } - newBuffer := make([]uint64, newSize) - copy(newBuffer, in.buffer) - in.buffer = newBuffer -} - -// freeTo frees the inflights smaller or equal to the given `to` flight. -func (in *inflights) freeTo(to uint64) { - if in.count == 0 || to < in.buffer[in.start] { - // out of the left side of the window - return - } - - idx := in.start - var i int - for i = 0; i < in.count; i++ { - if to < in.buffer[idx] { // found the first large inflight - break - } - - // increase index and maybe rotate - size := in.size - if idx++; idx >= size { - idx -= size - } - } - // free i inflights and set new start index - in.count -= i - in.start = idx - if in.count == 0 { - // inflights is empty, reset the start index so that we don't grow the - // buffer unnecessarily. - in.start = 0 - } -} - -func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) } - -// full returns true if the inflights is full. -func (in *inflights) full() bool { - return in.count == in.size -} - -// resets frees all inflights. -func (in *inflights) reset() { - in.count = 0 - in.start = 0 -} diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto b/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto deleted file mode 100644 index 644ce7b8f..000000000 --- a/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto +++ /dev/null @@ -1,95 +0,0 @@ -syntax = "proto2"; -package raftpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_enum_prefix_all) = false; - -enum EntryType { - EntryNormal = 0; - EntryConfChange = 1; -} - -message Entry { - optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations - optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations - optional EntryType Type = 1 [(gogoproto.nullable) = false]; - optional bytes Data = 4; -} - -message SnapshotMetadata { - optional ConfState conf_state = 1 [(gogoproto.nullable) = false]; - optional uint64 index = 2 [(gogoproto.nullable) = false]; - optional uint64 term = 3 [(gogoproto.nullable) = false]; -} - -message Snapshot { - optional bytes data = 1; - optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false]; -} - -enum MessageType { - MsgHup = 0; - MsgBeat = 1; - MsgProp = 2; - MsgApp = 3; - MsgAppResp = 4; - MsgVote = 5; - MsgVoteResp = 6; - MsgSnap = 7; - MsgHeartbeat = 8; - MsgHeartbeatResp = 9; - MsgUnreachable = 10; - MsgSnapStatus = 11; - MsgCheckQuorum = 12; - MsgTransferLeader = 13; - MsgTimeoutNow = 14; - MsgReadIndex = 15; - MsgReadIndexResp = 16; - MsgPreVote = 17; - MsgPreVoteResp = 18; -} - -message Message { - optional MessageType type = 1 [(gogoproto.nullable) = false]; - optional uint64 to = 2 [(gogoproto.nullable) = false]; - optional uint64 from = 3 [(gogoproto.nullable) = false]; - optional uint64 term = 4 [(gogoproto.nullable) = false]; - optional uint64 logTerm = 5 [(gogoproto.nullable) = false]; - optional uint64 index = 6 [(gogoproto.nullable) = false]; - repeated Entry entries = 7 [(gogoproto.nullable) = false]; - optional uint64 commit = 8 [(gogoproto.nullable) = 
false]; - optional Snapshot snapshot = 9 [(gogoproto.nullable) = false]; - optional bool reject = 10 [(gogoproto.nullable) = false]; - optional uint64 rejectHint = 11 [(gogoproto.nullable) = false]; - optional bytes context = 12; -} - -message HardState { - optional uint64 term = 1 [(gogoproto.nullable) = false]; - optional uint64 vote = 2 [(gogoproto.nullable) = false]; - optional uint64 commit = 3 [(gogoproto.nullable) = false]; -} - -message ConfState { - repeated uint64 nodes = 1; - repeated uint64 learners = 2; -} - -enum ConfChangeType { - ConfChangeAddNode = 0; - ConfChangeRemoveNode = 1; - ConfChangeUpdateNode = 2; - ConfChangeAddLearnerNode = 3; -} - -message ConfChange { - optional uint64 ID = 1 [(gogoproto.nullable) = false]; - optional ConfChangeType Type = 2 [(gogoproto.nullable) = false]; - optional uint64 NodeID = 3 [(gogoproto.nullable) = false]; - optional bytes Context = 4; -} diff --git a/vendor/github.com/coreos/etcd/raft/util.go b/vendor/github.com/coreos/etcd/raft/util.go deleted file mode 100644 index f4141fe65..000000000 --- a/vendor/github.com/coreos/etcd/raft/util.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "bytes" - "fmt" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -func (st StateType) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf("%q", st.String())), nil -} - -// uint64Slice implements sort interface -type uint64Slice []uint64 - -func (p uint64Slice) Len() int { return len(p) } -func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func min(a, b uint64) uint64 { - if a > b { - return b - } - return a -} - -func max(a, b uint64) uint64 { - if a > b { - return a - } - return b -} - -func IsLocalMsg(msgt pb.MessageType) bool { - return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable || - msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum -} - -func IsResponseMsg(msgt pb.MessageType) bool { - return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp -} - -// voteResponseType maps vote and prevote message types to their corresponding responses. -func voteRespMsgType(msgt pb.MessageType) pb.MessageType { - switch msgt { - case pb.MsgVote: - return pb.MsgVoteResp - case pb.MsgPreVote: - return pb.MsgPreVoteResp - default: - panic(fmt.Sprintf("not a vote message: %s", msgt)) - } -} - -// EntryFormatter can be implemented by the application to provide human-readable formatting -// of entry data. Nil is a valid EntryFormatter and will use a default format. -type EntryFormatter func([]byte) string - -// DescribeMessage returns a concise human-readable description of a -// Message for debugging. 
-func DescribeMessage(m pb.Message, f EntryFormatter) string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index) - if m.Reject { - fmt.Fprintf(&buf, " Rejected") - if m.RejectHint != 0 { - fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint) - } - } - if m.Commit != 0 { - fmt.Fprintf(&buf, " Commit:%d", m.Commit) - } - if len(m.Entries) > 0 { - fmt.Fprintf(&buf, " Entries:[") - for i, e := range m.Entries { - if i != 0 { - buf.WriteString(", ") - } - buf.WriteString(DescribeEntry(e, f)) - } - fmt.Fprintf(&buf, "]") - } - if !IsEmptySnap(m.Snapshot) { - fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot) - } - return buf.String() -} - -// DescribeEntry returns a concise human-readable description of an -// Entry for debugging. -func DescribeEntry(e pb.Entry, f EntryFormatter) string { - var formatted string - if e.Type == pb.EntryNormal && f != nil { - formatted = f(e.Data) - } else { - formatted = fmt.Sprintf("%q", e.Data) - } - return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted) -} - -func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry { - if len(ents) == 0 { - return ents - } - size := ents[0].Size() - var limit int - for limit = 1; limit < len(ents); limit++ { - size += ents[limit].Size() - if uint64(size) > maxSize { - break - } - } - return ents[:limit] -} diff --git a/vendor/github.com/coreos/etcd/snap/snapshotter.go b/vendor/github.com/coreos/etcd/snap/snapshotter.go deleted file mode 100644 index 1d73a1c2a..000000000 --- a/vendor/github.com/coreos/etcd/snap/snapshotter.go +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package snap stores raft nodes' states with snapshots. -package snap - -import ( - "errors" - "fmt" - "hash/crc32" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "time" - - pioutil "github.com/coreos/etcd/pkg/ioutil" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap/snappb" - "github.com/coreos/etcd/wal/walpb" - "github.com/coreos/pkg/capnslog" -) - -const ( - snapSuffix = ".snap" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "snap") - - ErrNoSnapshot = errors.New("snap: no available snapshot") - ErrEmptySnapshot = errors.New("snap: empty snapshot") - ErrCRCMismatch = errors.New("snap: crc mismatch") - crcTable = crc32.MakeTable(crc32.Castagnoli) - - // A map of valid files that can be present in the snap folder. 
-	validFiles = map[string]bool{
-		"db": true,
-	}
-)
-
-type Snapshotter struct {
-	dir string
-}
-
-func New(dir string) *Snapshotter {
-	return &Snapshotter{
-		dir: dir,
-	}
-}
-
-func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error {
-	if raft.IsEmptySnap(snapshot) {
-		return nil
-	}
-	return s.save(&snapshot)
-}
-
-func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error {
-	start := time.Now()
-
-	fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix)
-	b := pbutil.MustMarshal(snapshot)
-	crc := crc32.Update(0, crcTable, b)
-	snap := snappb.Snapshot{Crc: crc, Data: b}
-	d, err := snap.Marshal()
-	if err != nil {
-		return err
-	}
-	marshallingDurations.Observe(float64(time.Since(start)) / float64(time.Second))
-
-	err = pioutil.WriteAndSyncFile(filepath.Join(s.dir, fname), d, 0666)
-	if err == nil {
-		saveDurations.Observe(float64(time.Since(start)) / float64(time.Second))
-	} else {
-		err1 := os.Remove(filepath.Join(s.dir, fname))
-		if err1 != nil {
-			plog.Errorf("failed to remove broken snapshot file %s", filepath.Join(s.dir, fname))
-		}
-	}
-	return err
-}
-
-func (s *Snapshotter) Load() (*raftpb.Snapshot, error) {
-	return s.loadMatching(func(*raftpb.Snapshot) bool { return true })
-}
-
-// LoadNewestAvailable loads the newest snapshot available that is in walSnaps.
-func (s *Snapshotter) LoadNewestAvailable(walSnaps []walpb.Snapshot) (*raftpb.Snapshot, error) {
-	return s.loadMatching(func(snapshot *raftpb.Snapshot) bool {
-		m := snapshot.Metadata
-		for i := len(walSnaps) - 1; i >= 0; i-- {
-			if m.Term == walSnaps[i].Term && m.Index == walSnaps[i].Index {
-				return true
-			}
-		}
-		return false
-	})
-}
-
-// loadMatching returns the newest snapshot where matchFn returns true.
-func (s *Snapshotter) loadMatching(matchFn func(*raftpb.Snapshot) bool) (*raftpb.Snapshot, error) {
-	names, err := s.snapNames()
-	if err != nil {
-		return nil, err
-	}
-	var snap *raftpb.Snapshot
-	for _, name := range names {
-		if snap, err = loadSnap(s.dir, name); err == nil && matchFn(snap) {
-			return snap, nil
-		}
-	}
-	return nil, ErrNoSnapshot
-}
-
-func loadSnap(dir, name string) (*raftpb.Snapshot, error) {
-	fpath := filepath.Join(dir, name)
-	snap, err := Read(fpath)
-	if err != nil {
-		renameBroken(fpath)
-	}
-	return snap, err
-}
-
-// Read reads the snapshot named by snapname and returns the snapshot.
-func Read(snapname string) (*raftpb.Snapshot, error) {
-	b, err := ioutil.ReadFile(snapname)
-	if err != nil {
-		plog.Errorf("cannot read file %v: %v", snapname, err)
-		return nil, err
-	}
-
-	if len(b) == 0 {
-		plog.Errorf("unexpected empty snapshot")
-		return nil, ErrEmptySnapshot
-	}
-
-	var serializedSnap snappb.Snapshot
-	if err = serializedSnap.Unmarshal(b); err != nil {
-		plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
-		return nil, err
-	}
-
-	if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 {
-		plog.Errorf("unexpected empty snapshot")
-		return nil, ErrEmptySnapshot
-	}
-
-	crc := crc32.Update(0, crcTable, serializedSnap.Data)
-	if crc != serializedSnap.Crc {
-		plog.Errorf("corrupted snapshot file %v: crc mismatch", snapname)
-		return nil, ErrCRCMismatch
-	}
-
-	var snap raftpb.Snapshot
-	if err = snap.Unmarshal(serializedSnap.Data); err != nil {
-		plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
-		return nil, err
-	}
-	return &snap, nil
-}
-
-// snapNames returns the filename of the snapshots in logical time order (from newest to oldest).
-// If there is no available snapshots, an ErrNoSnapshot will be returned.
-func (s *Snapshotter) snapNames() ([]string, error) {
-	dir, err := os.Open(s.dir)
-	if err != nil {
-		return nil, err
-	}
-	defer dir.Close()
-	names, err := dir.Readdirnames(-1)
-	if err != nil {
-		return nil, err
-	}
-	names, err = s.cleanupSnapdir(names)
-	if err != nil {
-		return nil, err
-	}
-	snaps := checkSuffix(names)
-	if len(snaps) == 0 {
-		return nil, ErrNoSnapshot
-	}
-	sort.Sort(sort.Reverse(sort.StringSlice(snaps)))
-	return snaps, nil
-}
-
-func checkSuffix(names []string) []string {
-	snaps := []string{}
-	for i := range names {
-		if strings.HasSuffix(names[i], snapSuffix) {
-			snaps = append(snaps, names[i])
-		} else {
-			// If we find a file which is not a snapshot then check if it's
-			// a vaild file. If not throw out a warning.
-			if _, ok := validFiles[names[i]]; !ok {
-				plog.Warningf("skipped unexpected non snapshot file %v", names[i])
-			}
-		}
-	}
-	return snaps
-}
-
-func renameBroken(path string) {
-	brokenPath := path + ".broken"
-	if err := os.Rename(path, brokenPath); err != nil {
-		plog.Warningf("cannot rename broken snapshot file %v to %v: %v", path, brokenPath, err)
-	}
-}
-
-// cleanupSnapdir removes any files that should not be in the snapshot directory:
-// - db.tmp prefixed files that can be orphaned by defragmentation
-func (s *Snapshotter) cleanupSnapdir(filenames []string) (names []string, err error) {
-	for _, filename := range filenames {
-		if strings.HasPrefix(filename, "db.tmp") {
-			plog.Infof("found orphaned defragmentation file; deleting: %s", filename)
-			if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
-				return nil, fmt.Errorf("failed to remove orphaned defragmentation file %s: %v", filename, rmErr)
-			}
-			continue
-		}
-		names = append(names, filename)
-	}
-	return names, nil
-}
-
-func (s *Snapshotter) ReleaseSnapDBs(snap raftpb.Snapshot) error {
-	dir, err := os.Open(s.dir)
-	if err != nil {
-		return err
-	}
-	defer dir.Close()
-	filenames, err := dir.Readdirnames(-1)
-	if err != nil {
-		return err
-	}
-	for _, filename := range filenames {
-		if strings.HasSuffix(filename, ".snap.db") {
-			hexIndex := strings.TrimSuffix(filepath.Base(filename), ".snap.db")
-			index, err := strconv.ParseUint(hexIndex, 16, 64)
-			if err != nil {
-				plog.Warningf("failed to parse index from filename: %s (%v)", filename, err)
-				continue
-			}
-			if index < snap.Metadata.Index {
-				plog.Infof("found orphaned .snap.db file; deleting %q", filename)
-				if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
-					plog.Warningf("failed to remove orphaned .snap.db file: %s (%v)", filename, rmErr)
-				}
-			}
-		}
-	}
-	return nil
-}
diff --git a/vendor/github.com/coreos/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore
similarity index 100%
rename from vendor/github.com/coreos/bbolt/.gitignore
rename to vendor/go.etcd.io/bbolt/.gitignore
diff --git a/vendor/github.com/coreos/bbolt/.travis.yml b/vendor/go.etcd.io/bbolt/.travis.yml
similarity index 100%
rename from vendor/github.com/coreos/bbolt/.travis.yml
rename to vendor/go.etcd.io/bbolt/.travis.yml
diff --git a/vendor/github.com/coreos/bbolt/LICENSE b/vendor/go.etcd.io/bbolt/LICENSE
similarity index 100%
rename from vendor/github.com/coreos/bbolt/LICENSE
rename to vendor/go.etcd.io/bbolt/LICENSE
diff --git a/vendor/github.com/coreos/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile
similarity index 100%
rename from vendor/github.com/coreos/bbolt/Makefile
rename to vendor/go.etcd.io/bbolt/Makefile
diff --git a/vendor/github.com/coreos/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md
similarity index 100%
rename from vendor/github.com/coreos/bbolt/README.md
rename to vendor/go.etcd.io/bbolt/README.md
diff --git a/vendor/github.com/coreos/bbolt/bolt_386.go b/vendor/go.etcd.io/bbolt/bolt_386.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_386.go
rename to vendor/go.etcd.io/bbolt/bolt_386.go
diff --git a/vendor/github.com/coreos/bbolt/bolt_amd64.go b/vendor/go.etcd.io/bbolt/bolt_amd64.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_amd64.go
rename to vendor/go.etcd.io/bbolt/bolt_amd64.go
diff --git a/vendor/github.com/coreos/bbolt/bolt_arm.go b/vendor/go.etcd.io/bbolt/bolt_arm.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_arm.go
rename to vendor/go.etcd.io/bbolt/bolt_arm.go
diff --git a/vendor/github.com/coreos/bbolt/bolt_arm64.go b/vendor/go.etcd.io/bbolt/bolt_arm64.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_arm64.go
rename to vendor/go.etcd.io/bbolt/bolt_arm64.go
diff --git a/vendor/github.com/coreos/bbolt/bolt_linux.go b/vendor/go.etcd.io/bbolt/bolt_linux.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_linux.go
rename to vendor/go.etcd.io/bbolt/bolt_linux.go
diff --git a/vendor/github.com/coreos/bbolt/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/bolt_mips64x.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_mips64x.go
rename to vendor/go.etcd.io/bbolt/bolt_mips64x.go
diff --git a/vendor/github.com/coreos/bbolt/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/bolt_mipsx.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_mipsx.go
rename to vendor/go.etcd.io/bbolt/bolt_mipsx.go
diff --git a/vendor/github.com/coreos/bbolt/bolt_openbsd.go b/vendor/go.etcd.io/bbolt/bolt_openbsd.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_openbsd.go
rename to vendor/go.etcd.io/bbolt/bolt_openbsd.go
diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc.go b/vendor/go.etcd.io/bbolt/bolt_ppc.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_ppc.go
rename to vendor/go.etcd.io/bbolt/bolt_ppc.go
diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/bolt_ppc64.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_ppc64.go
rename to vendor/go.etcd.io/bbolt/bolt_ppc64.go
diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_ppc64le.go
rename to vendor/go.etcd.io/bbolt/bolt_ppc64le.go
diff --git a/vendor/github.com/coreos/bbolt/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/bolt_riscv64.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_riscv64.go
rename to vendor/go.etcd.io/bbolt/bolt_riscv64.go
diff --git a/vendor/github.com/coreos/bbolt/bolt_s390x.go b/vendor/go.etcd.io/bbolt/bolt_s390x.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_s390x.go
rename to vendor/go.etcd.io/bbolt/bolt_s390x.go
diff --git a/vendor/github.com/coreos/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_unix.go
rename to vendor/go.etcd.io/bbolt/bolt_unix.go
diff --git a/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go b/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
rename to vendor/go.etcd.io/bbolt/bolt_unix_solaris.go
diff --git a/vendor/github.com/coreos/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_windows.go
rename to vendor/go.etcd.io/bbolt/bolt_windows.go
diff --git a/vendor/github.com/coreos/bbolt/boltsync_unix.go b/vendor/go.etcd.io/bbolt/boltsync_unix.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/boltsync_unix.go
rename to vendor/go.etcd.io/bbolt/boltsync_unix.go
diff --git a/vendor/github.com/coreos/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bucket.go
rename to vendor/go.etcd.io/bbolt/bucket.go
diff --git a/vendor/github.com/coreos/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/cursor.go
rename to vendor/go.etcd.io/bbolt/cursor.go
diff --git a/vendor/github.com/coreos/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/db.go
rename to vendor/go.etcd.io/bbolt/db.go
diff --git a/vendor/github.com/coreos/bbolt/doc.go b/vendor/go.etcd.io/bbolt/doc.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/doc.go
rename to vendor/go.etcd.io/bbolt/doc.go
diff --git a/vendor/github.com/coreos/bbolt/errors.go b/vendor/go.etcd.io/bbolt/errors.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/errors.go
rename to vendor/go.etcd.io/bbolt/errors.go
diff --git a/vendor/github.com/coreos/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/freelist.go
rename to vendor/go.etcd.io/bbolt/freelist.go
diff --git a/vendor/github.com/coreos/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/freelist_hmap.go
rename to vendor/go.etcd.io/bbolt/freelist_hmap.go
diff --git a/vendor/github.com/coreos/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/node.go
rename to vendor/go.etcd.io/bbolt/node.go
diff --git a/vendor/github.com/coreos/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/page.go
rename to vendor/go.etcd.io/bbolt/page.go
diff --git a/vendor/github.com/coreos/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/tx.go
rename to vendor/go.etcd.io/bbolt/tx.go
diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/go.etcd.io/etcd/LICENSE
similarity index 100%
rename from vendor/github.com/coreos/etcd/LICENSE
rename to vendor/go.etcd.io/etcd/LICENSE
diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/go.etcd.io/etcd/NOTICE
similarity index 100%
rename from vendor/github.com/coreos/etcd/NOTICE
rename to vendor/go.etcd.io/etcd/NOTICE
diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go
similarity index 60%
rename from vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
rename to vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go
index c5faf00c6..7e038df01 100644
--- a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
+++ b/vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go
@@ -1,16 +1,30 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: auth.proto +/* + Package authpb is a generated protocol buffer package. + + It is generated from these files: + auth.proto + + It has these top-level messages: + UserAddOptions + User + Permission + Role +*/ package authpb import ( - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -37,7 +51,6 @@ var Permission_Type_name = map[int32]string{ 1: "WRITE", 2: "READWRITE", } - var Permission_Type_value = map[string]int32{ "READ": 0, "WRITE": 1, @@ -47,174 +60,92 @@ var Permission_Type_value = map[string]int32{ func (x Permission_Type) String() string { return proto.EnumName(Permission_Type_name, int32(x)) } +func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2, 0} } -func (Permission_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{1, 0} +type UserAddOptions struct { + NoPassword bool `protobuf:"varint,1,opt,name=no_password,json=noPassword,proto3" json:"no_password,omitempty"` } +func (m *UserAddOptions) Reset() { *m = UserAddOptions{} } +func (m *UserAddOptions) String() string { return proto.CompactTextString(m) } +func (*UserAddOptions) ProtoMessage() {} +func (*UserAddOptions) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} } + // User is a single entry in the bucket authUsers type User struct { - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - Roles []string `protobuf:"bytes,3,rep,name=roles,proto3" json:"roles,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"` + Options *UserAddOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` } -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} -func (*User) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{0} -} -func (m *User) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_User.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *User) XXX_Merge(src proto.Message) { - xxx_messageInfo_User.Merge(m, src) -} -func (m *User) XXX_Size() int { - return m.Size() -} -func (m *User) XXX_DiscardUnknown() { - xxx_messageInfo_User.DiscardUnknown(m) -} - -var xxx_messageInfo_User proto.InternalMessageInfo +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} } // Permission is a single entity type Permission struct { - PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"` - 
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` } -func (m *Permission) Reset() { *m = Permission{} } -func (m *Permission) String() string { return proto.CompactTextString(m) } -func (*Permission) ProtoMessage() {} -func (*Permission) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{1} -} -func (m *Permission) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Permission.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Permission) XXX_Merge(src proto.Message) { - xxx_messageInfo_Permission.Merge(m, src) -} -func (m *Permission) XXX_Size() int { - return m.Size() -} -func (m *Permission) XXX_DiscardUnknown() { - xxx_messageInfo_Permission.DiscardUnknown(m) -} - -var xxx_messageInfo_Permission proto.InternalMessageInfo +func (m *Permission) Reset() { *m = Permission{} } +func (m *Permission) String() string { return proto.CompactTextString(m) } +func (*Permission) ProtoMessage() {} +func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} } // Role is a single entry in the bucket authRoles type Role struct { - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission,proto3" json:"keyPermission,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"` } -func (m *Role) Reset() { *m = Role{} } -func (m *Role) String() string { return proto.CompactTextString(m) } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{2} -} -func (m *Role) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Role.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Role) XXX_Merge(src proto.Message) { - xxx_messageInfo_Role.Merge(m, src) -} -func (m *Role) XXX_Size() int { - return m.Size() -} -func (m *Role) XXX_DiscardUnknown() { - xxx_messageInfo_Role.DiscardUnknown(m) -} - -var xxx_messageInfo_Role proto.InternalMessageInfo +func (m *Role) Reset() { *m = Role{} } +func (m *Role) String() string { return proto.CompactTextString(m) } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{3} } func init() { - proto.RegisterEnum("authpb.Permission_Type", 
Permission_Type_name, Permission_Type_value) + proto.RegisterType((*UserAddOptions)(nil), "authpb.UserAddOptions") proto.RegisterType((*User)(nil), "authpb.User") proto.RegisterType((*Permission)(nil), "authpb.Permission") proto.RegisterType((*Role)(nil), "authpb.Role") + proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value) +} +func (m *UserAddOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) } - -var fileDescriptor_8bbd6f3875b0e874 = []byte{ - // 288 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, - 0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78, - 0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c, - 0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d, - 0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d, - 0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd, - 0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51, - 0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef, - 0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00, - 0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc, - 0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70, - 0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41, - 0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc, - 0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b, - 0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1, - 0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee, - 0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4, - 0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00, +func (m *UserAddOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NoPassword { + dAtA[i] = 0x8 + i++ + if m.NoPassword { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil } func (m *User) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -222,49 +153,54 @@ func (m *User) Marshal() (dAtA []byte, err error) { } func (m *User) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Password) > 0 { + dAtA[i] = 0x12 + 
i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) } if len(m.Roles) > 0 { - for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Roles[iNdEx]) - copy(dAtA[i:], m.Roles[iNdEx]) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Roles[iNdEx]))) - i-- + for _, s := range m.Roles { dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.Password) > 0 { - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Password))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if m.Options != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.Options.Size())) + n1, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 } - return len(dAtA) - i, nil + return i, nil } func (m *Permission) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -272,45 +208,34 @@ func (m *Permission) Marshal() (dAtA []byte, err error) { } func (m *Permission) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Permission) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x1a + if m.PermType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintAuth(dAtA, i, uint64(m.PermType)) } if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Key))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if m.PermType != 0 { - i = encodeVarintAuth(dAtA, i, uint64(m.PermType)) - i-- - dAtA[i] = 0x8 + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) } - return len(dAtA) - i, nil + return i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -318,58 +243,50 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if len(m.KeyPermission) > 0 { - for iNdEx := len(m.KeyPermission) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.KeyPermission[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAuth(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.KeyPermission { dAtA[i] = 0x12 + i++ + 
i = encodeVarintAuth(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { - offset -= sovAuth(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } -func (m *User) Size() (n int) { - if m == nil { - return 0 +func (m *UserAddOptions) Size() (n int) { + var l int + _ = l + if m.NoPassword { + n += 2 } + return n +} + +func (m *User) Size() (n int) { var l int _ = l l = len(m.Name) @@ -386,16 +303,14 @@ func (m *User) Size() (n int) { n += 1 + l + sovAuth(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovAuth(uint64(l)) } return n } func (m *Permission) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.PermType != 0 { @@ -409,16 +324,10 @@ func (m *Permission) Size() (n int) { if l > 0 { n += 1 + l + sovAuth(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *Role) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -431,18 +340,92 @@ func (m *Role) Size() (n int) { n += 1 + l + sovAuth(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func sovAuth(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozAuth(x uint64) (n int) { return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (m *UserAddOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserAddOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserAddOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoPassword", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NoPassword = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *User) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -458,7 +441,7 @@ func (m *User) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -486,7 +469,7 @@ func (m *User) Unmarshal(dAtA []byte) error { } b 
:= dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -495,9 +478,6 @@ func (m *User) Unmarshal(dAtA []byte) error { return ErrInvalidLengthAuth } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -520,7 +500,7 @@ func (m *User) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -529,9 +509,6 @@ func (m *User) Unmarshal(dAtA []byte) error { return ErrInvalidLengthAuth } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -554,7 +531,7 @@ func (m *User) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -564,13 +541,43 @@ func (m *User) Unmarshal(dAtA []byte) error { return ErrInvalidLengthAuth } postIndex := iNdEx + intStringLen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthAuth } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + if m.Options == nil { + m.Options = &UserAddOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -581,13 +588,9 @@ func (m *User) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthAuth } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthAuth - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -612,7 +615,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -640,7 +643,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PermType |= Permission_Type(b&0x7F) << shift + m.PermType |= (Permission_Type(b) & 0x7F) << shift if b < 0x80 { break } @@ -659,7 +662,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -668,9 +671,6 @@ func (m *Permission) Unmarshal(dAtA []byte) error { return ErrInvalidLengthAuth } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -693,7 +693,7 @@ func (m *Permission) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -702,9 +702,6 @@ func (m *Permission) Unmarshal(dAtA []byte) error { return ErrInvalidLengthAuth } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -722,13 +719,9 @@ func (m *Permission) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthAuth } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthAuth - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -753,7 +746,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -781,7 +774,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -790,9 +783,6 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthAuth } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -815,7 +805,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -824,9 +814,6 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthAuth } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAuth - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -844,13 +831,9 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthAuth } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthAuth - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -914,11 +897,8 @@ func skipAuth(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthAuth - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthAuth } return iNdEx, nil @@ -949,9 +929,6 @@ func skipAuth(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthAuth - } } return iNdEx, nil case 4: @@ -970,3 +947,31 @@ var ( ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) } + +var fileDescriptorAuth = []byte{ + // 338 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0xea, 0x40, + 0x14, 0xc6, 0x3b, 0xb4, 0x70, 0xdb, 0xc3, 0x85, 0x90, 0x13, 0x72, 0x6f, 0x83, 0x49, 0x6d, 0xba, + 0x6a, 0x5c, 0x54, 0x85, 0x8d, 0x5b, 0x8c, 0x2c, 0x5c, 0x49, 0x26, 0x18, 0x97, 0xa4, 0xa4, 0x13, + 0x24, 0xc0, 0x4c, 0x33, 0x83, 0x31, 0x6c, 0x7c, 0x0e, 0x17, 0x3e, 0x10, 0x4b, 0x1e, 0x41, 0xf0, + 0x45, 0x4c, 0x67, 0xf8, 0x13, 0xa2, 0xbb, 0xef, 0x7c, 0xe7, 0xfb, 0x66, 0x7e, 0x99, 0x01, 0x48, + 0x5f, 0x16, 0xcf, 0x49, 0x2e, 0xc5, 0x42, 0x60, 0xa5, 0xd0, 0xf9, 0xa8, 0xd5, 0x1c, 0x8b, 0xb1, + 0xd0, 0xd6, 0x65, 0xa1, 0xcc, 0x36, 0xba, 0x86, 0xfa, 0xa3, 0x62, 0xb2, 0x9b, 0x65, 0x0f, 0xf9, + 0x62, 0x22, 0xb8, 0xc2, 0x73, 0xa8, 0x72, 0x31, 0xcc, 0x53, 0xa5, 0x5e, 0x85, 0xcc, 0x7c, 0x12, + 0x92, 0xd8, 0xa5, 0xc0, 0x45, 0x7f, 0xe7, 0x44, 0x6f, 0xe0, 0x14, 0x15, 0x44, 0x70, 0x78, 0x3a, + 0x67, 0x3a, 0xf1, 0x97, 0x6a, 0x8d, 0x2d, 0x70, 0x0f, 0xcd, 0x92, 0xf6, 0x0f, 0x33, 0x36, 0xa1, + 0x2c, 0xc5, 0x8c, 0x29, 0xdf, 0x0e, 0xed, 0xd8, 0xa3, 0x66, 0xc0, 0x2b, 0xf8, 0x23, 0xcc, 0xcd, + 0xbe, 0x13, 0x92, 0xb8, 0xda, 0xfe, 0x97, 0x18, 0xe0, 0xe4, 0x94, 0x8b, 0xee, 0x63, 0xd1, 0x07, + 0x01, 0xe8, 0x33, 0x39, 0x9f, 0x28, 0x35, 0x11, 0x1c, 0x3b, 0xe0, 0xe6, 0x4c, 0xce, 0x07, 0xcb, + 0xdc, 0xa0, 0xd4, 0xdb, 0xff, 0xf7, 0x27, 0x1c, 0x53, 0x49, 0xb1, 0xa6, 0x87, 0x20, 0x36, 0xc0, + 0x9e, 0xb2, 0xe5, 0x0e, 0xb1, 0x90, 0x78, 0x06, 0x9e, 0x4c, 0xf9, 0x98, 0x0d, 0x19, 0xcf, 0x7c, + 0xdb, 0xa0, 0x6b, 0xa3, 0xc7, 0xb3, 0xe8, 0x02, 0x1c, 0x5d, 0x73, 0xc1, 0xa1, 0xbd, 0xee, 0x5d, + 0xc3, 0x42, 0x0f, 0xca, 0x4f, 0xf4, 0x7e, 0xd0, 0x6b, 0x10, 0xac, 0x81, 0x57, 0x98, 0x66, 0x2c, + 0x45, 0x03, 0x70, 0xa8, 0x98, 0xb1, 0x5f, 0x9f, 0xe7, 0x06, 0x6a, 0x53, 0xb6, 0x3c, 0x62, 0xf9, + 0xa5, 0xd0, 0x8e, 0xab, 0x6d, 0xfc, 0x09, 0x4c, 0x4f, 0x83, 0xb7, 0xfe, 0x6a, 0x13, 0x58, 0xeb, + 0x4d, 0x60, 0xad, 0xb6, 0x01, 0x59, 0x6f, 0x03, 0xf2, 0xb9, 0x0d, 0xc8, 0xfb, 0x57, 0x60, 0x8d, + 0x2a, 0xfa, 0x23, 0x3b, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x61, 0x66, 0xc6, 0x9d, 0xf4, 0x01, + 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto b/vendor/go.etcd.io/etcd/auth/authpb/auth.proto similarity index 89% rename from vendor/github.com/coreos/etcd/auth/authpb/auth.proto rename to vendor/go.etcd.io/etcd/auth/authpb/auth.proto index 001d33435..8f82b7cf1 100644 --- a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto +++ b/vendor/go.etcd.io/etcd/auth/authpb/auth.proto @@ -9,11 +9,16 @@ option (gogoproto.unmarshaler_all) = true; option (gogoproto.goproto_getters_all) = false; option (gogoproto.goproto_enum_prefix_all) = false; +message UserAddOptions { + bool no_password = 1; +}; + // User is a single entry in the bucket authUsers message User { bytes name = 1; bytes 
password = 2; repeated string roles = 3; + UserAddOptions options = 4; } // Permission is a single entity diff --git a/vendor/github.com/coreos/etcd/auth/doc.go b/vendor/go.etcd.io/etcd/auth/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/auth/doc.go rename to vendor/go.etcd.io/etcd/auth/doc.go diff --git a/vendor/go.etcd.io/etcd/auth/jwt.go b/vendor/go.etcd.io/etcd/auth/jwt.go new file mode 100644 index 000000000..c22ef898a --- /dev/null +++ b/vendor/go.etcd.io/etcd/auth/jwt.go @@ -0,0 +1,184 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "context" + "crypto/ecdsa" + "crypto/rsa" + "errors" + "time" + + jwt "github.com/dgrijalva/jwt-go" + "go.uber.org/zap" +) + +type tokenJWT struct { + lg *zap.Logger + signMethod jwt.SigningMethod + key interface{} + ttl time.Duration + verifyOnly bool +} + +func (t *tokenJWT) enable() {} +func (t *tokenJWT) disable() {} +func (t *tokenJWT) invalidateUser(string) {} +func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil } + +func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) { + // rev isn't used in JWT, it is only used in simple token + var ( + username string + revision uint64 + ) + + parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { + if token.Method.Alg() != t.signMethod.Alg() { + return nil, errors.New("invalid signing method") + } + switch k := t.key.(type) { + case *rsa.PrivateKey: + return &k.PublicKey, nil + case *ecdsa.PrivateKey: + return &k.PublicKey, nil + default: + return t.key, nil + } + }) + + if err != nil { + if t.lg != nil { + t.lg.Warn( + "failed to parse a JWT token", + zap.String("token", token), + zap.Error(err), + ) + } else { + plog.Warningf("failed to parse jwt token: %s", err) + } + return nil, false + } + + claims, ok := parsed.Claims.(jwt.MapClaims) + if !parsed.Valid || !ok { + if t.lg != nil { + t.lg.Warn("invalid JWT token", zap.String("token", token)) + } else { + plog.Warningf("invalid jwt token: %s", token) + } + return nil, false + } + + username = claims["username"].(string) + revision = uint64(claims["revision"].(float64)) + + return &AuthInfo{Username: username, Revision: revision}, true +} + +func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) { + if t.verifyOnly { + return "", ErrVerifyOnly + } + + // Future work: let a jwt token include permission information would be useful for + // permission checking in proxy side. 
+ tk := jwt.NewWithClaims(t.signMethod, + jwt.MapClaims{ + "username": username, + "revision": revision, + "exp": time.Now().Add(t.ttl).Unix(), + }) + + token, err := tk.SignedString(t.key) + if err != nil { + if t.lg != nil { + t.lg.Warn( + "failed to sign a JWT token", + zap.String("user-name", username), + zap.Uint64("revision", revision), + zap.Error(err), + ) + } else { + plog.Debugf("failed to sign jwt token: %s", err) + } + return "", err + } + + if t.lg != nil { + t.lg.Info( + "created/assigned a new JWT token", + zap.String("user-name", username), + zap.Uint64("revision", revision), + zap.String("token", token), + ) + } else { + plog.Debugf("jwt token: %s", token) + } + return token, err +} + +func newTokenProviderJWT(lg *zap.Logger, optMap map[string]string) (*tokenJWT, error) { + var err error + var opts jwtOptions + err = opts.ParseWithDefaults(optMap) + if err != nil { + if lg != nil { + lg.Warn("problem loading JWT options", zap.Error(err)) + } else { + plog.Errorf("problem loading JWT options: %s", err) + } + return nil, ErrInvalidAuthOpts + } + + var keys = make([]string, 0, len(optMap)) + for k := range optMap { + if !knownOptions[k] { + keys = append(keys, k) + } + } + if len(keys) > 0 { + if lg != nil { + lg.Warn("unknown JWT options", zap.Strings("keys", keys)) + } else { + plog.Warningf("unknown JWT options: %v", keys) + } + } + + key, err := opts.Key() + if err != nil { + return nil, err + } + + t := &tokenJWT{ + lg: lg, + ttl: opts.TTL, + signMethod: opts.SignMethod, + key: key, + } + + switch t.signMethod.(type) { + case *jwt.SigningMethodECDSA: + if _, ok := t.key.(*ecdsa.PublicKey); ok { + t.verifyOnly = true + } + case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS: + if _, ok := t.key.(*rsa.PublicKey); ok { + t.verifyOnly = true + } + } + + return t, nil +} diff --git a/vendor/github.com/coreos/etcd/auth/nop.go b/vendor/go.etcd.io/etcd/auth/nop.go similarity index 100% rename from vendor/github.com/coreos/etcd/auth/nop.go rename to vendor/go.etcd.io/etcd/auth/nop.go diff --git a/vendor/go.etcd.io/etcd/auth/options.go b/vendor/go.etcd.io/etcd/auth/options.go new file mode 100644 index 000000000..f40b92de6 --- /dev/null +++ b/vendor/go.etcd.io/etcd/auth/options.go @@ -0,0 +1,192 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package auth + +import ( + "crypto/ecdsa" + "crypto/rsa" + "fmt" + "io/ioutil" + "time" + + jwt "github.com/dgrijalva/jwt-go" +) + +const ( + optSignMethod = "sign-method" + optPublicKey = "pub-key" + optPrivateKey = "priv-key" + optTTL = "ttl" +) + +var knownOptions = map[string]bool{ + optSignMethod: true, + optPublicKey: true, + optPrivateKey: true, + optTTL: true, +} + +var ( + // DefaultTTL will be used when a 'ttl' is not specified + DefaultTTL = 5 * time.Minute +) + +type jwtOptions struct { + SignMethod jwt.SigningMethod + PublicKey []byte + PrivateKey []byte + TTL time.Duration +} + +// ParseWithDefaults will load options from the specified map or set defaults where appropriate +func (opts *jwtOptions) ParseWithDefaults(optMap map[string]string) error { + if opts.TTL == 0 && optMap[optTTL] == "" { + opts.TTL = DefaultTTL + } + + return opts.Parse(optMap) +} + +// Parse will load options from the specified map +func (opts *jwtOptions) Parse(optMap map[string]string) error { + var err error + if ttl := optMap[optTTL]; ttl != "" { + opts.TTL, err = time.ParseDuration(ttl) + if err != nil { + return err + } + } + + if file := optMap[optPublicKey]; file != "" { + opts.PublicKey, err = ioutil.ReadFile(file) + if err != nil { + return err + } + } + + if file := optMap[optPrivateKey]; file != "" { + opts.PrivateKey, err = ioutil.ReadFile(file) + if err != nil { + return err + } + } + + // signing method is a required field + method := optMap[optSignMethod] + opts.SignMethod = jwt.GetSigningMethod(method) + if opts.SignMethod == nil { + return ErrInvalidAuthMethod + } + + return nil +} + +// Key will parse and return the appropriately typed key for the selected signature method +func (opts *jwtOptions) Key() (interface{}, error) { + switch opts.SignMethod.(type) { + case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS: + return opts.rsaKey() + case *jwt.SigningMethodECDSA: + return opts.ecKey() + case *jwt.SigningMethodHMAC: + return opts.hmacKey() + default: + return nil, fmt.Errorf("unsupported signing method: %T", opts.SignMethod) + } +} + +func (opts *jwtOptions) hmacKey() (interface{}, error) { + if len(opts.PrivateKey) == 0 { + return nil, ErrMissingKey + } + return opts.PrivateKey, nil +} + +func (opts *jwtOptions) rsaKey() (interface{}, error) { + var ( + priv *rsa.PrivateKey + pub *rsa.PublicKey + err error + ) + + if len(opts.PrivateKey) > 0 { + priv, err = jwt.ParseRSAPrivateKeyFromPEM(opts.PrivateKey) + if err != nil { + return nil, err + } + } + + if len(opts.PublicKey) > 0 { + pub, err = jwt.ParseRSAPublicKeyFromPEM(opts.PublicKey) + if err != nil { + return nil, err + } + } + + if priv == nil { + if pub == nil { + // Neither key given + return nil, ErrMissingKey + } + // Public key only, can verify tokens + return pub, nil + } + + // both keys provided, make sure they match + if pub != nil && pub.E != priv.E && pub.N.Cmp(priv.N) != 0 { + return nil, ErrKeyMismatch + } + + return priv, nil +} + +func (opts *jwtOptions) ecKey() (interface{}, error) { + var ( + priv *ecdsa.PrivateKey + pub *ecdsa.PublicKey + err error + ) + + if len(opts.PrivateKey) > 0 { + priv, err = jwt.ParseECPrivateKeyFromPEM(opts.PrivateKey) + if err != nil { + return nil, err + } + } + + if len(opts.PublicKey) > 0 { + pub, err = jwt.ParseECPublicKeyFromPEM(opts.PublicKey) + if err != nil { + return nil, err + } + } + + if priv == nil { + if pub == nil { + // Neither key given + return nil, ErrMissingKey + } + // Public key only, can verify tokens + return pub, nil + } + + // both keys provided, make 
sure they match + if pub != nil && pub.Curve != priv.Curve && + pub.X.Cmp(priv.X) != 0 && pub.Y.Cmp(priv.Y) != 0 { + return nil, ErrKeyMismatch + } + + return priv, nil +} diff --git a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go b/vendor/go.etcd.io/etcd/auth/range_perm_cache.go similarity index 68% rename from vendor/github.com/coreos/etcd/auth/range_perm_cache.go rename to vendor/go.etcd.io/etcd/auth/range_perm_cache.go index 7d47d2610..7b6c18240 100644 --- a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go +++ b/vendor/go.etcd.io/etcd/auth/range_perm_cache.go @@ -15,15 +15,16 @@ package auth import ( - "github.com/coreos/etcd/auth/authpb" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/adt" + "go.etcd.io/etcd/auth/authpb" + "go.etcd.io/etcd/mvcc/backend" + "go.etcd.io/etcd/pkg/adt" + + "go.uber.org/zap" ) -func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions { - user := getUser(tx, userName) +func getMergedPerms(lg *zap.Logger, tx backend.BatchTx, userName string) *unifiedRangePermissions { + user := getUser(lg, tx, userName) if user == nil { - plog.Errorf("invalid user name %s", userName) return nil } @@ -70,7 +71,11 @@ func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermission } } -func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { +func checkKeyInterval( + lg *zap.Logger, + cachedPerms *unifiedRangePermissions, + key, rangeEnd []byte, + permtyp authpb.Permission_Type) bool { if len(rangeEnd) == 1 && rangeEnd[0] == 0 { rangeEnd = nil } @@ -82,12 +87,16 @@ func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte case authpb.WRITE: return cachedPerms.writePerms.Contains(ivl) default: - plog.Panicf("unknown auth type: %v", permtyp) + if lg != nil { + lg.Panic("unknown auth type", zap.String("auth-type", permtyp.String())) + } else { + plog.Panicf("unknown auth type: %v", permtyp) + } } return false } -func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool { +func checkKeyPoint(lg *zap.Logger, cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool { pt := adt.NewBytesAffinePoint(key) switch permtyp { case authpb.READ: @@ -95,7 +104,11 @@ func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp aut case authpb.WRITE: return cachedPerms.writePerms.Intersects(pt) default: - plog.Panicf("unknown auth type: %v", permtyp) + if lg != nil { + lg.Panic("unknown auth type", zap.String("auth-type", permtyp.String())) + } else { + plog.Panicf("unknown auth type: %v", permtyp) + } } return false } @@ -104,19 +117,26 @@ func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key // assumption: tx is Lock()ed _, ok := as.rangePermCache[userName] if !ok { - perms := getMergedPerms(tx, userName) + perms := getMergedPerms(as.lg, tx, userName) if perms == nil { - plog.Errorf("failed to create a unified permission of user %s", userName) + if as.lg != nil { + as.lg.Warn( + "failed to create a merged permission", + zap.String("user-name", userName), + ) + } else { + plog.Errorf("failed to create a unified permission of user %s", userName) + } return false } as.rangePermCache[userName] = perms } if len(rangeEnd) == 0 { - return checkKeyPoint(as.rangePermCache[userName], key, permtyp) + return checkKeyPoint(as.lg, as.rangePermCache[userName], key, permtyp) } - return 
checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp) + return checkKeyInterval(as.lg, as.rangePermCache[userName], key, rangeEnd, permtyp) } func (as *authStore) clearCachedPerm() { diff --git a/vendor/github.com/coreos/etcd/auth/simple_token.go b/vendor/go.etcd.io/etcd/auth/simple_token.go similarity index 87% rename from vendor/github.com/coreos/etcd/auth/simple_token.go rename to vendor/go.etcd.io/etcd/auth/simple_token.go index ac55ad7f1..934978c98 100644 --- a/vendor/github.com/coreos/etcd/auth/simple_token.go +++ b/vendor/go.etcd.io/etcd/auth/simple_token.go @@ -14,7 +14,7 @@ package auth -// CAUTION: This randum number based token mechanism is only for testing purpose. +// CAUTION: This random number based token mechanism is only for testing purpose. // JWT based mechanism will be added in the near future. import ( @@ -26,6 +26,8 @@ import ( "strings" "sync" "time" + + "go.uber.org/zap" ) const ( @@ -94,6 +96,7 @@ func (tm *simpleTokenTTLKeeper) run() { } type tokenSimple struct { + lg *zap.Logger indexWaiter func(uint64) <-chan struct{} simpleTokenKeeper *simpleTokenTTLKeeper simpleTokensMu sync.Mutex @@ -124,7 +127,15 @@ func (t *tokenSimple) assignSimpleTokenToUser(username, token string) { _, ok := t.simpleTokens[token] if ok { - plog.Panicf("token %s is alredy used", token) + if t.lg != nil { + t.lg.Panic( + "failed to assign already-used simple token to a user", + zap.String("user-name", username), + zap.String("token", token), + ) + } else { + plog.Panicf("token %s is already used", token) + } } t.simpleTokens[token] = username @@ -137,7 +148,7 @@ func (t *tokenSimple) invalidateUser(username string) { } t.simpleTokensMu.Lock() for token, name := range t.simpleTokens { - if strings.Compare(name, username) == 0 { + if name == username { delete(t.simpleTokens, token) t.simpleTokenKeeper.deleteSimpleToken(token) } @@ -148,7 +159,15 @@ func (t *tokenSimple) invalidateUser(username string) { func (t *tokenSimple) enable() { delf := func(tk string) { if username, ok := t.simpleTokens[tk]; ok { - plog.Infof("deleting token %s for user %s", tk, username) + if t.lg != nil { + t.lg.Info( + "deleted a simple token", + zap.String("user-name", username), + zap.String("token", tk), + ) + } else { + plog.Infof("deleting token %s for user %s", tk, username) + } delete(t.simpleTokens, tk) } } @@ -215,8 +234,9 @@ func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool return false } -func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple { +func newTokenProviderSimple(lg *zap.Logger, indexWaiter func(uint64) <-chan struct{}) *tokenSimple { return &tokenSimple{ + lg: lg, simpleTokens: make(map[string]string), indexWaiter: indexWaiter, } diff --git a/vendor/github.com/coreos/etcd/auth/store.go b/vendor/go.etcd.io/etcd/auth/store.go similarity index 64% rename from vendor/github.com/coreos/etcd/auth/store.go rename to vendor/go.etcd.io/etcd/auth/store.go index 21c121e50..52122554a 100644 --- a/vendor/github.com/coreos/etcd/auth/store.go +++ b/vendor/go.etcd.io/etcd/auth/store.go @@ -24,11 +24,13 @@ import ( "sync" "sync/atomic" - "github.com/coreos/etcd/auth/authpb" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/backend" + "go.etcd.io/etcd/auth/authpb" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/mvcc/backend" "github.com/coreos/pkg/capnslog" + "go.uber.org/zap" "golang.org/x/crypto/bcrypt" 
"google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" @@ -46,7 +48,7 @@ var ( authUsersBucketName = []byte("authUsers") authRolesBucketName = []byte("authRoles") - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "auth") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "auth") ErrRootUserNotExist = errors.New("auth: root user does not exist") ErrRootRoleNotExist = errors.New("auth: root user does not have root role") @@ -55,6 +57,7 @@ var ( ErrUserNotFound = errors.New("auth: user not found") ErrRoleAlreadyExist = errors.New("auth: role already exists") ErrRoleNotFound = errors.New("auth: role not found") + ErrRoleEmpty = errors.New("auth: role name is empty") ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password") ErrPermissionDenied = errors.New("auth: permission denied") ErrRoleNotGranted = errors.New("auth: role is not granted to the user") @@ -64,9 +67,10 @@ var ( ErrInvalidAuthToken = errors.New("auth: invalid auth token") ErrInvalidAuthOpts = errors.New("auth: invalid auth options") ErrInvalidAuthMgmt = errors.New("auth: invalid auth management") - - // BcryptCost is the algorithm cost / strength for hashing auth passwords - BcryptCost = bcrypt.DefaultCost + ErrInvalidAuthMethod = errors.New("auth: invalid auth signature method") + ErrMissingKey = errors.New("auth: missing key data") + ErrKeyMismatch = errors.New("auth: public and private keys don't match") + ErrVerifyOnly = errors.New("auth: token signing attempted with verify-only key") ) const ( @@ -90,9 +94,6 @@ type AuthenticateParamIndex struct{} // AuthenticateParamSimpleTokenPrefix is used for a key of context in the parameters of Authenticate() type AuthenticateParamSimpleTokenPrefix struct{} -// saveConsistentIndexFunc is used to sync consistentIndex to backend, now reusing store.saveIndex -type saveConsistentIndexFunc func(tx backend.BatchTx) - // AuthStore defines auth storage interface. type AuthStore interface { // AuthEnable turns on the authentication feature @@ -101,6 +102,9 @@ type AuthStore interface { // AuthDisable turns off the authentication feature AuthDisable() + // IsAuthEnabled returns true if the authentication feature is enabled. 
+ IsAuthEnabled() bool + // Authenticate does authentication based on given user name and password Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) @@ -182,9 +186,6 @@ type AuthStore interface { // HasRole checks that user has role HasRole(user, role string) bool - - // SetConsistentIndexSyncer sets consistentIndex syncer - SetConsistentIndexSyncer(syncer saveConsistentIndexFunc) } type TokenProvider interface { @@ -201,24 +202,26 @@ type authStore struct { // atomic operations; need 64-bit align, or 32-bit tests will crash revision uint64 + lg *zap.Logger be backend.Backend enabled bool enabledMu sync.RWMutex rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions - tokenProvider TokenProvider - syncConsistentIndex saveConsistentIndexFunc + tokenProvider TokenProvider + bcryptCost int // the algorithm cost / strength for hashing auth passwords } -func (as *authStore) SetConsistentIndexSyncer(syncer saveConsistentIndexFunc) { - as.syncConsistentIndex = syncer -} func (as *authStore) AuthEnable() error { as.enabledMu.Lock() defer as.enabledMu.Unlock() if as.enabled { - plog.Noticef("Authentication already enabled") + if as.lg != nil { + as.lg.Info("authentication is already enabled; ignored auth enable request") + } else { + plog.Noticef("Authentication already enabled") + } return nil } b := as.be @@ -229,7 +232,7 @@ func (as *authStore) AuthEnable() error { b.ForceCommit() }() - u := getUser(tx, rootUser) + u := getUser(as.lg, tx, rootUser) if u == nil { return ErrRootUserNotExist } @@ -247,8 +250,11 @@ func (as *authStore) AuthEnable() error { as.setRevision(getRevision(tx)) - plog.Noticef("Authentication enabled") - + if as.lg != nil { + as.lg.Info("enabled authentication") + } else { + plog.Noticef("Authentication enabled") + } return nil } @@ -263,14 +269,17 @@ func (as *authStore) AuthDisable() { tx.Lock() tx.UnsafePut(authBucketName, enableFlagKey, authDisabled) as.commitRevision(tx) - as.saveConsistentIndex(tx) tx.Unlock() b.ForceCommit() as.enabled = false as.tokenProvider.disable() - plog.Noticef("Authentication disabled") + if as.lg != nil { + as.lg.Info("disabled authentication") + } else { + plog.Noticef("Authentication disabled") + } } func (as *authStore) Close() error { @@ -284,7 +293,7 @@ func (as *authStore) Close() error { } func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) { - if !as.isAuthEnabled() { + if !as.IsAuthEnabled() { return nil, ErrAuthNotEnabled } @@ -292,11 +301,15 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string tx.Lock() defer tx.Unlock() - user := getUser(tx, username) + user := getUser(as.lg, tx, username) if user == nil { return nil, ErrAuthFailed } + if user.Options.NoPassword { + return nil, ErrAuthFailed + } + // Password checking is already performed in the API layer, so we don't need to check for now. // Staleness of password can be detected with OCC in the API layer, too. 
@@ -305,12 +318,20 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string return nil, err } - plog.Debugf("authorized %s, token is %s", username, token) + if as.lg != nil { + as.lg.Debug( + "authenticated a user", + zap.String("user-name", username), + zap.String("token", token), + ) + } else { + plog.Debugf("authorized %s, token is %s", username, token) + } return &pb.AuthenticateResponse{Token: token}, nil } func (as *authStore) CheckPassword(username, password string) (uint64, error) { - if !as.isAuthEnabled() { + if !as.IsAuthEnabled() { return 0, ErrAuthNotEnabled } @@ -318,16 +339,23 @@ func (as *authStore) CheckPassword(username, password string) (uint64, error) { tx.Lock() defer tx.Unlock() - user := getUser(tx, username) + user := getUser(as.lg, tx, username) if user == nil { return 0, ErrAuthFailed } - if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil { - plog.Noticef("authentication failed, invalid password for user %s", username) + if user.Options.NoPassword { return 0, ErrAuthFailed } + if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil { + if as.lg != nil { + as.lg.Info("invalid password", zap.String("user-name", username)) + } else { + plog.Noticef("authentication failed, invalid password for user %s", username) + } + return 0, ErrAuthFailed + } return getRevision(tx), nil } @@ -357,39 +385,66 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, return nil, ErrUserEmpty } - hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost) - if err != nil { - plog.Errorf("failed to hash password: %s", err) - return nil, err + var hashed []byte + var err error + + if r.Options != nil && !r.Options.NoPassword { + hashed, err = bcrypt.GenerateFromPassword([]byte(r.Password), as.bcryptCost) + if err != nil { + if as.lg != nil { + as.lg.Warn( + "failed to bcrypt hash password", + zap.String("user-name", r.Name), + zap.Error(err), + ) + } else { + plog.Errorf("failed to hash password: %s", err) + } + return nil, err + } } tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() - user := getUser(tx, r.Name) + user := getUser(as.lg, tx, r.Name) if user != nil { return nil, ErrUserAlreadyExist } + options := r.Options + if options == nil { + options = &authpb.UserAddOptions{ + NoPassword: false, + } + } + newUser := &authpb.User{ Name: []byte(r.Name), Password: hashed, + Options: options, } - putUser(tx, newUser) + putUser(as.lg, tx, newUser) as.commitRevision(tx) - as.saveConsistentIndex(tx) - - plog.Noticef("added a new user: %s", r.Name) + if as.lg != nil { + as.lg.Info("added a user", zap.String("user-name", r.Name)) + } else { + plog.Noticef("added a new user: %s", r.Name) + } return &pb.AuthUserAddResponse{}, nil } func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - if as.enabled && strings.Compare(r.Name, rootUser) == 0 { - plog.Errorf("the user root must not be deleted") + if as.enabled && r.Name == rootUser { + if as.lg != nil { + as.lg.Warn("cannot delete 'root' user", zap.String("user-name", r.Name)) + } else { + plog.Errorf("the user root must not be deleted") + } return nil, ErrInvalidAuthMgmt } @@ -397,7 +452,7 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete tx.Lock() defer tx.Unlock() - user := getUser(tx, r.Name) + user := getUser(as.lg, tx, r.Name) if user == nil { return nil, ErrUserNotFound } @@ -405,22 +460,36 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) 
(*pb.AuthUserDelete delUser(tx, r.Name) as.commitRevision(tx) - as.saveConsistentIndex(tx) as.invalidateCachedPerm(r.Name) as.tokenProvider.invalidateUser(r.Name) - plog.Noticef("deleted a user: %s", r.Name) - + if as.lg != nil { + as.lg.Info( + "deleted a user", + zap.String("user-name", r.Name), + zap.Strings("user-roles", user.Roles), + ) + } else { + plog.Noticef("deleted a user: %s", r.Name) + } return &pb.AuthUserDeleteResponse{}, nil } func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { // TODO(mitake): measure the cost of bcrypt.GenerateFromPassword() // If the cost is too high, we should move the encryption to outside of the raft - hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost) + hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), as.bcryptCost) if err != nil { - plog.Errorf("failed to hash password: %s", err) + if as.lg != nil { + as.lg.Warn( + "failed to bcrypt hash password", + zap.String("user-name", r.Name), + zap.Error(err), + ) + } else { + plog.Errorf("failed to hash password: %s", err) + } return nil, err } @@ -428,7 +497,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p tx.Lock() defer tx.Unlock() - user := getUser(tx, r.Name) + user := getUser(as.lg, tx, r.Name) if user == nil { return nil, ErrUserNotFound } @@ -437,18 +506,25 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p Name: []byte(r.Name), Roles: user.Roles, Password: hashed, + Options: user.Options, } - putUser(tx, updatedUser) + putUser(as.lg, tx, updatedUser) as.commitRevision(tx) - as.saveConsistentIndex(tx) as.invalidateCachedPerm(r.Name) as.tokenProvider.invalidateUser(r.Name) - plog.Noticef("changed a password of a user: %s", r.Name) - + if as.lg != nil { + as.lg.Info( + "changed a password of a user", + zap.String("user-name", r.Name), + zap.Strings("user-roles", user.Roles), + ) + } else { + plog.Noticef("changed a password of a user: %s", r.Name) + } return &pb.AuthUserChangePasswordResponse{}, nil } @@ -457,7 +533,7 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser tx.Lock() defer tx.Unlock() - user := getUser(tx, r.User) + user := getUser(as.lg, tx, r.User) if user == nil { return nil, ErrUserNotFound } @@ -470,29 +546,46 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser } idx := sort.SearchStrings(user.Roles, r.Role) - if idx < len(user.Roles) && strings.Compare(user.Roles[idx], r.Role) == 0 { - plog.Warningf("user %s is already granted role %s", r.User, r.Role) + if idx < len(user.Roles) && user.Roles[idx] == r.Role { + if as.lg != nil { + as.lg.Warn( + "ignored grant role request to a user", + zap.String("user-name", r.User), + zap.Strings("user-roles", user.Roles), + zap.String("duplicate-role-name", r.Role), + ) + } else { + plog.Warningf("user %s is already granted role %s", r.User, r.Role) + } return &pb.AuthUserGrantRoleResponse{}, nil } user.Roles = append(user.Roles, r.Role) sort.Strings(user.Roles) - putUser(tx, user) + putUser(as.lg, tx, user) as.invalidateCachedPerm(r.User) as.commitRevision(tx) - as.saveConsistentIndex(tx) - plog.Noticef("granted role %s to user %s", r.Role, r.User) + if as.lg != nil { + as.lg.Info( + "granted a role to a user", + zap.String("user-name", r.User), + zap.Strings("user-roles", user.Roles), + zap.String("added-role-name", r.Role), + ) + } else { + plog.Noticef("granted role %s to user %s", r.Role, r.User) + } 
return &pb.AuthUserGrantRoleResponse{}, nil } func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { tx := as.be.BatchTx() tx.Lock() - user := getUser(tx, r.Name) + user := getUser(as.lg, tx, r.Name) tx.Unlock() if user == nil { @@ -507,7 +600,7 @@ func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { tx := as.be.BatchTx() tx.Lock() - users := getAllUsers(tx) + users := getAllUsers(as.lg, tx) tx.Unlock() resp := &pb.AuthUserListResponse{Users: make([]string, len(users))} @@ -518,8 +611,16 @@ func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListRespon } func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - if as.enabled && strings.Compare(r.Name, rootUser) == 0 && strings.Compare(r.Role, rootRole) == 0 { - plog.Errorf("the role root must not be revoked from the user root") + if as.enabled && r.Name == rootUser && r.Role == rootRole { + if as.lg != nil { + as.lg.Warn( + "'root' user cannot revoke 'root' role", + zap.String("user-name", r.Name), + zap.String("role-name", r.Role), + ) + } else { + plog.Errorf("the role root must not be revoked from the user root") + } return nil, ErrInvalidAuthMgmt } @@ -527,7 +628,7 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs tx.Lock() defer tx.Unlock() - user := getUser(tx, r.Name) + user := getUser(as.lg, tx, r.Name) if user == nil { return nil, ErrUserNotFound } @@ -535,10 +636,11 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs updatedUser := &authpb.User{ Name: user.Name, Password: user.Password, + Options: user.Options, } for _, role := range user.Roles { - if strings.Compare(role, r.Role) != 0 { + if role != r.Role { updatedUser.Roles = append(updatedUser.Roles, role) } } @@ -547,14 +649,23 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs return nil, ErrRoleNotGranted } - putUser(tx, updatedUser) + putUser(as.lg, tx, updatedUser) as.invalidateCachedPerm(r.Name) as.commitRevision(tx) - as.saveConsistentIndex(tx) - plog.Noticef("revoked role %s from user %s", r.Role, r.Name) + if as.lg != nil { + as.lg.Info( + "revoked a role from a user", + zap.String("user-name", r.Name), + zap.Strings("old-user-roles", user.Roles), + zap.Strings("new-user-roles", updatedUser.Roles), + zap.String("revoked-role-name", r.Role), + ) + } else { + plog.Noticef("revoked role %s from user %s", r.Role, r.Name) + } return &pb.AuthUserRevokeRoleResponse{}, nil } @@ -576,7 +687,7 @@ func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { tx := as.be.BatchTx() tx.Lock() - roles := getAllRoles(tx) + roles := getAllRoles(as.lg, tx) tx.Unlock() resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))} @@ -601,7 +712,7 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) } for _, perm := range role.KeyPermission { - if !bytes.Equal(perm.Key, []byte(r.Key)) || !bytes.Equal(perm.RangeEnd, []byte(r.RangeEnd)) { + if !bytes.Equal(perm.Key, r.Key) || !bytes.Equal(perm.RangeEnd, r.RangeEnd) { updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm) } } @@ -610,22 +721,34 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) return nil, 
ErrPermissionNotGranted } - putRole(tx, updatedRole) + putRole(as.lg, tx, updatedRole) // TODO(mitake): currently single role update invalidates every cache // It should be optimized. as.clearCachedPerm() as.commitRevision(tx) - as.saveConsistentIndex(tx) - plog.Noticef("revoked key %s from role %s", r.Key, r.Role) + if as.lg != nil { + as.lg.Info( + "revoked a permission on range", + zap.String("role-name", r.Role), + zap.String("key", string(r.Key)), + zap.String("range-end", string(r.RangeEnd)), + ) + } else { + plog.Noticef("revoked key %s from role %s", r.Key, r.Role) + } return &pb.AuthRoleRevokePermissionResponse{}, nil } func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - if as.enabled && strings.Compare(r.Role, rootRole) == 0 { - plog.Errorf("the role root must not be deleted") + if as.enabled && r.Role == rootRole { + if as.lg != nil { + as.lg.Warn("cannot delete 'root' role", zap.String("role-name", r.Role)) + } else { + plog.Errorf("the role root must not be deleted") + } return nil, ErrInvalidAuthMgmt } @@ -640,15 +763,16 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete delRole(tx, r.Role) - users := getAllUsers(tx) + users := getAllUsers(as.lg, tx) for _, user := range users { updatedUser := &authpb.User{ Name: user.Name, Password: user.Password, + Options: user.Options, } for _, role := range user.Roles { - if strings.Compare(role, r.Role) != 0 { + if role != r.Role { updatedUser.Roles = append(updatedUser.Roles, role) } } @@ -657,19 +781,26 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete continue } - putUser(tx, updatedUser) + putUser(as.lg, tx, updatedUser) as.invalidateCachedPerm(string(user.Name)) } as.commitRevision(tx) - as.saveConsistentIndex(tx) - plog.Noticef("deleted role %s", r.Role) + if as.lg != nil { + as.lg.Info("deleted a role", zap.String("role-name", r.Role)) + } else { + plog.Noticef("deleted role %s", r.Role) + } return &pb.AuthRoleDeleteResponse{}, nil } func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { + if len(r.Name) == 0 { + return nil, ErrRoleEmpty + } + tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -683,13 +814,15 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, Name: []byte(r.Name), } - putRole(tx, newRole) + putRole(as.lg, tx, newRole) as.commitRevision(tx) - as.saveConsistentIndex(tx) - - plog.Noticef("Role %s is created", r.Name) + if as.lg != nil { + as.lg.Info("created a role", zap.String("role-name", r.Name)) + } else { + plog.Noticef("Role %s is created", r.Name) + } return &pb.AuthRoleAddResponse{}, nil } @@ -722,7 +855,7 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) ( } idx := sort.Search(len(role.KeyPermission), func(i int) bool { - return bytes.Compare(role.KeyPermission[i].Key, []byte(r.Perm.Key)) >= 0 + return bytes.Compare(role.KeyPermission[i].Key, r.Perm.Key) >= 0 }) if idx < len(role.KeyPermission) && bytes.Equal(role.KeyPermission[idx].Key, r.Perm.Key) && bytes.Equal(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) { @@ -731,8 +864,8 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) ( } else { // append new permission to the role newPerm := &authpb.Permission{ - Key: []byte(r.Perm.Key), - RangeEnd: []byte(r.Perm.RangeEnd), + Key: r.Perm.Key, + RangeEnd: r.Perm.RangeEnd, PermType: r.Perm.PermType, } @@ -740,23 +873,29 @@ func (as *authStore) RoleGrantPermission(r 
*pb.AuthRoleGrantPermissionRequest) ( sort.Sort(permSlice(role.KeyPermission)) } - putRole(tx, role) + putRole(as.lg, tx, role) // TODO(mitake): currently single role update invalidates every cache // It should be optimized. as.clearCachedPerm() as.commitRevision(tx) - as.saveConsistentIndex(tx) - - plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)]) + if as.lg != nil { + as.lg.Info( + "granted/updated a permission to a user", + zap.String("user-name", r.Name), + zap.String("permission-name", authpb.Permission_Type_name[int32(r.Perm.PermType)]), + ) + } else { + plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)]) + } return &pb.AuthRoleGrantPermissionResponse{}, nil } func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error { // TODO(mitake): this function would be costly so we need a caching mechanism - if !as.isAuthEnabled() { + if !as.IsAuthEnabled() { return nil } @@ -764,13 +903,8 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE if revision == 0 { return ErrUserEmpty } - rev := as.Revision() - if revision < rev { - plog.Warningf("request auth revision is less than current node auth revision,"+ - "current node auth revision is %d,"+ - "request auth revision is %d,"+ - "request key is %s, "+ - "err is %v", rev, revision, key, ErrAuthOldRevision) + + if revision < as.Revision() { return ErrAuthOldRevision } @@ -778,9 +912,13 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE tx.Lock() defer tx.Unlock() - user := getUser(tx, userName) + user := getUser(as.lg, tx, userName) if user == nil { - plog.Errorf("invalid user name %s for permission checking", userName) + if as.lg != nil { + as.lg.Warn("cannot find a user for permission check", zap.String("user-name", userName)) + } else { + plog.Errorf("invalid user name %s for permission checking", userName) + } return ErrPermissionDenied } @@ -809,7 +947,7 @@ func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd [] } func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { - if !as.isAuthEnabled() { + if !as.IsAuthEnabled() { return nil } if authInfo == nil { @@ -818,7 +956,7 @@ func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { tx := as.be.BatchTx() tx.Lock() - u := getUser(tx, authInfo.Username) + u := getUser(as.lg, tx, authInfo.Username) tx.Unlock() if u == nil { @@ -832,7 +970,7 @@ func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { return nil } -func getUser(tx backend.BatchTx, username string) *authpb.User { +func getUser(lg *zap.Logger, tx backend.BatchTx, username string) *authpb.User { _, vs := tx.UnsafeRange(authUsersBucketName, []byte(username), nil, 0) if len(vs) == 0 { return nil @@ -841,12 +979,20 @@ func getUser(tx backend.BatchTx, username string) *authpb.User { user := &authpb.User{} err := user.Unmarshal(vs[0]) if err != nil { - plog.Panicf("failed to unmarshal user struct (name: %s): %s", username, err) + if lg != nil { + lg.Panic( + "failed to unmarshal 'authpb.User'", + zap.String("user-name", username), + zap.Error(err), + ) + } else { + plog.Panicf("failed to unmarshal user struct (name: %s): %s", username, err) + } } return user } -func getAllUsers(tx backend.BatchTx) []*authpb.User { +func getAllUsers(lg *zap.Logger, tx backend.BatchTx) []*authpb.User { 
_, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1) if len(vs) == 0 { return nil @@ -857,17 +1003,25 @@ func getAllUsers(tx backend.BatchTx) []*authpb.User { user := &authpb.User{} err := user.Unmarshal(vs[i]) if err != nil { - plog.Panicf("failed to unmarshal user struct: %s", err) + if lg != nil { + lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err)) + } else { + plog.Panicf("failed to unmarshal user struct: %s", err) + } } users[i] = user } return users } -func putUser(tx backend.BatchTx, user *authpb.User) { +func putUser(lg *zap.Logger, tx backend.BatchTx, user *authpb.User) { b, err := user.Marshal() if err != nil { - plog.Panicf("failed to marshal user struct (name: %s): %s", user.Name, err) + if lg != nil { + lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err)) + } else { + plog.Panicf("failed to marshal user struct (name: %s): %s", user.Name, err) + } } tx.UnsafePut(authUsersBucketName, user.Name, b) } @@ -890,7 +1044,7 @@ func getRole(tx backend.BatchTx, rolename string) *authpb.Role { return role } -func getAllRoles(tx backend.BatchTx) []*authpb.Role { +func getAllRoles(lg *zap.Logger, tx backend.BatchTx) []*authpb.Role { _, vs := tx.UnsafeRange(authRolesBucketName, []byte{0}, []byte{0xff}, -1) if len(vs) == 0 { return nil @@ -901,33 +1055,62 @@ func getAllRoles(tx backend.BatchTx) []*authpb.Role { role := &authpb.Role{} err := role.Unmarshal(vs[i]) if err != nil { - plog.Panicf("failed to unmarshal role struct: %s", err) + if lg != nil { + lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err)) + } else { + plog.Panicf("failed to unmarshal role struct: %s", err) + } } roles[i] = role } return roles } -func putRole(tx backend.BatchTx, role *authpb.Role) { +func putRole(lg *zap.Logger, tx backend.BatchTx, role *authpb.Role) { b, err := role.Marshal() if err != nil { - plog.Panicf("failed to marshal role struct (name: %s): %s", role.Name, err) + if lg != nil { + lg.Panic( + "failed to marshal 'authpb.Role'", + zap.String("role-name", string(role.Name)), + zap.Error(err), + ) + } else { + plog.Panicf("failed to marshal role struct (name: %s): %s", role.Name, err) + } } - tx.UnsafePut(authRolesBucketName, []byte(role.Name), b) + tx.UnsafePut(authRolesBucketName, role.Name, b) } func delRole(tx backend.BatchTx, rolename string) { tx.UnsafeDelete(authRolesBucketName, []byte(rolename)) } -func (as *authStore) isAuthEnabled() bool { +func (as *authStore) IsAuthEnabled() bool { as.enabledMu.RLock() defer as.enabledMu.RUnlock() return as.enabled } -func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore { +// NewAuthStore creates a new AuthStore. 
+func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCost int) *authStore { + if bcryptCost < bcrypt.MinCost || bcryptCost > bcrypt.MaxCost { + if lg != nil { + lg.Warn( + "use default bcrypt cost instead of the invalid given cost", + zap.Int("min-cost", bcrypt.MinCost), + zap.Int("max-cost", bcrypt.MaxCost), + zap.Int("default-cost", bcrypt.DefaultCost), + zap.Int("given-cost", bcryptCost)) + } else { + plog.Warningf("Use default bcrypt-cost %d instead of the invalid value %d", + bcrypt.DefaultCost, bcryptCost) + } + + bcryptCost = bcrypt.DefaultCost + } + tx := be.BatchTx() tx.Lock() @@ -944,11 +1127,13 @@ func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore { } as := &authStore{ - be: be, revision: getRevision(tx), + lg: lg, + be: be, enabled: enabled, rangePermCache: make(map[string]*unifiedRangePermissions), tokenProvider: tp, + bcryptCost: bcryptCost, } if enabled { @@ -959,8 +1144,6 @@ func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore { as.commitRevision(tx) } - as.setupMetricsReporter() - tx.Unlock() be.ForceCommit() @@ -981,12 +1164,11 @@ func (as *authStore) commitRevision(tx backend.BatchTx) { } func getRevision(tx backend.BatchTx) uint64 { - _, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0) + _, vs := tx.UnsafeRange(authBucketName, revisionKey, nil, 0) if len(vs) != 1 { // this can happen in the initialization phase return 0 } - return binary.BigEndian.Uint64(vs[0]) } @@ -998,7 +1180,7 @@ func (as *authStore) Revision() uint64 { return atomic.LoadUint64(&as.revision) } -func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo { +func (as *authStore) AuthInfoFromTLS(ctx context.Context) (ai *AuthInfo) { peer, ok := peer.FromContext(ctx) if !ok || peer == nil || peer.AuthInfo == nil { return nil @@ -1006,31 +1188,47 @@ func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo { tlsInfo := peer.AuthInfo.(credentials.TLSInfo) for _, chains := range tlsInfo.State.VerifiedChains { - for _, chain := range chains { - cn := chain.Subject.CommonName - plog.Debugf("found common name %s", cn) - - ai := &AuthInfo{ - Username: cn, - Revision: as.Revision(), - } - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil - } + if len(chains) < 1 { + continue + } + ai = &AuthInfo{ + Username: chains[0].Subject.CommonName, + Revision: as.Revision(), + } + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil + } - // gRPC-gateway proxy request to etcd server includes Grpcgateway-Accept - // header. The proxy uses etcd client server certificate. If the certificate - // has a CommonName we should never use this for authentication. - if gw := md["grpcgateway-accept"]; len(gw) > 0 { + // gRPC-gateway proxy request to etcd server includes Grpcgateway-Accept + // header. The proxy uses etcd client server certificate. If the certificate + // has a CommonName we should never use this for authentication. 
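As an aside on the `NewAuthStore` hunk above: an out-of-range cost is silently replaced by `bcrypt.DefaultCost` before any password is hashed. Below is a minimal standalone sketch of that behaviour, assuming only `golang.org/x/crypto/bcrypt`; the helper name `normalizeBcryptCost` is ours and does not exist in etcd.

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

// normalizeBcryptCost mirrors the cost check in the patched NewAuthStore:
// anything outside [bcrypt.MinCost, bcrypt.MaxCost] falls back to the default.
// The helper name is illustrative only; it is not part of this patch.
func normalizeBcryptCost(cost int) int {
	if cost < bcrypt.MinCost || cost > bcrypt.MaxCost {
		return bcrypt.DefaultCost
	}
	return cost
}

func main() {
	cost := normalizeBcryptCost(99) // invalid, falls back to bcrypt.DefaultCost
	hashed, err := bcrypt.GenerateFromPassword([]byte("secret"), cost)
	if err != nil {
		panic(err)
	}
	// prints true: the hash verifies against the original password
	fmt.Println(bcrypt.CompareHashAndPassword(hashed, []byte("secret")) == nil)
}
```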
+ if gw := md["grpcgateway-accept"]; len(gw) > 0 { + if as.lg != nil { + as.lg.Warn( + "ignoring common name in gRPC-gateway proxy request", + zap.String("common-name", ai.Username), + zap.String("user-name", ai.Username), + zap.Uint64("revision", ai.Revision), + ) + } else { plog.Warningf("ignoring common name in gRPC-gateway proxy request %s", ai.Username) - return nil } - return ai + return nil } + if as.lg != nil { + as.lg.Debug( + "found command name", + zap.String("common-name", ai.Username), + zap.String("user-name", ai.Username), + zap.Uint64("revision", ai.Revision), + ) + } else { + plog.Debugf("found common name %s", ai.Username) + } + break } - - return nil + return ai } func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { @@ -1040,9 +1238,9 @@ func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { } //TODO(mitake|hexfusion) review unifying key names - ts, ok := md["token"] + ts, ok := md[rpctypes.TokenFieldNameGRPC] if !ok { - ts, ok = md["authorization"] + ts, ok = md[rpctypes.TokenFieldNameSwagger] } if !ok { return nil, nil @@ -1051,7 +1249,11 @@ func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { token := ts[0] authInfo, uok := as.authInfoFromToken(ctx, token) if !uok { - plog.Warningf("invalid auth token: %s", token) + if as.lg != nil { + as.lg.Warn("invalid auth token", zap.String("token", token)) + } else { + plog.Warningf("invalid auth token: %s", token) + } return nil, ErrInvalidAuthToken } @@ -1062,7 +1264,7 @@ func (as *authStore) GenTokenPrefix() (string, error) { return as.tokenProvider.genTokenPrefix() } -func decomposeOpts(optstr string) (string, map[string]string, error) { +func decomposeOpts(lg *zap.Logger, optstr string) (string, map[string]string, error) { opts := strings.Split(optstr, ",") tokenType := opts[0] @@ -1071,12 +1273,24 @@ func decomposeOpts(optstr string) (string, map[string]string, error) { pair := strings.Split(opts[i], "=") if len(pair) != 2 { - plog.Errorf("invalid token specific option: %s", optstr) + if lg != nil { + lg.Warn("invalid token option", zap.String("option", optstr)) + } else { + plog.Errorf("invalid token specific option: %s", optstr) + } return "", nil, ErrInvalidAuthOpts } if _, ok := typeSpecificOpts[pair[0]]; ok { - plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr) + if lg != nil { + lg.Warn( + "invalid token option", + zap.String("option", optstr), + zap.String("duplicate-parameter", pair[0]), + ) + } else { + plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr) + } return "", nil, ErrInvalidAuthOpts } @@ -1087,30 +1301,47 @@ func decomposeOpts(optstr string) (string, map[string]string, error) { } -func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) { - tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts) +// NewTokenProvider creates a new token provider. 
+func NewTokenProvider( + lg *zap.Logger, + tokenOpts string, + indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) { + tokenType, typeSpecificOpts, err := decomposeOpts(lg, tokenOpts) if err != nil { return nil, ErrInvalidAuthOpts } switch tokenType { case tokenTypeSimple: - plog.Warningf("simple token is not cryptographically signed") - return newTokenProviderSimple(indexWaiter), nil + if lg != nil { + lg.Warn("simple token is not cryptographically signed") + } else { + plog.Warningf("simple token is not cryptographically signed") + } + return newTokenProviderSimple(lg, indexWaiter), nil case tokenTypeJWT: - return newTokenProviderJWT(typeSpecificOpts) + return newTokenProviderJWT(lg, typeSpecificOpts) case "": return newTokenProviderNop() + default: - plog.Errorf("unknown token type: %s", tokenType) + if lg != nil { + lg.Warn( + "unknown token type", + zap.String("type", tokenType), + zap.Error(ErrInvalidAuthOpts), + ) + } else { + plog.Errorf("unknown token type: %s", tokenType) + } return nil, ErrInvalidAuthOpts } } func (as *authStore) WithRoot(ctx context.Context) context.Context { - if !as.isAuthEnabled() { + if !as.IsAuthEnabled() { return ctx } @@ -1119,7 +1350,14 @@ func (as *authStore) WithRoot(ctx context.Context) context.Context { ctx1 := context.WithValue(ctx, AuthenticateParamIndex{}, uint64(0)) prefix, err := ts.genTokenPrefix() if err != nil { - plog.Errorf("failed to generate prefix of internally used token") + if as.lg != nil { + as.lg.Warn( + "failed to generate prefix of internally used token", + zap.Error(err), + ) + } else { + plog.Errorf("failed to generate prefix of internally used token") + } return ctx } ctxForAssign = context.WithValue(ctx1, AuthenticateParamSimpleTokenPrefix{}, prefix) @@ -1130,12 +1368,19 @@ func (as *authStore) WithRoot(ctx context.Context) context.Context { token, err := as.tokenProvider.assign(ctxForAssign, "root", as.Revision()) if err != nil { // this must not happen - plog.Errorf("failed to assign token for lease revoking: %s", err) + if as.lg != nil { + as.lg.Warn( + "failed to assign token for lease revoking", + zap.Error(err), + ) + } else { + plog.Errorf("failed to assign token for lease revoking: %s", err) + } return ctx } mdMap := map[string]string{ - "token": token, + rpctypes.TokenFieldNameGRPC: token, } tokenMD := metadata.New(mdMap) @@ -1146,11 +1391,19 @@ func (as *authStore) WithRoot(ctx context.Context) context.Context { func (as *authStore) HasRole(user, role string) bool { tx := as.be.BatchTx() tx.Lock() - u := getUser(tx, user) + u := getUser(as.lg, tx, user) tx.Unlock() if u == nil { - plog.Warningf("tried to check user %s has role %s, but user %s doesn't exist", user, role, user) + if as.lg != nil { + as.lg.Warn( + "'has-role' requested for non-existing user", + zap.String("user-name", user), + zap.String("role-name", role), + ) + } else { + plog.Warningf("tried to check user %s has role %s, but user %s doesn't exist", user, role, user) + } return false } @@ -1159,22 +1412,9 @@ func (as *authStore) HasRole(user, role string) bool { return true } } - return false } -func (as *authStore) saveConsistentIndex(tx backend.BatchTx) { - if as.syncConsistentIndex != nil { - as.syncConsistentIndex(tx) - } else { - plog.Errorf("failed to save consistentIndex,syncConsistentIndex is nil") - } -} - -func (as *authStore) setupMetricsReporter() { - reportCurrentAuthRevMu.Lock() - reportCurrentAuthRev = func() float64 { - return float64(as.Revision()) - } - reportCurrentAuthRevMu.Unlock() +func (as *authStore) BcryptCost() 
int { + return as.bcryptCost } diff --git a/vendor/github.com/coreos/etcd/client/README.md b/vendor/go.etcd.io/etcd/client/README.md similarity index 88% rename from vendor/github.com/coreos/etcd/client/README.md rename to vendor/go.etcd.io/etcd/client/README.md index 2be731ede..521d6c012 100644 --- a/vendor/github.com/coreos/etcd/client/README.md +++ b/vendor/go.etcd.io/etcd/client/README.md @@ -2,19 +2,14 @@ etcd/client is the Go client library for etcd. -[![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client) +[![GoDoc](https://godoc.org/go.etcd.io/etcd/client?status.png)](https://godoc.org/go.etcd.io/etcd/client) -etcd uses `cmd/vendor` directory to store external dependencies, which are -to be compiled into etcd release binaries. `client` can be imported without -vendoring. For full compatibility, it is recommended to vendor builds using -etcd's vendored packages, using tools like godep, as in -[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). -For more detail, please read [Go vendor design](https://golang.org/s/go15vendor). +For full compatibility, it is recommended to vendor builds using etcd's vendored packages, using tools like `golang/dep`, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). ## Install ```bash -go get github.com/coreos/etcd/client +go get go.etcd.io/etcd/client ``` ## Usage @@ -27,7 +22,7 @@ import ( "time" "context" - "github.com/coreos/etcd/client" + "go.etcd.io/etcd/client" ) func main() { diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/go.etcd.io/etcd/client/auth_role.go similarity index 100% rename from vendor/github.com/coreos/etcd/client/auth_role.go rename to vendor/go.etcd.io/etcd/client/auth_role.go diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go b/vendor/go.etcd.io/etcd/client/auth_user.go similarity index 100% rename from vendor/github.com/coreos/etcd/client/auth_user.go rename to vendor/go.etcd.io/etcd/client/auth_user.go diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/go.etcd.io/etcd/client/cancelreq.go similarity index 100% rename from vendor/github.com/coreos/etcd/client/cancelreq.go rename to vendor/go.etcd.io/etcd/client/cancelreq.go diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/go.etcd.io/etcd/client/client.go similarity index 99% rename from vendor/github.com/coreos/etcd/client/client.go rename to vendor/go.etcd.io/etcd/client/client.go index e68745056..de9ab798e 100644 --- a/vendor/github.com/coreos/etcd/client/client.go +++ b/vendor/go.etcd.io/etcd/client/client.go @@ -29,7 +29,7 @@ import ( "sync" "time" - "github.com/coreos/etcd/version" + "go.etcd.io/etcd/version" ) var ( @@ -640,11 +640,11 @@ func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (* if resp.StatusCode/100 == 3 { hdr := resp.Header.Get("Location") if hdr == "" { - return nil, nil, fmt.Errorf("Location header not set") + return nil, nil, fmt.Errorf("location header not set") } loc, err := url.Parse(hdr) if err != nil { - return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr) + return nil, nil, fmt.Errorf("location header not valid URL: %s", hdr) } next = &redirectedHTTPAction{ action: act, diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/go.etcd.io/etcd/client/cluster_error.go similarity index 100% rename from vendor/github.com/coreos/etcd/client/cluster_error.go rename to 
vendor/go.etcd.io/etcd/client/cluster_error.go diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/go.etcd.io/etcd/client/curl.go similarity index 100% rename from vendor/github.com/coreos/etcd/client/curl.go rename to vendor/go.etcd.io/etcd/client/curl.go diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/go.etcd.io/etcd/client/discover.go similarity index 80% rename from vendor/github.com/coreos/etcd/client/discover.go rename to vendor/go.etcd.io/etcd/client/discover.go index 442e35fe5..580c25626 100644 --- a/vendor/github.com/coreos/etcd/client/discover.go +++ b/vendor/go.etcd.io/etcd/client/discover.go @@ -15,13 +15,13 @@ package client import ( - "github.com/coreos/etcd/pkg/srv" + "go.etcd.io/etcd/pkg/srv" ) // Discoverer is an interface that wraps the Discover method. type Discoverer interface { // Discover looks up the etcd servers for the domain. - Discover(domain string) ([]string, error) + Discover(domain string, serviceName string) ([]string, error) } type srvDiscover struct{} @@ -31,8 +31,8 @@ func NewSRVDiscover() Discoverer { return &srvDiscover{} } -func (d *srvDiscover) Discover(domain string) ([]string, error) { - srvs, err := srv.GetClient("etcd-client", domain) +func (d *srvDiscover) Discover(domain string, serviceName string) ([]string, error) { + srvs, err := srv.GetClient("etcd-client", domain, serviceName) if err != nil { return nil, err } diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/go.etcd.io/etcd/client/doc.go similarity index 98% rename from vendor/github.com/coreos/etcd/client/doc.go rename to vendor/go.etcd.io/etcd/client/doc.go index ad4eca4e1..abe5199c3 100644 --- a/vendor/github.com/coreos/etcd/client/doc.go +++ b/vendor/go.etcd.io/etcd/client/doc.go @@ -21,7 +21,7 @@ Create a Config and exchange it for a Client: "net/http" "context" - "github.com/coreos/etcd/client" + "go.etcd.io/etcd/client" ) cfg := client.Config{ diff --git a/vendor/github.com/coreos/etcd/client/json.go b/vendor/go.etcd.io/etcd/client/json.go similarity index 100% rename from vendor/github.com/coreos/etcd/client/json.go rename to vendor/go.etcd.io/etcd/client/json.go diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/go.etcd.io/etcd/client/keys.go similarity index 99% rename from vendor/github.com/coreos/etcd/client/keys.go rename to vendor/go.etcd.io/etcd/client/keys.go index f8f2c7b18..ec53830c7 100644 --- a/vendor/github.com/coreos/etcd/client/keys.go +++ b/vendor/go.etcd.io/etcd/client/keys.go @@ -19,13 +19,12 @@ import ( "encoding/json" "errors" "fmt" + "go.etcd.io/etcd/pkg/pathutil" "net/http" "net/url" "strconv" "strings" "time" - - "github.com/coreos/etcd/pkg/pathutil" ) const ( @@ -63,7 +62,7 @@ func (e Error) Error() string { } var ( - ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.") + ErrInvalidJSON = errors.New("client: response is invalid json. 
The endpoint is probably not valid etcd cluster endpoint") ErrEmptyBody = errors.New("client: response body is empty") ) diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/go.etcd.io/etcd/client/members.go similarity index 99% rename from vendor/github.com/coreos/etcd/client/members.go rename to vendor/go.etcd.io/etcd/client/members.go index aafa3d1b8..657131ab0 100644 --- a/vendor/github.com/coreos/etcd/client/members.go +++ b/vendor/go.etcd.io/etcd/client/members.go @@ -23,7 +23,7 @@ import ( "net/url" "path" - "github.com/coreos/etcd/pkg/types" + "go.etcd.io/etcd/pkg/types" ) var ( diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/go.etcd.io/etcd/client/util.go similarity index 100% rename from vendor/github.com/coreos/etcd/client/util.go rename to vendor/go.etcd.io/etcd/client/util.go diff --git a/vendor/go.etcd.io/etcd/clientv3/README.md b/vendor/go.etcd.io/etcd/clientv3/README.md new file mode 100644 index 000000000..6c6fe7c67 --- /dev/null +++ b/vendor/go.etcd.io/etcd/clientv3/README.md @@ -0,0 +1,85 @@ +# etcd/clientv3 + +[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs) +[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/clientv3) + +`etcd/clientv3` is the official Go etcd client for v3. + +## Install + +```bash +go get go.etcd.io/etcd/clientv3 +``` + +## Get started + +Create client using `clientv3.New`: + +```go +cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, + DialTimeout: 5 * time.Second, +}) +if err != nil { + // handle error! +} +defer cli.Close() +``` + +etcd v3 uses [`gRPC`](https://www.grpc.io) for remote procedure calls. And `clientv3` uses +[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it. +If the client is not closed, the connection will have leaky goroutines. To specify client request timeout, +pass `context.WithTimeout` to APIs: + +```go +ctx, cancel := context.WithTimeout(context.Background(), timeout) +resp, err := cli.Put(ctx, "sample_key", "sample_value") +cancel() +if err != nil { + // handle error! +} +// use the response +``` + +For full compatibility, it is recommended to vendor builds using etcd's vendored packages, using tools like `golang/dep`, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). + +## Error Handling + +etcd client returns 2 types of errors: + +1. context error: canceled or deadline exceeded. +2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes). + +Here is the example code to handle client errors: + +```go +resp, err := cli.Put(ctx, "", "") +if err != nil { + switch err { + case context.Canceled: + log.Fatalf("ctx is canceled by another routine: %v", err) + case context.DeadlineExceeded: + log.Fatalf("ctx is attached with a deadline is exceeded: %v", err) + case rpctypes.ErrEmptyKey: + log.Fatalf("client-side error: %v", err) + default: + log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err) + } +} +``` + +## Metrics + +The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/etcd-io/etcd/blob/master/clientv3/example_metrics_test.go). 
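Picking up the Metrics note just above (and the request-size knobs documented further down in this README), here is a hedged sketch of how a caller might wire both into a `clientv3.Config`. The endpoints and the 10 MiB value are placeholders, not part of this patch.

```go
package example

import (
	"time"

	grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
	"go.etcd.io/etcd/clientv3"
	"google.golang.org/grpc"
)

// newInstrumentedClient wires the client-side go-grpc-prometheus interceptors
// into the dial options so every unary and streaming RPC is counted, and
// raises the request send limit beyond the 2 MiB default.
func newInstrumentedClient(endpoints []string) (*clientv3.Client, error) {
	return clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: 5 * time.Second,
		DialOptions: []grpc.DialOption{
			grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
			grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
		},
		MaxCallSendMsgSize: 10 * 1024 * 1024, // illustrative; default is 2 MiB
	})
}
```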
+ +## Namespacing + +The [namespace](https://godoc.org/go.etcd.io/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix. + +## Request size limit + +Client request size limit is configurable via `clientv3.Config.MaxCallSendMsgSize` and `MaxCallRecvMsgSize` in bytes. If none given, client request send limit defaults to 2 MiB including gRPC overhead bytes. And receive limit defaults to `math.MaxInt32`. + +## Examples + +More code examples can be found at [GoDoc](https://godoc.org/go.etcd.io/etcd/clientv3). diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/go.etcd.io/etcd/clientv3/auth.go similarity index 74% rename from vendor/github.com/coreos/etcd/clientv3/auth.go rename to vendor/go.etcd.io/etcd/clientv3/auth.go index edccf1a8c..c954f1bf4 100644 --- a/vendor/github.com/coreos/etcd/clientv3/auth.go +++ b/vendor/go.etcd.io/etcd/clientv3/auth.go @@ -19,9 +19,8 @@ import ( "fmt" "strings" - "github.com/coreos/etcd/auth/authpb" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - + "go.etcd.io/etcd/auth/authpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "google.golang.org/grpc" ) @@ -53,6 +52,8 @@ const ( PermReadWrite = authpb.READWRITE ) +type UserAddOptions authpb.UserAddOptions + type Auth interface { // AuthEnable enables auth of an etcd cluster. AuthEnable(ctx context.Context) (*AuthEnableResponse, error) @@ -63,6 +64,9 @@ type Auth interface { // UserAdd adds a new user to an etcd cluster. UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) + // UserAddWithOptions adds a new user to an etcd cluster with some options. + UserAddWithOptions(ctx context.Context, name string, password string, opt *UserAddOptions) (*AuthUserAddResponse, error) + // UserDelete deletes a user from an etcd cluster. UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) @@ -100,70 +104,75 @@ type Auth interface { RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) } -type auth struct { +type authClient struct { remote pb.AuthClient callOpts []grpc.CallOption } func NewAuth(c *Client) Auth { - api := &auth{remote: RetryAuthClient(c)} + api := &authClient{remote: RetryAuthClient(c)} if c != nil { api.callOpts = c.callOpts } return api } -func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { +func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) return (*AuthEnableResponse)(resp), toErr(ctx, err) } -func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { +func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) return (*AuthDisableResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { - resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...) +func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...) 
+ return (*AuthUserAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) { + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...) return (*AuthUserAddResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { +func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { +func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { +func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { +func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) return (*AuthUserGetResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) { +func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) { resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) return (*AuthUserListResponse)(resp), toErr(ctx, err) } -func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { +func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { +func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) 
return (*AuthRoleAddResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { +func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { perm := &authpb.Permission{ Key: []byte(key), RangeEnd: []byte(rangeEnd), @@ -173,22 +182,22 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { +func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) return (*AuthRoleGetResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { +func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) return (*AuthRoleListResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { - resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...) +func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { + resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...) return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) } -func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { +func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) 
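For orientation, a rough usage sketch of the auth API surface touched in this file: the new `UserAddWithOptions` call plus a prefix grant via `RoleGrantPermission`. User, role, and key names are illustrative, and the `viewer` role is assumed to exist already.

```go
package example

import (
	"context"
	"time"

	"go.etcd.io/etcd/clientv3"
)

// setupReadOnlyViewer creates a password-less user (authenticated by TLS
// common name only) and grants an existing "viewer" role read access to
// every key under "/registry/".
func setupReadOnlyViewer(cli *clientv3.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// NoPassword users store no bcrypt hash; the server rejects password
	// authentication for them (see the Authenticate/CheckPassword hunks in
	// auth/store.go earlier in this patch).
	if _, err := cli.UserAddWithOptions(ctx, "cn-only-user", "",
		&clientv3.UserAddOptions{NoPassword: true}); err != nil {
		return err
	}

	// [key, rangeEnd) covers the "/registry/" prefix: '/'+1 == '0'.
	_, err := cli.RoleGrantPermission(ctx, "viewer", "/registry/", "/registry0",
		clientv3.PermissionType(clientv3.PermRead))
	return err
}
```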
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go b/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go similarity index 98% rename from vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go rename to vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go index 9306385e9..d02a7eec7 100644 --- a/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go +++ b/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go @@ -20,8 +20,8 @@ import ( "sync" "time" - "github.com/coreos/etcd/clientv3/balancer/connectivity" - "github.com/coreos/etcd/clientv3/balancer/picker" + "go.etcd.io/etcd/clientv3/balancer/connectivity" + "go.etcd.io/etcd/clientv3/balancer/picker" "go.uber.org/zap" "google.golang.org/grpc/balancer" diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go b/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go similarity index 100% rename from vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go rename to vendor/go.etcd.io/etcd/clientv3/balancer/connectivity/connectivity.go diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go rename to vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go similarity index 100% rename from vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go rename to vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go similarity index 100% rename from vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go rename to vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go similarity index 100% rename from vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go rename to vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go b/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go similarity index 100% rename from vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go rename to vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go b/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go similarity index 100% rename from vendor/github.com/coreos/etcd/clientv3/balancer/utils.go rename to vendor/go.etcd.io/etcd/clientv3/balancer/utils.go diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/go.etcd.io/etcd/clientv3/client.go similarity index 96% rename from vendor/github.com/coreos/etcd/clientv3/client.go rename to vendor/go.etcd.io/etcd/clientv3/client.go index c49e4ba12..215e05479 100644 --- a/vendor/github.com/coreos/etcd/clientv3/client.go +++ b/vendor/go.etcd.io/etcd/clientv3/client.go @@ -25,19 +25,19 @@ import ( "sync" "time" - "github.com/coreos/etcd/clientv3/balancer" - "github.com/coreos/etcd/clientv3/balancer/picker" - 
"github.com/coreos/etcd/clientv3/balancer/resolver/endpoint" - "github.com/coreos/etcd/clientv3/credentials" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - "github.com/coreos/etcd/pkg/logutil" - "github.com/coreos/pkg/capnslog" "github.com/google/uuid" + "go.etcd.io/etcd/clientv3/balancer" + "go.etcd.io/etcd/clientv3/balancer/picker" + "go.etcd.io/etcd/clientv3/balancer/resolver/endpoint" + "go.etcd.io/etcd/clientv3/credentials" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + "go.etcd.io/etcd/pkg/logutil" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" grpccredentials "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -48,10 +48,6 @@ var ( roundRobinBalancerName = fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String()) ) -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "clientv3") -) - func init() { lg := zap.NewNop() if os.Getenv("ETCD_CLIENT_DEBUG") != "" { @@ -133,8 +129,12 @@ func NewFromURLs(urls []string) (*Client, error) { // Close shuts down the client's etcd connections. func (c *Client) Close() error { c.cancel() - c.Watcher.Close() - c.Lease.Close() + if c.Watcher != nil { + c.Watcher.Close() + } + if c.Lease != nil { + c.Lease.Close() + } if c.resolverGroup != nil { c.resolverGroup.Close() } @@ -397,6 +397,13 @@ func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCrede return creds } +// WithRequireLeader requires client requests to only succeed +// when the cluster has a leader. +func WithRequireLeader(ctx context.Context) context.Context { + md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) + return metadata.NewOutgoingContext(ctx, md) +} + func newClient(cfg *Config) (*Client, error) { if cfg == nil { cfg = &Config{} diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/go.etcd.io/etcd/clientv3/cluster.go similarity index 69% rename from vendor/github.com/coreos/etcd/clientv3/cluster.go rename to vendor/go.etcd.io/etcd/clientv3/cluster.go index 785672be8..ce97e5c85 100644 --- a/vendor/github.com/coreos/etcd/clientv3/cluster.go +++ b/vendor/go.etcd.io/etcd/clientv3/cluster.go @@ -17,18 +17,19 @@ package clientv3 import ( "context" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/types" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/pkg/types" "google.golang.org/grpc" ) type ( - Member pb.Member - MemberListResponse pb.MemberListResponse - MemberAddResponse pb.MemberAddResponse - MemberRemoveResponse pb.MemberRemoveResponse - MemberUpdateResponse pb.MemberUpdateResponse + Member pb.Member + MemberListResponse pb.MemberListResponse + MemberAddResponse pb.MemberAddResponse + MemberRemoveResponse pb.MemberRemoveResponse + MemberUpdateResponse pb.MemberUpdateResponse + MemberPromoteResponse pb.MemberPromoteResponse ) type Cluster interface { @@ -38,11 +39,17 @@ type Cluster interface { // MemberAdd adds a new member into the cluster. MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + // MemberAddAsLearner adds a new learner member into the cluster. + MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + // MemberRemove removes an existing member from the cluster. MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) // MemberUpdate updates the peer addresses of the member. 
MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) + + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) } type cluster struct { @@ -67,12 +74,23 @@ func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster { } func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + return c.memberAdd(ctx, peerAddrs, false) +} + +func (c *cluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + return c.memberAdd(ctx, peerAddrs, true) +} + +func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner bool) (*MemberAddResponse, error) { // fail-fast before panic in rafthttp if _, err := types.NewURLs(peerAddrs); err != nil { return nil, err } - r := &pb.MemberAddRequest{PeerURLs: peerAddrs} + r := &pb.MemberAddRequest{ + PeerURLs: peerAddrs, + IsLearner: isLearner, + } resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) if err != nil { return nil, toErr(ctx, err) @@ -112,3 +130,12 @@ func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { } return nil, toErr(ctx, err) } + +func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) { + r := &pb.MemberPromoteRequest{ID: id} + resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberPromoteResponse)(resp), nil +} diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/go.etcd.io/etcd/clientv3/compact_op.go similarity index 96% rename from vendor/github.com/coreos/etcd/clientv3/compact_op.go rename to vendor/go.etcd.io/etcd/clientv3/compact_op.go index 41e80c1da..5779713d3 100644 --- a/vendor/github.com/coreos/etcd/clientv3/compact_op.go +++ b/vendor/go.etcd.io/etcd/clientv3/compact_op.go @@ -15,7 +15,7 @@ package clientv3 import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" ) // CompactOp represents a compact operation. 
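The `Cluster` additions above (learner members and promotion) are easiest to read with a usage sketch; the peer URL handling and retry policy here are illustrative only.

```go
package example

import (
	"context"

	"go.etcd.io/etcd/clientv3"
)

// addAndPromoteLearner adds a new member as a non-voting learner and then
// promotes it to a voting member. In practice the promote call should only
// be issued once the learner's log has caught up with the leader; until then
// the server rejects it and callers typically retry.
func addAndPromoteLearner(ctx context.Context, cli *clientv3.Client, peerURL string) (uint64, error) {
	addResp, err := cli.MemberAddAsLearner(ctx, []string{peerURL})
	if err != nil {
		return 0, err
	}
	id := addResp.Member.ID

	if _, err := cli.MemberPromote(ctx, id); err != nil {
		return id, err
	}
	return id, nil
}
```

If these calls should fail fast while the cluster has no leader, the context can additionally be wrapped with the `WithRequireLeader` helper added to `client.go` above.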
diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/go.etcd.io/etcd/clientv3/compare.go similarity index 98% rename from vendor/github.com/coreos/etcd/clientv3/compare.go rename to vendor/go.etcd.io/etcd/clientv3/compare.go index b5f0a2552..01ed68e94 100644 --- a/vendor/github.com/coreos/etcd/clientv3/compare.go +++ b/vendor/go.etcd.io/etcd/clientv3/compare.go @@ -15,7 +15,7 @@ package clientv3 import ( - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" ) type CompareTarget int diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go rename to vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go similarity index 90% rename from vendor/github.com/coreos/etcd/clientv3/concurrency/election.go rename to vendor/go.etcd.io/etcd/clientv3/concurrency/election.go index 20160472a..2521db6ac 100644 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go +++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go @@ -19,9 +19,9 @@ import ( "errors" "fmt" - v3 "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/mvccpb" + v3 "go.etcd.io/etcd/clientv3" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/mvcc/mvccpb" ) var ( @@ -48,16 +48,24 @@ func NewElection(s *Session, pfx string) *Election { // ResumeElection initializes an election with a known leader. func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election { return &Election{ - session: s, keyPrefix: pfx, + session: s, leaderKey: leaderKey, leaderRev: leaderRev, leaderSession: s, } } -// Campaign puts a value as eligible for the election. It blocks until -// it is elected, an error occurs, or the context is cancelled. +// Campaign puts a value as eligible for the election on the prefix +// key. +// Multiple sessions can participate in the election for the +// same prefix, but only one can be the leader at a time. +// +// If the context is 'context.TODO()/context.Background()', the Campaign +// will continue to be blocked for other keys to be deleted, unless server +// returns a non-recoverable error (e.g. ErrCompacted). +// Otherwise, until the context is not cancelled or timed-out, Campaign will +// continue to be blocked until it becomes the leader. 
func (e *Election) Campaign(ctx context.Context, val string) error { s := e.session client := e.session.Client() diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go similarity index 93% rename from vendor/github.com/coreos/etcd/clientv3/concurrency/key.go rename to vendor/go.etcd.io/etcd/clientv3/concurrency/key.go index 4b6e399bd..e4cf77517 100644 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go +++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go @@ -18,9 +18,9 @@ import ( "context" "fmt" - v3 "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/mvccpb" + v3 "go.etcd.io/etcd/clientv3" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/mvcc/mvccpb" ) func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error { diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go similarity index 97% rename from vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go rename to vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go index 77b3582cd..013534193 100644 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go +++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go @@ -19,8 +19,8 @@ import ( "fmt" "sync" - v3 "github.com/coreos/etcd/clientv3" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + v3 "go.etcd.io/etcd/clientv3" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" ) // Mutex implements the sync Locker interface with etcd diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go similarity index 98% rename from vendor/github.com/coreos/etcd/clientv3/concurrency/session.go rename to vendor/go.etcd.io/etcd/clientv3/concurrency/session.go index c399d64a6..97eb76310 100644 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go +++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go @@ -18,7 +18,7 @@ import ( "context" "time" - v3 "github.com/coreos/etcd/clientv3" + v3 "go.etcd.io/etcd/clientv3" ) const defaultSessionTTL = 60 @@ -47,7 +47,7 @@ func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { if err != nil { return nil, err } - id = v3.LeaseID(resp.ID) + id = resp.ID } ctx, cancel := context.WithCancel(ops.ctx) diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go similarity index 99% rename from vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go rename to vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go index d11023ebe..ee1151079 100644 --- a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go +++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go @@ -18,7 +18,7 @@ import ( "context" "math" - v3 "github.com/coreos/etcd/clientv3" + v3 "go.etcd.io/etcd/clientv3" ) // STM is an interface for software transactional memory. 
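Note on the concurrency hunks above (election.go, key.go, mutex.go, session.go): only import paths and a redundant LeaseID cast change, but the expanded Campaign documentation is worth pairing with a usage sketch. Assuming an existing client `cli`; the election prefix is illustrative:

    import (
        "context"

        v3 "go.etcd.io/etcd/clientv3"
        "go.etcd.io/etcd/clientv3/concurrency"
    )

    func becomeLeader(ctx context.Context, cli *v3.Client, val string) error {
        // The session ties the candidate key to a lease, so a crashed
        // candidate drops out of the election when its lease expires.
        s, err := concurrency.NewSession(cli)
        if err != nil {
            return err
        }
        defer s.Close()

        e := concurrency.NewElection(s, "/leader-election/")
        // Blocks until this session is elected, ctx is cancelled, or the
        // server returns a non-recoverable error (e.g. ErrCompacted).
        return e.Campaign(ctx, val)
    }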
diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/go.etcd.io/etcd/clientv3/config.go similarity index 98% rename from vendor/github.com/coreos/etcd/clientv3/config.go rename to vendor/go.etcd.io/etcd/clientv3/config.go index 9c17fc252..11d447d57 100644 --- a/vendor/github.com/coreos/etcd/clientv3/config.go +++ b/vendor/go.etcd.io/etcd/clientv3/config.go @@ -72,15 +72,17 @@ type Config struct { // Without this, Dial returns immediately and connecting the server happens in background. DialOptions []grpc.DialOption + // Context is the default client context; it can be used to cancel grpc dial out and + // other operations that do not have an explicit context. + Context context.Context + // LogConfig configures client-side logger. // If nil, use the default logger. // TODO: configure gRPC logger LogConfig *zap.Config - // Context is the default client context; it can be used to cancel grpc dial out and - // other operations that do not have an explicit context. - Context context.Context - // PermitWithoutStream when set will allow client to send keepalive pings to server without any active streams(RPCs). PermitWithoutStream bool `json:"permit-without-stream"` + + // TODO: support custom balancer picker } diff --git a/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go b/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go similarity index 97% rename from vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go rename to vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go index 2dc201292..63389c08b 100644 --- a/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go +++ b/vendor/go.etcd.io/etcd/clientv3/credentials/credentials.go @@ -22,8 +22,8 @@ import ( "net" "sync" - "github.com/coreos/etcd/clientv3/balancer/resolver/endpoint" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + "go.etcd.io/etcd/clientv3/balancer/resolver/endpoint" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" grpccredentials "google.golang.org/grpc/credentials" ) diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/go.etcd.io/etcd/clientv3/doc.go similarity index 82% rename from vendor/github.com/coreos/etcd/clientv3/doc.go rename to vendor/go.etcd.io/etcd/clientv3/doc.go index 717fbe435..913cd2825 100644 --- a/vendor/github.com/coreos/etcd/clientv3/doc.go +++ b/vendor/go.etcd.io/etcd/clientv3/doc.go @@ -19,7 +19,7 @@ // // expect dial time-out on ipv4 blackhole // _, err := clientv3.New(clientv3.Config{ // Endpoints: []string{"http://254.0.0.1:12345"}, -// DialTimeout: 2 * time.Second +// DialTimeout: 2 * time.Second, // }) // // // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3 @@ -61,7 +61,7 @@ // // 1. context error: canceled or deadline exceeded. // 2. gRPC status error: e.g. when clock drifts in server-side before client's context deadline exceeded. -// 3. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go +// 3. 
gRPC error: see https://github.com/etcd-io/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go // // Here is the example code to handle client errors: // @@ -71,14 +71,14 @@ // // ctx is canceled by another routine // } else if err == context.DeadlineExceeded { // // ctx is attached with a deadline and it exceeded +// } else if err == rpctypes.ErrEmptyKey { +// // client-side error: key is not provided // } else if ev, ok := status.FromError(err); ok { // code := ev.Code() // if code == codes.DeadlineExceeded { // // server-side context might have timed-out first (due to clock skew) // // while original client-side context is not timed-out yet // } -// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok { -// // process (verr.Errors) // } else { // // bad cluster endpoints, which are not etcd servers // } @@ -87,11 +87,20 @@ // go func() { cli.Close() }() // _, err := kvc.Get(ctx, "a") // if err != nil { +// // with etcd clientv3 <= v3.3 // if err == context.Canceled { // // grpc balancer calls 'Get' with an inflight client.Close -// } else if err == grpc.ErrClientConnClosing { +// } else if err == grpc.ErrClientConnClosing { // <= gRCP v1.7.x // // grpc balancer calls 'Get' after client.Close. // } +// // with etcd clientv3 >= v3.4 +// if clientv3.IsConnCanceled(err) { +// // gRPC client connection is closed +// } // } // +// The grpc load balancer is registered statically and is shared across etcd clients. +// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment +// variable. E.g. "ETCD_CLIENT_DEBUG=1". +// package clientv3 diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/go.etcd.io/etcd/clientv3/kv.go similarity index 99% rename from vendor/github.com/coreos/etcd/clientv3/kv.go rename to vendor/go.etcd.io/etcd/clientv3/kv.go index 5a7469bd4..2b7864ad8 100644 --- a/vendor/github.com/coreos/etcd/clientv3/kv.go +++ b/vendor/go.etcd.io/etcd/clientv3/kv.go @@ -17,7 +17,7 @@ package clientv3 import ( "context" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "google.golang.org/grpc" ) diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/go.etcd.io/etcd/clientv3/lease.go similarity index 92% rename from vendor/github.com/coreos/etcd/clientv3/lease.go rename to vendor/go.etcd.io/etcd/clientv3/lease.go index 3729cf37b..c2796fc96 100644 --- a/vendor/github.com/coreos/etcd/clientv3/lease.go +++ b/vendor/go.etcd.io/etcd/clientv3/lease.go @@ -19,9 +19,10 @@ import ( "sync" "time" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/metadata" ) @@ -117,22 +118,21 @@ type Lease interface { // Leases retrieves all leases. Leases(ctx context.Context) (*LeaseLeasesResponse, error) - // KeepAlive keeps the given lease alive forever. If the keepalive response - // posted to the channel is not consumed immediately, the lease client will - // continue sending keep alive requests to the etcd server at least every - // second until latest response is consumed. + // KeepAlive attempts to keep the given lease alive forever. If the keepalive responses posted + // to the channel are not consumed promptly the channel may become full. 
When full, the lease + // client will continue sending keep alive requests to the etcd server, but will drop responses + // until there is capacity on the channel to send more responses. + // + // If client keep alive loop halts with an unexpected error (e.g. "etcdserver: no leader") or + // canceled by the caller (e.g. context.Canceled), KeepAlive returns a ErrKeepAliveHalted error + // containing the error reason. // // The returned "LeaseKeepAliveResponse" channel closes if underlying keep // alive stream is interrupted in some way the client cannot handle itself; - // given context "ctx" is canceled or timed out. "LeaseKeepAliveResponse" - // from this closed channel is nil. - // - // If client keep alive loop halts with an unexpected error (e.g. "etcdserver: - // no leader") or canceled by the caller (e.g. context.Canceled), the error - // is returned. Otherwise, it retries. + // given context "ctx" is canceled or timed out. // // TODO(v4.0): post errors to last keep alive message before closing - // (see https://github.com/coreos/etcd/pull/7866) + // (see https://github.com/etcd-io/etcd/pull/7866) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) // KeepAliveOnce renews the lease once. The response corresponds to the @@ -172,6 +172,8 @@ type lessor struct { firstKeepAliveOnce sync.Once callOpts []grpc.CallOption + + lg *zap.Logger } // keepAlive multiplexes a keepalive for a lease over multiple channels @@ -196,6 +198,7 @@ func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout keepAlives: make(map[LeaseID]*keepAlive), remote: remote, firstKeepAliveTimeout: keepAliveTimeout, + lg: c.lg, } if l.firstKeepAliveTimeout == time.Second { l.firstKeepAliveTimeout = defaultTTL @@ -291,7 +294,7 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl } l.mu.Unlock() - go l.keepAliveCtxCloser(id, ctx, ka.donec) + go l.keepAliveCtxCloser(ctx, id, ka.donec) l.firstKeepAliveOnce.Do(func() { go l.recvKeepAliveLoop() go l.deadlineLoop() @@ -323,7 +326,7 @@ func (l *lessor) Close() error { return nil } -func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) { +func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) { select { case <-donec: return @@ -459,7 +462,6 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { select { case <-time.After(retryConnWait): - continue case <-l.stopCtx.Done(): return l.stopCtx.Err() } @@ -469,7 +471,7 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { // resetRecv opens a new lease stream and starts sending keep alive requests. func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { sctx, cancel := context.WithCancel(l.stopCtx) - stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...) + stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...) 
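Note on the reworded KeepAlive contract above: if the caller stops draining the response channel, the client keeps renewing the lease but drops responses (and, later in this lease.go hunk, logs a "queue is full" warning). A minimal consumer sketch, assuming an existing client `cli`; the 10-second TTL is illustrative:

    import (
        "context"

        v3 "go.etcd.io/etcd/clientv3"
    )

    func keepLeaseAlive(ctx context.Context, cli *v3.Client) error {
        lease, err := cli.Grant(ctx, 10)
        if err != nil {
            return err
        }
        ch, err := cli.KeepAlive(ctx, lease.ID)
        if err != nil {
            return err
        }
        // Drain the channel promptly; it closes when ctx is cancelled or the
        // keepalive stream is interrupted in a way the client cannot recover from.
        for range ch {
        }
        return ctx.Err()
    }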
if err != nil { cancel() return nil, err @@ -518,6 +520,12 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { select { case ch <- karesp: default: + if l.lg != nil { + l.lg.Warn("lease keepalive response queue is full; dropping response send", + zap.Int("queue-size", len(ch)), + zap.Int("queue-capacity", cap(ch)), + ) + } } // still advance in order to rate-limit keep-alive sends ka.nextKeepAlive = nextKeepAlive @@ -569,7 +577,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { } select { - case <-time.After(500 * time.Millisecond): + case <-time.After(retryConnWait): case <-stream.Context().Done(): return case <-l.donec: diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/go.etcd.io/etcd/clientv3/logger.go similarity index 98% rename from vendor/github.com/coreos/etcd/clientv3/logger.go rename to vendor/go.etcd.io/etcd/clientv3/logger.go index 3276372ad..f5ae0109d 100644 --- a/vendor/github.com/coreos/etcd/clientv3/logger.go +++ b/vendor/go.etcd.io/etcd/clientv3/logger.go @@ -18,7 +18,7 @@ import ( "io/ioutil" "sync" - "github.com/coreos/etcd/pkg/logutil" + "go.etcd.io/etcd/pkg/logutil" "google.golang.org/grpc/grpclog" ) diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/go.etcd.io/etcd/clientv3/maintenance.go similarity index 93% rename from vendor/github.com/coreos/etcd/clientv3/maintenance.go rename to vendor/go.etcd.io/etcd/clientv3/maintenance.go index 5e87cf8f3..744455a3b 100644 --- a/vendor/github.com/coreos/etcd/clientv3/maintenance.go +++ b/vendor/go.etcd.io/etcd/clientv3/maintenance.go @@ -19,7 +19,7 @@ import ( "fmt" "io" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "google.golang.org/grpc" ) @@ -193,32 +193,23 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { return nil, toErr(ctx, err) } - plog.Info("opened snapshot stream; downloading") pr, pw := io.Pipe() go func() { for { resp, err := ss.Recv() if err != nil { - switch err { - case io.EOF: - plog.Info("completed snapshot read; closing") - default: - plog.Warningf("failed to receive from snapshot stream; closing (%v)", err) - } pw.CloseWithError(err) return } - - // can "resp == nil && err == nil" - // before we receive snapshot SHA digest? - // No, server sends EOF with an empty response - // after it sends SHA digest at the end - + if resp == nil && err == nil { + break + } if _, werr := pw.Write(resp.Blob); werr != nil { pw.CloseWithError(werr) return } } + pw.Close() }() return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil } diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/go.etcd.io/etcd/clientv3/op.go similarity index 90% rename from vendor/github.com/coreos/etcd/clientv3/op.go rename to vendor/go.etcd.io/etcd/clientv3/op.go index 3dca41b5f..81ae31fd8 100644 --- a/vendor/github.com/coreos/etcd/clientv3/op.go +++ b/vendor/go.etcd.io/etcd/clientv3/op.go @@ -14,7 +14,7 @@ package clientv3 -import pb "github.com/coreos/etcd/etcdserver/etcdserverpb" +import pb "go.etcd.io/etcd/etcdserver/etcdserverpb" type opType int @@ -26,9 +26,7 @@ const ( tTxn ) -var ( - noPrefixEnd = []byte{0} -) +var noPrefixEnd = []byte{0} // Op represents an Operation that kv can execute. type Op struct { @@ -83,8 +81,15 @@ type Op struct { // accessors / mutators -func (op Op) IsTxn() bool { return op.t == tTxn } -func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps } +// IsTxn returns true if the "Op" type is transaction. 
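Note on the maintenance.go Snapshot hunk above: the plog progress messages are gone and the pipe is now closed explicitly, but the calling pattern is unchanged and is the one this backup/restore tooling relies on. A minimal sketch of streaming a snapshot to disk, assuming an existing client `cli`; the target path is illustrative:

    import (
        "context"
        "io"
        "os"

        v3 "go.etcd.io/etcd/clientv3"
    )

    func saveSnapshot(ctx context.Context, cli *v3.Client, path string) error {
        rc, err := cli.Snapshot(ctx) // io.ReadCloser over the backend snapshot stream
        if err != nil {
            return err
        }
        defer rc.Close()

        f, err := os.Create(path)
        if err != nil {
            return err
        }
        defer f.Close()

        // Copy until EOF; the server sends the SHA digest at the end of the stream.
        _, err = io.Copy(f, rc)
        return err
    }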
+func (op Op) IsTxn() bool { + return op.t == tTxn +} + +// Txn returns the comparison(if) operations, "then" operations, and "else" operations. +func (op Op) Txn() ([]Cmp, []Op, []Op) { + return op.cmps, op.thenOps, op.elseOps +} // KeyBytes returns the byte slice holding the Op's key. func (op Op) KeyBytes() []byte { return op.key } @@ -108,13 +113,13 @@ func (op Op) IsGet() bool { return op.t == tRange } func (op Op) IsDelete() bool { return op.t == tDeleteRange } // IsSerializable returns true if the serializable field is true. -func (op Op) IsSerializable() bool { return op.serializable == true } +func (op Op) IsSerializable() bool { return op.serializable } // IsKeysOnly returns whether keysOnly is set. -func (op Op) IsKeysOnly() bool { return op.keysOnly == true } +func (op Op) IsKeysOnly() bool { return op.keysOnly } // IsCountOnly returns whether countOnly is set. -func (op Op) IsCountOnly() bool { return op.countOnly == true } +func (op Op) IsCountOnly() bool { return op.countOnly } // MinModRev returns the operation's minimum modify revision. func (op Op) MinModRev() int64 { return op.minModRev } @@ -211,13 +216,23 @@ func (op Op) isWrite() bool { return op.t != tRange } +// OpGet returns "get" operation based on given key and operation options. func OpGet(key string, opts ...OpOption) Op { + // WithPrefix and WithFromKey are not supported together + if isWithPrefix(opts) && isWithFromKey(opts) { + panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one") + } ret := Op{t: tRange, key: []byte(key)} ret.applyOpts(opts) return ret } +// OpDelete returns "delete" operation based on given key and operation options. func OpDelete(key string, opts ...OpOption) Op { + // WithPrefix and WithFromKey are not supported together + if isWithPrefix(opts) && isWithFromKey(opts) { + panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one") + } ret := Op{t: tDeleteRange, key: []byte(key)} ret.applyOpts(opts) switch { @@ -245,6 +260,7 @@ func OpDelete(key string, opts ...OpOption) Op { return ret } +// OpPut returns "put" operation based on given key-value and operation options. func OpPut(key, val string, opts ...OpOption) Op { ret := Op{t: tPut, key: []byte(key), val: []byte(val)} ret.applyOpts(opts) @@ -273,6 +289,7 @@ func OpPut(key, val string, opts ...OpOption) Op { return ret } +// OpTxn returns "txn" operation based on given transaction conditions. func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} } @@ -383,7 +400,14 @@ func WithRange(endKey string) OpOption { // WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests // to be equal or greater than the key in the argument. -func WithFromKey() OpOption { return WithRange("\x00") } +func WithFromKey() OpOption { + return func(op *Op) { + if len(op.key) == 0 { + op.key = []byte{0} + } + op.end = []byte("\x00") + } +} // WithSerializable makes 'Get' request serializable. By default, // it's linearizable. Serializable requests are better for lower latency @@ -472,6 +496,17 @@ func WithPrevKV() OpOption { } } +// WithFragment to receive raw watch response with fragmentation. +// Fragmentation is disabled by default. If fragmentation is enabled, +// etcd watch server will split watch response before sending to clients +// when the total size of watch events exceed server-side request limit. 
+// The default server-side request limit is 1.5 MiB, which can be configured +// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes. +// See "etcdserver/api/v3rpc/watch.go" for more details. +func WithFragment() OpOption { + return func(op *Op) { op.fragment = true } +} + // WithIgnoreValue updates the key using its current value. // This option can not be combined with non-empty values. // Returns an error if the key does not exist. @@ -518,13 +553,8 @@ func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLi return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys} } -// WithFragment to receive raw watch response with fragmentation. -// Fragmentation is disabled by default. If fragmentation is enabled, -// etcd watch server will split watch response before sending to clients -// when the total size of watch events exceed server-side request limit. -// The default server-side request limit is 1.5 MiB, which can be configured -// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes. -// See "etcdserver/api/v3rpc/watch.go" for more details. -func WithFragment() OpOption { - return func(op *Op) { op.fragment = true } -} +// isWithPrefix returns true if WithPrefix is being called in the op +func isWithPrefix(opts []OpOption) bool { return isOpFuncCalled("WithPrefix", opts) } + +// isWithFromKey returns true if WithFromKey is being called in the op +func isWithFromKey(opts []OpOption) bool { return isOpFuncCalled("WithFromKey", opts) } diff --git a/vendor/github.com/coreos/etcd/clientv3/options.go b/vendor/go.etcd.io/etcd/clientv3/options.go similarity index 100% rename from vendor/github.com/coreos/etcd/clientv3/options.go rename to vendor/go.etcd.io/etcd/clientv3/options.go diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/go.etcd.io/etcd/clientv3/retry.go similarity index 97% rename from vendor/github.com/coreos/etcd/clientv3/retry.go rename to vendor/go.etcd.io/etcd/clientv3/retry.go index 6baa52e14..7e855de06 100644 --- a/vendor/github.com/coreos/etcd/clientv3/retry.go +++ b/vendor/go.etcd.io/etcd/clientv3/retry.go @@ -17,8 +17,8 @@ package clientv3 import ( "context" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -179,6 +179,10 @@ func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUp return rcc.cc.MemberUpdate(ctx, in, opts...) } +func (rcc *retryClusterClient) MemberPromote(ctx context.Context, in *pb.MemberPromoteRequest, opts ...grpc.CallOption) (resp *pb.MemberPromoteResponse, err error) { + return rcc.cc.MemberPromote(ctx, in, opts...) 
+} + type retryMaintenanceClient struct { mc pb.MaintenanceClient } diff --git a/vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go similarity index 96% rename from vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go rename to vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go index f3c505706..080490ae2 100644 --- a/vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go +++ b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go @@ -23,7 +23,7 @@ import ( "sync" "time" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -38,7 +38,6 @@ import ( func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor { intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - ctx = withVersion(ctx) grpcOpts, retryOpts := filterCallOptions(opts) callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) // short circuit for simplicity, and avoiding allocations. @@ -104,7 +103,6 @@ func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOpt func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor { intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - ctx = withVersion(ctx) grpcOpts, retryOpts := filterCallOptions(opts) callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) // short circuit for simplicity, and avoiding allocations. @@ -115,9 +113,10 @@ func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOp return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()") } newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...) + logger.Warn("retry stream intercept", zap.Error(err)) if err != nil { - logger.Error("streamer failed to create ClientStream", zap.Error(err)) - return nil, err // TODO(mwitkow): Maybe dial and transport errors should be retriable? + // TODO(mwitkow): Maybe dial and transport errors should be retriable? + return nil, err } retryingStreamer := &serverStreamingRetryingStream{ client: c, @@ -186,7 +185,6 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { if !attemptRetry { return lastErr // success or hard failure } - // We start off from attempt 1, because zeroth was already made on normal SendMsg(). for attempt := uint(1); attempt < s.callOpts.max; attempt++ { if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil { @@ -194,13 +192,12 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { } newStream, err := s.reestablishStreamAndResendBuffer(s.ctx) if err != nil { - s.client.lg.Error("failed reestablishStreamAndResendBuffer", zap.Error(err)) - return err // TODO(mwitkow): Maybe dial and transport errors should be retriable? + // TODO(mwitkow): Maybe dial and transport errors should be retriable? 
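Note on the op.go hunk above: OpGet and OpDelete now panic when WithPrefix and WithFromKey are combined, and WithFromKey defaults an empty key to "\x00" instead of delegating to WithRange. A minimal sketch of the two valid range forms, assuming an existing client `cli`:

    import (
        "context"

        "go.etcd.io/etcd/clientv3"
    )

    func rangeReads(ctx context.Context, cli *clientv3.Client) error {
        // Everything under a prefix.
        if _, err := cli.Get(ctx, "foo/", clientv3.WithPrefix()); err != nil {
            return err
        }
        // From a key to the end of the keyspace.
        if _, err := cli.Get(ctx, "foo", clientv3.WithFromKey()); err != nil {
            return err
        }
        // Combining both now panics:
        //   clientv3.OpGet("foo", clientv3.WithPrefix(), clientv3.WithFromKey())
        return nil
    }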
+ return err } s.setStream(newStream) - - s.client.lg.Warn("retrying RecvMsg", zap.Error(lastErr)) attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m) + //fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr) if !attemptRetry { return lastErr } diff --git a/vendor/github.com/coreos/etcd/clientv3/sort.go b/vendor/go.etcd.io/etcd/clientv3/sort.go similarity index 100% rename from vendor/github.com/coreos/etcd/clientv3/sort.go rename to vendor/go.etcd.io/etcd/clientv3/sort.go diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/go.etcd.io/etcd/clientv3/txn.go similarity index 98% rename from vendor/github.com/coreos/etcd/clientv3/txn.go rename to vendor/go.etcd.io/etcd/clientv3/txn.go index c3c2d2485..c19715da4 100644 --- a/vendor/github.com/coreos/etcd/clientv3/txn.go +++ b/vendor/go.etcd.io/etcd/clientv3/txn.go @@ -18,7 +18,7 @@ import ( "context" "sync" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "google.golang.org/grpc" ) diff --git a/vendor/github.com/coreos/etcd/clientv3/utils.go b/vendor/go.etcd.io/etcd/clientv3/utils.go similarity index 100% rename from vendor/github.com/coreos/etcd/clientv3/utils.go rename to vendor/go.etcd.io/etcd/clientv3/utils.go diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/go.etcd.io/etcd/clientv3/watch.go similarity index 99% rename from vendor/github.com/coreos/etcd/clientv3/watch.go rename to vendor/go.etcd.io/etcd/clientv3/watch.go index 4a3b8cccd..87d222d1d 100644 --- a/vendor/github.com/coreos/etcd/clientv3/watch.go +++ b/vendor/go.etcd.io/etcd/clientv3/watch.go @@ -21,9 +21,9 @@ import ( "sync" "time" - v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + v3rpc "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + mvccpb "go.etcd.io/etcd/mvcc/mvccpb" "google.golang.org/grpc" "google.golang.org/grpc/codes" diff --git a/vendor/github.com/coreos/etcd/embed/config.go b/vendor/go.etcd.io/etcd/embed/config.go similarity index 57% rename from vendor/github.com/coreos/etcd/embed/config.go rename to vendor/go.etcd.io/etcd/embed/config.go index a48de2948..2f64d927f 100644 --- a/vendor/github.com/coreos/etcd/embed/config.go +++ b/vendor/go.etcd.io/etcd/embed/config.go @@ -24,20 +24,24 @@ import ( "os" "path/filepath" "strings" + "sync" "time" - "github.com/coreos/etcd/compactor" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/pkg/cors" - "github.com/coreos/etcd/pkg/netutil" - "github.com/coreos/etcd/pkg/srv" - "github.com/coreos/etcd/pkg/tlsutil" - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" - - "github.com/coreos/pkg/capnslog" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api/v3compactor" + "go.etcd.io/etcd/pkg/flags" + "go.etcd.io/etcd/pkg/logutil" + "go.etcd.io/etcd/pkg/netutil" + "go.etcd.io/etcd/pkg/srv" + "go.etcd.io/etcd/pkg/tlsutil" + "go.etcd.io/etcd/pkg/transport" + "go.etcd.io/etcd/pkg/types" + + bolt "go.etcd.io/bbolt" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "golang.org/x/crypto/bcrypt" "google.golang.org/grpc" - "google.golang.org/grpc/grpclog" "sigs.k8s.io/yaml" ) @@ -58,18 +62,22 @@ const ( DefaultListenClientURLs = "http://localhost:2379" DefaultLogOutput = "default" + JournalLogOutput = "systemd/journal" + StdErrLogOutput = "stderr" + StdOutLogOutput = "stdout" // DefaultStrictReconfigCheck is the default 
value for "--strict-reconfig-check" flag. // It's enabled by default. DefaultStrictReconfigCheck = true // DefaultEnableV2 is the default value for "--enable-v2" flag. - // v2 is enabled by default. - // TODO: disable v2 when deprecated. - DefaultEnableV2 = true + // v2 API is disabled by default. + DefaultEnableV2 = false // maxElectionMs specifies the maximum value of election timeout. // More details are listed in ../Documentation/tuning.md#time-parameters. maxElectionMs = 50000 + // backend freelist map type + freelistMapType = "map" ) var ( @@ -84,30 +92,46 @@ var ( defaultHostStatus error ) +var ( + // CompactorModePeriodic is periodic compaction mode + // for "Config.AutoCompactionMode" field. + // If "AutoCompactionMode" is CompactorModePeriodic and + // "AutoCompactionRetention" is "1h", it automatically compacts + // compacts storage every hour. + CompactorModePeriodic = v3compactor.ModePeriodic + + // CompactorModeRevision is revision-based compaction mode + // for "Config.AutoCompactionMode" field. + // If "AutoCompactionMode" is CompactorModeRevision and + // "AutoCompactionRetention" is "1000", it compacts log on + // revision 5000 when the current revision is 6000. + // This runs every 5-minute if enough of logs have proceeded. + CompactorModeRevision = v3compactor.ModeRevision +) + func init() { defaultHostname, defaultHostStatus = netutil.GetDefaultHost() } // Config holds the arguments for configuring an etcd server. type Config struct { - // member + Name string `json:"name"` + Dir string `json:"data-dir"` + WalDir string `json:"wal-dir"` - CorsInfo *cors.CORSInfo - LPUrls, LCUrls []url.URL - Dir string `json:"data-dir"` - WalDir string `json:"wal-dir"` - MaxSnapFiles uint `json:"max-snapshots"` - MaxWalFiles uint `json:"max-wals"` - Name string `json:"name"` - SnapCount uint64 `json:"snapshot-count"` + SnapshotCount uint64 `json:"snapshot-count"` - // AutoCompactionMode is either 'periodic' or 'revision'. - AutoCompactionMode string `json:"auto-compaction-mode"` - // AutoCompactionRetention is either duration string with time unit - // (e.g. '5m' for 5-minute), or revision unit (e.g. '5000'). - // If no time unit is provided and compaction mode is 'periodic', - // the unit defaults to hour. For example, '5' translates into 5-hour. - AutoCompactionRetention string `json:"auto-compaction-retention"` + // SnapshotCatchUpEntries is the number of entries for a slow follower + // to catch-up after compacting the raft storage entries. + // We expect the follower has a millisecond level latency with the leader. + // The max throughput is around 10K. Keep a 5K entries is enough for helping + // follower to catch up. + // WARNING: only change this for tests. + // Always use "DefaultSnapshotCatchUpEntries" + SnapshotCatchUpEntries uint64 + + MaxSnapFiles uint `json:"max-snapshots"` + MaxWalFiles uint `json:"max-wals"` // TickMs is the number of milliseconds between heartbeat ticks. // TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1). @@ -141,14 +165,46 @@ type Config struct { // // If single-node, it advances ticks regardless. // - // See https://github.com/coreos/etcd/issues/9333 for more detail. + // See https://github.com/etcd-io/etcd/issues/9333 for more detail. InitialElectionTickAdvance bool `json:"initial-election-tick-advance"` + // BackendBatchInterval is the maximum time before commit the backend transaction. 
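Note on the embed/config.go hunk above: the compactor modes are now re-exported as embed.CompactorModePeriodic and embed.CompactorModeRevision, and the v2 API is disabled by default. A minimal sketch of enabling periodic auto-compaction on an embedded server; the data directory and retention value are illustrative:

    import (
        "log"

        "go.etcd.io/etcd/embed"
    )

    func startCompactingServer() {
        cfg := embed.NewConfig()
        cfg.Dir = "/var/lib/etcd"
        cfg.AutoCompactionMode = embed.CompactorModePeriodic // or embed.CompactorModeRevision
        cfg.AutoCompactionRetention = "1h"                    // compact away history older than ~1h

        e, err := embed.StartEtcd(cfg)
        if err != nil {
            log.Fatal(err)
        }
        defer e.Close()
        <-e.Server.ReadyNotify()
    }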
+ BackendBatchInterval time.Duration `json:"backend-batch-interval"` + // BackendBatchLimit is the maximum operations before commit the backend transaction. + BackendBatchLimit int `json:"backend-batch-limit"` QuotaBackendBytes int64 `json:"quota-backend-bytes"` MaxTxnOps uint `json:"max-txn-ops"` MaxRequestBytes uint `json:"max-request-bytes"` - // gRPC server options + LPUrls, LCUrls []url.URL + APUrls, ACUrls []url.URL + ClientTLSInfo transport.TLSInfo + ClientAutoTLS bool + PeerTLSInfo transport.TLSInfo + PeerAutoTLS bool + + // CipherSuites is a list of supported TLS cipher suites between + // client/server and peers. If empty, Go auto-populates the list. + // Note that cipher suites are prioritized in the given order. + CipherSuites []string `json:"cipher-suites"` + + ClusterState string `json:"initial-cluster-state"` + DNSCluster string `json:"discovery-srv"` + DNSClusterServiceName string `json:"discovery-srv-name"` + Dproxy string `json:"discovery-proxy"` + Durl string `json:"discovery"` + InitialCluster string `json:"initial-cluster"` + InitialClusterToken string `json:"initial-cluster-token"` + StrictReconfigCheck bool `json:"strict-reconfig-check"` + EnableV2 bool `json:"enable-v2"` + + // AutoCompactionMode is either 'periodic' or 'revision'. + AutoCompactionMode string `json:"auto-compaction-mode"` + // AutoCompactionRetention is either duration string with time unit + // (e.g. '5m' for 5-minute), or revision unit (e.g. '5000'). + // If no time unit is provided and compaction mode is 'periodic', + // the unit defaults to hour. For example, '5' translates into 5-hour. + AutoCompactionRetention string `json:"auto-compaction-retention"` // GRPCKeepAliveMinTime is the minimum interval that a client should // wait before pinging server. When client pings "too fast", server @@ -165,42 +221,40 @@ type Config struct { // before closing a non-responsive connection. 0 to disable. GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"` - // clustering - - APUrls, ACUrls []url.URL - ClusterState string `json:"initial-cluster-state"` - DNSCluster string `json:"discovery-srv"` - Dproxy string `json:"discovery-proxy"` - Durl string `json:"discovery"` - InitialCluster string `json:"initial-cluster"` - InitialClusterToken string `json:"initial-cluster-token"` - StrictReconfigCheck bool `json:"strict-reconfig-check"` - EnableV2 bool `json:"enable-v2"` - - // security - - ClientTLSInfo transport.TLSInfo - ClientAutoTLS bool - PeerTLSInfo transport.TLSInfo - PeerAutoTLS bool - - // CipherSuites is a list of supported TLS cipher suites between - // client/server and peers. If empty, Go auto-populates the list. - // Note that cipher suites are prioritized in the given order. - CipherSuites []string `json:"cipher-suites"` - - // debug - - Debug bool `json:"debug"` - LogPkgLevels string `json:"log-package-levels"` - LogOutput string `json:"log-output"` - EnablePprof bool `json:"enable-pprof"` - Metrics string `json:"metrics"` - ListenMetricsUrls []url.URL - ListenMetricsUrlsJSON string `json:"listen-metrics-urls"` - - // ForceNewCluster starts a new cluster even if previously started; unsafe. - ForceNewCluster bool `json:"force-new-cluster"` + // PreVote is true to enable Raft Pre-Vote. + // If enabled, Raft runs an additional election phase + // to check whether it would get enough votes to win + // an election, thus minimizing disruptions. + // TODO: enable by default in 3.5. 
+ PreVote bool `json:"pre-vote"` + + CORS map[string]struct{} + + // HostWhitelist lists acceptable hostnames from HTTP client requests. + // Client origin policy protects against "DNS Rebinding" attacks + // to insecure etcd servers. That is, any website can simply create + // an authorized DNS name, and direct DNS to "localhost" (or any + // other address). Then, all HTTP endpoints of etcd server listening + // on "localhost" becomes accessible, thus vulnerable to DNS rebinding + // attacks. See "CVE-2018-5702" for more detail. + // + // 1. If client connection is secure via HTTPS, allow any hostnames. + // 2. If client connection is not secure and "HostWhitelist" is not empty, + // only allow HTTP requests whose Host field is listed in whitelist. + // + // Note that the client origin policy is enforced whether authentication + // is enabled or not, for tighter controls. + // + // By default, "HostWhitelist" is "*", which allows any hostnames. + // Note that when specifying hostnames, loopback addresses are not added + // automatically. To allow loopback interfaces, leave it empty or set it "*", + // or add them to whitelist manually (e.g. "localhost", "127.0.0.1", etc.). + // + // CVE-2018-5702 reference: + // - https://bugs.chromium.org/p/project-zero/issues/detail?id=1447#c2 + // - https://github.com/transmission/transmission/pull/468 + // - https://github.com/etcd-io/etcd/issues/9353 + HostWhitelist map[string]struct{} // UserHandlers is for registering users handlers and only used for // embedding etcd into other applications. @@ -216,15 +270,71 @@ type Config struct { // embed.StartEtcd(cfg) ServiceRegister func(*grpc.Server) `json:"-"` - // auth - - AuthToken string `json:"auth-token"` - - // Experimental flags + AuthToken string `json:"auth-token"` + BcryptCost uint `json:"bcrypt-cost"` ExperimentalInitialCorruptCheck bool `json:"experimental-initial-corrupt-check"` ExperimentalCorruptCheckTime time.Duration `json:"experimental-corrupt-check-time"` ExperimentalEnableV2V3 string `json:"experimental-enable-v2v3"` + // ExperimentalBackendFreelistType specifies the type of freelist that boltdb backend uses (array and map are supported types). + ExperimentalBackendFreelistType string `json:"experimental-backend-bbolt-freelist-type"` + // ExperimentalEnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases. + ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"` + ExperimentalCompactionBatchLimit int `json:"experimental-compaction-batch-limit"` + + // ForceNewCluster starts a new cluster even if previously started; unsafe. + ForceNewCluster bool `json:"force-new-cluster"` + + EnablePprof bool `json:"enable-pprof"` + Metrics string `json:"metrics"` + ListenMetricsUrls []url.URL + ListenMetricsUrlsJSON string `json:"listen-metrics-urls"` + + // Logger is logger options: "zap", "capnslog". + // WARN: "capnslog" is being deprecated in v3.5. + Logger string `json:"logger"` + // LogLevel configures log level. Only supports debug, info, warn, error, panic, or fatal. Default 'info'. + LogLevel string `json:"log-level"` + // LogOutputs is either: + // - "default" as os.Stderr, + // - "stderr" as os.Stderr, + // - "stdout" as os.Stdout, + // - file path to append server logs to. + // It can be multiple when "Logger" is zap. + LogOutputs []string `json:"log-outputs"` + + // ZapLoggerBuilder is used to build the zap logger. 
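Note on the logging and host-whitelist fields introduced above: with Logger set to "zap" the server uses structured logging driven by LogLevel and LogOutputs, while HostWhitelist restricts the Host header accepted from insecure HTTP clients (the DNS-rebinding mitigation referenced as CVE-2018-5702). A minimal sketch, assuming "go.etcd.io/etcd/embed" is imported; hostnames and file paths are illustrative:

    cfg := embed.NewConfig()
    cfg.Logger = "zap"    // "capnslog" is the deprecated default
    cfg.LogLevel = "info" // debug|info|warn|error|panic|fatal
    cfg.LogOutputs = []string{"stderr", "/var/log/etcd.log"}

    // Only honour insecure HTTP requests whose Host header matches one of these;
    // the default "*" allows any hostname.
    cfg.HostWhitelist = map[string]struct{}{
        "localhost":        {},
        "etcd.example.com": {},
    }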
+ ZapLoggerBuilder func(*Config) error + + // logger logs server-side operations. The default is nil, + // and "setupLogging" must be called before starting server. + // Do not set logger directly. + loggerMu *sync.RWMutex + logger *zap.Logger + + // loggerConfig is server logger configuration for Raft logger. + // Must be either: "loggerConfig != nil" or "loggerCore != nil && loggerWriteSyncer != nil". + loggerConfig *zap.Config + // loggerCore is "zapcore.Core" for raft logger. + // Must be either: "loggerConfig != nil" or "loggerCore != nil && loggerWriteSyncer != nil". + loggerCore zapcore.Core + loggerWriteSyncer zapcore.WriteSyncer + + // EnableGRPCGateway is false to disable grpc gateway. + EnableGRPCGateway bool `json:"enable-grpc-gateway"` + + // TO BE DEPRECATED + + // DeprecatedLogOutput is to be deprecated in v3.5. + // Just here for safe migration in v3.4. + DeprecatedLogOutput []string `json:"log-output"` + // Debug is true, to enable debug level logging. + // WARNING: to be deprecated in 3.5. Use "--log-level=debug" instead. + Debug bool `json:"debug"` + // LogPkgLevels is being deprecated in v3.5. + // Only valid if "logger" option is "capnslog". + // WARN: DO NOT USE THIS! + LogPkgLevels string `json:"log-package-levels"` } // configYAML holds the config suitable for yaml parsing @@ -235,17 +345,19 @@ type configYAML struct { // configJSON has file options that are translated into Config options type configJSON struct { - LPUrlsJSON string `json:"listen-peer-urls"` - LCUrlsJSON string `json:"listen-client-urls"` - CorsJSON string `json:"cors"` - APUrlsJSON string `json:"initial-advertise-peer-urls"` - ACUrlsJSON string `json:"advertise-client-urls"` + LPUrlsJSON string `json:"listen-peer-urls"` + LCUrlsJSON string `json:"listen-client-urls"` + APUrlsJSON string `json:"initial-advertise-peer-urls"` + ACUrlsJSON string `json:"advertise-client-urls"` + + CORSJSON string `json:"cors"` + HostWhitelistJSON string `json:"host-whitelist"` + ClientSecurityJSON securityConfig `json:"client-transport-security"` PeerSecurityJSON securityConfig `json:"peer-transport-security"` } type securityConfig struct { - CAFile string `json:"ca-file"` CertFile string `json:"cert-file"` KeyFile string `json:"key-file"` CertAuth bool `json:"client-cert-auth"` @@ -260,30 +372,53 @@ func NewConfig() *Config { lcurl, _ := url.Parse(DefaultListenClientURLs) acurl, _ := url.Parse(DefaultAdvertiseClientURLs) cfg := &Config{ - CorsInfo: &cors.CORSInfo{}, - MaxSnapFiles: DefaultMaxSnapshots, - MaxWalFiles: DefaultMaxWALs, - Name: DefaultName, - SnapCount: etcdserver.DefaultSnapCount, - MaxTxnOps: DefaultMaxTxnOps, - MaxRequestBytes: DefaultMaxRequestBytes, - GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime, - GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval, - GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout, + MaxSnapFiles: DefaultMaxSnapshots, + MaxWalFiles: DefaultMaxWALs, + + Name: DefaultName, + + SnapshotCount: etcdserver.DefaultSnapshotCount, + SnapshotCatchUpEntries: etcdserver.DefaultSnapshotCatchUpEntries, + + MaxTxnOps: DefaultMaxTxnOps, + MaxRequestBytes: DefaultMaxRequestBytes, + + GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime, + GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval, + GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout, + TickMs: 100, ElectionMs: 1000, InitialElectionTickAdvance: true, - LPUrls: []url.URL{*lpurl}, - LCUrls: []url.URL{*lcurl}, - APUrls: []url.URL{*apurl}, - ACUrls: []url.URL{*acurl}, - ClusterState: ClusterStateFlagNew, - InitialClusterToken: 
"etcd-cluster", - StrictReconfigCheck: DefaultStrictReconfigCheck, - LogOutput: DefaultLogOutput, - Metrics: "basic", - EnableV2: DefaultEnableV2, - AuthToken: "simple", + + LPUrls: []url.URL{*lpurl}, + LCUrls: []url.URL{*lcurl}, + APUrls: []url.URL{*apurl}, + ACUrls: []url.URL{*acurl}, + + ClusterState: ClusterStateFlagNew, + InitialClusterToken: "etcd-cluster", + + StrictReconfigCheck: DefaultStrictReconfigCheck, + Metrics: "basic", + EnableV2: DefaultEnableV2, + + CORS: map[string]struct{}{"*": {}}, + HostWhitelist: map[string]struct{}{"*": {}}, + + AuthToken: "simple", + BcryptCost: uint(bcrypt.DefaultCost), + + PreVote: false, // TODO: enable by default in v3.5 + + loggerMu: new(sync.RWMutex), + logger: nil, + Logger: "capnslog", + DeprecatedLogOutput: []string{DefaultLogOutput}, + LogOutputs: []string{DefaultLogOutput}, + Debug: false, + LogLevel: logutil.DefaultLogLevel, + LogPkgLevels: "", } cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) return cfg @@ -302,46 +437,6 @@ func logTLSHandshakeFailure(conn *tls.Conn, err error) { } } -// SetupLogging initializes etcd logging. -// Must be called after flag parsing. -func (cfg *Config) SetupLogging() { - cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure - cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure - - capnslog.SetGlobalLogLevel(capnslog.INFO) - if cfg.Debug { - capnslog.SetGlobalLogLevel(capnslog.DEBUG) - grpc.EnableTracing = true - // enable info, warning, error - grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr)) - } else { - // only discard info - grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr)) - } - if cfg.LogPkgLevels != "" { - repoLog := capnslog.MustRepoLogger("github.com/coreos/etcd") - settings, err := repoLog.ParseLogLevelConfig(cfg.LogPkgLevels) - if err != nil { - plog.Warningf("couldn't parse log level string: %s, continuing with default levels", err.Error()) - return - } - repoLog.SetLogLevel(settings) - } - - // capnslog initially SetFormatter(NewDefaultFormatter(os.Stderr)) - // where NewDefaultFormatter returns NewJournaldFormatter when syscall.Getppid() == 1 - // specify 'stdout' or 'stderr' to skip journald logging even when running under systemd - switch cfg.LogOutput { - case "stdout": - capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, cfg.Debug)) - case "stderr": - capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, cfg.Debug)) - case DefaultLogOutput: - default: - plog.Panicf(`unknown log-output %q (only supports %q, "stdout", "stderr")`, cfg.LogOutput, DefaultLogOutput) - } -} - func ConfigFromFile(path string) (*Config, error) { cfg := &configYAML{Config: *NewConfig()} if err := cfg.configFromFile(path); err != nil { @@ -366,7 +461,8 @@ func (cfg *configYAML) configFromFile(path string) error { if cfg.LPUrlsJSON != "" { u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ",")) if err != nil { - plog.Fatalf("unexpected error setting up listen-peer-urls: %v", err) + fmt.Fprintf(os.Stderr, "unexpected error setting up listen-peer-urls: %v\n", err) + os.Exit(1) } cfg.LPUrls = []url.URL(u) } @@ -374,21 +470,17 @@ func (cfg *configYAML) configFromFile(path string) error { if cfg.LCUrlsJSON != "" { u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ",")) if err != nil { - plog.Fatalf("unexpected error setting up listen-client-urls: %v", err) + fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-urls: %v\n", err) + os.Exit(1) } cfg.LCUrls = []url.URL(u) } - if cfg.CorsJSON != "" { - if err := 
cfg.CorsInfo.Set(cfg.CorsJSON); err != nil { - plog.Panicf("unexpected error setting up cors: %v", err) - } - } - if cfg.APUrlsJSON != "" { u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ",")) if err != nil { - plog.Fatalf("unexpected error setting up initial-advertise-peer-urls: %v", err) + fmt.Fprintf(os.Stderr, "unexpected error setting up initial-advertise-peer-urls: %v\n", err) + os.Exit(1) } cfg.APUrls = []url.URL(u) } @@ -396,7 +488,8 @@ func (cfg *configYAML) configFromFile(path string) error { if cfg.ACUrlsJSON != "" { u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ",")) if err != nil { - plog.Fatalf("unexpected error setting up advertise-peer-urls: %v", err) + fmt.Fprintf(os.Stderr, "unexpected error setting up advertise-peer-urls: %v\n", err) + os.Exit(1) } cfg.ACUrls = []url.URL(u) } @@ -404,11 +497,22 @@ func (cfg *configYAML) configFromFile(path string) error { if cfg.ListenMetricsUrlsJSON != "" { u, err := types.NewURLs(strings.Split(cfg.ListenMetricsUrlsJSON, ",")) if err != nil { - plog.Fatalf("unexpected error setting up listen-metrics-urls: %v", err) + fmt.Fprintf(os.Stderr, "unexpected error setting up listen-metrics-urls: %v\n", err) + os.Exit(1) } cfg.ListenMetricsUrls = []url.URL(u) } + if cfg.CORSJSON != "" { + uv := flags.NewUniqueURLsWithExceptions(cfg.CORSJSON, "*") + cfg.CORS = uv.Values + } + + if cfg.HostWhitelistJSON != "" { + uv := flags.NewUniqueStringsValue(cfg.HostWhitelistJSON) + cfg.HostWhitelist = uv.Values + } + // If a discovery flag is set, clear default initial cluster set by InitialClusterFromName if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster { cfg.InitialCluster = "" @@ -418,7 +522,6 @@ func (cfg *configYAML) configFromFile(path string) error { } copySecurityDetails := func(tls *transport.TLSInfo, ysc *securityConfig) { - tls.CAFile = ysc.CAFile tls.CertFile = ysc.CertFile tls.KeyFile = ysc.KeyFile tls.ClientCertAuth = ysc.CertAuth @@ -452,6 +555,9 @@ func updateCipherSuites(tls *transport.TLSInfo, ss []string) error { // Validate ensures that '*embed.Config' fields are properly configured. func (cfg *Config) Validate() error { + if err := cfg.setupLogging(); err != nil { + return err + } if err := checkBindURLs(cfg.LPUrls); err != nil { return err } @@ -462,22 +568,13 @@ func (cfg *Config) Validate() error { return err } if err := checkHostURLs(cfg.APUrls); err != nil { - // TODO: return err in v3.4 - addrs := make([]string, len(cfg.APUrls)) - for i := range cfg.APUrls { - addrs[i] = cfg.APUrls[i].String() - } - plog.Warningf("advertise-peer-urls %q is deprecated (%v)", strings.Join(addrs, ","), err) + addrs := cfg.getAPURLs() + return fmt.Errorf(`--initial-advertise-peer-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err) } if err := checkHostURLs(cfg.ACUrls); err != nil { - // TODO: return err in v3.4 - addrs := make([]string, len(cfg.ACUrls)) - for i := range cfg.ACUrls { - addrs[i] = cfg.ACUrls[i].String() - } - plog.Warningf("advertise-client-urls %q is deprecated (%v)", strings.Join(addrs, ","), err) + addrs := cfg.getACURLs() + return fmt.Errorf(`--advertise-client-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err) } - // Check if conflicting flags are passed. 
nSet := 0 for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != ""} { @@ -514,7 +611,7 @@ func (cfg *Config) Validate() error { switch cfg.AutoCompactionMode { case "": - case compactor.ModeRevision, compactor.ModePeriodic: + case CompactorModeRevision, CompactorModePeriodic: default: return fmt.Errorf("unknown auto-compaction-mode %q", cfg.AutoCompactionMode) } @@ -532,17 +629,27 @@ func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, tok // self's advertised peer URLs urlsmap[cfg.Name] = cfg.APUrls token = cfg.Durl + case cfg.DNSCluster != "": - clusterStrs, cerr := srv.GetCluster("etcd-server", cfg.Name, cfg.DNSCluster, cfg.APUrls) + clusterStrs, cerr := cfg.GetDNSClusterNames() + lg := cfg.logger if cerr != nil { - plog.Errorf("couldn't resolve during SRV discovery (%v)", cerr) + if lg != nil { + lg.Warn("failed to resolve during SRV discovery", zap.Error(cerr)) + } else { + plog.Errorf("couldn't resolve during SRV discovery (%v)", cerr) + } return nil, "", cerr } for _, s := range clusterStrs { - plog.Noticef("got bootstrap from DNS for etcd-server at %s", s) + if lg != nil { + lg.Info("got bootstrap from DNS for etcd-server", zap.String("node", s)) + } else { + plog.Noticef("got bootstrap from DNS for etcd-server at %s", s) + } } clusterStr := strings.Join(clusterStrs, ",") - if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.CAFile == "" { + if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.TrustedCAFile == "" { cfg.PeerTLSInfo.ServerName = cfg.DNSCluster } urlsmap, err = types.NewURLsMap(clusterStr) @@ -553,6 +660,7 @@ func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, tok return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name) } } + default: // We're statically configured, and cluster has appropriately been set. urlsmap, err = types.NewURLsMap(cfg.InitialCluster) @@ -560,6 +668,58 @@ func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, tok return urlsmap, token, err } +// GetDNSClusterNames uses DNS SRV records to get a list of initial nodes for cluster bootstrapping. +func (cfg *Config) GetDNSClusterNames() ([]string, error) { + var ( + clusterStrs []string + cerr error + serviceNameSuffix string + ) + if cfg.DNSClusterServiceName != "" { + serviceNameSuffix = "-" + cfg.DNSClusterServiceName + } + + lg := cfg.GetLogger() + + // Use both etcd-server-ssl and etcd-server for discovery. + // Combine the results if both are available. + clusterStrs, cerr = srv.GetCluster("https", "etcd-server-ssl"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls) + if cerr != nil { + clusterStrs = make([]string, 0) + } + if lg != nil { + lg.Info( + "get cluster for etcd-server-ssl SRV", + zap.String("service-scheme", "https"), + zap.String("service-name", "etcd-server-ssl"+serviceNameSuffix), + zap.String("server-name", cfg.Name), + zap.String("discovery-srv", cfg.DNSCluster), + zap.Strings("advertise-peer-urls", cfg.getAPURLs()), + zap.Strings("found-cluster", clusterStrs), + zap.Error(cerr), + ) + } + + defaultHTTPClusterStrs, httpCerr := srv.GetCluster("http", "etcd-server"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls) + if httpCerr != nil { + clusterStrs = append(clusterStrs, defaultHTTPClusterStrs...) 
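Note on GetDNSClusterNames above: SRV discovery now queries both the etcd-server-ssl and etcd-server services, each optionally suffixed with DNSClusterServiceName. A minimal configuration sketch, assuming "go.etcd.io/etcd/embed" is imported; the domain and service name are illustrative and would resolve to records such as _etcd-server-ssl-backup._tcp.example.com:

    cfg := embed.NewConfig()
    cfg.Name = "infra0"
    cfg.DNSCluster = "example.com"       // --discovery-srv
    cfg.DNSClusterServiceName = "backup" // --discovery-srv-name suffix
    cfg.InitialClusterToken = "etcd-cluster-1"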
+ } + if lg != nil { + lg.Info( + "get cluster for etcd-server SRV", + zap.String("service-scheme", "http"), + zap.String("service-name", "etcd-server"+serviceNameSuffix), + zap.String("server-name", cfg.Name), + zap.String("discovery-srv", cfg.DNSCluster), + zap.Strings("advertise-peer-urls", cfg.getAPURLs()), + zap.Strings("found-cluster", clusterStrs), + zap.Error(httpCerr), + ) + } + + return clusterStrs, cerr +} + func (cfg Config) InitialClusterFromName(name string) (ret string) { if len(cfg.APUrls) == 0 { return "" @@ -590,14 +750,18 @@ func (cfg *Config) ClientSelfCert() (err error) { return nil } if !cfg.ClientTLSInfo.Empty() { - plog.Warningf("ignoring client auto TLS since certs given") + if cfg.logger != nil { + cfg.logger.Warn("ignoring client auto TLS since certs given") + } else { + plog.Warningf("ignoring client auto TLS since certs given") + } return nil } chosts := make([]string, len(cfg.LCUrls)) for i, u := range cfg.LCUrls { chosts[i] = u.Host } - cfg.ClientTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "client"), chosts) + cfg.ClientTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "client"), chosts) if err != nil { return err } @@ -609,14 +773,18 @@ func (cfg *Config) PeerSelfCert() (err error) { return nil } if !cfg.PeerTLSInfo.Empty() { - plog.Warningf("ignoring peer auto TLS since certs given") + if cfg.logger != nil { + cfg.logger.Warn("ignoring peer auto TLS since certs given") + } else { + plog.Warningf("ignoring peer auto TLS since certs given") + } return nil } phosts := make([]string, len(cfg.LPUrls)) for i, u := range cfg.LPUrls { phosts[i] = u.Host } - cfg.PeerTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "peer"), phosts) + cfg.PeerTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "peer"), phosts) if err != nil { return err } @@ -697,3 +865,51 @@ func checkHostURLs(urls []url.URL) error { } return nil } + +func (cfg *Config) getAPURLs() (ss []string) { + ss = make([]string, len(cfg.APUrls)) + for i := range cfg.APUrls { + ss[i] = cfg.APUrls[i].String() + } + return ss +} + +func (cfg *Config) getLPURLs() (ss []string) { + ss = make([]string, len(cfg.LPUrls)) + for i := range cfg.LPUrls { + ss[i] = cfg.LPUrls[i].String() + } + return ss +} + +func (cfg *Config) getACURLs() (ss []string) { + ss = make([]string, len(cfg.ACUrls)) + for i := range cfg.ACUrls { + ss[i] = cfg.ACUrls[i].String() + } + return ss +} + +func (cfg *Config) getLCURLs() (ss []string) { + ss = make([]string, len(cfg.LCUrls)) + for i := range cfg.LCUrls { + ss[i] = cfg.LCUrls[i].String() + } + return ss +} + +func (cfg *Config) getMetricsURLs() (ss []string) { + ss = make([]string, len(cfg.ListenMetricsUrls)) + for i := range cfg.ListenMetricsUrls { + ss[i] = cfg.ListenMetricsUrls[i].String() + } + return ss +} + +func parseBackendFreelistType(freelistType string) bolt.FreelistType { + if freelistType == freelistMapType { + return bolt.FreelistMapType + } + + return bolt.FreelistArrayType +} diff --git a/vendor/go.etcd.io/etcd/embed/config_logging.go b/vendor/go.etcd.io/etcd/embed/config_logging.go new file mode 100644 index 000000000..e42103cb1 --- /dev/null +++ b/vendor/go.etcd.io/etcd/embed/config_logging.go @@ -0,0 +1,312 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package embed + +import ( + "crypto/tls" + "errors" + "fmt" + "io/ioutil" + "os" + "reflect" + "sync" + + "go.etcd.io/etcd/pkg/logutil" + + "github.com/coreos/pkg/capnslog" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" +) + +// GetLogger returns the logger. +func (cfg Config) GetLogger() *zap.Logger { + cfg.loggerMu.RLock() + l := cfg.logger + cfg.loggerMu.RUnlock() + return l +} + +// for testing +var grpcLogOnce = new(sync.Once) + +// setupLogging initializes etcd logging. +// Must be called after flag parsing or finishing configuring embed.Config. +func (cfg *Config) setupLogging() error { + // handle "DeprecatedLogOutput" in v3.4 + // TODO: remove "DeprecatedLogOutput" in v3.5 + len1 := len(cfg.DeprecatedLogOutput) + len2 := len(cfg.LogOutputs) + if len1 != len2 { + switch { + case len1 > len2: // deprecate "log-output" flag is used + fmt.Fprintln(os.Stderr, "'--log-output' flag has been deprecated! Please use '--log-outputs'!") + cfg.LogOutputs = cfg.DeprecatedLogOutput + case len1 < len2: // "--log-outputs" flag has been set with multiple writers + cfg.DeprecatedLogOutput = []string{} + } + } else { + if len1 > 1 { + return errors.New("both '--log-output' and '--log-outputs' are set; only set '--log-outputs'") + } + if len1 < 1 { + return errors.New("either '--log-output' or '--log-outputs' flag must be set") + } + if reflect.DeepEqual(cfg.DeprecatedLogOutput, cfg.LogOutputs) && cfg.DeprecatedLogOutput[0] != DefaultLogOutput { + return fmt.Errorf("'--log-output=%q' and '--log-outputs=%q' are incompatible; only set --log-outputs", cfg.DeprecatedLogOutput, cfg.LogOutputs) + } + if !reflect.DeepEqual(cfg.DeprecatedLogOutput, []string{DefaultLogOutput}) { + fmt.Fprintf(os.Stderr, "[WARNING] Deprecated '--log-output' flag is set to %q\n", cfg.DeprecatedLogOutput) + fmt.Fprintln(os.Stderr, "Please use '--log-outputs' flag") + } + } + + // TODO: remove after deprecating log related flags in v3.5 + if cfg.Debug { + fmt.Fprintf(os.Stderr, "[WARNING] Deprecated '--debug' flag is set to %v (use '--log-level=debug' instead\n", cfg.Debug) + } + if cfg.Debug && cfg.LogLevel != "debug" { + fmt.Fprintf(os.Stderr, "[WARNING] Deprecated '--debug' flag is set to %v with inconsistent '--log-level=%s' flag\n", cfg.Debug, cfg.LogLevel) + } + if cfg.Logger == "capnslog" { + fmt.Fprintf(os.Stderr, "[WARNING] Deprecated '--logger=%s' flag is set; use '--logger=zap' flag instead\n", cfg.Logger) + } + if cfg.LogPkgLevels != "" { + fmt.Fprintf(os.Stderr, "[WARNING] Deprecated '--log-package-levels=%s' flag is set; use '--logger=zap' flag instead\n", cfg.LogPkgLevels) + } + + switch cfg.Logger { + case "capnslog": // TODO: deprecate this in v3.5 + cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure + cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure + + if cfg.Debug { + capnslog.SetGlobalLogLevel(capnslog.DEBUG) + grpc.EnableTracing = true + // enable info, warning, error + grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr)) + } else { + 
capnslog.SetGlobalLogLevel(logutil.ConvertToCapnslogLogLevel(cfg.LogLevel)) + // only discard info + grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr)) + } + + // TODO: deprecate with "capnslog" + if cfg.LogPkgLevels != "" { + repoLog := capnslog.MustRepoLogger("go.etcd.io/etcd") + settings, err := repoLog.ParseLogLevelConfig(cfg.LogPkgLevels) + if err != nil { + plog.Warningf("couldn't parse log level string: %s, continuing with default levels", err.Error()) + return nil + } + repoLog.SetLogLevel(settings) + } + + if len(cfg.LogOutputs) != 1 { + return fmt.Errorf("--logger=capnslog supports only 1 value in '--log-outputs', got %q", cfg.LogOutputs) + } + // capnslog initially SetFormatter(NewDefaultFormatter(os.Stderr)) + // where NewDefaultFormatter returns NewJournaldFormatter when syscall.Getppid() == 1 + // specify 'stdout' or 'stderr' to skip journald logging even when running under systemd + output := cfg.LogOutputs[0] + switch output { + case StdErrLogOutput: + capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, cfg.Debug)) + case StdOutLogOutput: + capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, cfg.Debug)) + case DefaultLogOutput: + default: + return fmt.Errorf("unknown log-output %q (only supports %q, %q, %q)", output, DefaultLogOutput, StdErrLogOutput, StdOutLogOutput) + } + + case "zap": + if len(cfg.LogOutputs) == 0 { + cfg.LogOutputs = []string{DefaultLogOutput} + } + if len(cfg.LogOutputs) > 1 { + for _, v := range cfg.LogOutputs { + if v == DefaultLogOutput { + return fmt.Errorf("multi logoutput for %q is not supported yet", DefaultLogOutput) + } + } + } + + outputPaths, errOutputPaths := make([]string, 0), make([]string, 0) + isJournal := false + for _, v := range cfg.LogOutputs { + switch v { + case DefaultLogOutput: + outputPaths = append(outputPaths, StdErrLogOutput) + errOutputPaths = append(errOutputPaths, StdErrLogOutput) + + case JournalLogOutput: + isJournal = true + + case StdErrLogOutput: + outputPaths = append(outputPaths, StdErrLogOutput) + errOutputPaths = append(errOutputPaths, StdErrLogOutput) + + case StdOutLogOutput: + outputPaths = append(outputPaths, StdOutLogOutput) + errOutputPaths = append(errOutputPaths, StdOutLogOutput) + + default: + outputPaths = append(outputPaths, v) + errOutputPaths = append(errOutputPaths, v) + } + } + + if !isJournal { + copied := logutil.DefaultZapLoggerConfig + copied.OutputPaths = outputPaths + copied.ErrorOutputPaths = errOutputPaths + copied = logutil.MergeOutputPaths(copied) + copied.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel)) + if cfg.Debug || cfg.LogLevel == "debug" { + // enable tracing even when "--debug --log-level info" + // in order to keep backward compatibility with <= v3.3 + // TODO: remove "Debug" check in v3.5 + grpc.EnableTracing = true + } + if cfg.ZapLoggerBuilder == nil { + cfg.ZapLoggerBuilder = func(c *Config) error { + var err error + c.logger, err = copied.Build() + if err != nil { + return err + } + c.loggerMu.Lock() + defer c.loggerMu.Unlock() + c.loggerConfig = &copied + c.loggerCore = nil + c.loggerWriteSyncer = nil + grpcLogOnce.Do(func() { + // debug true, enable info, warning, error + // debug false, only discard info + var gl grpclog.LoggerV2 + gl, err = logutil.NewGRPCLoggerV2(copied) + if err == nil { + grpclog.SetLoggerV2(gl) + } + }) + return nil + } + } + } else { + if len(cfg.LogOutputs) > 1 { + for _, v := range cfg.LogOutputs { + if v != DefaultLogOutput { + return fmt.Errorf("running with systemd/journal but 
other '--log-outputs' values (%q) are configured with 'default'; override 'default' value with something else", cfg.LogOutputs) + } + } + } + + // use stderr as fallback + syncer, lerr := getJournalWriteSyncer() + if lerr != nil { + return lerr + } + + lvl := zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel)) + if cfg.Debug || cfg.LogLevel == "debug" { + // enable tracing even when "--debug --log-level info" + // in order to keep backward compatibility with <= v3.3 + // TODO: remove "Debug" check in v3.5 + grpc.EnableTracing = true + } + + // WARN: do not change field names in encoder config + // journald logging writer assumes field names of "level" and "caller" + cr := zapcore.NewCore( + zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig), + syncer, + lvl, + ) + if cfg.ZapLoggerBuilder == nil { + cfg.ZapLoggerBuilder = func(c *Config) error { + c.logger = zap.New(cr, zap.AddCaller(), zap.ErrorOutput(syncer)) + c.loggerMu.Lock() + defer c.loggerMu.Unlock() + c.loggerConfig = nil + c.loggerCore = cr + c.loggerWriteSyncer = syncer + + grpcLogOnce.Do(func() { + grpclog.SetLoggerV2(logutil.NewGRPCLoggerV2FromZapCore(cr, syncer)) + }) + return nil + } + } + } + + err := cfg.ZapLoggerBuilder(cfg) + if err != nil { + return err + } + + logTLSHandshakeFailure := func(conn *tls.Conn, err error) { + state := conn.ConnectionState() + remoteAddr := conn.RemoteAddr().String() + serverName := state.ServerName + if len(state.PeerCertificates) > 0 { + cert := state.PeerCertificates[0] + ips := make([]string, len(cert.IPAddresses)) + for i := range cert.IPAddresses { + ips[i] = cert.IPAddresses[i].String() + } + cfg.logger.Warn( + "rejected connection", + zap.String("remote-addr", remoteAddr), + zap.String("server-name", serverName), + zap.Strings("ip-addresses", ips), + zap.Strings("dns-names", cert.DNSNames), + zap.Error(err), + ) + } else { + cfg.logger.Warn( + "rejected connection", + zap.String("remote-addr", remoteAddr), + zap.String("server-name", serverName), + zap.Error(err), + ) + } + } + cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure + cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure + + default: + return fmt.Errorf("unknown logger option %q", cfg.Logger) + } + + return nil +} + +// NewZapCoreLoggerBuilder generates a zap core logger builder. +func NewZapCoreLoggerBuilder(lg *zap.Logger, cr zapcore.Core, syncer zapcore.WriteSyncer) func(*Config) error { + return func(cfg *Config) error { + cfg.loggerMu.Lock() + defer cfg.loggerMu.Unlock() + cfg.logger = lg + cfg.loggerConfig = nil + cfg.loggerCore = cr + cfg.loggerWriteSyncer = syncer + + grpcLogOnce.Do(func() { + grpclog.SetLoggerV2(logutil.NewGRPCLoggerV2FromZapCore(cr, syncer)) + }) + return nil + } +} diff --git a/vendor/go.etcd.io/etcd/embed/config_logging_journal_unix.go b/vendor/go.etcd.io/etcd/embed/config_logging_journal_unix.go new file mode 100644 index 000000000..44a51d677 --- /dev/null +++ b/vendor/go.etcd.io/etcd/embed/config_logging_journal_unix.go @@ -0,0 +1,35 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows + +package embed + +import ( + "fmt" + "os" + + "go.etcd.io/etcd/pkg/logutil" + + "go.uber.org/zap/zapcore" +) + +// use stderr as fallback +func getJournalWriteSyncer() (zapcore.WriteSyncer, error) { + jw, err := logutil.NewJournalWriter(os.Stderr) + if err != nil { + return nil, fmt.Errorf("can't find journal (%v)", err) + } + return zapcore.AddSync(jw), nil +} diff --git a/vendor/go.etcd.io/etcd/embed/config_logging_journal_windows.go b/vendor/go.etcd.io/etcd/embed/config_logging_journal_windows.go new file mode 100644 index 000000000..5b7625648 --- /dev/null +++ b/vendor/go.etcd.io/etcd/embed/config_logging_journal_windows.go @@ -0,0 +1,27 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package embed + +import ( + "os" + + "go.uber.org/zap/zapcore" +) + +func getJournalWriteSyncer() (zapcore.WriteSyncer, error) { + return zapcore.AddSync(os.Stderr), nil +} diff --git a/vendor/github.com/coreos/etcd/embed/doc.go b/vendor/go.etcd.io/etcd/embed/doc.go similarity index 97% rename from vendor/github.com/coreos/etcd/embed/doc.go rename to vendor/go.etcd.io/etcd/embed/doc.go index c555aa58e..4811bb634 100644 --- a/vendor/github.com/coreos/etcd/embed/doc.go +++ b/vendor/go.etcd.io/etcd/embed/doc.go @@ -21,7 +21,7 @@ Launch an embedded etcd server using the configuration defaults: "log" "time" - "github.com/coreos/etcd/embed" + "go.etcd.io/etcd/embed" ) func main() { diff --git a/vendor/github.com/coreos/etcd/embed/etcd.go b/vendor/go.etcd.io/etcd/embed/etcd.go similarity index 54% rename from vendor/github.com/coreos/etcd/embed/etcd.go rename to vendor/go.etcd.io/etcd/embed/etcd.go index bd848a713..ac7dbc987 100644 --- a/vendor/github.com/coreos/etcd/embed/etcd.go +++ b/vendor/go.etcd.io/etcd/embed/etcd.go @@ -23,32 +23,34 @@ import ( "net" "net/http" "net/url" + "runtime" + "sort" "strconv" "sync" "time" - "github.com/coreos/etcd/compactor" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/etcdhttp" - "github.com/coreos/etcd/etcdserver/api/v2http" - "github.com/coreos/etcd/etcdserver/api/v2v3" - "github.com/coreos/etcd/etcdserver/api/v3client" - "github.com/coreos/etcd/etcdserver/api/v3rpc" - "github.com/coreos/etcd/pkg/cors" - "github.com/coreos/etcd/pkg/debugutil" - runtimeutil "github.com/coreos/etcd/pkg/runtime" - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/rafthttp" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api/etcdhttp" + "go.etcd.io/etcd/etcdserver/api/rafthttp" + "go.etcd.io/etcd/etcdserver/api/v2http" + "go.etcd.io/etcd/etcdserver/api/v2v3" + "go.etcd.io/etcd/etcdserver/api/v3client" + "go.etcd.io/etcd/etcdserver/api/v3rpc" + "go.etcd.io/etcd/pkg/debugutil" + runtimeutil "go.etcd.io/etcd/pkg/runtime" + "go.etcd.io/etcd/pkg/transport" + "go.etcd.io/etcd/pkg/types" + 
"go.etcd.io/etcd/version" "github.com/coreos/pkg/capnslog" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/soheilhy/cmux" + "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" ) -var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "embed") +var plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "embed") const ( // internal fd usage includes disk usage and transport usage. @@ -111,12 +113,26 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { e = nil }() - if e.Peers, err = startPeerListeners(cfg); err != nil { + if e.cfg.logger != nil { + e.cfg.logger.Info( + "configuring peer listeners", + zap.Strings("listen-peer-urls", e.cfg.getLPURLs()), + ) + } + if e.Peers, err = configurePeerListeners(cfg); err != nil { return e, err } - if e.sctxs, err = startClientListeners(cfg); err != nil { + + if e.cfg.logger != nil { + e.cfg.logger.Info( + "configuring client listeners", + zap.Strings("listen-client-urls", e.cfg.getLCURLs()), + ) + } + if e.sctxs, err = configureClientListeners(cfg); err != nil { return e, err } + for _, sctx := range e.sctxs { e.Clients = append(e.Clients, sctx.l) } @@ -125,7 +141,6 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { urlsmap types.URLsMap token string ) - memberInitialized := true if !isMemberInitialized(cfg) { memberInitialized = false @@ -144,13 +159,16 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { return e, err } + backendFreelistType := parseBackendFreelistType(cfg.ExperimentalBackendFreelistType) + srvcfg := etcdserver.ServerConfig{ Name: cfg.Name, ClientURLs: cfg.ACUrls, PeerURLs: cfg.APUrls, DataDir: cfg.Dir, DedicatedWALDir: cfg.WalDir, - SnapCount: cfg.SnapCount, + SnapshotCount: cfg.SnapshotCount, + SnapshotCatchUpEntries: cfg.SnapshotCatchUpEntries, MaxSnapFiles: cfg.MaxSnapFiles, MaxWALFiles: cfg.MaxWalFiles, InitialPeerURLsMap: urlsmap, @@ -158,7 +176,6 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { DiscoveryURL: cfg.Durl, DiscoveryProxy: cfg.Dproxy, NewCluster: cfg.IsNewCluster(), - ForceNewCluster: cfg.ForceNewCluster, PeerTLSInfo: cfg.PeerTLSInfo, TickMs: cfg.TickMs, ElectionTicks: cfg.ElectionTicks(), @@ -166,16 +183,31 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { AutoCompactionRetention: autoCompactionRetention, AutoCompactionMode: cfg.AutoCompactionMode, QuotaBackendBytes: cfg.QuotaBackendBytes, + BackendBatchLimit: cfg.BackendBatchLimit, + BackendFreelistType: backendFreelistType, + BackendBatchInterval: cfg.BackendBatchInterval, MaxTxnOps: cfg.MaxTxnOps, MaxRequestBytes: cfg.MaxRequestBytes, StrictReconfigCheck: cfg.StrictReconfigCheck, ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth, AuthToken: cfg.AuthToken, + BcryptCost: cfg.BcryptCost, + CORS: cfg.CORS, + HostWhitelist: cfg.HostWhitelist, InitialCorruptCheck: cfg.ExperimentalInitialCorruptCheck, CorruptCheckTime: cfg.ExperimentalCorruptCheckTime, + PreVote: cfg.PreVote, + Logger: cfg.logger, + LoggerConfig: cfg.loggerConfig, + LoggerCore: cfg.loggerCore, + LoggerWriteSyncer: cfg.loggerWriteSyncer, Debug: cfg.Debug, + ForceNewCluster: cfg.ForceNewCluster, + EnableGRPCGateway: cfg.EnableGRPCGateway, + EnableLeaseCheckpoint: cfg.ExperimentalEnableLeaseCheckpoint, + CompactionBatchLimit: cfg.ExperimentalCompactionBatchLimit, } - + print(e.cfg.logger, *cfg, srvcfg, memberInitialized) if e.Server, err = etcdserver.NewServer(srvcfg); err != nil { return e, err } @@ -205,10 +237,109 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { return e, err } + if e.cfg.logger != nil { + 
e.cfg.logger.Info( + "now serving peer/client/metrics", + zap.String("local-member-id", e.Server.ID().String()), + zap.Strings("initial-advertise-peer-urls", e.cfg.getAPURLs()), + zap.Strings("listen-peer-urls", e.cfg.getLPURLs()), + zap.Strings("advertise-client-urls", e.cfg.getACURLs()), + zap.Strings("listen-client-urls", e.cfg.getLCURLs()), + zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()), + ) + } serving = true return e, nil } +func print(lg *zap.Logger, ec Config, sc etcdserver.ServerConfig, memberInitialized bool) { + // TODO: remove this after dropping "capnslog" + if lg == nil { + plog.Infof("name = %s", ec.Name) + if sc.ForceNewCluster { + plog.Infof("force new cluster") + } + plog.Infof("data dir = %s", sc.DataDir) + plog.Infof("member dir = %s", sc.MemberDir()) + if sc.DedicatedWALDir != "" { + plog.Infof("dedicated WAL dir = %s", sc.DedicatedWALDir) + } + plog.Infof("heartbeat = %dms", sc.TickMs) + plog.Infof("election = %dms", sc.ElectionTicks*int(sc.TickMs)) + plog.Infof("snapshot count = %d", sc.SnapshotCount) + if len(sc.DiscoveryURL) != 0 { + plog.Infof("discovery URL= %s", sc.DiscoveryURL) + if len(sc.DiscoveryProxy) != 0 { + plog.Infof("discovery proxy = %s", sc.DiscoveryProxy) + } + } + plog.Infof("advertise client URLs = %s", sc.ClientURLs) + if memberInitialized { + plog.Infof("initial advertise peer URLs = %s", sc.PeerURLs) + plog.Infof("initial cluster = %s", sc.InitialPeerURLsMap) + } + } else { + cors := make([]string, 0, len(ec.CORS)) + for v := range ec.CORS { + cors = append(cors, v) + } + sort.Strings(cors) + + hss := make([]string, 0, len(ec.HostWhitelist)) + for v := range ec.HostWhitelist { + hss = append(hss, v) + } + sort.Strings(hss) + + quota := ec.QuotaBackendBytes + if quota == 0 { + quota = etcdserver.DefaultQuotaBytes + } + + lg.Info( + "starting an etcd server", + zap.String("etcd-version", version.Version), + zap.String("git-sha", version.GitSHA), + zap.String("go-version", runtime.Version()), + zap.String("go-os", runtime.GOOS), + zap.String("go-arch", runtime.GOARCH), + zap.Int("max-cpu-set", runtime.GOMAXPROCS(0)), + zap.Int("max-cpu-available", runtime.NumCPU()), + zap.Bool("member-initialized", memberInitialized), + zap.String("name", sc.Name), + zap.String("data-dir", sc.DataDir), + zap.String("wal-dir", ec.WalDir), + zap.String("wal-dir-dedicated", sc.DedicatedWALDir), + zap.String("member-dir", sc.MemberDir()), + zap.Bool("force-new-cluster", sc.ForceNewCluster), + zap.String("heartbeat-interval", fmt.Sprintf("%v", time.Duration(sc.TickMs)*time.Millisecond)), + zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(sc.ElectionTicks*int(sc.TickMs))*time.Millisecond)), + zap.Bool("initial-election-tick-advance", sc.InitialElectionTickAdvance), + zap.Uint64("snapshot-count", sc.SnapshotCount), + zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries), + zap.Strings("initial-advertise-peer-urls", ec.getAPURLs()), + zap.Strings("listen-peer-urls", ec.getLPURLs()), + zap.Strings("advertise-client-urls", ec.getACURLs()), + zap.Strings("listen-client-urls", ec.getLCURLs()), + zap.Strings("listen-metrics-urls", ec.getMetricsURLs()), + zap.Strings("cors", cors), + zap.Strings("host-whitelist", hss), + zap.String("initial-cluster", sc.InitialPeerURLsMap.String()), + zap.String("initial-cluster-state", ec.ClusterState), + zap.String("initial-cluster-token", sc.InitialClusterToken), + zap.Int64("quota-size-bytes", quota), + zap.Bool("pre-vote", sc.PreVote), + zap.Bool("initial-corrupt-check", sc.InitialCorruptCheck), + 
zap.String("corrupt-check-time-interval", sc.CorruptCheckTime.String()), + zap.String("auto-compaction-mode", sc.AutoCompactionMode), + zap.Duration("auto-compaction-retention", sc.AutoCompactionRetention), + zap.String("auto-compaction-interval", sc.AutoCompactionRetention.String()), + zap.String("discovery-url", sc.DiscoveryURL), + zap.String("discovery-proxy", sc.DiscoveryProxy), + ) + } +} + // Config returns the current configuration. func (e *Etcd) Config() Config { return e.cfg @@ -218,6 +349,23 @@ func (e *Etcd) Config() Config { // Client requests will be terminated with request timeout. // After timeout, enforce remaning requests be closed immediately. func (e *Etcd) Close() { + fields := []zap.Field{ + zap.String("name", e.cfg.Name), + zap.String("data-dir", e.cfg.Dir), + zap.Strings("advertise-peer-urls", e.cfg.getAPURLs()), + zap.Strings("advertise-client-urls", e.cfg.getACURLs()), + } + lg := e.GetLogger() + if lg != nil { + lg.Info("closing etcd server", fields...) + } + defer func() { + if lg != nil { + lg.Info("closed etcd server", fields...) + lg.Sync() + } + }() + e.closeOnce.Do(func() { close(e.stopc) }) // close client requests with request timeout @@ -272,7 +420,7 @@ func stopServers(ctx context.Context, ss *servers) { // do not grpc.Server.GracefulStop with TLS enabled etcd server // See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531 - // and https://github.com/coreos/etcd/issues/8916 + // and https://github.com/etcd-io/etcd/issues/8916 if ss.secure { shutdownNow() return @@ -301,15 +449,27 @@ func stopServers(ctx context.Context, ss *servers) { func (e *Etcd) Err() <-chan error { return e.errc } -func startPeerListeners(cfg *Config) (peers []*peerListener, err error) { +func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) { if err = updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites); err != nil { return nil, err } if err = cfg.PeerSelfCert(); err != nil { - plog.Fatalf("could not get certs (%v)", err) + if cfg.logger != nil { + cfg.logger.Fatal("failed to get peer self-signed certs", zap.Error(err)) + } else { + plog.Fatalf("could not get certs (%v)", err) + } } if !cfg.PeerTLSInfo.Empty() { - plog.Infof("peerTLS: %s", cfg.PeerTLSInfo) + if cfg.logger != nil { + cfg.logger.Info( + "starting with peer TLS", + zap.String("tls-info", fmt.Sprintf("%+v", cfg.PeerTLSInfo)), + zap.Strings("cipher-suites", cfg.CipherSuites), + ) + } else { + plog.Infof("peerTLS: %s", cfg.PeerTLSInfo) + } } peers = make([]*peerListener, len(cfg.LPUrls)) @@ -319,7 +479,15 @@ func startPeerListeners(cfg *Config) (peers []*peerListener, err error) { } for i := range peers { if peers[i] != nil && peers[i].close != nil { - plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String()) + if cfg.logger != nil { + cfg.logger.Warn( + "closing peer listener", + zap.String("address", cfg.LPUrls[i].String()), + zap.Error(err), + ) + } else { + plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String()) + } ctx, cancel := context.WithTimeout(context.Background(), time.Second) peers[i].close(ctx) cancel() @@ -330,10 +498,18 @@ func startPeerListeners(cfg *Config) (peers []*peerListener, err error) { for i, u := range cfg.LPUrls { if u.Scheme == "http" { if !cfg.PeerTLSInfo.Empty() { - plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. 
Ignored peer key/cert files.", u.String()) + if cfg.logger != nil { + cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("peer-url", u.String())) + } else { + plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String()) + } } if cfg.PeerTLSInfo.ClientCertAuth { - plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String()) + if cfg.logger != nil { + cfg.logger.Warn("scheme is HTTP while --peer-client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("peer-url", u.String())) + } else { + plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String()) + } } } peers[i] = &peerListener{close: func(context.Context) error { return nil }} @@ -345,14 +521,13 @@ func startPeerListeners(cfg *Config) (peers []*peerListener, err error) { peers[i].close = func(context.Context) error { return peers[i].Listener.Close() } - plog.Info("listening for peers on ", u.String()) } return peers, nil } // configure peer handlers after rafthttp.Transport started func (e *Etcd) servePeers() (err error) { - ph := etcdhttp.NewPeerHandler(e.Server) + ph := etcdhttp.NewPeerHandler(e.GetLogger(), e.Server) var peerTLScfg *tls.Config if !e.cfg.PeerTLSInfo.Empty() { if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil { @@ -361,6 +536,7 @@ func (e *Etcd) servePeers() (err error) { } for _, p := range e.Peers { + u := p.Listener.Addr().String() gs := v3rpc.Server(e.Server, peerTLScfg) m := cmux.New(p.Listener) go gs.Serve(m.Match(cmux.HTTP2())) @@ -375,7 +551,19 @@ func (e *Etcd) servePeers() (err error) { // gracefully shutdown http.Server // close open listeners, idle connections // until context cancel or time-out + if e.cfg.logger != nil { + e.cfg.logger.Info( + "stopping serving peer traffic", + zap.String("address", u), + ) + } stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv}) + if e.cfg.logger != nil { + e.cfg.logger.Info( + "stopped serving peer traffic", + zap.String("address", u), + ) + } return nil } } @@ -383,45 +571,70 @@ func (e *Etcd) servePeers() (err error) { // start peer servers in a goroutine for _, pl := range e.Peers { go func(l *peerListener) { + u := l.Addr().String() + if e.cfg.logger != nil { + e.cfg.logger.Info( + "serving peer traffic", + zap.String("address", u), + ) + } else { + plog.Info("listening for peers on ", u) + } e.errHandler(l.serve()) }(pl) } return nil } -func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { +func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { if err = updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites); err != nil { return nil, err } if err = cfg.ClientSelfCert(); err != nil { - plog.Fatalf("could not get certs (%v)", err) + if cfg.logger != nil { + cfg.logger.Fatal("failed to get client self-signed certs", zap.Error(err)) + } else { + plog.Fatalf("could not get certs (%v)", err) + } } if cfg.EnablePprof { - plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf) + if cfg.logger != nil { + cfg.logger.Info("pprof is enabled", zap.String("path", debugutil.HTTPPrefixPProf)) + } else { + plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf) + } } sctxs = make(map[string]*serveCtx) for _, u := 
range cfg.LCUrls { - sctx := newServeCtx() - + sctx := newServeCtx(cfg.logger) if u.Scheme == "http" || u.Scheme == "unix" { if !cfg.ClientTLSInfo.Empty() { - plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String()) + if cfg.logger != nil { + cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("client-url", u.String())) + } else { + plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String()) + } } if cfg.ClientTLSInfo.ClientCertAuth { - plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String()) + if cfg.logger != nil { + cfg.logger.Warn("scheme is HTTP while --client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("client-url", u.String())) + } else { + plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String()) + } } } if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() { - return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPs scheme", u.String()) + return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPS scheme", u.String()) } - proto := "tcp" + network := "tcp" addr := u.Host if u.Scheme == "unix" || u.Scheme == "unixs" { - proto = "unix" + network = "unix" addr = u.Host + u.Path } + sctx.network = network sctx.secure = u.Scheme == "https" || u.Scheme == "unixs" sctx.insecure = !sctx.secure @@ -431,7 +644,7 @@ func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { continue } - if sctx.l, err = net.Listen(proto, addr); err != nil { + if sctx.l, err = net.Listen(network, addr); err != nil { return nil, err } // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking @@ -440,21 +653,37 @@ func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil { if fdLimit <= reservedInternalFDNum { - plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum) + if cfg.logger != nil { + cfg.logger.Fatal( + "file descriptor limit of etcd process is too low; please set higher", + zap.Uint64("limit", fdLimit), + zap.Int("recommended-limit", reservedInternalFDNum), + ) + } else { + plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum) + } } sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum)) } - if proto == "tcp" { - if sctx.l, err = transport.NewKeepAliveListener(sctx.l, "tcp", nil); err != nil { + if network == "tcp" { + if sctx.l, err = transport.NewKeepAliveListener(sctx.l, network, nil); err != nil { return nil, err } } - plog.Info("listening for client requests on ", u.Host) defer func() { - if err != nil { - sctx.l.Close() + if err == nil { + return + } + sctx.l.Close() + if cfg.logger != nil { + cfg.logger.Warn( + "closing peer listener", + zap.String("address", u.Host), + zap.Error(err), + ) + } else { plog.Info("stopping listening for client requests on ", u.Host) } }() @@ -475,28 +704,31 @@ func 
startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { func (e *Etcd) serveClients() (err error) { if !e.cfg.ClientTLSInfo.Empty() { - plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo) - } - - if e.cfg.CorsInfo.String() != "" { - plog.Infof("cors = %s", e.cfg.CorsInfo) + if e.cfg.logger != nil { + e.cfg.logger.Info( + "starting with client TLS", + zap.String("tls-info", fmt.Sprintf("%+v", e.cfg.ClientTLSInfo)), + zap.Strings("cipher-suites", e.cfg.CipherSuites), + ) + } else { + plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo) + } } // Start a client server goroutine for each listen address var h http.Handler if e.Config().EnableV2 { if len(e.Config().ExperimentalEnableV2V3) > 0 { - srv := v2v3.NewServer(v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3) - h = v2http.NewClientHandler(srv, e.Server.Cfg.ReqTimeout()) + srv := v2v3.NewServer(e.cfg.logger, v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3) + h = v2http.NewClientHandler(e.GetLogger(), srv, e.Server.Cfg.ReqTimeout()) } else { - h = v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout()) + h = v2http.NewClientHandler(e.GetLogger(), e.Server, e.Server.Cfg.ReqTimeout()) } } else { mux := http.NewServeMux() etcdhttp.HandleBasic(mux, e.Server) h = mux } - h = http.Handler(&cors.CORSHandler{Handler: h, Info: e.cfg.CorsInfo}) gopts := []grpc.ServerOption{} if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) { @@ -513,7 +745,7 @@ func (e *Etcd) serveClients() (err error) { })) } - // start client servers in a goroutine + // start client servers in each goroutine for _, sctx := range e.sctxs { go func(s *serveCtx) { e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, gopts...)) @@ -542,7 +774,14 @@ func (e *Etcd) serveMetrics() (err error) { } e.metricsListeners = append(e.metricsListeners, ml) go func(u url.URL, ln net.Listener) { - plog.Info("listening for metrics on ", u.String()) + if e.cfg.logger != nil { + e.cfg.logger.Info( + "serving metrics", + zap.String("address", u.String()), + ) + } else { + plog.Info("listening for metrics on ", u.String()) + } e.errHandler(http.Serve(ln, metricsMux)) }(murl, ml) } @@ -562,13 +801,21 @@ func (e *Etcd) errHandler(err error) { } } +// GetLogger returns the logger. +func (e *Etcd) GetLogger() *zap.Logger { + e.cfg.loggerMu.RLock() + l := e.cfg.logger + e.cfg.loggerMu.RUnlock() + return l +} + func parseCompactionRetention(mode, retention string) (ret time.Duration, err error) { h, err := strconv.Atoi(retention) if err == nil { switch mode { - case compactor.ModeRevision: + case CompactorModeRevision: ret = time.Duration(int64(h)) - case compactor.ModePeriodic: + case CompactorModePeriodic: ret = time.Duration(int64(h)) * time.Hour } } else { diff --git a/vendor/go.etcd.io/etcd/embed/serve.go b/vendor/go.etcd.io/etcd/embed/serve.go new file mode 100644 index 000000000..a3b20c46c --- /dev/null +++ b/vendor/go.etcd.io/etcd/embed/serve.go @@ -0,0 +1,435 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package embed + +import ( + "context" + "fmt" + "io/ioutil" + defaultLog "log" + "net" + "net/http" + "strings" + + "go.etcd.io/etcd/clientv3/credentials" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api/v3client" + "go.etcd.io/etcd/etcdserver/api/v3election" + "go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb" + v3electiongw "go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/gw" + "go.etcd.io/etcd/etcdserver/api/v3lock" + "go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb" + v3lockgw "go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/gw" + "go.etcd.io/etcd/etcdserver/api/v3rpc" + etcdservergw "go.etcd.io/etcd/etcdserver/etcdserverpb/gw" + "go.etcd.io/etcd/pkg/debugutil" + "go.etcd.io/etcd/pkg/httputil" + "go.etcd.io/etcd/pkg/transport" + + gw "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/soheilhy/cmux" + "github.com/tmc/grpc-websocket-proxy/wsproxy" + "go.uber.org/zap" + "golang.org/x/net/trace" + "google.golang.org/grpc" +) + +type serveCtx struct { + lg *zap.Logger + l net.Listener + addr string + network string + secure bool + insecure bool + + ctx context.Context + cancel context.CancelFunc + + userHandlers map[string]http.Handler + serviceRegister func(*grpc.Server) + serversC chan *servers +} + +type servers struct { + secure bool + grpc *grpc.Server + http *http.Server +} + +func newServeCtx(lg *zap.Logger) *serveCtx { + ctx, cancel := context.WithCancel(context.Background()) + return &serveCtx{ + lg: lg, + ctx: ctx, + cancel: cancel, + userHandlers: make(map[string]http.Handler), + serversC: make(chan *servers, 2), // in case sctx.insecure,sctx.secure true + } +} + +// serve accepts incoming connections on the listener l, +// creating a new service goroutine for each. The service goroutines +// read requests and then call handler to reply to them. +func (sctx *serveCtx) serve( + s *etcdserver.EtcdServer, + tlsinfo *transport.TLSInfo, + handler http.Handler, + errHandler func(error), + gopts ...grpc.ServerOption) (err error) { + logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0) + <-s.ReadyNotify() + + if sctx.lg == nil { + plog.Info("ready to serve client requests") + } + + m := cmux.New(sctx.l) + v3c := v3client.New(s) + servElection := v3election.NewElectionServer(v3c) + servLock := v3lock.NewLockServer(v3c) + + var gs *grpc.Server + defer func() { + if err != nil && gs != nil { + gs.Stop() + } + }() + + if sctx.insecure { + gs = v3rpc.Server(s, nil, gopts...) 
+ v3electionpb.RegisterElectionServer(gs, servElection) + v3lockpb.RegisterLockServer(gs, servLock) + if sctx.serviceRegister != nil { + sctx.serviceRegister(gs) + } + grpcl := m.Match(cmux.HTTP2()) + go func() { errHandler(gs.Serve(grpcl)) }() + + var gwmux *gw.ServeMux + if s.Cfg.EnableGRPCGateway { + gwmux, err = sctx.registerGateway([]grpc.DialOption{grpc.WithInsecure()}) + if err != nil { + return err + } + } + + httpmux := sctx.createMux(gwmux, handler) + + srvhttp := &http.Server{ + Handler: createAccessController(sctx.lg, s, httpmux), + ErrorLog: logger, // do not log user error + } + httpl := m.Match(cmux.HTTP1()) + go func() { errHandler(srvhttp.Serve(httpl)) }() + + sctx.serversC <- &servers{grpc: gs, http: srvhttp} + if sctx.lg != nil { + sctx.lg.Info( + "serving client traffic insecurely; this is strongly discouraged!", + zap.String("address", sctx.l.Addr().String()), + ) + } else { + plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String()) + } + } + + if sctx.secure { + tlscfg, tlsErr := tlsinfo.ServerConfig() + if tlsErr != nil { + return tlsErr + } + gs = v3rpc.Server(s, tlscfg, gopts...) + v3electionpb.RegisterElectionServer(gs, servElection) + v3lockpb.RegisterLockServer(gs, servLock) + if sctx.serviceRegister != nil { + sctx.serviceRegister(gs) + } + handler = grpcHandlerFunc(gs, handler) + + var gwmux *gw.ServeMux + if s.Cfg.EnableGRPCGateway { + dtls := tlscfg.Clone() + // trust local server + dtls.InsecureSkipVerify = true + bundle := credentials.NewBundle(credentials.Config{TLSConfig: dtls}) + opts := []grpc.DialOption{grpc.WithTransportCredentials(bundle.TransportCredentials())} + gwmux, err = sctx.registerGateway(opts) + if err != nil { + return err + } + } + + var tlsl net.Listener + tlsl, err = transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo) + if err != nil { + return err + } + // TODO: add debug flag; enable logging when debug flag is set + httpmux := sctx.createMux(gwmux, handler) + + srv := &http.Server{ + Handler: createAccessController(sctx.lg, s, httpmux), + TLSConfig: tlscfg, + ErrorLog: logger, // do not log user error + } + go func() { errHandler(srv.Serve(tlsl)) }() + + sctx.serversC <- &servers{secure: true, grpc: gs, http: srv} + if sctx.lg != nil { + sctx.lg.Info( + "serving client traffic securely", + zap.String("address", sctx.l.Addr().String()), + ) + } else { + plog.Infof("serving client requests on %s", sctx.l.Addr().String()) + } + } + + close(sctx.serversC) + return m.Serve() +} + +// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC +// connections or otherHandler otherwise. Given in gRPC docs. 
+func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler { + if otherHandler == nil { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + grpcServer.ServeHTTP(w, r) + }) + } + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { + grpcServer.ServeHTTP(w, r) + } else { + otherHandler.ServeHTTP(w, r) + } + }) +} + +type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error + +func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) { + ctx := sctx.ctx + + addr := sctx.addr + if network := sctx.network; network == "unix" { + // explicitly define unix network for gRPC socket support + addr = fmt.Sprintf("%s://%s", network, addr) + } + + conn, err := grpc.DialContext(ctx, addr, opts...) + if err != nil { + return nil, err + } + gwmux := gw.NewServeMux() + + handlers := []registerHandlerFunc{ + etcdservergw.RegisterKVHandler, + etcdservergw.RegisterWatchHandler, + etcdservergw.RegisterLeaseHandler, + etcdservergw.RegisterClusterHandler, + etcdservergw.RegisterMaintenanceHandler, + etcdservergw.RegisterAuthHandler, + v3lockgw.RegisterLockHandler, + v3electiongw.RegisterElectionHandler, + } + for _, h := range handlers { + if err := h(ctx, gwmux, conn); err != nil { + return nil, err + } + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + if sctx.lg != nil { + sctx.lg.Warn( + "failed to close connection", + zap.String("address", sctx.l.Addr().String()), + zap.Error(cerr), + ) + } else { + plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr) + } + } + }() + + return gwmux, nil +} + +func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux { + httpmux := http.NewServeMux() + for path, h := range sctx.userHandlers { + httpmux.Handle(path, h) + } + + if gwmux != nil { + httpmux.Handle( + "/v3/", + wsproxy.WebsocketProxy( + gwmux, + wsproxy.WithRequestMutator( + // Default to the POST method for streams + func(_ *http.Request, outgoing *http.Request) *http.Request { + outgoing.Method = "POST" + return outgoing + }, + ), + ), + ) + } + if handler != nil { + httpmux.Handle("/", handler) + } + return httpmux +} + +// createAccessController wraps HTTP multiplexer: +// - mutate gRPC gateway request paths +// - check hostname whitelist +// client HTTP requests goes here first +func createAccessController(lg *zap.Logger, s *etcdserver.EtcdServer, mux *http.ServeMux) http.Handler { + return &accessController{lg: lg, s: s, mux: mux} +} + +type accessController struct { + lg *zap.Logger + s *etcdserver.EtcdServer + mux *http.ServeMux +} + +func (ac *accessController) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // redirect for backward compatibilities + if req != nil && req.URL != nil && strings.HasPrefix(req.URL.Path, "/v3beta/") { + req.URL.Path = strings.Replace(req.URL.Path, "/v3beta/", "/v3/", 1) + } + + if req.TLS == nil { // check origin if client connection is not secure + host := httputil.GetHostname(req) + if !ac.s.AccessController.IsHostWhitelisted(host) { + if ac.lg != nil { + ac.lg.Warn( + "rejecting HTTP request to prevent DNS rebinding attacks", + zap.String("host", host), + ) + } else { + plog.Warningf("rejecting HTTP request from %q to prevent DNS rebinding attacks", host) + } + // TODO: use Go's "http.StatusMisdirectedRequest" (421) + // 
https://github.com/golang/go/commit/4b8a7eafef039af1834ef9bfa879257c4a72b7b5 + http.Error(rw, errCVE20185702(host), 421) + return + } + } else if ac.s.Cfg.ClientCertAuthEnabled && ac.s.Cfg.EnableGRPCGateway && + ac.s.AuthStore().IsAuthEnabled() && strings.HasPrefix(req.URL.Path, "/v3/") { + for _, chains := range req.TLS.VerifiedChains { + if len(chains) < 1 { + continue + } + if len(chains[0].Subject.CommonName) != 0 { + http.Error(rw, "CommonName of client sending a request against gateway will be ignored and not used as expected", 400) + return + } + } + } + + // Write CORS header. + if ac.s.AccessController.OriginAllowed("*") { + addCORSHeader(rw, "*") + } else if origin := req.Header.Get("Origin"); ac.s.OriginAllowed(origin) { + addCORSHeader(rw, origin) + } + + if req.Method == "OPTIONS" { + rw.WriteHeader(http.StatusOK) + return + } + + ac.mux.ServeHTTP(rw, req) +} + +// addCORSHeader adds the correct cors headers given an origin +func addCORSHeader(w http.ResponseWriter, origin string) { + w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE") + w.Header().Add("Access-Control-Allow-Origin", origin) + w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization") +} + +// https://github.com/transmission/transmission/pull/468 +func errCVE20185702(host string) string { + return fmt.Sprintf(` +etcd received your request, but the Host header was unrecognized. + +To fix this, choose one of the following options: +- Enable TLS, then any HTTPS request will be allowed. +- Add the hostname you want to use to the whitelist in settings. + - e.g. etcd --host-whitelist %q + +This requirement has been added to help prevent "DNS Rebinding" attacks (CVE-2018-5702). +`, host) +} + +// WrapCORS wraps existing handler with CORS. 
+// TODO: deprecate this after v2 proxy deprecate +func WrapCORS(cors map[string]struct{}, h http.Handler) http.Handler { + return &corsHandler{ + ac: &etcdserver.AccessController{CORS: cors}, + h: h, + } +} + +type corsHandler struct { + ac *etcdserver.AccessController + h http.Handler +} + +func (ch *corsHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if ch.ac.OriginAllowed("*") { + addCORSHeader(rw, "*") + } else if origin := req.Header.Get("Origin"); ch.ac.OriginAllowed(origin) { + addCORSHeader(rw, origin) + } + + if req.Method == "OPTIONS" { + rw.WriteHeader(http.StatusOK) + return + } + + ch.h.ServeHTTP(rw, req) +} + +func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) { + if sctx.userHandlers[s] != nil { + if sctx.lg != nil { + sctx.lg.Warn("path is already registered by user handler", zap.String("path", s)) + } else { + plog.Warningf("path %s already registered by user handler", s) + } + return + } + sctx.userHandlers[s] = h +} + +func (sctx *serveCtx) registerPprof() { + for p, h := range debugutil.PProfHandlers() { + sctx.registerUserHandler(p, h) + } +} + +func (sctx *serveCtx) registerTrace() { + reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) } + sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf)) + evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) } + sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf)) +} diff --git a/vendor/github.com/coreos/etcd/embed/util.go b/vendor/go.etcd.io/etcd/embed/util.go similarity index 96% rename from vendor/github.com/coreos/etcd/embed/util.go rename to vendor/go.etcd.io/etcd/embed/util.go index 168e03138..40f3ce9d5 100644 --- a/vendor/github.com/coreos/etcd/embed/util.go +++ b/vendor/go.etcd.io/etcd/embed/util.go @@ -17,7 +17,7 @@ package embed import ( "path/filepath" - "github.com/coreos/etcd/wal" + "go.etcd.io/etcd/wal" ) func isMemberInitialized(cfg *Config) bool { @@ -25,6 +25,5 @@ func isMemberInitialized(cfg *Config) bool { if waldir == "" { waldir = filepath.Join(cfg.Dir, "member", "wal") } - return wal.Exist(waldir) } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go b/vendor/go.etcd.io/etcd/etcdserver/api/capability.go similarity index 81% rename from vendor/github.com/coreos/etcd/etcdserver/api/capability.go rename to vendor/go.etcd.io/etcd/etcdserver/api/capability.go index eb34383d7..8b13f4742 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/capability.go @@ -17,7 +17,9 @@ package api import ( "sync" - "github.com/coreos/etcd/version" + "go.etcd.io/etcd/version" + "go.uber.org/zap" + "github.com/coreos/go-semver/semver" "github.com/coreos/pkg/capnslog" ) @@ -30,7 +32,7 @@ const ( ) var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver/api") // capabilityMaps is a static map of version to capability map. capabilityMaps = map[string]map[Capability]bool{ @@ -38,6 +40,7 @@ var ( "3.1.0": {AuthCapability: true, V3rpcCapability: true}, "3.2.0": {AuthCapability: true, V3rpcCapability: true}, "3.3.0": {AuthCapability: true, V3rpcCapability: true}, + "3.4.0": {AuthCapability: true, V3rpcCapability: true}, } enableMapMu sync.RWMutex @@ -55,7 +58,7 @@ func init() { } // UpdateCapability updates the enabledMap when the cluster version increases. 
-func UpdateCapability(v *semver.Version) { +func UpdateCapability(lg *zap.Logger, v *semver.Version) { if v == nil { // if recovered but version was never set by cluster return @@ -68,7 +71,15 @@ func UpdateCapability(v *semver.Version) { curVersion = v enabledMap = capabilityMaps[curVersion.String()] enableMapMu.Unlock() - plog.Infof("enabled capabilities for version %s", version.Cluster(v.String())) + + if lg != nil { + lg.Info( + "enabled capabilities for version", + zap.String("cluster-version", version.Cluster(v.String())), + ) + } else { + plog.Infof("enabled capabilities for version %s", version.Cluster(v.String())) + } } func IsCapabilityEnabled(c Capability) bool { diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go b/vendor/go.etcd.io/etcd/etcdserver/api/cluster.go similarity index 93% rename from vendor/github.com/coreos/etcd/etcdserver/api/cluster.go rename to vendor/go.etcd.io/etcd/etcdserver/api/cluster.go index 654c25804..901be9d85 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/cluster.go @@ -15,8 +15,8 @@ package api import ( - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" + "go.etcd.io/etcd/etcdserver/api/membership" + "go.etcd.io/etcd/pkg/types" "github.com/coreos/go-semver/semver" ) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/etcdserver/api/doc.go rename to vendor/go.etcd.io/etcd/etcdserver/api/doc.go diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go b/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/base.go similarity index 65% rename from vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go rename to vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/base.go index f0d3b0bd3..c9df62ea8 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/base.go @@ -21,17 +21,19 @@ import ( "net/http" "strings" - etcdErr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - "github.com/coreos/etcd/pkg/logutil" - "github.com/coreos/etcd/version" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api" + "go.etcd.io/etcd/etcdserver/api/v2error" + "go.etcd.io/etcd/etcdserver/api/v2http/httptypes" + "go.etcd.io/etcd/pkg/logutil" + "go.etcd.io/etcd/version" + "github.com/coreos/pkg/capnslog" + "go.uber.org/zap" ) var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/etcdhttp") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver/api/etcdhttp") mlog = logutil.NewMergeLogger(plog) ) @@ -45,7 +47,10 @@ const ( // that do not access the v2 store. 
func HandleBasic(mux *http.ServeMux, server etcdserver.ServerPeer) { mux.HandleFunc(varsPath, serveVars) + + // TODO: deprecate '/config/local/log' in v3.5 mux.HandleFunc(configPath+"/local/log", logHandleFunc) + HandleMetricsHealth(mux, server) mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion)) } @@ -78,6 +83,7 @@ func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) { w.Write(b) } +// TODO: deprecate '/config/local/log' in v3.5 func logHandleFunc(w http.ResponseWriter, r *http.Request) { if !allowMethod(w, r, "PUT") { return @@ -87,13 +93,13 @@ func logHandleFunc(w http.ResponseWriter, r *http.Request) { d := json.NewDecoder(r.Body) if err := d.Decode(&in); err != nil { - WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body")) + WriteError(nil, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body")) return } logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level)) if err != nil { - WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level)) + WriteError(nil, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level)) return } @@ -132,27 +138,66 @@ func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool { // WriteError logs and writes the given Error to the ResponseWriter // If Error is an etcdErr, it is rendered to the ResponseWriter // Otherwise, it is assumed to be a StatusInternalServerError -func WriteError(w http.ResponseWriter, r *http.Request, err error) { +func WriteError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) { if err == nil { return } switch e := err.(type) { - case *etcdErr.Error: + case *v2error.Error: e.WriteTo(w) + case *httptypes.HTTPError: if et := e.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + if lg != nil { + lg.Debug( + "failed to write v2 HTTP error", + zap.String("remote-addr", r.RemoteAddr), + zap.String("internal-server-error", e.Error()), + zap.Error(et), + ) + } else { + plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + } } + default: switch err { - case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy: - mlog.MergeError(err) + case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, + etcdserver.ErrUnhealthy: + if lg != nil { + lg.Warn( + "v2 response error", + zap.String("remote-addr", r.RemoteAddr), + zap.String("internal-server-error", err.Error()), + ) + } else { + mlog.MergeError(err) + } + default: - mlog.MergeErrorf("got unexpected response error (%v)", err) + if lg != nil { + lg.Warn( + "unexpected v2 response error", + zap.String("remote-addr", r.RemoteAddr), + zap.String("internal-server-error", err.Error()), + ) + } else { + mlog.MergeErrorf("got unexpected response error (%v)", err) + } } + herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error") if et := herr.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + if lg != nil { + lg.Debug( + "failed to write v2 HTTP error", + zap.String("remote-addr", r.RemoteAddr), + zap.String("internal-server-error", err.Error()), + zap.Error(et), + ) + } else { + plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + } } } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/doc.go 
b/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/doc.go rename to vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/doc.go diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/metrics.go similarity index 84% rename from vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/metrics.go rename to vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/metrics.go index 2f6a0a7b2..f455e40a7 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/metrics.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/metrics.go @@ -20,9 +20,9 @@ import ( "net/http" "time" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/raft" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/raft" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -50,7 +50,6 @@ func NewHealthHandler(hfunc func() Health) http.HandlerFunc { if r.Method != http.MethodGet { w.Header().Set("Allow", http.MethodGet) http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - plog.Warningf("/health error (status code %d)", http.StatusMethodNotAllowed) return } h := hfunc() @@ -98,15 +97,11 @@ func checkHealth(srv etcdserver.ServerV2) Health { as := srv.Alarms() if len(as) > 0 { h.Health = "false" - for _, v := range as { - plog.Warningf("/health error due to an alarm %s", v.String()) - } } if h.Health == "true" { if uint64(srv.Leader()) == raft.None { h.Health = "false" - plog.Warningf("/health error; no leader (status code %d)", http.StatusServiceUnavailable) } } @@ -116,13 +111,11 @@ func checkHealth(srv etcdserver.ServerV2) Health { cancel() if err != nil { h.Health = "false" - plog.Warningf("/health error; QGET failed %v (status code %d)", err, http.StatusServiceUnavailable) } } if h.Health == "true" { healthSuccess.Inc() - plog.Infof("/health OK (status code %d)", http.StatusOK) } else { healthFailed.Inc() } diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/peer.go b/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/peer.go new file mode 100644 index 000000000..6c61bf5d5 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/peer.go @@ -0,0 +1,159 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdhttp + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api" + "go.etcd.io/etcd/etcdserver/api/membership" + "go.etcd.io/etcd/etcdserver/api/rafthttp" + "go.etcd.io/etcd/lease/leasehttp" + "go.etcd.io/etcd/pkg/types" + + "go.uber.org/zap" +) + +const ( + peerMembersPath = "/members" + peerMemberPromotePrefix = "/members/promote/" +) + +// NewPeerHandler generates an http.Handler to handle etcd peer requests. 
+func NewPeerHandler(lg *zap.Logger, s etcdserver.ServerPeer) http.Handler { + return newPeerHandler(lg, s, s.RaftHandler(), s.LeaseHandler()) +} + +func newPeerHandler(lg *zap.Logger, s etcdserver.Server, raftHandler http.Handler, leaseHandler http.Handler) http.Handler { + peerMembersHandler := newPeerMembersHandler(lg, s.Cluster()) + peerMemberPromoteHandler := newPeerMemberPromoteHandler(lg, s) + + mux := http.NewServeMux() + mux.HandleFunc("/", http.NotFound) + mux.Handle(rafthttp.RaftPrefix, raftHandler) + mux.Handle(rafthttp.RaftPrefix+"/", raftHandler) + mux.Handle(peerMembersPath, peerMembersHandler) + mux.Handle(peerMemberPromotePrefix, peerMemberPromoteHandler) + if leaseHandler != nil { + mux.Handle(leasehttp.LeasePrefix, leaseHandler) + mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler) + } + mux.HandleFunc(versionPath, versionHandler(s.Cluster(), serveVersion)) + return mux +} + +func newPeerMembersHandler(lg *zap.Logger, cluster api.Cluster) http.Handler { + return &peerMembersHandler{ + lg: lg, + cluster: cluster, + } +} + +type peerMembersHandler struct { + lg *zap.Logger + cluster api.Cluster +} + +func newPeerMemberPromoteHandler(lg *zap.Logger, s etcdserver.Server) http.Handler { + return &peerMemberPromoteHandler{ + lg: lg, + cluster: s.Cluster(), + server: s, + } +} + +type peerMemberPromoteHandler struct { + lg *zap.Logger + cluster api.Cluster + server etcdserver.Server +} + +func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r, "GET") { + return + } + w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) + + if r.URL.Path != peerMembersPath { + http.Error(w, "bad path", http.StatusBadRequest) + return + } + ms := h.cluster.Members() + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(ms); err != nil { + if h.lg != nil { + h.lg.Warn("failed to encode membership members", zap.Error(err)) + } else { + plog.Warningf("failed to encode members response (%v)", err) + } + } +} + +func (h *peerMemberPromoteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r, "POST") { + return + } + w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) + + if !strings.HasPrefix(r.URL.Path, peerMemberPromotePrefix) { + http.Error(w, "bad path", http.StatusBadRequest) + return + } + idStr := strings.TrimPrefix(r.URL.Path, peerMemberPromotePrefix) + id, err := strconv.ParseUint(idStr, 10, 64) + if err != nil { + http.Error(w, fmt.Sprintf("member %s not found in cluster", idStr), http.StatusNotFound) + return + } + + resp, err := h.server.PromoteMember(r.Context(), id) + if err != nil { + switch err { + case membership.ErrIDNotFound: + http.Error(w, err.Error(), http.StatusNotFound) + case membership.ErrMemberNotLearner: + http.Error(w, err.Error(), http.StatusPreconditionFailed) + case etcdserver.ErrLearnerNotReady: + http.Error(w, err.Error(), http.StatusPreconditionFailed) + default: + WriteError(h.lg, w, r, err) + } + if h.lg != nil { + h.lg.Warn( + "failed to promote a member", + zap.String("member-id", types.ID(id).String()), + zap.Error(err), + ) + } else { + plog.Errorf("error promoting member %s (%v)", types.ID(id).String(), err) + } + return + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + if err := json.NewEncoder(w).Encode(resp); err != nil { + if h.lg != nil { + h.lg.Warn("failed to encode members response", zap.Error(err)) + } else { + plog.Warningf("failed to encode members response (%v)", err) + } 
+ } +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/membership/cluster.go b/vendor/go.etcd.io/etcd/etcdserver/api/membership/cluster.go new file mode 100644 index 000000000..b1a011b50 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/api/membership/cluster.go @@ -0,0 +1,840 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package membership + +import ( + "bytes" + "context" + "crypto/sha1" + "encoding/binary" + "encoding/json" + "fmt" + "path" + "sort" + "strings" + "sync" + "time" + + "go.etcd.io/etcd/etcdserver/api/v2store" + "go.etcd.io/etcd/mvcc/backend" + "go.etcd.io/etcd/pkg/netutil" + "go.etcd.io/etcd/pkg/types" + "go.etcd.io/etcd/raft" + "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/version" + + "github.com/coreos/go-semver/semver" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" +) + +const maxLearners = 1 + +// RaftCluster is a list of Members that belong to the same raft cluster +type RaftCluster struct { + lg *zap.Logger + + localID types.ID + cid types.ID + token string + + v2store v2store.Store + be backend.Backend + + sync.Mutex // guards the fields below + version *semver.Version + members map[types.ID]*Member + // removed contains the ids of removed members in the cluster. + // removed id cannot be reused. + removed map[types.ID]bool +} + +// ConfigChangeContext represents a context for confChange. +type ConfigChangeContext struct { + Member + // IsPromote indicates if the config change is for promoting a learner member. + // This flag is needed because both adding a new member and promoting a learner member + // uses the same config change type 'ConfChangeAddNode'. + IsPromote bool `json:"isPromote"` +} + +// NewClusterFromURLsMap creates a new raft cluster using provided urls map. Currently, it does not support creating +// cluster with raft learner member. 
+func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap) (*RaftCluster, error) { + c := NewCluster(lg, token) + for name, urls := range urlsmap { + m := NewMember(name, urls, token, nil) + if _, ok := c.members[m.ID]; ok { + return nil, fmt.Errorf("member exists with identical ID %v", m) + } + if uint64(m.ID) == raft.None { + return nil, fmt.Errorf("cannot use %x as member id", raft.None) + } + c.members[m.ID] = m + } + c.genID() + return c, nil +} + +func NewClusterFromMembers(lg *zap.Logger, token string, id types.ID, membs []*Member) *RaftCluster { + c := NewCluster(lg, token) + c.cid = id + for _, m := range membs { + c.members[m.ID] = m + } + return c +} + +func NewCluster(lg *zap.Logger, token string) *RaftCluster { + return &RaftCluster{ + lg: lg, + token: token, + members: make(map[types.ID]*Member), + removed: make(map[types.ID]bool), + } +} + +func (c *RaftCluster) ID() types.ID { return c.cid } + +func (c *RaftCluster) Members() []*Member { + c.Lock() + defer c.Unlock() + var ms MembersByID + for _, m := range c.members { + ms = append(ms, m.Clone()) + } + sort.Sort(ms) + return []*Member(ms) +} + +func (c *RaftCluster) Member(id types.ID) *Member { + c.Lock() + defer c.Unlock() + return c.members[id].Clone() +} + +func (c *RaftCluster) VotingMembers() []*Member { + c.Lock() + defer c.Unlock() + var ms MembersByID + for _, m := range c.members { + if !m.IsLearner { + ms = append(ms, m.Clone()) + } + } + sort.Sort(ms) + return []*Member(ms) +} + +// MemberByName returns a Member with the given name if exists. +// If more than one member has the given name, it will panic. +func (c *RaftCluster) MemberByName(name string) *Member { + c.Lock() + defer c.Unlock() + var memb *Member + for _, m := range c.members { + if m.Name == name { + if memb != nil { + if c.lg != nil { + c.lg.Panic("two member with same name found", zap.String("name", name)) + } else { + plog.Panicf("two members with the given name %q exist", name) + } + } + memb = m + } + } + return memb.Clone() +} + +func (c *RaftCluster) MemberIDs() []types.ID { + c.Lock() + defer c.Unlock() + var ids []types.ID + for _, m := range c.members { + ids = append(ids, m.ID) + } + sort.Sort(types.IDSlice(ids)) + return ids +} + +func (c *RaftCluster) IsIDRemoved(id types.ID) bool { + c.Lock() + defer c.Unlock() + return c.removed[id] +} + +// PeerURLs returns a list of all peer addresses. +// The returned list is sorted in ascending lexicographical order. +func (c *RaftCluster) PeerURLs() []string { + c.Lock() + defer c.Unlock() + urls := make([]string, 0) + for _, p := range c.members { + urls = append(urls, p.PeerURLs...) + } + sort.Strings(urls) + return urls +} + +// ClientURLs returns a list of all client addresses. +// The returned list is sorted in ascending lexicographical order. +func (c *RaftCluster) ClientURLs() []string { + c.Lock() + defer c.Unlock() + urls := make([]string, 0) + for _, p := range c.members { + urls = append(urls, p.ClientURLs...) 
+ } + sort.Strings(urls) + return urls +} + +func (c *RaftCluster) String() string { + c.Lock() + defer c.Unlock() + b := &bytes.Buffer{} + fmt.Fprintf(b, "{ClusterID:%s ", c.cid) + var ms []string + for _, m := range c.members { + ms = append(ms, fmt.Sprintf("%+v", m)) + } + fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " ")) + var ids []string + for id := range c.removed { + ids = append(ids, id.String()) + } + fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " ")) + return b.String() +} + +func (c *RaftCluster) genID() { + mIDs := c.MemberIDs() + b := make([]byte, 8*len(mIDs)) + for i, id := range mIDs { + binary.BigEndian.PutUint64(b[8*i:], uint64(id)) + } + hash := sha1.Sum(b) + c.cid = types.ID(binary.BigEndian.Uint64(hash[:8])) +} + +func (c *RaftCluster) SetID(localID, cid types.ID) { + c.localID = localID + c.cid = cid +} + +func (c *RaftCluster) SetStore(st v2store.Store) { c.v2store = st } + +func (c *RaftCluster) SetBackend(be backend.Backend) { + c.be = be + mustCreateBackendBuckets(c.be) +} + +func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) { + c.Lock() + defer c.Unlock() + + c.members, c.removed = membersFromStore(c.lg, c.v2store) + c.version = clusterVersionFromStore(c.lg, c.v2store) + mustDetectDowngrade(c.lg, c.version) + onSet(c.lg, c.version) + + for _, m := range c.members { + if c.lg != nil { + c.lg.Info( + "recovered/added member from store", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + zap.String("recovered-remote-peer-id", m.ID.String()), + zap.Strings("recovered-remote-peer-urls", m.PeerURLs), + ) + } else { + plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.cid) + } + } + if c.version != nil { + if c.lg != nil { + c.lg.Info( + "set cluster version from store", + zap.String("cluster-version", version.Cluster(c.version.String())), + ) + } else { + plog.Infof("set the cluster version to %v from store", version.Cluster(c.version.String())) + } + } +} + +// ValidateConfigurationChange takes a proposed ConfChange and +// ensures that it is still valid. 
+func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error { + members, removed := membersFromStore(c.lg, c.v2store) + id := types.ID(cc.NodeID) + if removed[id] { + return ErrIDRemoved + } + switch cc.Type { + case raftpb.ConfChangeAddNode, raftpb.ConfChangeAddLearnerNode: + confChangeContext := new(ConfigChangeContext) + if err := json.Unmarshal(cc.Context, confChangeContext); err != nil { + if c.lg != nil { + c.lg.Panic("failed to unmarshal confChangeContext", zap.Error(err)) + } else { + plog.Panicf("unmarshal confChangeContext should never fail: %v", err) + } + } + + if confChangeContext.IsPromote { // promoting a learner member to voting member + if members[id] == nil { + return ErrIDNotFound + } + if !members[id].IsLearner { + return ErrMemberNotLearner + } + } else { // adding a new member + if members[id] != nil { + return ErrIDExists + } + + urls := make(map[string]bool) + for _, m := range members { + for _, u := range m.PeerURLs { + urls[u] = true + } + } + for _, u := range confChangeContext.Member.PeerURLs { + if urls[u] { + return ErrPeerURLexists + } + } + + if confChangeContext.Member.IsLearner { // the new member is a learner + numLearners := 0 + for _, m := range members { + if m.IsLearner { + numLearners++ + } + } + if numLearners+1 > maxLearners { + return ErrTooManyLearners + } + } + } + case raftpb.ConfChangeRemoveNode: + if members[id] == nil { + return ErrIDNotFound + } + + case raftpb.ConfChangeUpdateNode: + if members[id] == nil { + return ErrIDNotFound + } + urls := make(map[string]bool) + for _, m := range members { + if m.ID == id { + continue + } + for _, u := range m.PeerURLs { + urls[u] = true + } + } + m := new(Member) + if err := json.Unmarshal(cc.Context, m); err != nil { + if c.lg != nil { + c.lg.Panic("failed to unmarshal member", zap.Error(err)) + } else { + plog.Panicf("unmarshal member should never fail: %v", err) + } + } + for _, u := range m.PeerURLs { + if urls[u] { + return ErrPeerURLexists + } + } + + default: + if c.lg != nil { + c.lg.Panic("unknown ConfChange type", zap.String("type", cc.Type.String())) + } else { + plog.Panicf("ConfChange type should be either AddNode, RemoveNode or UpdateNode") + } + } + return nil +} + +// AddMember adds a new Member into the cluster, and saves the given member's +// raftAttributes into the store. The given member should have empty attributes. +// A Member with a matching id must not exist. +func (c *RaftCluster) AddMember(m *Member) { + c.Lock() + defer c.Unlock() + if c.v2store != nil { + mustSaveMemberToStore(c.v2store, m) + } + if c.be != nil { + mustSaveMemberToBackend(c.be, m) + } + + c.members[m.ID] = m + + if c.lg != nil { + c.lg.Info( + "added member", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + zap.String("added-peer-id", m.ID.String()), + zap.Strings("added-peer-peer-urls", m.PeerURLs), + ) + } else { + plog.Infof("added member %s %v to cluster %s", m.ID, m.PeerURLs, c.cid) + } +} + +// RemoveMember removes a member from the store. +// The given id MUST exist, or the function panics. 
+func (c *RaftCluster) RemoveMember(id types.ID) { + c.Lock() + defer c.Unlock() + if c.v2store != nil { + mustDeleteMemberFromStore(c.v2store, id) + } + if c.be != nil { + mustDeleteMemberFromBackend(c.be, id) + } + + m, ok := c.members[id] + delete(c.members, id) + c.removed[id] = true + + if c.lg != nil { + if ok { + c.lg.Info( + "removed member", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + zap.String("removed-remote-peer-id", id.String()), + zap.Strings("removed-remote-peer-urls", m.PeerURLs), + ) + } else { + c.lg.Warn( + "skipped removing already removed member", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + zap.String("removed-remote-peer-id", id.String()), + ) + } + } else { + plog.Infof("removed member %s from cluster %s", id, c.cid) + } +} + +func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) { + c.Lock() + defer c.Unlock() + + if m, ok := c.members[id]; ok { + m.Attributes = attr + if c.v2store != nil { + mustUpdateMemberAttrInStore(c.v2store, m) + } + if c.be != nil { + mustSaveMemberToBackend(c.be, m) + } + return + } + + _, ok := c.removed[id] + if !ok { + if c.lg != nil { + c.lg.Panic( + "failed to update; member unknown", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + zap.String("unknown-remote-peer-id", id.String()), + ) + } else { + plog.Panicf("error updating attributes of unknown member %s", id) + } + } + + if c.lg != nil { + c.lg.Warn( + "skipped attributes update of removed member", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + zap.String("updated-peer-id", id.String()), + ) + } else { + plog.Warningf("skipped updating attributes of removed member %s", id) + } +} + +// PromoteMember marks the member's IsLearner RaftAttributes to false. 
+func (c *RaftCluster) PromoteMember(id types.ID) { + c.Lock() + defer c.Unlock() + + c.members[id].RaftAttributes.IsLearner = false + if c.v2store != nil { + mustUpdateMemberInStore(c.v2store, c.members[id]) + } + if c.be != nil { + mustSaveMemberToBackend(c.be, c.members[id]) + } + + if c.lg != nil { + c.lg.Info( + "promote member", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + ) + } else { + plog.Noticef("promote member %s in cluster %s", id, c.cid) + } +} + +func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) { + c.Lock() + defer c.Unlock() + + c.members[id].RaftAttributes = raftAttr + if c.v2store != nil { + mustUpdateMemberInStore(c.v2store, c.members[id]) + } + if c.be != nil { + mustSaveMemberToBackend(c.be, c.members[id]) + } + + if c.lg != nil { + c.lg.Info( + "updated member", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + zap.String("updated-remote-peer-id", id.String()), + zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs), + ) + } else { + plog.Noticef("updated member %s %v in cluster %s", id, raftAttr.PeerURLs, c.cid) + } +} + +func (c *RaftCluster) Version() *semver.Version { + c.Lock() + defer c.Unlock() + if c.version == nil { + return nil + } + return semver.Must(semver.NewVersion(c.version.String())) +} + +func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *semver.Version)) { + c.Lock() + defer c.Unlock() + if c.version != nil { + if c.lg != nil { + c.lg.Info( + "updated cluster version", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + zap.String("from", version.Cluster(c.version.String())), + zap.String("from", version.Cluster(ver.String())), + ) + } else { + plog.Noticef("updated the cluster version from %v to %v", version.Cluster(c.version.String()), version.Cluster(ver.String())) + } + } else { + if c.lg != nil { + c.lg.Info( + "set initial cluster version", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + zap.String("cluster-version", version.Cluster(ver.String())), + ) + } else { + plog.Noticef("set the initial cluster version to %v", version.Cluster(ver.String())) + } + } + oldVer := c.version + c.version = ver + mustDetectDowngrade(c.lg, c.version) + if c.v2store != nil { + mustSaveClusterVersionToStore(c.v2store, ver) + } + if c.be != nil { + mustSaveClusterVersionToBackend(c.be, ver) + } + if oldVer != nil { + ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(oldVer.String())}).Set(0) + } + ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(ver.String())}).Set(1) + onSet(c.lg, ver) +} + +func (c *RaftCluster) IsReadyToAddVotingMember() bool { + nmembers := 1 + nstarted := 0 + + for _, member := range c.VotingMembers() { + if member.IsStarted() { + nstarted++ + } + nmembers++ + } + + if nstarted == 1 && nmembers == 2 { + // a case of adding a new node to 1-member cluster for restoring cluster data + // https://github.com/etcd-io/etcd/blob/master/Documentation/v2/admin_guide.md#restoring-the-cluster + if c.lg != nil { + c.lg.Debug("number of started member is 1; can accept add member request") + } else { + plog.Debugf("The number of started member is 1. 
This cluster can accept add member request.") + } + return true + } + + nquorum := nmembers/2 + 1 + if nstarted < nquorum { + if c.lg != nil { + c.lg.Warn( + "rejecting member add; started member will be less than quorum", + zap.Int("number-of-started-member", nstarted), + zap.Int("quorum", nquorum), + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + ) + } else { + plog.Warningf("Reject add member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum) + } + return false + } + + return true +} + +func (c *RaftCluster) IsReadyToRemoveVotingMember(id uint64) bool { + nmembers := 0 + nstarted := 0 + + for _, member := range c.VotingMembers() { + if uint64(member.ID) == id { + continue + } + + if member.IsStarted() { + nstarted++ + } + nmembers++ + } + + nquorum := nmembers/2 + 1 + if nstarted < nquorum { + if c.lg != nil { + c.lg.Warn( + "rejecting member remove; started member will be less than quorum", + zap.Int("number-of-started-member", nstarted), + zap.Int("quorum", nquorum), + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + ) + } else { + plog.Warningf("Reject remove member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum) + } + return false + } + + return true +} + +func (c *RaftCluster) IsReadyToPromoteMember(id uint64) bool { + nmembers := 1 + nstarted := 0 + + for _, member := range c.VotingMembers() { + if member.IsStarted() { + nstarted++ + } + nmembers++ + } + + nquorum := nmembers/2 + 1 + if nstarted < nquorum { + if c.lg != nil { + c.lg.Warn( + "rejecting member promote; started member will be less than quorum", + zap.Int("number-of-started-member", nstarted), + zap.Int("quorum", nquorum), + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + ) + } else { + plog.Warningf("Reject promote member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum) + } + return false + } + + return true +} + +func membersFromStore(lg *zap.Logger, st v2store.Store) (map[types.ID]*Member, map[types.ID]bool) { + members := make(map[types.ID]*Member) + removed := make(map[types.ID]bool) + e, err := st.Get(StoreMembersPrefix, true, true) + if err != nil { + if isKeyNotFound(err) { + return members, removed + } + if lg != nil { + lg.Panic("failed to get members from store", zap.String("path", StoreMembersPrefix), zap.Error(err)) + } else { + plog.Panicf("get storeMembers should never fail: %v", err) + } + } + for _, n := range e.Node.Nodes { + var m *Member + m, err = nodeToMember(n) + if err != nil { + if lg != nil { + lg.Panic("failed to nodeToMember", zap.Error(err)) + } else { + plog.Panicf("nodeToMember should never fail: %v", err) + } + } + members[m.ID] = m + } + + e, err = st.Get(storeRemovedMembersPrefix, true, true) + if err != nil { + if isKeyNotFound(err) { + return members, removed + } + if lg != nil { + lg.Panic( + "failed to get removed members from store", + zap.String("path", storeRemovedMembersPrefix), + zap.Error(err), + ) + } else { + plog.Panicf("get storeRemovedMembers should never fail: %v", err) + } + } + for _, n := range e.Node.Nodes { + removed[MustParseMemberIDFromKey(n.Key)] = true + } + return members, removed +} + +func clusterVersionFromStore(lg *zap.Logger, st v2store.Store) *semver.Version { + e, err := st.Get(path.Join(storePrefix, 
"version"), false, false) + if err != nil { + if isKeyNotFound(err) { + return nil + } + if lg != nil { + lg.Panic( + "failed to get cluster version from store", + zap.String("path", path.Join(storePrefix, "version")), + zap.Error(err), + ) + } else { + plog.Panicf("unexpected error (%v) when getting cluster version from store", err) + } + } + return semver.Must(semver.NewVersion(*e.Node.Value)) +} + +// ValidateClusterAndAssignIDs validates the local cluster by matching the PeerURLs +// with the existing cluster. If the validation succeeds, it assigns the IDs +// from the existing cluster to the local cluster. +// If the validation fails, an error will be returned. +func ValidateClusterAndAssignIDs(lg *zap.Logger, local *RaftCluster, existing *RaftCluster) error { + ems := existing.Members() + lms := local.Members() + if len(ems) != len(lms) { + return fmt.Errorf("member count is unequal") + } + sort.Sort(MembersByPeerURLs(ems)) + sort.Sort(MembersByPeerURLs(lms)) + + ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) + defer cancel() + for i := range ems { + if ok, err := netutil.URLStringsEqual(ctx, lg, ems[i].PeerURLs, lms[i].PeerURLs); !ok { + return fmt.Errorf("unmatched member while checking PeerURLs (%v)", err) + } + lms[i].ID = ems[i].ID + } + local.members = make(map[types.ID]*Member) + for _, m := range lms { + local.members[m.ID] = m + } + return nil +} + +func mustDetectDowngrade(lg *zap.Logger, cv *semver.Version) { + lv := semver.Must(semver.NewVersion(version.Version)) + // only keep major.minor version for comparison against cluster version + lv = &semver.Version{Major: lv.Major, Minor: lv.Minor} + if cv != nil && lv.LessThan(*cv) { + if lg != nil { + lg.Fatal( + "invalid downgrade; server version is lower than determined cluster version", + zap.String("current-server-version", version.Version), + zap.String("determined-cluster-version", version.Cluster(cv.String())), + ) + } else { + plog.Fatalf("cluster cannot be downgraded (current version: %s is lower than determined cluster version: %s).", version.Version, version.Cluster(cv.String())) + } + } +} + +// IsLocalMemberLearner returns if the local member is raft learner +func (c *RaftCluster) IsLocalMemberLearner() bool { + c.Lock() + defer c.Unlock() + localMember, ok := c.members[c.localID] + if !ok { + if c.lg != nil { + c.lg.Panic( + "failed to find local ID in cluster members", + zap.String("cluster-id", c.cid.String()), + zap.String("local-member-id", c.localID.String()), + ) + } else { + plog.Panicf("failed to find local ID %s in cluster %s", c.localID.String(), c.cid.String()) + } + } + return localMember.IsLearner +} + +// IsMemberExist returns if the member with the given id exists in cluster. +func (c *RaftCluster) IsMemberExist(id types.ID) bool { + c.Lock() + defer c.Unlock() + _, ok := c.members[id] + return ok +} + +// VotingMemberIDs returns the ID of voting members in cluster. 
+func (c *RaftCluster) VotingMemberIDs() []types.ID { + c.Lock() + defer c.Unlock() + var ids []types.ID + for _, m := range c.members { + if !m.IsLearner { + ids = append(ids, m.ID) + } + } + sort.Sort(types.IDSlice(ids)) + return ids +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/membership/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/etcdserver/membership/doc.go rename to vendor/go.etcd.io/etcd/etcdserver/api/membership/doc.go diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go b/vendor/go.etcd.io/etcd/etcdserver/api/membership/errors.go similarity index 55% rename from vendor/github.com/coreos/etcd/etcdserver/membership/errors.go rename to vendor/go.etcd.io/etcd/etcdserver/api/membership/errors.go index e4d36af25..8f6fe504e 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/membership/errors.go @@ -17,17 +17,19 @@ package membership import ( "errors" - etcdErr "github.com/coreos/etcd/error" + "go.etcd.io/etcd/etcdserver/api/v2error" ) var ( - ErrIDRemoved = errors.New("membership: ID removed") - ErrIDExists = errors.New("membership: ID exists") - ErrIDNotFound = errors.New("membership: ID not found") - ErrPeerURLexists = errors.New("membership: peerURL exists") + ErrIDRemoved = errors.New("membership: ID removed") + ErrIDExists = errors.New("membership: ID exists") + ErrIDNotFound = errors.New("membership: ID not found") + ErrPeerURLexists = errors.New("membership: peerURL exists") + ErrMemberNotLearner = errors.New("membership: can only promote a learner member") + ErrTooManyLearners = errors.New("membership: too many learner members in cluster") ) func isKeyNotFound(err error) bool { - e, ok := err.(*etcdErr.Error) - return ok && e.ErrorCode == etcdErr.EcodeKeyNotFound + e, ok := err.(*v2error.Error) + return ok && e.ErrorCode == v2error.EcodeKeyNotFound } diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/member.go b/vendor/go.etcd.io/etcd/etcdserver/api/membership/member.go similarity index 76% rename from vendor/github.com/coreos/etcd/etcdserver/membership/member.go rename to vendor/go.etcd.io/etcd/etcdserver/api/membership/member.go index 6de74d26f..896cb36aa 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/member.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/membership/member.go @@ -22,12 +22,12 @@ import ( "sort" "time" - "github.com/coreos/etcd/pkg/types" "github.com/coreos/pkg/capnslog" + "go.etcd.io/etcd/pkg/types" ) var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/membership") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd/v3", "etcdserver/membership") ) // RaftAttributes represents the raft related attributes of an etcd member. @@ -35,6 +35,8 @@ type RaftAttributes struct { // PeerURLs is the list of peers in the raft cluster. // TODO(philips): ensure these are URLs PeerURLs []string `json:"peerURLs"` + // IsLearner indicates if the member is raft learner. + IsLearner bool `json:"isLearner,omitempty"` } // Attributes represents all the non-raft related attributes of an etcd member. @@ -52,9 +54,22 @@ type Member struct { // NewMember creates a Member without an ID and generates one based on the // cluster name, peer URLs, and time. This is used for bootstrapping/adding new member. 
func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member { + return newMember(name, peerURLs, clusterName, now, false) +} + +// NewMemberAsLearner creates a learner Member without an ID and generates one based on the +// cluster name, peer URLs, and time. This is used for adding new learner member. +func NewMemberAsLearner(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member { + return newMember(name, peerURLs, clusterName, now, true) +} + +func newMember(name string, peerURLs types.URLs, clusterName string, now *time.Time, isLearner bool) *Member { m := &Member{ - RaftAttributes: RaftAttributes{PeerURLs: peerURLs.StringSlice()}, - Attributes: Attributes{Name: name}, + RaftAttributes: RaftAttributes{ + PeerURLs: peerURLs.StringSlice(), + IsLearner: isLearner, + }, + Attributes: Attributes{Name: name}, } var b []byte @@ -77,7 +92,7 @@ func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.T // It will panic if there is no PeerURLs available in Member. func (m *Member) PickPeerURL() string { if len(m.PeerURLs) == 0 { - plog.Panicf("member should always have some peer url") + panic("member should always have some peer url") } return m.PeerURLs[rand.Intn(len(m.PeerURLs))] } @@ -88,6 +103,9 @@ func (m *Member) Clone() *Member { } mm := &Member{ ID: m.ID, + RaftAttributes: RaftAttributes{ + IsLearner: m.IsLearner, + }, Attributes: Attributes{ Name: m.Name, }, diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/api/membership/metrics.go similarity index 100% rename from vendor/github.com/coreos/etcd/etcdserver/membership/metrics.go rename to vendor/go.etcd.io/etcd/etcdserver/api/membership/metrics.go diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go b/vendor/go.etcd.io/etcd/etcdserver/api/membership/store.go similarity index 84% rename from vendor/github.com/coreos/etcd/etcdserver/membership/store.go rename to vendor/go.etcd.io/etcd/etcdserver/api/membership/store.go index d3f8f2474..14ab1190e 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/membership/store.go @@ -19,9 +19,9 @@ import ( "fmt" "path" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/store" + "go.etcd.io/etcd/etcdserver/api/v2store" + "go.etcd.io/etcd/mvcc/backend" + "go.etcd.io/etcd/pkg/types" "github.com/coreos/go-semver/semver" ) @@ -75,57 +75,57 @@ func mustSaveClusterVersionToBackend(be backend.Backend, ver *semver.Version) { tx.UnsafePut(clusterBucketName, ckey, []byte(ver.String())) } -func mustSaveMemberToStore(s store.Store, m *Member) { +func mustSaveMemberToStore(s v2store.Store, m *Member) { b, err := json.Marshal(m.RaftAttributes) if err != nil { plog.Panicf("marshal raftAttributes should never fail: %v", err) } p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) - if _, err := s.Create(p, false, string(b), false, store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil { + if _, err := s.Create(p, false, string(b), false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { plog.Panicf("create raftAttributes should never fail: %v", err) } } -func mustDeleteMemberFromStore(s store.Store, id types.ID) { +func mustDeleteMemberFromStore(s v2store.Store, id types.ID) { if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil { plog.Panicf("delete member should never fail: %v", err) } - if _, err := 
s.Create(RemovedMemberStoreKey(id), false, "", false, store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil { + if _, err := s.Create(RemovedMemberStoreKey(id), false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { plog.Panicf("create removedMember should never fail: %v", err) } } -func mustUpdateMemberInStore(s store.Store, m *Member) { +func mustUpdateMemberInStore(s v2store.Store, m *Member) { b, err := json.Marshal(m.RaftAttributes) if err != nil { plog.Panicf("marshal raftAttributes should never fail: %v", err) } p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix) - if _, err := s.Update(p, string(b), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil { + if _, err := s.Update(p, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { plog.Panicf("update raftAttributes should never fail: %v", err) } } -func mustUpdateMemberAttrInStore(s store.Store, m *Member) { +func mustUpdateMemberAttrInStore(s v2store.Store, m *Member) { b, err := json.Marshal(m.Attributes) if err != nil { plog.Panicf("marshal raftAttributes should never fail: %v", err) } p := path.Join(MemberStoreKey(m.ID), attributesSuffix) - if _, err := s.Set(p, false, string(b), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil { + if _, err := s.Set(p, false, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { plog.Panicf("update raftAttributes should never fail: %v", err) } } -func mustSaveClusterVersionToStore(s store.Store, ver *semver.Version) { - if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil { +func mustSaveClusterVersionToStore(s v2store.Store, ver *semver.Version) { + if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil { plog.Panicf("save cluster version should never fail: %v", err) } } // nodeToMember builds member from a key value node. // the child nodes of the given node MUST be sorted by key. -func nodeToMember(n *store.NodeExtern) (*Member, error) { +func nodeToMember(n *v2store.NodeExtern) (*Member, error) { m := &Member{ID: MustParseMemberIDFromKey(n.Key)} attrs := make(map[string][]byte) raftAttrKey := path.Join(n.Key, raftAttributesSuffix) diff --git a/vendor/github.com/coreos/etcd/rafthttp/coder.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/coder.go similarity index 95% rename from vendor/github.com/coreos/etcd/rafthttp/coder.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/coder.go index 86ede972e..12c3e4424 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/coder.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/coder.go @@ -14,7 +14,7 @@ package rafthttp -import "github.com/coreos/etcd/raft/raftpb" +import "go.etcd.io/etcd/raft/raftpb" type encoder interface { // encode encodes the given message to an output stream. 
diff --git a/vendor/github.com/coreos/etcd/rafthttp/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/rafthttp/doc.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/doc.go diff --git a/vendor/github.com/coreos/etcd/rafthttp/http.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/http.go similarity index 53% rename from vendor/github.com/coreos/etcd/rafthttp/http.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/http.go index fbb96d48b..d0e0c81e2 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/http.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/http.go @@ -24,12 +24,14 @@ import ( "strings" "time" - pioutil "github.com/coreos/etcd/pkg/ioutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" - "github.com/coreos/etcd/version" - "github.com/dustin/go-humanize" + "go.etcd.io/etcd/etcdserver/api/snap" + pioutil "go.etcd.io/etcd/pkg/ioutil" + "go.etcd.io/etcd/pkg/types" + "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/version" + + humanize "github.com/dustin/go-humanize" + "go.uber.org/zap" ) const ( @@ -61,9 +63,11 @@ type writerToResponse interface { } type pipelineHandler struct { - tr Transporter - r Raft - cid types.ID + lg *zap.Logger + localID types.ID + tr Transporter + r Raft + cid types.ID } // newPipelineHandler returns a handler for handling raft messages @@ -71,11 +75,13 @@ type pipelineHandler struct { // // The handler reads out the raft message from request body, // and forwards it to the given raft state machine for processing. -func newPipelineHandler(tr Transporter, r Raft, cid types.ID) http.Handler { +func newPipelineHandler(t *Transport, r Raft, cid types.ID) http.Handler { return &pipelineHandler{ - tr: tr, - r: r, - cid: cid, + lg: t.Logger, + localID: t.ID, + tr: t, + r: r, + cid: cid, } } @@ -88,7 +94,7 @@ func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.Header().Set("X-Etcd-Cluster-ID", h.cid.String()) - if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil { + if err := checkClusterCompatibilityFromHeader(h.lg, h.localID, r.Header, h.cid); err != nil { http.Error(w, err.Error(), http.StatusPreconditionFailed) return } @@ -100,7 +106,15 @@ func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { limitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte) b, err := ioutil.ReadAll(limitedr) if err != nil { - plog.Errorf("failed to read raft message (%v)", err) + if h.lg != nil { + h.lg.Warn( + "failed to read Raft message", + zap.String("local-member-id", h.localID.String()), + zap.Error(err), + ) + } else { + plog.Errorf("failed to read raft message (%v)", err) + } http.Error(w, "error reading raft message", http.StatusBadRequest) recvFailures.WithLabelValues(r.RemoteAddr).Inc() return @@ -108,8 +122,16 @@ func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var m raftpb.Message if err := m.Unmarshal(b); err != nil { - plog.Errorf("failed to unmarshal raft message (%v)", err) - http.Error(w, "error unmarshaling raft message", http.StatusBadRequest) + if h.lg != nil { + h.lg.Warn( + "failed to unmarshal Raft message", + zap.String("local-member-id", h.localID.String()), + zap.Error(err), + ) + } else { + plog.Errorf("failed to unmarshal raft message (%v)", err) + } + http.Error(w, "error unmarshalling raft message", http.StatusBadRequest) recvFailures.WithLabelValues(r.RemoteAddr).Inc() return } 
@@ -121,7 +143,15 @@ func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { case writerToResponse: v.WriteTo(w) default: - plog.Warningf("failed to process raft message (%v)", err) + if h.lg != nil { + h.lg.Warn( + "failed to process Raft message", + zap.String("local-member-id", h.localID.String()), + zap.Error(err), + ) + } else { + plog.Warningf("failed to process raft message (%v)", err) + } http.Error(w, "error processing raft message", http.StatusInternalServerError) w.(http.Flusher).Flush() // disconnect the http stream @@ -136,17 +166,22 @@ func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type snapshotHandler struct { + lg *zap.Logger tr Transporter r Raft snapshotter *snap.Snapshotter - cid types.ID + + localID types.ID + cid types.ID } -func newSnapshotHandler(tr Transporter, r Raft, snapshotter *snap.Snapshotter, cid types.ID) http.Handler { +func newSnapshotHandler(t *Transport, r Raft, snapshotter *snap.Snapshotter, cid types.ID) http.Handler { return &snapshotHandler{ - tr: tr, + lg: t.Logger, + tr: t, r: r, snapshotter: snapshotter, + localID: t.ID, cid: cid, } } @@ -174,7 +209,7 @@ func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.Header().Set("X-Etcd-Cluster-ID", h.cid.String()) - if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil { + if err := checkClusterCompatibilityFromHeader(h.lg, h.localID, r.Header, h.cid); err != nil { http.Error(w, err.Error(), http.StatusPreconditionFailed) snapshotReceiveFailures.WithLabelValues(unknownSnapshotSender).Inc() return @@ -188,19 +223,36 @@ func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { from := types.ID(m.From).String() if err != nil { msg := fmt.Sprintf("failed to decode raft message (%v)", err) - plog.Errorf(msg) + if h.lg != nil { + h.lg.Warn( + "failed to decode Raft message", + zap.String("local-member-id", h.localID.String()), + zap.String("remote-snapshot-sender-id", from), + zap.Error(err), + ) + } else { + plog.Error(msg) + } http.Error(w, msg, http.StatusBadRequest) recvFailures.WithLabelValues(r.RemoteAddr).Inc() snapshotReceiveFailures.WithLabelValues(from).Inc() return } - msgSizeVal := m.Size() - msgSize := humanize.Bytes(uint64(msgSizeVal)) - receivedBytes.WithLabelValues(from).Add(float64(msgSizeVal)) + msgSize := m.Size() + receivedBytes.WithLabelValues(from).Add(float64(msgSize)) if m.Type != raftpb.MsgSnap { - plog.Errorf("unexpected raft message type %s on snapshot path", m.Type) + if h.lg != nil { + h.lg.Warn( + "unexpected Raft message type", + zap.String("local-member-id", h.localID.String()), + zap.String("remote-snapshot-sender-id", from), + zap.String("message-type", m.Type.String()), + ) + } else { + plog.Errorf("unexpected raft message type %s on snapshot path", m.Type) + } http.Error(w, "wrong raft message type", http.StatusBadRequest) snapshotReceiveFailures.WithLabelValues(from).Inc() return @@ -210,21 +262,54 @@ func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { defer func() { snapshotReceiveInflights.WithLabelValues(from).Dec() }() - plog.Infof("receiving database snapshot [index: %d, from: %s, raft message size: %s]", m.Snapshot.Metadata.Index, types.ID(m.From), msgSize) + + if h.lg != nil { + h.lg.Info( + "receiving database snapshot", + zap.String("local-member-id", h.localID.String()), + zap.String("remote-snapshot-sender-id", from), + zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index), + 
zap.Int("incoming-snapshot-message-size-bytes", msgSize), + zap.String("incoming-snapshot-message-size", humanize.Bytes(uint64(msgSize))), + ) + } else { + plog.Infof("receiving database snapshot [index:%d, from %s] ...", m.Snapshot.Metadata.Index, types.ID(m.From)) + } + // save incoming database snapshot. n, err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index) if err != nil { msg := fmt.Sprintf("failed to save KV snapshot (%v)", err) - plog.Error(msg) + if h.lg != nil { + h.lg.Warn( + "failed to save incoming database snapshot", + zap.String("local-member-id", h.localID.String()), + zap.String("remote-snapshot-sender-id", from), + zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index), + zap.Error(err), + ) + } else { + plog.Error(msg) + } http.Error(w, msg, http.StatusInternalServerError) snapshotReceiveFailures.WithLabelValues(from).Inc() return } - downloadTook := time.Since(start) - dbSize := humanize.Bytes(uint64(n)) receivedBytes.WithLabelValues(from).Add(float64(n)) - plog.Infof("successfully received and saved database snapshot [index: %d, from: %s, raft message size: %s, db size: %s, took: %s]", m.Snapshot.Metadata.Index, types.ID(m.From), msgSize, dbSize, downloadTook) + + if h.lg != nil { + h.lg.Info( + "received and saved database snapshot", + zap.String("local-member-id", h.localID.String()), + zap.String("remote-snapshot-sender-id", from), + zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index), + zap.Int64("incoming-snapshot-size-bytes", n), + zap.String("incoming-snapshot-size", humanize.Bytes(uint64(n))), + ) + } else { + plog.Infof("received and saved database snapshot [index: %d, from: %s] successfully", m.Snapshot.Metadata.Index, types.ID(m.From)) + } if err := h.r.Process(context.TODO(), m); err != nil { switch v := err.(type) { @@ -234,12 +319,22 @@ func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { v.WriteTo(w) default: msg := fmt.Sprintf("failed to process raft message (%v)", err) - plog.Warningf(msg) + if h.lg != nil { + h.lg.Warn( + "failed to process Raft message", + zap.String("local-member-id", h.localID.String()), + zap.String("remote-snapshot-sender-id", from), + zap.Error(err), + ) + } else { + plog.Error(msg) + } http.Error(w, msg, http.StatusInternalServerError) snapshotReceiveFailures.WithLabelValues(from).Inc() } return } + // Write StatusNoContent header after the message has been processed by // raft, which facilitates the client to report MsgSnap status. 
w.WriteHeader(http.StatusNoContent) @@ -249,6 +344,7 @@ func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type streamHandler struct { + lg *zap.Logger tr *Transport peerGetter peerGetter r Raft @@ -256,9 +352,10 @@ type streamHandler struct { cid types.ID } -func newStreamHandler(tr *Transport, pg peerGetter, r Raft, id, cid types.ID) http.Handler { +func newStreamHandler(t *Transport, pg peerGetter, r Raft, id, cid types.ID) http.Handler { return &streamHandler{ - tr: tr, + lg: t.Logger, + tr: t, peerGetter: pg, r: r, id: id, @@ -276,7 +373,7 @@ func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.Header().Set("X-Server-Version", version.Version) w.Header().Set("X-Etcd-Cluster-ID", h.cid.String()) - if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil { + if err := checkClusterCompatibilityFromHeader(h.lg, h.tr.ID, r.Header, h.cid); err != nil { http.Error(w, err.Error(), http.StatusPreconditionFailed) return } @@ -288,7 +385,16 @@ func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { case streamTypeMessage.endpoint(): t = streamTypeMessage default: - plog.Debugf("ignored unexpected streaming request path %s", r.URL.Path) + if h.lg != nil { + h.lg.Debug( + "ignored unexpected streaming request path", + zap.String("local-member-id", h.tr.ID.String()), + zap.String("remote-peer-id-stream-handler", h.id.String()), + zap.String("path", r.URL.Path), + ) + } else { + plog.Debugf("ignored unexpected streaming request path %s", r.URL.Path) + } http.Error(w, "invalid path", http.StatusNotFound) return } @@ -296,12 +402,31 @@ func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { fromStr := path.Base(r.URL.Path) from, err := types.IDFromString(fromStr) if err != nil { - plog.Errorf("failed to parse from %s into ID (%v)", fromStr, err) + if h.lg != nil { + h.lg.Warn( + "failed to parse path into ID", + zap.String("local-member-id", h.tr.ID.String()), + zap.String("remote-peer-id-stream-handler", h.id.String()), + zap.String("path", fromStr), + zap.Error(err), + ) + } else { + plog.Errorf("failed to parse from %s into ID (%v)", fromStr, err) + } http.Error(w, "invalid from", http.StatusNotFound) return } if h.r.IsIDRemoved(uint64(from)) { - plog.Warningf("rejected the stream from peer %s since it was removed", from) + if h.lg != nil { + h.lg.Warn( + "rejected stream from remote peer because it was removed", + zap.String("local-member-id", h.tr.ID.String()), + zap.String("remote-peer-id-stream-handler", h.id.String()), + zap.String("remote-peer-id-from", from.String()), + ) + } else { + plog.Warningf("rejected the stream from peer %s since it was removed", from) + } http.Error(w, "removed member", http.StatusGone) return } @@ -315,14 +440,35 @@ func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if urls := r.Header.Get("X-PeerURLs"); urls != "" { h.tr.AddRemote(from, strings.Split(urls, ",")) } - plog.Errorf("failed to find member %s in cluster %s", from, h.cid) + if h.lg != nil { + h.lg.Warn( + "failed to find remote peer in cluster", + zap.String("local-member-id", h.tr.ID.String()), + zap.String("remote-peer-id-stream-handler", h.id.String()), + zap.String("remote-peer-id-from", from.String()), + zap.String("cluster-id", h.cid.String()), + ) + } else { + plog.Errorf("failed to find member %s in cluster %s", from, h.cid) + } http.Error(w, "error sender not found", http.StatusNotFound) return } wto := h.id.String() if gto := r.Header.Get("X-Raft-To"); gto != 
wto { - plog.Errorf("streaming request ignored (ID mismatch got %s want %s)", gto, wto) + if h.lg != nil { + h.lg.Warn( + "ignored streaming request; ID mismatch", + zap.String("local-member-id", h.tr.ID.String()), + zap.String("remote-peer-id-stream-handler", h.id.String()), + zap.String("remote-peer-id-header", gto), + zap.String("remote-peer-id-from", from.String()), + zap.String("cluster-id", h.cid.String()), + ) + } else { + plog.Errorf("streaming request ignored (ID mismatch got %s want %s)", gto, wto) + } http.Error(w, "to field mismatch", http.StatusPreconditionFailed) return } @@ -336,6 +482,8 @@ func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { Writer: w, Flusher: w.(http.Flusher), Closer: c, + localID: h.tr.ID, + peerID: h.id, } p.attachOutgoingConn(conn) <-c.closeNotify() @@ -346,13 +494,66 @@ func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // It checks whether the version of local member is compatible with // the versions in the header, and whether the cluster ID of local member // matches the one in the header. -func checkClusterCompatibilityFromHeader(header http.Header, cid types.ID) error { - if err := checkVersionCompability(header.Get("X-Server-From"), serverVersion(header), minClusterVersion(header)); err != nil { - plog.Errorf("request version incompatibility (%v)", err) +func checkClusterCompatibilityFromHeader(lg *zap.Logger, localID types.ID, header http.Header, cid types.ID) error { + remoteName := header.Get("X-Server-From") + + remoteServer := serverVersion(header) + remoteVs := "" + if remoteServer != nil { + remoteVs = remoteServer.String() + } + + remoteMinClusterVer := minClusterVersion(header) + remoteMinClusterVs := "" + if remoteMinClusterVer != nil { + remoteMinClusterVs = remoteMinClusterVer.String() + } + + localServer, localMinCluster, err := checkVersionCompatibility(remoteName, remoteServer, remoteMinClusterVer) + + localVs := "" + if localServer != nil { + localVs = localServer.String() + } + localMinClusterVs := "" + if localMinCluster != nil { + localMinClusterVs = localMinCluster.String() + } + + if err != nil { + if lg != nil { + lg.Warn( + "failed to check version compatibility", + zap.String("local-member-id", localID.String()), + zap.String("local-member-cluster-id", cid.String()), + zap.String("local-member-server-version", localVs), + zap.String("local-member-server-minimum-cluster-version", localMinClusterVs), + zap.String("remote-peer-server-name", remoteName), + zap.String("remote-peer-server-version", remoteVs), + zap.String("remote-peer-server-minimum-cluster-version", remoteMinClusterVs), + zap.Error(err), + ) + } else { + plog.Errorf("request version incompatibility (%v)", err) + } return errIncompatibleVersion } if gcid := header.Get("X-Etcd-Cluster-ID"); gcid != cid.String() { - plog.Errorf("request cluster ID mismatch (got %s want %s)", gcid, cid) + if lg != nil { + lg.Warn( + "request cluster ID mismatch", + zap.String("local-member-id", localID.String()), + zap.String("local-member-cluster-id", cid.String()), + zap.String("local-member-server-version", localVs), + zap.String("local-member-server-minimum-cluster-version", localMinClusterVs), + zap.String("remote-peer-server-name", remoteName), + zap.String("remote-peer-server-version", remoteVs), + zap.String("remote-peer-server-minimum-cluster-version", remoteMinClusterVs), + zap.String("remote-peer-cluster-id", gcid), + ) + } else { + plog.Errorf("request cluster ID mismatch (got %s want %s)", gcid, cid) + } return 
errClusterIDMismatch } return nil diff --git a/vendor/github.com/coreos/etcd/rafthttp/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/metrics.go similarity index 83% rename from vendor/github.com/coreos/etcd/rafthttp/metrics.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/metrics.go index a5e49178a..02fff84be 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/metrics.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/metrics.go @@ -17,6 +17,24 @@ package rafthttp import "github.com/prometheus/client_golang/prometheus" var ( + activePeers = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "network", + Name: "active_peers", + Help: "The current number of active peer connections.", + }, + []string{"Local", "Remote"}, + ) + + disconnectedPeers = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "network", + Name: "disconnected_peers_total", + Help: "The total number of disconnected peers.", + }, + []string{"Local", "Remote"}, + ) + sentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "etcd", Subsystem: "network", @@ -133,18 +151,23 @@ var ( []string{"From"}, ) - rtts = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + rttSec = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: "etcd", Subsystem: "network", Name: "peer_round_trip_time_seconds", - Help: "Round-Trip-Time histogram between peers.", - Buckets: prometheus.ExponentialBuckets(0.0001, 2, 14), + Help: "Round-Trip-Time histogram between peers", + + // lowest bucket start of upper bound 0.0001 sec (0.1 ms) with factor 2 + // highest bucket start of 0.0001 sec * 2^15 == 3.2768 sec + Buckets: prometheus.ExponentialBuckets(0.0001, 2, 16), }, []string{"To"}, ) ) func init() { + prometheus.MustRegister(activePeers) + prometheus.MustRegister(disconnectedPeers) prometheus.MustRegister(sentBytes) prometheus.MustRegister(receivedBytes) prometheus.MustRegister(sentFailures) @@ -159,5 +182,5 @@ func init() { prometheus.MustRegister(snapshotReceiveFailures) prometheus.MustRegister(snapshotReceiveSeconds) - prometheus.MustRegister(rtts) + prometheus.MustRegister(rttSec) } diff --git a/vendor/github.com/coreos/etcd/rafthttp/msg_codec.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/msg_codec.go similarity index 95% rename from vendor/github.com/coreos/etcd/rafthttp/msg_codec.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/msg_codec.go index ef59bc888..2417d222e 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/msg_codec.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/msg_codec.go @@ -19,8 +19,8 @@ import ( "errors" "io" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/raft/raftpb" + "go.etcd.io/etcd/pkg/pbutil" + "go.etcd.io/etcd/raft/raftpb" ) // messageEncoder is a encoder that can encode all kinds of messages. 
diff --git a/vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/msgappv2_codec.go similarity index 95% rename from vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/msgappv2_codec.go index 013ffe7c7..1fa36deb3 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/msgappv2_codec.go @@ -20,10 +20,10 @@ import ( "io" "time" - "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" + stats "go.etcd.io/etcd/etcdserver/api/v2stats" + "go.etcd.io/etcd/pkg/pbutil" + "go.etcd.io/etcd/pkg/types" + "go.etcd.io/etcd/raft/raftpb" ) const ( @@ -86,12 +86,12 @@ func (enc *msgAppV2Encoder) encode(m *raftpb.Message) error { start := time.Now() switch { case isLinkHeartbeatMessage(m): - enc.uint8buf[0] = byte(msgTypeLinkHeartbeat) + enc.uint8buf[0] = msgTypeLinkHeartbeat if _, err := enc.w.Write(enc.uint8buf); err != nil { return err } case enc.index == m.Index && enc.term == m.LogTerm && m.LogTerm == m.Term: - enc.uint8buf[0] = byte(msgTypeAppEntries) + enc.uint8buf[0] = msgTypeAppEntries if _, err := enc.w.Write(enc.uint8buf); err != nil { return err } @@ -179,7 +179,7 @@ func (dec *msgAppV2Decoder) decode() (raftpb.Message, error) { if _, err := io.ReadFull(dec.r, dec.uint8buf); err != nil { return m, err } - typ = uint8(dec.uint8buf[0]) + typ = dec.uint8buf[0] switch typ { case msgTypeLinkHeartbeat: return linkHeartbeatMessage, nil diff --git a/vendor/github.com/coreos/etcd/rafthttp/peer.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/peer.go similarity index 72% rename from vendor/github.com/coreos/etcd/rafthttp/peer.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/peer.go index e9a25bb3a..8130c4a96 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/peer.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/peer.go @@ -19,12 +19,13 @@ import ( "sync" "time" - "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" + "go.etcd.io/etcd/etcdserver/api/snap" + stats "go.etcd.io/etcd/etcdserver/api/v2stats" + "go.etcd.io/etcd/pkg/types" + "go.etcd.io/etcd/raft" + "go.etcd.io/etcd/raft/raftpb" + "go.uber.org/zap" "golang.org/x/time/rate" ) @@ -93,9 +94,13 @@ type Peer interface { // A pipeline is a series of http clients that send http requests to the remote. // It is only used when the stream has not been established. 
type peer struct { + lg *zap.Logger + + localID types.ID // id of the remote raft peer node id types.ID - r Raft + + r Raft status *peerStatus @@ -118,17 +123,27 @@ type peer struct { stopc chan struct{} } -func startPeer(transport *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer { - plog.Infof("starting peer %s...", peerID) - defer plog.Infof("started peer %s", peerID) +func startPeer(t *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer { + if t.Logger != nil { + t.Logger.Info("starting remote peer", zap.String("remote-peer-id", peerID.String())) + } else { + plog.Infof("starting peer %s...", peerID) + } + defer func() { + if t.Logger != nil { + t.Logger.Info("started remote peer", zap.String("remote-peer-id", peerID.String())) + } else { + plog.Infof("started peer %s", peerID) + } + }() - status := newPeerStatus(peerID) + status := newPeerStatus(t.Logger, t.ID, peerID) picker := newURLPicker(urls) - errorc := transport.ErrorC - r := transport.Raft + errorc := t.ErrorC + r := t.Raft pipeline := &pipeline{ peerID: peerID, - tr: transport, + tr: t, picker: picker, status: status, followerStats: fs, @@ -138,14 +153,16 @@ func startPeer(transport *Transport, urls types.URLs, peerID types.ID, fs *stats pipeline.start() p := &peer{ + lg: t.Logger, + localID: t.ID, id: peerID, r: r, status: status, picker: picker, - msgAppV2Writer: startStreamWriter(peerID, status, fs, r), - writer: startStreamWriter(peerID, status, fs, r), + msgAppV2Writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r), + writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r), pipeline: pipeline, - snapSender: newSnapshotSender(transport, picker, peerID, status), + snapSender: newSnapshotSender(t, picker, peerID, status), recvc: make(chan raftpb.Message, recvBufSize), propc: make(chan raftpb.Message, maxPendingProposals), stopc: make(chan struct{}), @@ -158,7 +175,11 @@ func startPeer(transport *Transport, urls types.URLs, peerID types.ID, fs *stats select { case mm := <-p.recvc: if err := r.Process(ctx, mm); err != nil { - plog.Warningf("failed to process raft message (%v)", err) + if t.Logger != nil { + t.Logger.Warn("failed to process Raft message", zap.Error(err)) + } else { + plog.Warningf("failed to process raft message (%v)", err) + } } case <-p.stopc: return @@ -183,24 +204,26 @@ func startPeer(transport *Transport, urls types.URLs, peerID types.ID, fs *stats }() p.msgAppV2Reader = &streamReader{ + lg: t.Logger, peerID: peerID, typ: streamTypeMsgAppV2, - tr: transport, + tr: t, picker: picker, status: status, recvc: p.recvc, propc: p.propc, - rl: rate.NewLimiter(transport.DialRetryFrequency, 1), + rl: rate.NewLimiter(t.DialRetryFrequency, 1), } p.msgAppReader = &streamReader{ + lg: t.Logger, peerID: peerID, typ: streamTypeMessage, - tr: transport, + tr: t, picker: picker, status: status, recvc: p.recvc, propc: p.propc, - rl: rate.NewLimiter(transport.DialRetryFrequency, 1), + rl: rate.NewLimiter(t.DialRetryFrequency, 1), } p.msgAppV2Reader.start() @@ -227,9 +250,32 @@ func (p *peer) send(m raftpb.Message) { p.r.ReportSnapshot(m.To, raft.SnapshotFailure) } if p.status.isActive() { - plog.MergeWarningf("dropped internal raft message to %s since %s's sending buffer is full (bad/overloaded network)", p.id, name) + if p.lg != nil { + p.lg.Warn( + "dropped internal Raft message since sending buffer is full (overloaded network)", + zap.String("message-type", m.Type.String()), + zap.String("local-member-id", p.localID.String()), + zap.String("from", 
types.ID(m.From).String()), + zap.String("remote-peer-id", p.id.String()), + zap.Bool("remote-peer-active", p.status.isActive()), + ) + } else { + plog.MergeWarningf("dropped internal raft message to %s since %s's sending buffer is full (bad/overloaded network)", p.id, name) + } + } else { + if p.lg != nil { + p.lg.Warn( + "dropped internal Raft message since sending buffer is full (overloaded network)", + zap.String("message-type", m.Type.String()), + zap.String("local-member-id", p.localID.String()), + zap.String("from", types.ID(m.From).String()), + zap.String("remote-peer-id", p.id.String()), + zap.Bool("remote-peer-active", p.status.isActive()), + ) + } else { + plog.Debugf("dropped %s to %s since %s's sending buffer is full", m.Type, p.id, name) + } } - plog.Debugf("dropped %s to %s since %s's sending buffer is full", m.Type, p.id, name) sentFailures.WithLabelValues(types.ID(m.To).String()).Inc() } } @@ -250,7 +296,11 @@ func (p *peer) attachOutgoingConn(conn *outgoingConn) { case streamTypeMessage: ok = p.writer.attach(conn) default: - plog.Panicf("unhandled stream type %s", conn.t) + if p.lg != nil { + p.lg.Panic("unknown stream type", zap.String("type", conn.t.String())) + } else { + plog.Panicf("unhandled stream type %s", conn.t) + } } if !ok { conn.Close() @@ -279,8 +329,19 @@ func (p *peer) Resume() { } func (p *peer) stop() { - plog.Infof("stopping peer %s...", p.id) - defer plog.Infof("stopped peer %s", p.id) + if p.lg != nil { + p.lg.Info("stopping remote peer", zap.String("remote-peer-id", p.id.String())) + } else { + plog.Infof("stopping peer %s...", p.id) + } + + defer func() { + if p.lg != nil { + p.lg.Info("stopped remote peer", zap.String("remote-peer-id", p.id.String())) + } else { + plog.Infof("stopped peer %s", p.id) + } + }() close(p.stopc) p.cancel() diff --git a/vendor/github.com/coreos/etcd/rafthttp/peer_status.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/peer_status.go similarity index 59% rename from vendor/github.com/coreos/etcd/rafthttp/peer_status.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/peer_status.go index 69cbd384c..66149ff67 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/peer_status.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/peer_status.go @@ -15,11 +15,14 @@ package rafthttp import ( + "errors" "fmt" "sync" "time" - "github.com/coreos/etcd/pkg/types" + "go.etcd.io/etcd/pkg/types" + + "go.uber.org/zap" ) type failureType struct { @@ -28,25 +31,31 @@ type failureType struct { } type peerStatus struct { + lg *zap.Logger + local types.ID id types.ID mu sync.Mutex // protect variables below active bool since time.Time } -func newPeerStatus(id types.ID) *peerStatus { - return &peerStatus{ - id: id, - } +func newPeerStatus(lg *zap.Logger, local, id types.ID) *peerStatus { + return &peerStatus{lg: lg, local: local, id: id} } func (s *peerStatus) activate() { s.mu.Lock() defer s.mu.Unlock() if !s.active { - plog.Infof("peer %s became active", s.id) + if s.lg != nil { + s.lg.Info("peer became active", zap.String("peer-id", s.id.String())) + } else { + plog.Infof("peer %s became active", s.id) + } s.active = true s.since = time.Now() + + activePeers.WithLabelValues(s.local.String(), s.id.String()).Inc() } } @@ -55,13 +64,23 @@ func (s *peerStatus) deactivate(failure failureType, reason string) { defer s.mu.Unlock() msg := fmt.Sprintf("failed to %s %s on %s (%s)", failure.action, s.id, failure.source, reason) if s.active { - plog.Errorf(msg) - plog.Infof("peer %s became inactive (message send to peer failed)", s.id) + if 
s.lg != nil { + s.lg.Warn("peer became inactive (message send to peer failed)", zap.String("peer-id", s.id.String()), zap.Error(errors.New(msg))) + } else { + plog.Errorf(msg) + plog.Infof("peer %s became inactive (message send to peer failed)", s.id) + } s.active = false s.since = time.Time{} + + activePeers.WithLabelValues(s.local.String(), s.id.String()).Dec() + disconnectedPeers.WithLabelValues(s.local.String(), s.id.String()).Inc() return } - plog.Debugf(msg) + + if s.lg != nil { + s.lg.Debug("peer deactivated again", zap.String("peer-id", s.id.String()), zap.Error(errors.New(msg))) + } } func (s *peerStatus) isActive() bool { diff --git a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/pipeline.go similarity index 82% rename from vendor/github.com/coreos/etcd/rafthttp/pipeline.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/pipeline.go index d9f07c347..70f92575d 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/pipeline.go @@ -22,11 +22,13 @@ import ( "sync" "time" - "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" + stats "go.etcd.io/etcd/etcdserver/api/v2stats" + "go.etcd.io/etcd/pkg/pbutil" + "go.etcd.io/etcd/pkg/types" + "go.etcd.io/etcd/raft" + "go.etcd.io/etcd/raft/raftpb" + + "go.uber.org/zap" ) const ( @@ -64,13 +66,31 @@ func (p *pipeline) start() { for i := 0; i < connPerPipeline; i++ { go p.handle() } - plog.Infof("started HTTP pipelining with peer %s", p.peerID) + + if p.tr != nil && p.tr.Logger != nil { + p.tr.Logger.Info( + "started HTTP pipelining with remote peer", + zap.String("local-member-id", p.tr.ID.String()), + zap.String("remote-peer-id", p.peerID.String()), + ) + } else { + plog.Infof("started HTTP pipelining with peer %s", p.peerID) + } } func (p *pipeline) stop() { close(p.stopc) p.wg.Wait() - plog.Infof("stopped HTTP pipelining with peer %s", p.peerID) + + if p.tr != nil && p.tr.Logger != nil { + p.tr.Logger.Info( + "stopped HTTP pipelining with remote peer", + zap.String("local-member-id", p.tr.ID.String()), + zap.String("remote-peer-id", p.peerID.String()), + ) + } else { + plog.Infof("stopped HTTP pipelining with peer %s", p.peerID) + } } func (p *pipeline) handle() { @@ -135,12 +155,12 @@ func (p *pipeline) post(data []byte) (err error) { p.picker.unreachable(u) return err } + defer resp.Body.Close() b, err := ioutil.ReadAll(resp.Body) if err != nil { p.picker.unreachable(u) return err } - resp.Body.Close() err = checkPostResponse(resp, b, req, p.peerID) if err != nil { diff --git a/vendor/github.com/coreos/etcd/rafthttp/probing_status.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/probing_status.go similarity index 58% rename from vendor/github.com/coreos/etcd/rafthttp/probing_status.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/probing_status.go index 109a0aea0..474d9a0e4 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/probing_status.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/probing_status.go @@ -19,14 +19,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/xiang90/probing" -) - -var ( - // proberInterval must be shorter than read timeout. - // Or the connection will time-out. 
- proberInterval = ConnReadTimeout - time.Second - statusMonitoringInterval = 30 * time.Second - statusErrorInterval = 5 * time.Second + "go.uber.org/zap" ) const ( @@ -37,7 +30,15 @@ const ( RoundTripperNameSnapshot = "ROUND_TRIPPER_SNAPSHOT" ) -func addPeerToProber(p probing.Prober, id string, us []string, roundTripperName string, rttSecProm *prometheus.HistogramVec) { +var ( + // proberInterval must be shorter than read timeout. + // Or the connection will time-out. + proberInterval = ConnReadTimeout - time.Second + statusMonitoringInterval = 30 * time.Second + statusErrorInterval = 5 * time.Second +) + +func addPeerToProber(lg *zap.Logger, p probing.Prober, id string, us []string, roundTripperName string, rttSecProm *prometheus.HistogramVec) { hus := make([]string, len(us)) for i := range us { hus[i] = us[i] + ProbingPrefix @@ -47,28 +48,55 @@ func addPeerToProber(p probing.Prober, id string, us []string, roundTripperName s, err := p.Status(id) if err != nil { - plog.Errorf("failed to add peer %s into prober", id) - } else { - go monitorProbingStatus(s, id, roundTripperName, rttSecProm) + if lg != nil { + lg.Warn("failed to add peer into prober", zap.String("remote-peer-id", id)) + } else { + plog.Errorf("failed to add peer %s into prober", id) + } + return } + + go monitorProbingStatus(lg, s, id, roundTripperName, rttSecProm) } -func monitorProbingStatus(s probing.Status, id string, roundTripperName string, rttSecProm *prometheus.HistogramVec) { +func monitorProbingStatus(lg *zap.Logger, s probing.Status, id string, roundTripperName string, rttSecProm *prometheus.HistogramVec) { // set the first interval short to log error early. interval := statusErrorInterval for { select { case <-time.After(interval): if !s.Health() { - plog.Warningf("health check for peer %s could not connect: %v (prober %q)", id, s.Err(), roundTripperName) + if lg != nil { + lg.Warn( + "prober detected unhealthy status", + zap.String("round-tripper-name", roundTripperName), + zap.String("remote-peer-id", id), + zap.Duration("rtt", s.SRTT()), + zap.Error(s.Err()), + ) + } else { + plog.Warningf("health check for peer %s could not connect: %v", id, s.Err()) + } interval = statusErrorInterval } else { interval = statusMonitoringInterval } if s.ClockDiff() > time.Second { - plog.Warningf("the clock difference against peer %s is too high [%v > %v] (prober %q)", id, s.ClockDiff(), time.Second, roundTripperName) + if lg != nil { + lg.Warn( + "prober found high clock drift", + zap.String("round-tripper-name", roundTripperName), + zap.String("remote-peer-id", id), + zap.Duration("clock-drift", s.ClockDiff()), + zap.Duration("rtt", s.SRTT()), + zap.Error(s.Err()), + ) + } else { + plog.Warningf("the clock difference against peer %s is too high [%v > %v]", id, s.ClockDiff(), time.Second) + } } rttSecProm.WithLabelValues(id).Observe(s.SRTT().Seconds()) + case <-s.StopNotify(): return } diff --git a/vendor/github.com/coreos/etcd/rafthttp/remote.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/remote.go similarity index 52% rename from vendor/github.com/coreos/etcd/rafthttp/remote.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/remote.go index f7f9d2ceb..1ef2493ed 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/remote.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/remote.go @@ -15,11 +15,15 @@ package rafthttp import ( - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" + "go.etcd.io/etcd/pkg/types" + "go.etcd.io/etcd/raft/raftpb" + + "go.uber.org/zap" ) type remote struct { 
+ lg *zap.Logger + localID types.ID id types.ID status *peerStatus pipeline *pipeline @@ -27,7 +31,7 @@ type remote struct { func startRemote(tr *Transport, urls types.URLs, id types.ID) *remote { picker := newURLPicker(urls) - status := newPeerStatus(id) + status := newPeerStatus(tr.Logger, tr.ID, id) pipeline := &pipeline{ peerID: id, tr: tr, @@ -39,6 +43,8 @@ func startRemote(tr *Transport, urls types.URLs, id types.ID) *remote { pipeline.start() return &remote{ + lg: tr.Logger, + localID: tr.ID, id: id, status: status, pipeline: pipeline, @@ -50,9 +56,32 @@ func (g *remote) send(m raftpb.Message) { case g.pipeline.msgc <- m: default: if g.status.isActive() { - plog.MergeWarningf("dropped internal raft message to %s since sending buffer is full (bad/overloaded network)", g.id) + if g.lg != nil { + g.lg.Warn( + "dropped internal Raft message since sending buffer is full (overloaded network)", + zap.String("message-type", m.Type.String()), + zap.String("local-member-id", g.localID.String()), + zap.String("from", types.ID(m.From).String()), + zap.String("remote-peer-id", g.id.String()), + zap.Bool("remote-peer-active", g.status.isActive()), + ) + } else { + plog.MergeWarningf("dropped internal raft message to %s since sending buffer is full (bad/overloaded network)", g.id) + } + } else { + if g.lg != nil { + g.lg.Warn( + "dropped Raft message since sending buffer is full (overloaded network)", + zap.String("message-type", m.Type.String()), + zap.String("local-member-id", g.localID.String()), + zap.String("from", types.ID(m.From).String()), + zap.String("remote-peer-id", g.id.String()), + zap.Bool("remote-peer-active", g.status.isActive()), + ) + } else { + plog.Debugf("dropped %s to %s since sending buffer is full", m.Type, g.id) + } } - plog.Debugf("dropped %s to %s since sending buffer is full", m.Type, g.id) sentFailures.WithLabelValues(types.ID(m.To).String()).Inc() } } diff --git a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/snapshot_sender.go similarity index 70% rename from vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/snapshot_sender.go index 2d36e123a..62efb0cdc 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/snapshot_sender.go @@ -22,12 +22,14 @@ import ( "net/http" "time" - "github.com/coreos/etcd/pkg/httputil" - pioutil "github.com/coreos/etcd/pkg/ioutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/snap" + "go.etcd.io/etcd/etcdserver/api/snap" + "go.etcd.io/etcd/pkg/httputil" + pioutil "go.etcd.io/etcd/pkg/ioutil" + "go.etcd.io/etcd/pkg/types" + "go.etcd.io/etcd/raft" + "github.com/dustin/go-humanize" + "go.uber.org/zap" ) var ( @@ -70,15 +72,24 @@ func (s *snapshotSender) send(merged snap.Message) { m := merged.Message to := types.ID(m.To).String() - body := createSnapBody(merged) + body := createSnapBody(s.tr.Logger, merged) defer body.Close() u := s.picker.pick() req := createPostRequest(u, RaftSnapshotPrefix, body, "application/octet-stream", s.tr.URLs, s.from, s.cid) - snapshotTotalSizeVal := uint64(merged.TotalSize) - snapshotTotalSize := humanize.Bytes(snapshotTotalSizeVal) - plog.Infof("start to send database snapshot [index: %d, to %s, size %s]...", m.Snapshot.Metadata.Index, types.ID(m.To), snapshotTotalSize) + if s.tr.Logger != nil { + s.tr.Logger.Info( + "sending database snapshot", + 
zap.Uint64("snapshot-index", m.Snapshot.Metadata.Index), + zap.String("remote-peer-id", to), + zap.Int64("bytes", merged.TotalSize), + zap.String("size", humanize.Bytes(uint64(merged.TotalSize))), + ) + } else { + plog.Infof("start to send database snapshot [index: %d, to %s]...", m.Snapshot.Metadata.Index, types.ID(m.To)) + } + snapshotSendInflights.WithLabelValues(to).Inc() defer func() { snapshotSendInflights.WithLabelValues(to).Dec() @@ -87,7 +98,18 @@ func (s *snapshotSender) send(merged snap.Message) { err := s.post(req) defer merged.CloseWithError(err) if err != nil { - plog.Warningf("database snapshot [index: %d, to: %s] failed to be sent out (%v)", m.Snapshot.Metadata.Index, types.ID(m.To), err) + if s.tr.Logger != nil { + s.tr.Logger.Warn( + "failed to send database snapshot", + zap.Uint64("snapshot-index", m.Snapshot.Metadata.Index), + zap.String("remote-peer-id", to), + zap.Int64("bytes", merged.TotalSize), + zap.String("size", humanize.Bytes(uint64(merged.TotalSize))), + zap.Error(err), + ) + } else { + plog.Warningf("database snapshot [index: %d, to: %s] failed to be sent out (%v)", m.Snapshot.Metadata.Index, types.ID(m.To), err) + } // errMemberRemoved is a critical error since a removed member should // always be stopped. So we use reportCriticalError to report it to errorc. @@ -108,10 +130,20 @@ func (s *snapshotSender) send(merged snap.Message) { } s.status.activate() s.r.ReportSnapshot(m.To, raft.SnapshotFinish) - plog.Infof("database snapshot [index: %d, to: %s] sent out successfully", m.Snapshot.Metadata.Index, types.ID(m.To)) - sentBytes.WithLabelValues(to).Add(float64(merged.TotalSize)) + if s.tr.Logger != nil { + s.tr.Logger.Info( + "sent database snapshot", + zap.Uint64("snapshot-index", m.Snapshot.Metadata.Index), + zap.String("remote-peer-id", to), + zap.Int64("bytes", merged.TotalSize), + zap.String("size", humanize.Bytes(uint64(merged.TotalSize))), + ) + } else { + plog.Infof("database snapshot [index: %d, to: %s] sent out successfully", m.Snapshot.Metadata.Index, types.ID(m.To)) + } + sentBytes.WithLabelValues(to).Add(float64(merged.TotalSize)) snapshotSend.WithLabelValues(to).Inc() snapshotSendSeconds.WithLabelValues(to).Observe(time.Since(start).Seconds()) } @@ -156,12 +188,16 @@ func (s *snapshotSender) post(req *http.Request) (err error) { } } -func createSnapBody(merged snap.Message) io.ReadCloser { +func createSnapBody(lg *zap.Logger, merged snap.Message) io.ReadCloser { buf := new(bytes.Buffer) enc := &messageEncoder{w: buf} // encode raft message if err := enc.encode(&merged.Message); err != nil { - plog.Panicf("encode message error (%v)", err) + if lg != nil { + lg.Panic("failed to encode message", zap.Error(err)) + } else { + plog.Panicf("encode message error (%v)", err) + } } return &pioutil.ReaderAndCloser{ diff --git a/vendor/github.com/coreos/etcd/rafthttp/stream.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/stream.go similarity index 58% rename from vendor/github.com/coreos/etcd/rafthttp/stream.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/stream.go index af49c18b1..cf7d8ccf6 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/stream.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/stream.go @@ -25,15 +25,16 @@ import ( "sync" "time" - "golang.org/x/time/rate" + stats "go.etcd.io/etcd/etcdserver/api/v2stats" + "go.etcd.io/etcd/pkg/httputil" + "go.etcd.io/etcd/pkg/transport" + "go.etcd.io/etcd/pkg/types" + "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/version" - "github.com/coreos/etcd/etcdserver/stats" - 
"github.com/coreos/etcd/pkg/httputil" - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/version" "github.com/coreos/go-semver/semver" + "go.uber.org/zap" + "golang.org/x/time/rate" ) const ( @@ -56,6 +57,7 @@ var ( "3.1.0": {streamTypeMsgAppV2, streamTypeMessage}, "3.2.0": {streamTypeMsgAppV2, streamTypeMessage}, "3.3.0": {streamTypeMsgAppV2, streamTypeMessage}, + "3.4.0": {streamTypeMsgAppV2, streamTypeMessage}, } ) @@ -100,11 +102,18 @@ type outgoingConn struct { io.Writer http.Flusher io.Closer + + localID types.ID + peerID types.ID } // streamWriter writes messages to the attached outgoingConn. type streamWriter struct { - peerID types.ID + lg *zap.Logger + + localID types.ID + peerID types.ID + status *peerStatus fs *stats.FollowerStats r Raft @@ -121,9 +130,13 @@ type streamWriter struct { // startStreamWriter creates a streamWrite and starts a long running go-routine that accepts // messages and writes to the attached outgoing connection. -func startStreamWriter(id types.ID, status *peerStatus, fs *stats.FollowerStats, r Raft) *streamWriter { +func startStreamWriter(lg *zap.Logger, local, id types.ID, status *peerStatus, fs *stats.FollowerStats, r Raft) *streamWriter { w := &streamWriter{ - peerID: id, + lg: lg, + + localID: local, + peerID: id, + status: status, fs: fs, r: r, @@ -149,7 +162,15 @@ func (cw *streamWriter) run() { defer tickc.Stop() unflushed := 0 - plog.Infof("started streaming with peer %s (writer)", cw.peerID) + if cw.lg != nil { + cw.lg.Info( + "started stream writer with remote peer", + zap.String("local-member-id", cw.localID.String()), + zap.String("remote-peer-id", cw.peerID.String()), + ) + } else { + plog.Infof("started streaming with peer %s (writer)", cw.peerID) + } for { select { @@ -168,7 +189,16 @@ func (cw *streamWriter) run() { sentFailures.WithLabelValues(cw.peerID.String()).Inc() cw.close() - plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) + if cw.lg != nil { + cw.lg.Warn( + "lost TCP streaming connection with remote peer", + zap.String("stream-writer-type", t.String()), + zap.String("local-member-id", cw.localID.String()), + zap.String("remote-peer-id", cw.peerID.String()), + ) + } else { + plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) + } heartbeatc, msgc = nil, nil case m := <-msgc: @@ -190,7 +220,16 @@ func (cw *streamWriter) run() { cw.status.deactivate(failureType{source: t.String(), action: "write"}, err.Error()) cw.close() - plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) + if cw.lg != nil { + cw.lg.Warn( + "lost TCP streaming connection with remote peer", + zap.String("stream-writer-type", t.String()), + zap.String("local-member-id", cw.localID.String()), + zap.String("remote-peer-id", cw.peerID.String()), + ) + } else { + plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) + } heartbeatc, msgc = nil, nil cw.r.ReportUnreachable(m.To) sentFailures.WithLabelValues(cw.peerID.String()).Inc() @@ -207,6 +246,14 @@ func (cw *streamWriter) run() { default: plog.Panicf("unhandled stream type %s", conn.t) } + if cw.lg != nil { + cw.lg.Info( + "set message encoder", + zap.String("from", conn.localID.String()), + zap.String("to", conn.peerID.String()), + zap.String("stream-type", t.String()), + ) + } flusher = conn.Flusher unflushed = 0 cw.status.activate() @@ -215,15 +262,50 @@ func (cw 
*streamWriter) run() { cw.mu.Unlock() if closed { - plog.Warningf("closed an existing TCP streaming connection with peer %s (%s writer)", cw.peerID, t) + if cw.lg != nil { + cw.lg.Warn( + "closed TCP streaming connection with remote peer", + zap.String("stream-writer-type", t.String()), + zap.String("local-member-id", cw.localID.String()), + zap.String("remote-peer-id", cw.peerID.String()), + ) + } else { + plog.Warningf("closed an existing TCP streaming connection with peer %s (%s writer)", cw.peerID, t) + } + } + if cw.lg != nil { + cw.lg.Warn( + "established TCP streaming connection with remote peer", + zap.String("stream-writer-type", t.String()), + zap.String("local-member-id", cw.localID.String()), + zap.String("remote-peer-id", cw.peerID.String()), + ) + } else { + plog.Infof("established a TCP streaming connection with peer %s (%s writer)", cw.peerID, t) } - plog.Infof("established a TCP streaming connection with peer %s (%s writer)", cw.peerID, t) heartbeatc, msgc = tickc.C, cw.msgc + case <-cw.stopc: if cw.close() { - plog.Infof("closed the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) + if cw.lg != nil { + cw.lg.Warn( + "closed TCP streaming connection with remote peer", + zap.String("stream-writer-type", t.String()), + zap.String("remote-peer-id", cw.peerID.String()), + ) + } else { + plog.Infof("closed the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) + } + } + if cw.lg != nil { + cw.lg.Warn( + "stopped TCP streaming connection with remote peer", + zap.String("stream-writer-type", t.String()), + zap.String("remote-peer-id", cw.peerID.String()), + ) + } else { + plog.Infof("stopped streaming with peer %s (writer)", cw.peerID) } - plog.Infof("stopped streaming with peer %s (writer)", cw.peerID) close(cw.done) return } @@ -247,7 +329,15 @@ func (cw *streamWriter) closeUnlocked() bool { return false } if err := cw.closer.Close(); err != nil { - plog.Errorf("peer %s (writer) connection close error: %v", cw.peerID, err) + if cw.lg != nil { + cw.lg.Warn( + "failed to close connection with remote peer", + zap.String("remote-peer-id", cw.peerID.String()), + zap.Error(err), + ) + } else { + plog.Errorf("peer %s (writer) connection close error: %v", cw.peerID, err) + } } if len(cw.msgc) > 0 { cw.r.ReportUnreachable(uint64(cw.peerID)) @@ -274,6 +364,8 @@ func (cw *streamWriter) stop() { // streamReader is a long-running go-routine that dials to the remote stream // endpoint and reads messages from the response body returned. 
type streamReader struct { + lg *zap.Logger + peerID types.ID typ streamType @@ -309,7 +401,18 @@ func (cr *streamReader) start() { func (cr *streamReader) run() { t := cr.typ - plog.Infof("started streaming with peer %s (%s reader)", cr.peerID, t) + + if cr.lg != nil { + cr.lg.Info( + "started stream reader with remote peer", + zap.String("stream-reader-type", t.String()), + zap.String("local-member-id", cr.tr.ID.String()), + zap.String("remote-peer-id", cr.peerID.String()), + ) + } else { + plog.Infof("started streaming with peer %s (%s reader)", cr.peerID, t) + } + for { rc, err := cr.dial(t) if err != nil { @@ -318,9 +421,28 @@ func (cr *streamReader) run() { } } else { cr.status.activate() - plog.Infof("established a TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ) + if cr.lg != nil { + cr.lg.Info( + "established TCP streaming connection with remote peer", + zap.String("stream-reader-type", cr.typ.String()), + zap.String("local-member-id", cr.tr.ID.String()), + zap.String("remote-peer-id", cr.peerID.String()), + ) + } else { + plog.Infof("established a TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ) + } err = cr.decodeLoop(rc, t) - plog.Warningf("lost the TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ) + if cr.lg != nil { + cr.lg.Warn( + "lost TCP streaming connection with remote peer", + zap.String("stream-reader-type", cr.typ.String()), + zap.String("local-member-id", cr.tr.ID.String()), + zap.String("remote-peer-id", cr.peerID.String()), + zap.Error(err), + ) + } else { + plog.Warningf("lost the TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ) + } switch { // all data is read out case err == io.EOF: @@ -333,12 +455,31 @@ func (cr *streamReader) run() { // Wait for a while before new dial attempt err = cr.rl.Wait(cr.ctx) if cr.ctx.Err() != nil { - plog.Infof("stopped streaming with peer %s (%s reader)", cr.peerID, t) + if cr.lg != nil { + cr.lg.Info( + "stopped stream reader with remote peer", + zap.String("stream-reader-type", t.String()), + zap.String("local-member-id", cr.tr.ID.String()), + zap.String("remote-peer-id", cr.peerID.String()), + ) + } else { + plog.Infof("stopped streaming with peer %s (%s reader)", cr.peerID, t) + } close(cr.done) return } if err != nil { - plog.Errorf("streaming with peer %s (%s reader) rate limiter error: %v", cr.peerID, t, err) + if cr.lg != nil { + cr.lg.Warn( + "rate limit on stream reader with remote peer", + zap.String("stream-reader-type", t.String()), + zap.String("local-member-id", cr.tr.ID.String()), + zap.String("remote-peer-id", cr.peerID.String()), + zap.Error(err), + ) + } else { + plog.Errorf("streaming with peer %s (%s reader) rate limiter error: %v", cr.peerID, t, err) + } } } } @@ -352,7 +493,11 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error { case streamTypeMessage: dec = &messageDecoder{r: rc} default: - plog.Panicf("unhandled stream type %s", t) + if cr.lg != nil { + cr.lg.Panic("unknown stream type", zap.String("type", t.String())) + } else { + plog.Panicf("unhandled stream type %s", t) + } } select { case <-cr.ctx.Done(): @@ -366,6 +511,7 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error { } cr.mu.Unlock() + // gofail: labelRaftDropHeartbeat: for { m, err := dec.decode() if err != nil { @@ -375,6 +521,8 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error { return err } + // gofail-go: var raftDropHeartbeat struct{} + // continue labelRaftDropHeartbeat 
receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(m.Size())) cr.mu.Lock() @@ -401,9 +549,32 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error { case recvc <- m: default: if cr.status.isActive() { - plog.MergeWarningf("dropped internal raft message from %s since receiving buffer is full (overloaded network)", types.ID(m.From)) + if cr.lg != nil { + cr.lg.Warn( + "dropped internal Raft message since receiving buffer is full (overloaded network)", + zap.String("message-type", m.Type.String()), + zap.String("local-member-id", cr.tr.ID.String()), + zap.String("from", types.ID(m.From).String()), + zap.String("remote-peer-id", types.ID(m.To).String()), + zap.Bool("remote-peer-active", cr.status.isActive()), + ) + } else { + plog.MergeWarningf("dropped internal raft message from %s since receiving buffer is full (overloaded network)", types.ID(m.From)) + } + } else { + if cr.lg != nil { + cr.lg.Warn( + "dropped Raft message since receiving buffer is full (overloaded network)", + zap.String("message-type", m.Type.String()), + zap.String("local-member-id", cr.tr.ID.String()), + zap.String("from", types.ID(m.From).String()), + zap.String("remote-peer-id", types.ID(m.To).String()), + zap.Bool("remote-peer-active", cr.status.isActive()), + ) + } else { + plog.Debugf("dropped %s from %s since receiving buffer is full", m.Type, types.ID(m.From)) + } } - plog.Debugf("dropped %s from %s since receiving buffer is full", m.Type, types.ID(m.From)) recvFailures.WithLabelValues(types.ID(m.From).String()).Inc() } } @@ -422,6 +593,14 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { uu := u uu.Path = path.Join(t.endpoint(), cr.tr.ID.String()) + if cr.lg != nil { + cr.lg.Debug( + "dial stream reader", + zap.String("from", cr.tr.ID.String()), + zap.String("to", cr.peerID.String()), + zap.String("address", uu.String()), + ) + } req, err := http.NewRequest("GET", uu.String(), nil) if err != nil { cr.picker.unreachable(u) @@ -466,12 +645,15 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { cr.picker.unreachable(u) reportCriticalError(errMemberRemoved, cr.errorc) return nil, errMemberRemoved + case http.StatusOK: return resp.Body, nil + case http.StatusNotFound: httputil.GracefulClose(resp) cr.picker.unreachable(u) return nil, fmt.Errorf("peer %s failed to find local node %s", cr.peerID, cr.tr.ID) + case http.StatusPreconditionFailed: b, err := ioutil.ReadAll(resp.Body) if err != nil { @@ -483,15 +665,38 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { switch strings.TrimSuffix(string(b), "\n") { case errIncompatibleVersion.Error(): - plog.Errorf("request sent was ignored by peer %s (server version incompatible)", cr.peerID) + if cr.lg != nil { + cr.lg.Warn( + "request sent was ignored by remote peer due to server version incompatibility", + zap.String("local-member-id", cr.tr.ID.String()), + zap.String("remote-peer-id", cr.peerID.String()), + zap.Error(errIncompatibleVersion), + ) + } else { + plog.Errorf("request sent was ignored by peer %s (server version incompatible)", cr.peerID) + } return nil, errIncompatibleVersion + case errClusterIDMismatch.Error(): - plog.Errorf("request sent was ignored (cluster ID mismatch: peer[%s]=%s, local=%s)", - cr.peerID, resp.Header.Get("X-Etcd-Cluster-ID"), cr.tr.ClusterID) + if cr.lg != nil { + cr.lg.Warn( + "request sent was ignored by remote peer due to cluster ID mismatch", + zap.String("remote-peer-id", cr.peerID.String()), + zap.String("remote-peer-cluster-id", 
resp.Header.Get("X-Etcd-Cluster-ID")), + zap.String("local-member-id", cr.tr.ID.String()), + zap.String("local-member-cluster-id", cr.tr.ClusterID.String()), + zap.Error(errClusterIDMismatch), + ) + } else { + plog.Errorf("request sent was ignored (cluster ID mismatch: peer[%s]=%s, local=%s)", + cr.peerID, resp.Header.Get("X-Etcd-Cluster-ID"), cr.tr.ClusterID) + } return nil, errClusterIDMismatch + default: return nil, fmt.Errorf("unhandled error %q when precondition failed", string(b)) } + default: httputil.GracefulClose(resp) cr.picker.unreachable(u) @@ -502,7 +707,16 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { func (cr *streamReader) close() { if cr.closer != nil { if err := cr.closer.Close(); err != nil { - plog.Errorf("peer %s (reader) connection close error: %v", cr.peerID, err) + if cr.lg != nil { + cr.lg.Warn( + "failed to close remote peer connection", + zap.String("local-member-id", cr.tr.ID.String()), + zap.String("remote-peer-id", cr.peerID.String()), + zap.Error(err), + ) + } else { + plog.Errorf("peer %s (reader) connection close error: %v", cr.peerID, err) + } } } cr.closer = nil diff --git a/vendor/github.com/coreos/etcd/rafthttp/transport.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/transport.go similarity index 78% rename from vendor/github.com/coreos/etcd/rafthttp/transport.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/transport.go index 9ec765086..7191c3d60 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/transport.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/transport.go @@ -20,20 +20,21 @@ import ( "sync" "time" - "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/pkg/logutil" - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" + "go.etcd.io/etcd/etcdserver/api/snap" + stats "go.etcd.io/etcd/etcdserver/api/v2stats" + "go.etcd.io/etcd/pkg/logutil" + "go.etcd.io/etcd/pkg/transport" + "go.etcd.io/etcd/pkg/types" + "go.etcd.io/etcd/raft" + "go.etcd.io/etcd/raft/raftpb" "github.com/coreos/pkg/capnslog" "github.com/xiang90/probing" + "go.uber.org/zap" "golang.org/x/time/rate" ) -var plog = logutil.NewMergeLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "rafthttp")) +var plog = logutil.NewMergeLogger(capnslog.NewPackageLogger("go.etcd.io/etcd", "rafthttp")) type Raft interface { Process(ctx context.Context, m raftpb.Message) error @@ -98,6 +99,8 @@ type Transporter interface { // User needs to call Start before calling other functions, and call // Stop when the Transport is no longer used. 
type Transport struct { + Logger *zap.Logger + DialTimeout time.Duration // maximum duration before timing out dial of the request // DialRetryFrequency defines the frequency of streamReader dial retrial attempts; // a distinct rate limiter is created per every peer (default value: 10 events/sec) @@ -199,7 +202,15 @@ func (t *Transport) Send(msgs []raftpb.Message) { continue } - plog.Debugf("ignored message %s (sent to unknown peer %s)", m.Type, to) + if t.Logger != nil { + t.Logger.Debug( + "ignored message send request; unknown remote peer target", + zap.String("type", m.Type.String()), + zap.String("unknown-target-peer-id", to.String()), + ) + } else { + plog.Debugf("ignored message %s (sent to unknown peer %s)", m.Type, to) + } } } @@ -271,9 +282,22 @@ func (t *Transport) AddRemote(id types.ID, us []string) { } urls, err := types.NewURLs(us) if err != nil { - plog.Panicf("newURLs %+v should never fail: %+v", us, err) + if t.Logger != nil { + t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err)) + } else { + plog.Panicf("newURLs %+v should never fail: %+v", us, err) + } } t.remotes[id] = startRemote(t, urls, id) + + if t.Logger != nil { + t.Logger.Info( + "added new remote peer", + zap.String("local-member-id", t.ID.String()), + zap.String("remote-peer-id", id.String()), + zap.Strings("remote-peer-urls", us), + ) + } } func (t *Transport) AddPeer(id types.ID, us []string) { @@ -288,13 +312,27 @@ func (t *Transport) AddPeer(id types.ID, us []string) { } urls, err := types.NewURLs(us) if err != nil { - plog.Panicf("newURLs %+v should never fail: %+v", us, err) + if t.Logger != nil { + t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err)) + } else { + plog.Panicf("newURLs %+v should never fail: %+v", us, err) + } } fs := t.LeaderStats.Follower(id.String()) t.peers[id] = startPeer(t, urls, id, fs) - addPeerToProber(t.pipelineProber, id.String(), us, RoundTripperNameSnapshot, rtts) - addPeerToProber(t.streamProber, id.String(), us, RoundTripperNameRaftMessage, rtts) - plog.Infof("added peer %s", id) + addPeerToProber(t.Logger, t.pipelineProber, id.String(), us, RoundTripperNameSnapshot, rttSec) + addPeerToProber(t.Logger, t.streamProber, id.String(), us, RoundTripperNameRaftMessage, rttSec) + + if t.Logger != nil { + t.Logger.Info( + "added remote peer", + zap.String("local-member-id", t.ID.String()), + zap.String("remote-peer-id", id.String()), + zap.Strings("remote-peer-urls", us), + ) + } else { + plog.Infof("added peer %s", id) + } } func (t *Transport) RemovePeer(id types.ID) { @@ -316,13 +354,26 @@ func (t *Transport) removePeer(id types.ID) { if peer, ok := t.peers[id]; ok { peer.stop() } else { - plog.Panicf("unexpected removal of unknown peer '%d'", id) + if t.Logger != nil { + t.Logger.Panic("unexpected removal of unknown remote peer", zap.String("remote-peer-id", id.String())) + } else { + plog.Panicf("unexpected removal of unknown peer '%d'", id) + } } delete(t.peers, id) delete(t.LeaderStats.Followers, id.String()) t.pipelineProber.Remove(id.String()) t.streamProber.Remove(id.String()) - plog.Infof("removed peer %s", id) + + if t.Logger != nil { + t.Logger.Info( + "removed remote peer", + zap.String("local-member-id", t.ID.String()), + zap.String("removed-remote-peer-id", id.String()), + ) + } else { + plog.Infof("removed peer %s", id) + } } func (t *Transport) UpdatePeer(id types.ID, us []string) { @@ -334,20 +385,34 @@ func (t *Transport) UpdatePeer(id types.ID, us []string) { } urls, err := types.NewURLs(us) if err != nil { - 
plog.Panicf("newURLs %+v should never fail: %+v", us, err) + if t.Logger != nil { + t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err)) + } else { + plog.Panicf("newURLs %+v should never fail: %+v", us, err) + } } t.peers[id].update(urls) t.pipelineProber.Remove(id.String()) - addPeerToProber(t.pipelineProber, id.String(), us, RoundTripperNameSnapshot, rtts) + addPeerToProber(t.Logger, t.pipelineProber, id.String(), us, RoundTripperNameSnapshot, rttSec) t.streamProber.Remove(id.String()) - addPeerToProber(t.streamProber, id.String(), us, RoundTripperNameRaftMessage, rtts) - plog.Infof("updated peer %s", id) + addPeerToProber(t.Logger, t.streamProber, id.String(), us, RoundTripperNameRaftMessage, rttSec) + + if t.Logger != nil { + t.Logger.Info( + "updated remote peer", + zap.String("local-member-id", t.ID.String()), + zap.String("updated-remote-peer-id", id.String()), + zap.Strings("updated-remote-peer-urls", us), + ) + } else { + plog.Infof("updated peer %s", id) + } } func (t *Transport) ActiveSince(id types.ID) time.Time { - t.mu.Lock() - defer t.mu.Unlock() + t.mu.RLock() + defer t.mu.RUnlock() if p, ok := t.peers[id]; ok { return p.activeSince() } @@ -400,43 +465,3 @@ func (t *Transport) ActivePeers() (cnt int) { } return cnt } - -type nopTransporter struct{} - -func NewNopTransporter() Transporter { - return &nopTransporter{} -} - -func (s *nopTransporter) Start() error { return nil } -func (s *nopTransporter) Handler() http.Handler { return nil } -func (s *nopTransporter) Send(m []raftpb.Message) {} -func (s *nopTransporter) SendSnapshot(m snap.Message) {} -func (s *nopTransporter) AddRemote(id types.ID, us []string) {} -func (s *nopTransporter) AddPeer(id types.ID, us []string) {} -func (s *nopTransporter) RemovePeer(id types.ID) {} -func (s *nopTransporter) RemoveAllPeers() {} -func (s *nopTransporter) UpdatePeer(id types.ID, us []string) {} -func (s *nopTransporter) ActiveSince(id types.ID) time.Time { return time.Time{} } -func (s *nopTransporter) ActivePeers() int { return 0 } -func (s *nopTransporter) Stop() {} -func (s *nopTransporter) Pause() {} -func (s *nopTransporter) Resume() {} - -type snapTransporter struct { - nopTransporter - snapDoneC chan snap.Message - snapDir string -} - -func NewSnapTransporter(snapDir string) (Transporter, <-chan snap.Message) { - ch := make(chan snap.Message, 1) - tr := &snapTransporter{snapDoneC: ch, snapDir: snapDir} - return tr, ch -} - -func (s *snapTransporter) SendSnapshot(m snap.Message) { - ss := snap.New(s.snapDir) - ss.SaveDBFrom(m.ReadCloser, m.Snapshot.Metadata.Index+1) - m.CloseWithError(nil) - s.snapDoneC <- m -} diff --git a/vendor/github.com/coreos/etcd/rafthttp/urlpick.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/urlpick.go similarity index 97% rename from vendor/github.com/coreos/etcd/rafthttp/urlpick.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/urlpick.go index 61839deeb..61ef46864 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/urlpick.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/urlpick.go @@ -18,7 +18,7 @@ import ( "net/url" "sync" - "github.com/coreos/etcd/pkg/types" + "go.etcd.io/etcd/pkg/types" ) type urlPicker struct { diff --git a/vendor/github.com/coreos/etcd/rafthttp/util.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/util.go similarity index 88% rename from vendor/github.com/coreos/etcd/rafthttp/util.go rename to vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/util.go index 6ec3641aa..20938647c 100644 --- 
a/vendor/github.com/coreos/etcd/rafthttp/util.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/util.go @@ -23,9 +23,10 @@ import ( "strings" "time" - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/version" + "go.etcd.io/etcd/pkg/transport" + "go.etcd.io/etcd/pkg/types" + "go.etcd.io/etcd/version" + "github.com/coreos/go-semver/semver" ) @@ -149,18 +150,21 @@ func minClusterVersion(h http.Header) *semver.Version { return semver.Must(semver.NewVersion(verStr)) } -// checkVersionCompability checks whether the given version is compatible +// checkVersionCompatibility checks whether the given version is compatible // with the local version. -func checkVersionCompability(name string, server, minCluster *semver.Version) error { - localServer := semver.Must(semver.NewVersion(version.Version)) - localMinCluster := semver.Must(semver.NewVersion(version.MinClusterVersion)) +func checkVersionCompatibility(name string, server, minCluster *semver.Version) ( + localServer *semver.Version, + localMinCluster *semver.Version, + err error) { + localServer = semver.Must(semver.NewVersion(version.Version)) + localMinCluster = semver.Must(semver.NewVersion(version.MinClusterVersion)) if compareMajorMinorVersion(server, localMinCluster) == -1 { - return fmt.Errorf("remote version is too low: remote[%s]=%s, local=%s", name, server, localServer) + return localServer, localMinCluster, fmt.Errorf("remote version is too low: remote[%s]=%s, local=%s", name, server, localServer) } if compareMajorMinorVersion(minCluster, localServer) == 1 { - return fmt.Errorf("local version is too low: remote[%s]=%s, local=%s", name, server, localServer) + return localServer, localMinCluster, fmt.Errorf("local version is too low: remote[%s]=%s, local=%s", name, server, localServer) } - return nil + return localServer, localMinCluster, nil } // setPeerURLsHeader reports local urls for peer discovery diff --git a/vendor/github.com/coreos/etcd/snap/db.go b/vendor/go.etcd.io/etcd/etcdserver/api/snap/db.go similarity index 76% rename from vendor/github.com/coreos/etcd/snap/db.go rename to vendor/go.etcd.io/etcd/etcdserver/api/snap/db.go index dcbd3bd67..3002ccdcc 100644 --- a/vendor/github.com/coreos/etcd/snap/db.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/snap/db.go @@ -23,7 +23,10 @@ import ( "path/filepath" "time" - "github.com/coreos/etcd/pkg/fileutil" + "go.etcd.io/etcd/pkg/fileutil" + + humanize "github.com/dustin/go-humanize" + "go.uber.org/zap" ) var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist") @@ -60,7 +63,16 @@ func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { return n, err } - plog.Infof("saved database snapshot to disk [total bytes: %d]", n) + if s.lg != nil { + s.lg.Info( + "saved database snapshot to disk", + zap.String("path", fn), + zap.Int64("bytes", n), + zap.String("size", humanize.Bytes(uint64(n))), + ) + } else { + plog.Infof("saved database snapshot to disk [total bytes: %d]", n) + } snapDBSaveSec.Observe(time.Since(start).Seconds()) return n, nil @@ -72,9 +84,18 @@ func (s *Snapshotter) DBFilePath(id uint64) (string, error) { if _, err := fileutil.ReadDir(s.dir); err != nil { return "", err } - if fn := s.dbFilePath(id); fileutil.Exist(fn) { + fn := s.dbFilePath(id) + if fileutil.Exist(fn) { return fn, nil } + if s.lg != nil { + s.lg.Warn( + "failed to find [SNAPSHOT-INDEX].snap.db", + zap.Uint64("snapshot-index", id), + zap.String("snapshot-file-path", fn), + zap.Error(ErrNoDBSnapshot), + ) + } return "", 
ErrNoDBSnapshot } diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/snap/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/snap/doc.go new file mode 100644 index 000000000..dcc5db579 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/api/snap/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package snap handles Raft nodes' states with snapshots. +// The snapshot logic is internal to etcd server and raft package. +package snap diff --git a/vendor/github.com/coreos/etcd/snap/message.go b/vendor/go.etcd.io/etcd/etcdserver/api/snap/message.go similarity index 96% rename from vendor/github.com/coreos/etcd/snap/message.go rename to vendor/go.etcd.io/etcd/etcdserver/api/snap/message.go index d73713ff1..c1151e27e 100644 --- a/vendor/github.com/coreos/etcd/snap/message.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/snap/message.go @@ -17,8 +17,8 @@ package snap import ( "io" - "github.com/coreos/etcd/pkg/ioutil" - "github.com/coreos/etcd/raft/raftpb" + "go.etcd.io/etcd/pkg/ioutil" + "go.etcd.io/etcd/raft/raftpb" ) // Message is a struct that contains a raft Message and a ReadCloser. The type diff --git a/vendor/github.com/coreos/etcd/snap/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/api/snap/metrics.go similarity index 65% rename from vendor/github.com/coreos/etcd/snap/metrics.go rename to vendor/go.etcd.io/etcd/etcdserver/api/snap/metrics.go index 0d3b7e63e..2affecf47 100644 --- a/vendor/github.com/coreos/etcd/snap/metrics.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/snap/metrics.go @@ -17,21 +17,37 @@ package snap import "github.com/prometheus/client_golang/prometheus" var ( - // TODO: save_fsync latency? 
- saveDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + snapMarshallingSec = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "etcd_debugging", + Subsystem: "snap", + Name: "save_marshalling_duration_seconds", + Help: "The marshalling cost distributions of save called by snapshot.", + + // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 + // highest bucket start of 0.001 sec * 2^13 == 8.192 sec + Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), + }) + + snapSaveSec = prometheus.NewHistogram(prometheus.HistogramOpts{ Namespace: "etcd_debugging", Subsystem: "snap", Name: "save_total_duration_seconds", Help: "The total latency distributions of save called by snapshot.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), + + // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 + // highest bucket start of 0.001 sec * 2^13 == 8.192 sec + Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), }) - marshallingDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd_debugging", + snapFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "etcd", Subsystem: "snap", - Name: "save_marshalling_duration_seconds", - Help: "The marshalling cost distributions of save called by snapshot.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), + Name: "fsync_duration_seconds", + Help: "The latency distributions of fsync called by snap.", + + // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 + // highest bucket start of 0.001 sec * 2^13 == 8.192 sec + Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), }) snapDBSaveSec = prometheus.NewHistogram(prometheus.HistogramOpts{ @@ -58,8 +74,9 @@ var ( ) func init() { - prometheus.MustRegister(saveDurations) - prometheus.MustRegister(marshallingDurations) + prometheus.MustRegister(snapMarshallingSec) + prometheus.MustRegister(snapSaveSec) + prometheus.MustRegister(snapFsyncSec) prometheus.MustRegister(snapDBSaveSec) prometheus.MustRegister(snapDBFsyncSec) } diff --git a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go b/vendor/go.etcd.io/etcd/etcdserver/api/snap/snappb/snap.pb.go similarity index 75% rename from vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go rename to vendor/go.etcd.io/etcd/etcdserver/api/snap/snappb/snap.pb.go index 46897b45e..e72b577f5 100644 --- a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/snap/snappb/snap.pb.go @@ -1,16 +1,27 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: snap.proto +/* + Package snappb is a generated protocol buffer package. + + It is generated from these files: + snap.proto + + It has these top-level messages: + Snapshot +*/ package snappb import ( - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. 
@@ -25,68 +36,23 @@ var _ = math.Inf const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type Snapshot struct { - Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"` - Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_f2e3c045ebf84d00, []int{0} -} -func (m *Snapshot) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_Snapshot.Merge(m, src) -} -func (m *Snapshot) XXX_Size() int { - return m.Size() -} -func (m *Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_Snapshot.DiscardUnknown(m) + Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"` + Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` } -var xxx_messageInfo_Snapshot proto.InternalMessageInfo +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnap, []int{0} } func init() { proto.RegisterType((*Snapshot)(nil), "snappb.snapshot") } - -func init() { proto.RegisterFile("snap.proto", fileDescriptor_f2e3c045ebf84d00) } - -var fileDescriptor_f2e3c045ebf84d00 = []byte{ - // 126 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c, - 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3, - 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c, - 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb, - 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 0x48, 0x88, 0x8b, 0x25, 0x25, 0xb1, 0x24, - 0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0x12, 0x39, 0xf1, 0x50, 0x8e, 0xe1, - 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e, - 0x01, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x0f, 0x32, 0xb2, 0x78, 0x00, 0x00, 0x00, -} - func (m *Snapshot) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -94,47 +60,35 @@ func (m *Snapshot) Marshal() (dAtA []byte, err error) { } func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } + dAtA[i] = 0x8 + i++ + i = encodeVarintSnap(dAtA, i, uint64(m.Crc)) if m.Data != nil { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = 
encodeVarintSnap(dAtA, i, uint64(len(m.Data))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintSnap(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) } - i = encodeVarintSnap(dAtA, i, uint64(m.Crc)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func encodeVarintSnap(dAtA []byte, offset int, v uint64) int { - offset -= sovSnap(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *Snapshot) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovSnap(uint64(m.Crc)) @@ -149,7 +103,14 @@ func (m *Snapshot) Size() (n int) { } func sovSnap(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozSnap(x uint64) (n int) { return sovSnap(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -169,7 +130,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -197,7 +158,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Crc |= uint32(b&0x7F) << shift + m.Crc |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } @@ -216,7 +177,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -225,9 +186,6 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { return ErrInvalidLengthSnap } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthSnap - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -245,9 +203,6 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthSnap } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthSnap - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -315,11 +270,8 @@ func skipSnap(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthSnap - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthSnap } return iNdEx, nil @@ -350,9 +302,6 @@ func skipSnap(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthSnap - } } return iNdEx, nil case 4: @@ -371,3 +320,17 @@ var ( ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowSnap = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("snap.proto", fileDescriptorSnap) } + +var fileDescriptorSnap = []byte{ + // 126 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c, + 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3, + 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c, + 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb, + 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 0x48, 0x88, 0x8b, 0x25, 0x25, 0xb1, 0x24, + 0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0x12, 0x39, 0xf1, 0x50, 0x8e, 0xe1, + 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e, + 0x01, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x0f, 0x32, 0xb2, 0x78, 0x00, 0x00, 
0x00, +} diff --git a/vendor/github.com/coreos/etcd/snap/snappb/snap.proto b/vendor/go.etcd.io/etcd/etcdserver/api/snap/snappb/snap.proto similarity index 100% rename from vendor/github.com/coreos/etcd/snap/snappb/snap.proto rename to vendor/go.etcd.io/etcd/etcdserver/api/snap/snappb/snap.proto diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/snap/snapshotter.go b/vendor/go.etcd.io/etcd/etcdserver/api/snap/snapshotter.go new file mode 100644 index 000000000..7e7933374 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/api/snap/snapshotter.go @@ -0,0 +1,255 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package snap + +import ( + "errors" + "fmt" + "hash/crc32" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "go.etcd.io/etcd/etcdserver/api/snap/snappb" + pioutil "go.etcd.io/etcd/pkg/ioutil" + "go.etcd.io/etcd/pkg/pbutil" + "go.etcd.io/etcd/raft" + "go.etcd.io/etcd/raft/raftpb" + + "github.com/coreos/pkg/capnslog" + "go.uber.org/zap" +) + +const snapSuffix = ".snap" + +var ( + plog = capnslog.NewPackageLogger("go.etcd.io/etcd/v3", "snap") + + ErrNoSnapshot = errors.New("snap: no available snapshot") + ErrEmptySnapshot = errors.New("snap: empty snapshot") + ErrCRCMismatch = errors.New("snap: crc mismatch") + crcTable = crc32.MakeTable(crc32.Castagnoli) + + // A map of valid files that can be present in the snap folder. 
+ validFiles = map[string]bool{ + "db": true, + } +) + +type Snapshotter struct { + lg *zap.Logger + dir string +} + +func New(lg *zap.Logger, dir string) *Snapshotter { + return &Snapshotter{ + lg: lg, + dir: dir, + } +} + +func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error { + if raft.IsEmptySnap(snapshot) { + return nil + } + return s.save(&snapshot) +} + +func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error { + start := time.Now() + + fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix) + b := pbutil.MustMarshal(snapshot) + crc := crc32.Update(0, crcTable, b) + snap := snappb.Snapshot{Crc: crc, Data: b} + d, err := snap.Marshal() + if err != nil { + return err + } + snapMarshallingSec.Observe(time.Since(start).Seconds()) + + spath := filepath.Join(s.dir, fname) + + fsyncStart := time.Now() + err = pioutil.WriteAndSyncFile(spath, d, 0666) + snapFsyncSec.Observe(time.Since(fsyncStart).Seconds()) + + if err != nil { + if s.lg != nil { + s.lg.Warn("failed to write a snap file", zap.String("path", spath), zap.Error(err)) + } + rerr := os.Remove(spath) + if rerr != nil { + if s.lg != nil { + s.lg.Warn("failed to remove a broken snap file", zap.String("path", spath), zap.Error(err)) + } else { + plog.Errorf("failed to remove broken snapshot file %s", spath) + } + } + return err + } + + snapSaveSec.Observe(time.Since(start).Seconds()) + return nil +} + +func (s *Snapshotter) Load() (*raftpb.Snapshot, error) { + names, err := s.snapNames() + if err != nil { + return nil, err + } + var snap *raftpb.Snapshot + for _, name := range names { + if snap, err = loadSnap(s.lg, s.dir, name); err == nil { + break + } + } + if err != nil { + return nil, ErrNoSnapshot + } + return snap, nil +} + +func loadSnap(lg *zap.Logger, dir, name string) (*raftpb.Snapshot, error) { + fpath := filepath.Join(dir, name) + snap, err := Read(lg, fpath) + if err != nil { + brokenPath := fpath + ".broken" + if lg != nil { + lg.Warn("failed to read a snap file", zap.String("path", fpath), zap.Error(err)) + } + if rerr := os.Rename(fpath, brokenPath); rerr != nil { + if lg != nil { + lg.Warn("failed to rename a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath), zap.Error(rerr)) + } else { + plog.Warningf("cannot rename broken snapshot file %v to %v: %v", fpath, brokenPath, rerr) + } + } else { + if lg != nil { + lg.Warn("renamed to a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath)) + } + } + } + return snap, err +} + +// Read reads the snapshot named by snapname and returns the snapshot. 
+func Read(lg *zap.Logger, snapname string) (*raftpb.Snapshot, error) { + b, err := ioutil.ReadFile(snapname) + if err != nil { + if lg != nil { + lg.Warn("failed to read a snap file", zap.String("path", snapname), zap.Error(err)) + } else { + plog.Errorf("cannot read file %v: %v", snapname, err) + } + return nil, err + } + + if len(b) == 0 { + if lg != nil { + lg.Warn("failed to read empty snapshot file", zap.String("path", snapname)) + } else { + plog.Errorf("unexpected empty snapshot") + } + return nil, ErrEmptySnapshot + } + + var serializedSnap snappb.Snapshot + if err = serializedSnap.Unmarshal(b); err != nil { + if lg != nil { + lg.Warn("failed to unmarshal snappb.Snapshot", zap.String("path", snapname), zap.Error(err)) + } else { + plog.Errorf("corrupted snapshot file %v: %v", snapname, err) + } + return nil, err + } + + if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 { + if lg != nil { + lg.Warn("failed to read empty snapshot data", zap.String("path", snapname)) + } else { + plog.Errorf("unexpected empty snapshot") + } + return nil, ErrEmptySnapshot + } + + crc := crc32.Update(0, crcTable, serializedSnap.Data) + if crc != serializedSnap.Crc { + if lg != nil { + lg.Warn("snap file is corrupt", + zap.String("path", snapname), + zap.Uint32("prev-crc", serializedSnap.Crc), + zap.Uint32("new-crc", crc), + ) + } else { + plog.Errorf("corrupted snapshot file %v: crc mismatch", snapname) + } + return nil, ErrCRCMismatch + } + + var snap raftpb.Snapshot + if err = snap.Unmarshal(serializedSnap.Data); err != nil { + if lg != nil { + lg.Warn("failed to unmarshal raftpb.Snapshot", zap.String("path", snapname), zap.Error(err)) + } else { + plog.Errorf("corrupted snapshot file %v: %v", snapname, err) + } + return nil, err + } + return &snap, nil +} + +// snapNames returns the filename of the snapshots in logical time order (from newest to oldest). +// If there is no available snapshots, an ErrNoSnapshot will be returned. +func (s *Snapshotter) snapNames() ([]string, error) { + dir, err := os.Open(s.dir) + if err != nil { + return nil, err + } + defer dir.Close() + names, err := dir.Readdirnames(-1) + if err != nil { + return nil, err + } + snaps := checkSuffix(s.lg, names) + if len(snaps) == 0 { + return nil, ErrNoSnapshot + } + sort.Sort(sort.Reverse(sort.StringSlice(snaps))) + return snaps, nil +} + +func checkSuffix(lg *zap.Logger, names []string) []string { + snaps := []string{} + for i := range names { + if strings.HasSuffix(names[i], snapSuffix) { + snaps = append(snaps, names[i]) + } else { + // If we find a file which is not a snapshot then check if it's + // a vaild file. If not throw out a warning. + if _, ok := validFiles[names[i]]; !ok { + if lg != nil { + lg.Warn("found unexpected non-snap file; skipping", zap.String("path", names[i])) + } else { + plog.Warningf("skipped unexpected non snapshot file %v", names[i]) + } + } + } + } + return snaps +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/auth/auth.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2auth/auth.go similarity index 74% rename from vendor/github.com/coreos/etcd/etcdserver/auth/auth.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2auth/auth.go index 8991675cc..b438074a4 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/auth/auth.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2auth/auth.go @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package auth implements etcd authentication. 
-package auth +// Package v2auth implements etcd authentication. +package v2auth import ( "context" @@ -26,12 +26,13 @@ import ( "strings" "time" - etcderr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/pkg/capnslog" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api/v2error" + "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/pkg/types" + "github.com/coreos/pkg/capnslog" + "go.uber.org/zap" "golang.org/x/crypto/bcrypt" ) @@ -47,7 +48,7 @@ const ( ) var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/auth") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd/v3", "etcdserver/auth") ) var rootRole = Role{ @@ -98,6 +99,7 @@ type PasswordStore interface { } type store struct { + lg *zap.Logger server doer timeout time.Duration ensuredOnce bool @@ -145,8 +147,9 @@ func authErr(hs int, s string, v ...interface{}) Error { return Error{Status: hs, Errmsg: fmt.Sprintf("auth: "+s, v...)} } -func NewStore(server doer, timeout time.Duration) Store { +func NewStore(lg *zap.Logger, server doer, timeout time.Duration) Store { s := &store{ + lg: lg, server: server, timeout: timeout, PasswordStore: passwordStore{}, @@ -157,21 +160,21 @@ func NewStore(server doer, timeout time.Duration) Store { // passwordStore implements PasswordStore using bcrypt to hash user passwords type passwordStore struct{} -func (_ passwordStore) CheckPassword(user User, password string) bool { +func (passwordStore) CheckPassword(user User, password string) bool { err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password)) return err == nil } -func (_ passwordStore) HashPassword(password string) (string, error) { +func (passwordStore) HashPassword(password string) (string, error) { hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) return string(hash), err } func (s *store) AllUsers() ([]string, error) { - resp, err := s.requestResource("/users/", false, false) + resp, err := s.requestResource("/users/", false) if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeKeyNotFound { return []string{}, nil } } @@ -208,7 +211,11 @@ func (s *store) CreateUser(user User) (User, error) { } u, err := s.createUserInternal(user) if err == nil { - plog.Noticef("created user %s", user.User) + if s.lg != nil { + s.lg.Info("created a user", zap.String("user-name", user.User)) + } else { + plog.Noticef("created user %s", user.User) + } } return u, err } @@ -225,8 +232,8 @@ func (s *store) createUserInternal(user User) (User, error) { _, err = s.createResource("/users/"+user.User, user) if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeNodeExist { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeNodeExist { return user, authErr(http.StatusConflict, "User %s already exists.", user.User) } } @@ -238,31 +245,35 @@ func (s *store) DeleteUser(name string) error { if s.AuthEnabled() && name == "root" { return authErr(http.StatusForbidden, "Cannot delete root user while auth is enabled.") } - _, err := s.deleteResource("/users/" + name) + err := s.deleteResource("/users/" + name) if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == 
v2error.EcodeKeyNotFound { return authErr(http.StatusNotFound, "User %s does not exist", name) } } return err } - plog.Noticef("deleted user %s", name) + if s.lg != nil { + s.lg.Info("deleted a user", zap.String("user-name", name)) + } else { + plog.Noticef("deleted user %s", name) + } return nil } func (s *store) UpdateUser(user User) (User, error) { old, err := s.getUser(user.User, true) if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeKeyNotFound { return user, authErr(http.StatusNotFound, "User %s doesn't exist.", user.User) } } return old, err } - newUser, err := old.merge(user, s.PasswordStore) + newUser, err := old.merge(s.lg, user, s.PasswordStore) if err != nil { return old, err } @@ -271,17 +282,21 @@ func (s *store) UpdateUser(user User) (User, error) { } _, err = s.updateResource("/users/"+user.User, newUser) if err == nil { - plog.Noticef("updated user %s", user.User) + if s.lg != nil { + s.lg.Info("updated a user", zap.String("user-name", user.User)) + } else { + plog.Noticef("updated user %s", user.User) + } } return newUser, err } func (s *store) AllRoles() ([]string, error) { nodes := []string{RootRoleName} - resp, err := s.requestResource("/roles/", false, false) + resp, err := s.requestResource("/roles/", false) if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeKeyNotFound { return nodes, nil } } @@ -303,14 +318,18 @@ func (s *store) CreateRole(role Role) error { } _, err := s.createResource("/roles/"+role.Role, role) if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeNodeExist { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeNodeExist { return authErr(http.StatusConflict, "Role %s already exists.", role.Role) } } } if err == nil { - plog.Noticef("created new role %s", role.Role) + if s.lg != nil { + s.lg.Info("created a new role", zap.String("role-name", role.Role)) + } else { + plog.Noticef("created new role %s", role.Role) + } } return err } @@ -319,16 +338,20 @@ func (s *store) DeleteRole(name string) error { if name == RootRoleName { return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", name) } - _, err := s.deleteResource("/roles/" + name) + err := s.deleteResource("/roles/" + name) if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeKeyNotFound { return authErr(http.StatusNotFound, "Role %s doesn't exist.", name) } } } if err == nil { - plog.Noticef("deleted role %s", name) + if s.lg != nil { + s.lg.Info("delete a new role", zap.String("role-name", name)) + } else { + plog.Noticef("deleted role %s", name) + } } return err } @@ -339,14 +362,14 @@ func (s *store) UpdateRole(role Role) (Role, error) { } old, err := s.getRole(role.Role, true) if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeKeyNotFound { return role, authErr(http.StatusNotFound, "Role %s doesn't exist.", role.Role) } } return old, err } - newRole, err := old.merge(role) + newRole, err := old.merge(s.lg, role) if err != nil { return old, err } @@ -355,7 +378,11 @@ func (s *store) UpdateRole(role Role) (Role, 
error) { } _, err = s.updateResource("/roles/"+role.Role, newRole) if err == nil { - plog.Noticef("updated role %s", role.Role) + if s.lg != nil { + s.lg.Info("updated a new role", zap.String("role-name", role.Role)) + } else { + plog.Noticef("updated role %s", role.Role) + } } return newRole, err } @@ -373,19 +400,42 @@ func (s *store) EnableAuth() error { return authErr(http.StatusConflict, "No root user available, please create one") } if _, err := s.getRole(GuestRoleName, true); err != nil { - plog.Printf("no guest role access found, creating default") + if s.lg != nil { + s.lg.Info( + "no guest role access found; creating default", + zap.String("role-name", GuestRoleName), + ) + } else { + plog.Printf("no guest role access found, creating default") + } if err := s.CreateRole(guestRole); err != nil { - plog.Errorf("error creating guest role. aborting auth enable.") + if s.lg != nil { + s.lg.Warn( + "failed to create a guest role; aborting auth enable", + zap.String("role-name", GuestRoleName), + zap.Error(err), + ) + } else { + plog.Errorf("error creating guest role. aborting auth enable.") + } return err } } if err := s.enableAuth(); err != nil { - plog.Errorf("error enabling auth (%v)", err) + if s.lg != nil { + s.lg.Warn("failed to enable auth", zap.Error(err)) + } else { + plog.Errorf("error enabling auth (%v)", err) + } return err } - plog.Noticef("auth: enabled auth") + if s.lg != nil { + s.lg.Info("enabled auth") + } else { + plog.Noticef("auth: enabled auth") + } return nil } @@ -396,9 +446,17 @@ func (s *store) DisableAuth() error { err := s.disableAuth() if err == nil { - plog.Noticef("auth: disabled auth") + if s.lg != nil { + s.lg.Info("disabled auth") + } else { + plog.Noticef("auth: disabled auth") + } } else { - plog.Errorf("error disabling auth (%v)", err) + if s.lg != nil { + s.lg.Warn("failed to disable auth", zap.Error(err)) + } else { + plog.Errorf("error disabling auth (%v)", err) + } } return err } @@ -407,7 +465,7 @@ func (s *store) DisableAuth() error { // is called and returns a new User with these modifications applied. Think of // all Users as immutable sets of data. Merge allows you to perform the set // operations (desired grants and revokes) atomically -func (ou User) merge(nu User, s PasswordStore) (User, error) { +func (ou User) merge(lg *zap.Logger, nu User, s PasswordStore) (User, error) { var out User if ou.User != nu.User { return out, authErr(http.StatusConflict, "Merging user data with conflicting usernames: %s %s", ou.User, nu.User) @@ -425,14 +483,30 @@ func (ou User) merge(nu User, s PasswordStore) (User, error) { currentRoles := types.NewUnsafeSet(ou.Roles...) 
for _, g := range nu.Grant { if currentRoles.Contains(g) { - plog.Noticef("granting duplicate role %s for user %s", g, nu.User) + if lg != nil { + lg.Warn( + "attempted to grant a duplicate role for a user", + zap.String("user-name", nu.User), + zap.String("role-name", g), + ) + } else { + plog.Noticef("granting duplicate role %s for user %s", g, nu.User) + } return User{}, authErr(http.StatusConflict, fmt.Sprintf("Granting duplicate role %s for user %s", g, nu.User)) } currentRoles.Add(g) } for _, r := range nu.Revoke { if !currentRoles.Contains(r) { - plog.Noticef("revoking ungranted role %s for user %s", r, nu.User) + if lg != nil { + lg.Warn( + "attempted to revoke a ungranted role for a user", + zap.String("user-name", nu.User), + zap.String("role-name", r), + ) + } else { + plog.Noticef("revoking ungranted role %s for user %s", r, nu.User) + } return User{}, authErr(http.StatusConflict, fmt.Sprintf("Revoking ungranted role %s for user %s", r, nu.User)) } currentRoles.Remove(r) @@ -444,7 +518,7 @@ func (ou User) merge(nu User, s PasswordStore) (User, error) { // merge for a role works the same as User above -- atomic Role application to // each of the substructures. -func (r Role) merge(n Role) (Role, error) { +func (r Role) merge(lg *zap.Logger, n Role) (Role, error) { var out Role var err error if r.Role != n.Role { @@ -455,7 +529,7 @@ func (r Role) merge(n Role) (Role, error) { if err != nil { return out, err } - out.Permissions, err = out.Permissions.Revoke(n.Revoke) + out.Permissions, err = out.Permissions.Revoke(lg, n.Revoke) return out, err } @@ -487,13 +561,13 @@ func (p Permissions) Grant(n *Permissions) (Permissions, error) { // Revoke removes a set of permissions to the permission object on which it is called, // returning a new permission object. -func (p Permissions) Revoke(n *Permissions) (Permissions, error) { +func (p Permissions) Revoke(lg *zap.Logger, n *Permissions) (Permissions, error) { var out Permissions var err error if n == nil { return p, nil } - out.KV, err = p.KV.Revoke(n.KV) + out.KV, err = p.KV.Revoke(lg, n.KV) return out, err } @@ -524,12 +598,19 @@ func (rw RWPermission) Grant(n RWPermission) (RWPermission, error) { // Revoke removes a set of permissions to the permission object on which it is called, // returning a new permission object. -func (rw RWPermission) Revoke(n RWPermission) (RWPermission, error) { +func (rw RWPermission) Revoke(lg *zap.Logger, n RWPermission) (RWPermission, error) { var out RWPermission currentRead := types.NewUnsafeSet(rw.Read...) for _, r := range n.Read { if !currentRead.Contains(r) { - plog.Noticef("revoking ungranted read permission %s", r) + if lg != nil { + lg.Info( + "revoking ungranted read permission", + zap.String("read-permission", r), + ) + } else { + plog.Noticef("revoking ungranted read permission %s", r) + } continue } currentRead.Remove(r) @@ -537,7 +618,14 @@ func (rw RWPermission) Revoke(n RWPermission) (RWPermission, error) { currentWrite := types.NewUnsafeSet(rw.Write...) 
for _, w := range n.Write { if !currentWrite.Contains(w) { - plog.Noticef("revoking ungranted write permission %s", w) + if lg != nil { + lg.Info( + "revoking ungranted write permission", + zap.String("write-permission", w), + ) + } else { + plog.Noticef("revoking ungranted write permission %s", w) + } continue } currentWrite.Remove(w) @@ -608,10 +696,10 @@ func attachRootRole(u User) User { } func (s *store) getUser(name string, quorum bool) (User, error) { - resp, err := s.requestResource("/users/"+name, false, quorum) + resp, err := s.requestResource("/users/"+name, quorum) if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeKeyNotFound { return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name) } } @@ -633,10 +721,10 @@ func (s *store) getRole(name string, quorum bool) (Role, error) { if name == RootRoleName { return rootRole, nil } - resp, err := s.requestResource("/roles/"+name, false, quorum) + resp, err := s.requestResource("/roles/"+name, quorum) if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeKeyNotFound { return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name) } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2auth/auth_requests.go similarity index 71% rename from vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2auth/auth_requests.go index 2464828e6..d6574ecca 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2auth/auth_requests.go @@ -12,16 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package auth +package v2auth import ( "context" "encoding/json" "path" - etcderr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api/v2error" + "go.etcd.io/etcd/etcdserver/etcdserverpb" + + "go.uber.org/zap" ) func (s *store) ensureAuthDirectories() error { @@ -30,7 +32,6 @@ func (s *store) ensureAuthDirectories() error { } for _, res := range []string{StorePermsPrefix, StorePermsPrefix + "/users/", StorePermsPrefix + "/roles/"} { ctx, cancel := context.WithTimeout(context.Background(), s.timeout) - defer cancel() pe := false rr := etcdserverpb.Request{ Method: "PUT", @@ -39,13 +40,21 @@ func (s *store) ensureAuthDirectories() error { PrevExist: &pe, } _, err := s.server.Do(ctx, rr) + cancel() if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeNodeExist { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeNodeExist { continue } } - plog.Errorf("failed to create auth directories in the store (%v)", err) + if s.lg != nil { + s.lg.Warn( + "failed to create auth directories", + zap.Error(err), + ) + } else { + plog.Errorf("failed to create auth directories in the store (%v)", err) + } return err } } @@ -60,8 +69,8 @@ func (s *store) ensureAuthDirectories() error { } _, err := s.server.Do(ctx, rr) if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeNodeExist { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeNodeExist { s.ensuredOnce = true return nil } @@ -85,27 +94,41 @@ func (s *store) detectAuth() bool { if s.server == nil { return false } - value, err := s.requestResource("/enabled", false, false) + value, err := s.requestResource("/enabled", false) if err != nil { - if e, ok := err.(*etcderr.Error); ok { - if e.ErrorCode == etcderr.EcodeKeyNotFound { + if e, ok := err.(*v2error.Error); ok { + if e.ErrorCode == v2error.EcodeKeyNotFound { return false } } - plog.Errorf("failed to detect auth settings (%s)", err) + if s.lg != nil { + s.lg.Warn( + "failed to detect auth settings", + zap.Error(err), + ) + } else { + plog.Errorf("failed to detect auth settings (%s)", err) + } return false } var u bool err = json.Unmarshal([]byte(*value.Event.Node.Value), &u) if err != nil { - plog.Errorf("internal bookkeeping value for enabled isn't valid JSON (%v)", err) + if s.lg != nil { + s.lg.Warn( + "internal bookkeeping value for enabled isn't valid JSON", + zap.Error(err), + ) + } else { + plog.Errorf("internal bookkeeping value for enabled isn't valid JSON (%v)", err) + } return false } return u } -func (s *store) requestResource(res string, dir, quorum bool) (etcdserver.Response, error) { +func (s *store) requestResource(res string, quorum bool) (etcdserver.Response, error) { ctx, cancel := context.WithTimeout(context.Background(), s.timeout) defer cancel() p := path.Join(StorePermsPrefix, res) @@ -116,7 +139,7 @@ func (s *store) requestResource(res string, dir, quorum bool) (etcdserver.Respon rr := etcdserverpb.Request{ Method: method, Path: p, - Dir: dir, + Dir: false, // TODO: always false? 
} return s.server.Do(ctx, rr) } @@ -148,19 +171,19 @@ func (s *store) setResource(res string, value interface{}, prevexist bool) (etcd return s.server.Do(ctx, rr) } -func (s *store) deleteResource(res string) (etcdserver.Response, error) { +func (s *store) deleteResource(res string) error { err := s.ensureAuthDirectories() if err != nil { - return etcdserver.Response{}, err + return err } ctx, cancel := context.WithTimeout(context.Background(), s.timeout) defer cancel() pex := true p := path.Join(StorePermsPrefix, res) - rr := etcdserverpb.Request{ + _, err = s.server.Do(ctx, etcdserverpb.Request{ Method: "DELETE", Path: p, PrevExist: &pex, - } - return s.server.Do(ctx, rr) + }) + return err } diff --git a/vendor/github.com/coreos/etcd/discovery/discovery.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2discovery/discovery.go similarity index 73% rename from vendor/github.com/coreos/etcd/discovery/discovery.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2discovery/discovery.go index 7d1fa0d05..cf770b378 100644 --- a/vendor/github.com/coreos/etcd/discovery/discovery.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2discovery/discovery.go @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package discovery provides an implementation of the cluster discovery that -// is used by etcd. -package discovery +// Package v2discovery provides an implementation of the cluster discovery that +// is used by etcd with v2 client. +package v2discovery import ( "context" @@ -29,16 +29,17 @@ import ( "strings" "time" - "github.com/coreos/etcd/client" - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" + "go.etcd.io/etcd/client" + "go.etcd.io/etcd/pkg/transport" + "go.etcd.io/etcd/pkg/types" "github.com/coreos/pkg/capnslog" "github.com/jonboulle/clockwork" + "go.uber.org/zap" ) var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "discovery") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "discovery") ErrInvalidURL = errors.New("discovery: invalid URL") ErrBadSizeKey = errors.New("discovery: size key is bad") @@ -59,8 +60,8 @@ var ( // JoinCluster will connect to the discovery service at the given url, and // register the server represented by the given id and config to the cluster -func JoinCluster(durl, dproxyurl string, id types.ID, config string) (string, error) { - d, err := newDiscovery(durl, dproxyurl, id) +func JoinCluster(lg *zap.Logger, durl, dproxyurl string, id types.ID, config string) (string, error) { + d, err := newDiscovery(lg, durl, dproxyurl, id) if err != nil { return "", err } @@ -69,8 +70,8 @@ func JoinCluster(durl, dproxyurl string, id types.ID, config string) (string, er // GetCluster will connect to the discovery service at the given url and // retrieve a string describing the cluster -func GetCluster(durl, dproxyurl string) (string, error) { - d, err := newDiscovery(durl, dproxyurl, 0) +func GetCluster(lg *zap.Logger, durl, dproxyurl string) (string, error) { + d, err := newDiscovery(lg, durl, dproxyurl, 0) if err != nil { return "", err } @@ -78,6 +79,7 @@ func GetCluster(durl, dproxyurl string) (string, error) { } type discovery struct { + lg *zap.Logger cluster string id types.ID c client.KeysAPI @@ -90,7 +92,7 @@ type discovery struct { // newProxyFunc builds a proxy function from the given string, which should // represent a URL that can be used as a proxy. It performs basic // sanitization of the URL and returns any error encountered. 
-func newProxyFunc(proxy string) (func(*http.Request) (*url.URL, error), error) { +func newProxyFunc(lg *zap.Logger, proxy string) (func(*http.Request) (*url.URL, error), error) { if proxy == "" { return nil, nil } @@ -111,18 +113,22 @@ func newProxyFunc(proxy string) (func(*http.Request) (*url.URL, error), error) { return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) } - plog.Infof("using proxy %q", proxyURL.String()) + if lg != nil { + lg.Info("running proxy with discovery", zap.String("proxy-url", proxyURL.String())) + } else { + plog.Infof("using proxy %q", proxyURL.String()) + } return http.ProxyURL(proxyURL), nil } -func newDiscovery(durl, dproxyurl string, id types.ID) (*discovery, error) { +func newDiscovery(lg *zap.Logger, durl, dproxyurl string, id types.ID) (*discovery, error) { u, err := url.Parse(durl) if err != nil { return nil, err } token := u.Path u.Path = "" - pf, err := newProxyFunc(dproxyurl) + pf, err := newProxyFunc(lg, dproxyurl) if err != nil { return nil, err } @@ -143,6 +149,7 @@ func newDiscovery(durl, dproxyurl string, id types.ID) (*discovery, error) { } dc := client.NewKeysAPIWithPrefix(c, "") return &discovery{ + lg: lg, cluster: token, c: dc, id: id, @@ -225,7 +232,17 @@ func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) { return nil, 0, 0, ErrBadDiscoveryEndpoint } if ce, ok := err.(*client.ClusterError); ok { - plog.Error(ce.Detail()) + if d.lg != nil { + d.lg.Warn( + "failed to get from discovery server", + zap.String("discovery-url", d.url.String()), + zap.String("path", path.Join(configKey, "size")), + zap.Error(err), + zap.String("err-detail", ce.Detail()), + ) + } else { + plog.Error(ce.Detail()) + } return d.checkClusterRetry() } return nil, 0, 0, err @@ -240,7 +257,17 @@ func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) { cancel() if err != nil { if ce, ok := err.(*client.ClusterError); ok { - plog.Error(ce.Detail()) + if d.lg != nil { + d.lg.Warn( + "failed to get from discovery server", + zap.String("discovery-url", d.url.String()), + zap.String("path", d.cluster), + zap.Error(err), + zap.String("err-detail", ce.Detail()), + ) + } else { + plog.Error(ce.Detail()) + } return d.checkClusterRetry() } return nil, 0, 0, err @@ -248,7 +275,7 @@ func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) { var nodes []*client.Node // append non-config keys to nodes for _, n := range resp.Node.Nodes { - if !(path.Base(n.Key) == path.Base(configKey)) { + if path.Base(n.Key) != path.Base(configKey) { nodes = append(nodes, n) } } @@ -276,7 +303,16 @@ func (d *discovery) logAndBackoffForRetry(step string) { retries = maxExpoentialRetries } retryTimeInSecond := time.Duration(0x1< 0 { i, err := getUint64(r.Form, "ttl") if err != nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeTTLNaN, + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeTTLNaN, `invalid value for "ttl"`, ) } @@ -430,8 +481,8 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque if _, ok := r.Form["prevExist"]; ok { bv, err := getBool(r.Form, "prevExist") if err != nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeInvalidField, + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeInvalidField, "invalid value for prevExist", ) } @@ -443,8 +494,8 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque if _, ok := r.Form["refresh"]; ok { bv, err := getBool(r.Form, "refresh") if err != nil { - return 
emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeInvalidField, + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeInvalidField, "invalid value for refresh", ) } @@ -452,14 +503,14 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque if refresh != nil && *refresh { val := r.FormValue("value") if _, ok := r.Form["value"]; ok && val != "" { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeRefreshValue, + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeRefreshValue, `A value was provided on a refresh`, ) } if ttl == nil { - return emptyReq, false, etcdErr.NewRequestError( - etcdErr.EcodeRefreshTTLRequired, + return emptyReq, false, v2error.NewRequestError( + v2error.EcodeRefreshTTLRequired, `No TTL value set`, ) } @@ -505,7 +556,7 @@ func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Reque func writeKeyEvent(w http.ResponseWriter, resp etcdserver.Response, noValueOnSuccess bool) error { ev := resp.Event if ev == nil { - return errors.New("cannot write empty Event!") + return errors.New("cannot write empty Event") } w.Header().Set("Content-Type", "application/json") w.Header().Set("X-Etcd-Index", fmt.Sprint(ev.EtcdIndex)) @@ -518,8 +569,8 @@ func writeKeyEvent(w http.ResponseWriter, resp etcdserver.Response, noValueOnSuc ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix) if noValueOnSuccess && - (ev.Action == store.Set || ev.Action == store.CompareAndSwap || - ev.Action == store.Create || ev.Action == store.Update) { + (ev.Action == v2store.Set || ev.Action == v2store.CompareAndSwap || + ev.Action == v2store.Create || ev.Action == v2store.Update) { ev.Node = nil ev.PrevNode = nil } @@ -527,32 +578,46 @@ func writeKeyEvent(w http.ResponseWriter, resp etcdserver.Response, noValueOnSuc } func writeKeyNoAuth(w http.ResponseWriter) { - e := etcdErr.NewError(etcdErr.EcodeUnauthorized, "Insufficient credentials", 0) + e := v2error.NewError(v2error.EcodeUnauthorized, "Insufficient credentials", 0) e.WriteTo(w) } // writeKeyError logs and writes the given Error to the ResponseWriter. // If Error is not an etcdErr, the error will be converted to an etcd error. -func writeKeyError(w http.ResponseWriter, err error) { +func writeKeyError(lg *zap.Logger, w http.ResponseWriter, err error) { if err == nil { return } switch e := err.(type) { - case *etcdErr.Error: + case *v2error.Error: e.WriteTo(w) default: switch err { case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost: - mlog.MergeError(err) + if lg != nil { + lg.Warn( + "v2 response error", + zap.String("internal-server-error", err.Error()), + ) + } else { + mlog.MergeError(err) + } default: - mlog.MergeErrorf("got unexpected response error (%v)", err) + if lg != nil { + lg.Warn( + "unexpected v2 response error", + zap.String("internal-server-error", err.Error()), + ) + } else { + mlog.MergeErrorf("got unexpected response error (%v)", err) + } } - ee := etcdErr.NewError(etcdErr.EcodeRaftInternal, err.Error(), 0) + ee := v2error.NewError(v2error.EcodeRaftInternal, err.Error(), 0) ee.WriteTo(w) } } -func handleKeyWatch(ctx context.Context, w http.ResponseWriter, resp etcdserver.Response, stream bool) { +func handleKeyWatch(ctx context.Context, lg *zap.Logger, w http.ResponseWriter, resp etcdserver.Response, stream bool) { wa := resp.Watcher defer wa.Remove() ech := wa.EventChan() @@ -588,7 +653,11 @@ func handleKeyWatch(ctx context.Context, w http.ResponseWriter, resp etcdserver. 
ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix) if err := json.NewEncoder(w).Encode(ev); err != nil { // Should never be reached - plog.Warningf("error writing event (%v)", err) + if lg != nil { + lg.Warn("failed to encode event", zap.Error(err)) + } else { + plog.Warningf("error writing event (%v)", err) + } return } if !stream { @@ -599,7 +668,7 @@ func handleKeyWatch(ctx context.Context, w http.ResponseWriter, resp etcdserver. } } -func trimEventPrefix(ev *store.Event, prefix string) *store.Event { +func trimEventPrefix(ev *v2store.Event, prefix string) *v2store.Event { if ev == nil { return nil } @@ -611,7 +680,7 @@ func trimEventPrefix(ev *store.Event, prefix string) *store.Event { return e } -func trimNodeExternPrefix(n *store.NodeExtern, prefix string) { +func trimNodeExternPrefix(n *v2store.NodeExtern, prefix string) { if n == nil { return } @@ -622,35 +691,35 @@ func trimNodeExternPrefix(n *store.NodeExtern, prefix string) { } func trimErrorPrefix(err error, prefix string) error { - if e, ok := err.(*etcdErr.Error); ok { + if e, ok := err.(*v2error.Error); ok { e.Cause = strings.TrimPrefix(e.Cause, prefix) } return err } -func unmarshalRequest(r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool { +func unmarshalRequest(lg *zap.Logger, r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool { ctype := r.Header.Get("Content-Type") semicolonPosition := strings.Index(ctype, ";") if semicolonPosition != -1 { ctype = strings.TrimSpace(strings.ToLower(ctype[0:semicolonPosition])) } if ctype != "application/json" { - writeError(w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype))) + writeError(lg, w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype))) return false } b, err := ioutil.ReadAll(r.Body) if err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error())) + writeError(lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error())) return false } if err := req.UnmarshalJSON(b); err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error())) + writeError(lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error())) return false } return true } -func getID(p string, w http.ResponseWriter) (types.ID, bool) { +func getID(lg *zap.Logger, p string, w http.ResponseWriter) (types.ID, bool) { idStr := trimPrefix(p, membersPrefix) if idStr == "" { http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) @@ -658,7 +727,7 @@ func getID(p string, w http.ResponseWriter) (types.ID, bool) { } id, err := types.IDFromString(idStr) if err != nil { - writeError(w, nil, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", idStr))) + writeError(lg, w, nil, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", idStr))) return 0, false } return id, true diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/client_auth.go similarity index 53% rename from vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2http/client_auth.go index 606e2e00b..d8d6a883a 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/client_auth.go @@ -20,28 +20,35 @@ import ( "path" "strings" - 
"github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - "github.com/coreos/etcd/etcdserver/auth" + "go.etcd.io/etcd/etcdserver/api" + "go.etcd.io/etcd/etcdserver/api/v2auth" + "go.etcd.io/etcd/etcdserver/api/v2http/httptypes" + + "go.uber.org/zap" ) type authHandler struct { - sec auth.Store + lg *zap.Logger + sec v2auth.Store cluster api.Cluster clientCertAuthEnabled bool } -func hasWriteRootAccess(sec auth.Store, r *http.Request, clientCertAuthEnabled bool) bool { +func hasWriteRootAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, clientCertAuthEnabled bool) bool { if r.Method == "GET" || r.Method == "HEAD" { return true } - return hasRootAccess(sec, r, clientCertAuthEnabled) + return hasRootAccess(lg, sec, r, clientCertAuthEnabled) } -func userFromBasicAuth(sec auth.Store, r *http.Request) *auth.User { +func userFromBasicAuth(lg *zap.Logger, sec v2auth.Store, r *http.Request) *v2auth.User { username, password, ok := r.BasicAuth() if !ok { - plog.Warningf("auth: malformed basic auth encoding") + if lg != nil { + lg.Warn("malformed basic auth encoding") + } else { + plog.Warningf("auth: malformed basic auth encoding") + } return nil } user, err := sec.GetUser(username) @@ -51,23 +58,39 @@ func userFromBasicAuth(sec auth.Store, r *http.Request) *auth.User { ok = sec.CheckPassword(user, password) if !ok { - plog.Warningf("auth: incorrect password for user: %s", username) + if lg != nil { + lg.Warn("incorrect password", zap.String("user-name", username)) + } else { + plog.Warningf("auth: incorrect password for user: %s", username) + } return nil } return &user } -func userFromClientCertificate(sec auth.Store, r *http.Request) *auth.User { +func userFromClientCertificate(lg *zap.Logger, sec v2auth.Store, r *http.Request) *v2auth.User { if r.TLS == nil { return nil } for _, chains := range r.TLS.VerifiedChains { for _, chain := range chains { - plog.Debugf("auth: found common name %s.\n", chain.Subject.CommonName) + if lg != nil { + lg.Debug("found common name", zap.String("common-name", chain.Subject.CommonName)) + } else { + plog.Debugf("auth: found common name %s.\n", chain.Subject.CommonName) + } user, err := sec.GetUser(chain.Subject.CommonName) if err == nil { - plog.Debugf("auth: authenticated user %s by cert common name.", user.User) + if lg != nil { + lg.Debug( + "authenticated a user via common name", + zap.String("user-name", user.User), + zap.String("common-name", chain.Subject.CommonName), + ) + } else { + plog.Debugf("auth: authenticated user %s by cert common name.", user.User) + } return &user } } @@ -75,7 +98,7 @@ func userFromClientCertificate(sec auth.Store, r *http.Request) *auth.User { return nil } -func hasRootAccess(sec auth.Store, r *http.Request, clientCertAuthEnabled bool) bool { +func hasRootAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, clientCertAuthEnabled bool) bool { if sec == nil { // No store means no auth available, eg, tests. 
return true @@ -84,29 +107,39 @@ func hasRootAccess(sec auth.Store, r *http.Request, clientCertAuthEnabled bool) return true } - var rootUser *auth.User + var rootUser *v2auth.User if r.Header.Get("Authorization") == "" && clientCertAuthEnabled { - rootUser = userFromClientCertificate(sec, r) + rootUser = userFromClientCertificate(lg, sec, r) if rootUser == nil { return false } } else { - rootUser = userFromBasicAuth(sec, r) + rootUser = userFromBasicAuth(lg, sec, r) if rootUser == nil { return false } } for _, role := range rootUser.Roles { - if role == auth.RootRoleName { + if role == v2auth.RootRoleName { return true } } - plog.Warningf("auth: user %s does not have the %s role for resource %s.", rootUser.User, auth.RootRoleName, r.URL.Path) + + if lg != nil { + lg.Warn( + "a user does not have root role for resource", + zap.String("root-user", rootUser.User), + zap.String("root-role-name", v2auth.RootRoleName), + zap.String("resource-path", r.URL.Path), + ) + } else { + plog.Warningf("auth: user %s does not have the %s role for resource %s.", rootUser.User, v2auth.RootRoleName, r.URL.Path) + } return false } -func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive, clientCertAuthEnabled bool) bool { +func hasKeyPrefixAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, key string, recursive, clientCertAuthEnabled bool) bool { if sec == nil { // No store means no auth available, eg, tests. return true @@ -115,16 +148,16 @@ func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive, return true } - var user *auth.User + var user *v2auth.User if r.Header.Get("Authorization") == "" { if clientCertAuthEnabled { - user = userFromClientCertificate(sec, r) + user = userFromClientCertificate(lg, sec, r) } if user == nil { - return hasGuestAccess(sec, r, key) + return hasGuestAccess(lg, sec, r, key) } } else { - user = userFromBasicAuth(sec, r) + user = userFromBasicAuth(lg, sec, r) if user == nil { return false } @@ -144,44 +177,70 @@ func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive, return true } } - plog.Warningf("auth: invalid access for user %s on key %s.", user.User, key) + + if lg != nil { + lg.Warn( + "invalid access for user on key", + zap.String("user-name", user.User), + zap.String("key", key), + ) + } else { + plog.Warningf("auth: invalid access for user %s on key %s.", user.User, key) + } return false } -func hasGuestAccess(sec auth.Store, r *http.Request, key string) bool { +func hasGuestAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, key string) bool { writeAccess := r.Method != "GET" && r.Method != "HEAD" - role, err := sec.GetRole(auth.GuestRoleName) + role, err := sec.GetRole(v2auth.GuestRoleName) if err != nil { return false } if role.HasKeyAccess(key, writeAccess) { return true } - plog.Warningf("auth: invalid access for unauthenticated user on resource %s.", key) + + if lg != nil { + lg.Warn( + "invalid access for a guest role on key", + zap.String("role-name", v2auth.GuestRoleName), + zap.String("key", key), + ) + } else { + plog.Warningf("auth: invalid access for unauthenticated user on resource %s.", key) + } return false } -func writeNoAuth(w http.ResponseWriter, r *http.Request) { +func writeNoAuth(lg *zap.Logger, w http.ResponseWriter, r *http.Request) { herr := httptypes.NewHTTPError(http.StatusUnauthorized, "Insufficient credentials") if err := herr.WriteTo(w); err != nil { - plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr) + if lg != nil { + lg.Debug( + "failed 
to write v2 HTTP error", + zap.String("remote-addr", r.RemoteAddr), + zap.Error(err), + ) + } else { + plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr) + } } } func handleAuth(mux *http.ServeMux, sh *authHandler) { - mux.HandleFunc(authPrefix+"/roles", capabilityHandler(api.AuthCapability, sh.baseRoles)) - mux.HandleFunc(authPrefix+"/roles/", capabilityHandler(api.AuthCapability, sh.handleRoles)) - mux.HandleFunc(authPrefix+"/users", capabilityHandler(api.AuthCapability, sh.baseUsers)) - mux.HandleFunc(authPrefix+"/users/", capabilityHandler(api.AuthCapability, sh.handleUsers)) - mux.HandleFunc(authPrefix+"/enable", capabilityHandler(api.AuthCapability, sh.enableDisable)) + mux.HandleFunc(authPrefix+"/roles", authCapabilityHandler(sh.baseRoles)) + mux.HandleFunc(authPrefix+"/roles/", authCapabilityHandler(sh.handleRoles)) + mux.HandleFunc(authPrefix+"/users", authCapabilityHandler(sh.baseUsers)) + mux.HandleFunc(authPrefix+"/users/", authCapabilityHandler(sh.handleUsers)) + mux.HandleFunc(authPrefix+"/enable", authCapabilityHandler(sh.enableDisable)) } func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) { if !allowMethod(w, r.Method, "GET") { return } - if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(w, r) + if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { + writeNoAuth(sh.lg, w, r) return } @@ -190,7 +249,7 @@ func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) { roles, err := sh.sec.AllRoles() if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } if roles == nil { @@ -199,18 +258,18 @@ func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) { err = r.ParseForm() if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } var rolesCollections struct { - Roles []auth.Role `json:"roles"` + Roles []v2auth.Role `json:"roles"` } for _, roleName := range roles { - var role auth.Role + var role v2auth.Role role, err = sh.sec.GetRole(roleName) if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } rolesCollections.Roles = append(rolesCollections.Roles, role) @@ -218,8 +277,16 @@ func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) { err = json.NewEncoder(w).Encode(rolesCollections) if err != nil { - plog.Warningf("baseRoles error encoding on %s", r.URL) - writeError(w, r, err) + if sh.lg != nil { + sh.lg.Warn( + "failed to encode base roles", + zap.String("url", r.URL.String()), + zap.Error(err), + ) + } else { + plog.Warningf("baseRoles error encoding on %s", r.URL) + } + writeError(sh.lg, w, r, err) return } } @@ -234,7 +301,7 @@ func (sh *authHandler) handleRoles(w http.ResponseWriter, r *http.Request) { return } if len(pieces) != 3 { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path")) + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path")) return } sh.forRole(w, r, pieces[2]) @@ -244,8 +311,8 @@ func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role stri if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { return } - if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(w, r) + if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { + writeNoAuth(sh.lg, w, r) return } w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) @@ -255,46 +322,55 @@ func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role stri case "GET": data, err := sh.sec.GetRole(role) if 
err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } err = json.NewEncoder(w).Encode(data) if err != nil { - plog.Warningf("forRole error encoding on %s", r.URL) + if sh.lg != nil { + sh.lg.Warn( + "failed to encode a role", + zap.String("url", r.URL.String()), + zap.Error(err), + ) + } else { + plog.Warningf("forRole error encoding on %s", r.URL) + } return } return + case "PUT": - var in auth.Role + var in v2auth.Role err := json.NewDecoder(r.Body).Decode(&in) if err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body.")) + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body.")) return } if in.Role != role { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON name does not match the name in the URL")) + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON name does not match the name in the URL")) return } - var out auth.Role + var out v2auth.Role // create if in.Grant.IsEmpty() && in.Revoke.IsEmpty() { err = sh.sec.CreateRole(in) if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } w.WriteHeader(http.StatusCreated) out = in } else { if !in.Permissions.IsEmpty() { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON contains both permissions and grant/revoke")) + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON contains both permissions and grant/revoke")) return } out, err = sh.sec.UpdateRole(in) if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } w.WriteHeader(http.StatusOK) @@ -302,22 +378,31 @@ func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role stri err = json.NewEncoder(w).Encode(out) if err != nil { - plog.Warningf("forRole error encoding on %s", r.URL) + if sh.lg != nil { + sh.lg.Warn( + "failed to encode a role", + zap.String("url", r.URL.String()), + zap.Error(err), + ) + } else { + plog.Warningf("forRole error encoding on %s", r.URL) + } return } return + case "DELETE": err := sh.sec.DeleteRole(role) if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } } } type userWithRoles struct { - User string `json:"user"` - Roles []auth.Role `json:"roles,omitempty"` + User string `json:"user"` + Roles []v2auth.Role `json:"roles,omitempty"` } type usersCollections struct { @@ -328,8 +413,8 @@ func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) { if !allowMethod(w, r.Method, "GET") { return } - if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(w, r) + if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { + writeNoAuth(sh.lg, w, r) return } w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) @@ -337,7 +422,7 @@ func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) { users, err := sh.sec.AllUsers() if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } if users == nil { @@ -346,22 +431,22 @@ func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) { err = r.ParseForm() if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } ucs := usersCollections{} for _, userName := range users { - var user auth.User + var user v2auth.User user, err = sh.sec.GetUser(userName) if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } uwr := userWithRoles{User: user.User} for _, roleName := range user.Roles { - 
var role auth.Role + var role v2auth.Role role, err = sh.sec.GetRole(roleName) if err != nil { continue @@ -374,8 +459,16 @@ func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) { err = json.NewEncoder(w).Encode(ucs) if err != nil { - plog.Warningf("baseUsers error encoding on %s", r.URL) - writeError(w, r, err) + if sh.lg != nil { + sh.lg.Warn( + "failed to encode users", + zap.String("url", r.URL.String()), + zap.Error(err), + ) + } else { + plog.Warningf("baseUsers error encoding on %s", r.URL) + } + writeError(sh.lg, w, r, err) return } } @@ -390,7 +483,7 @@ func (sh *authHandler) handleUsers(w http.ResponseWriter, r *http.Request) { return } if len(pieces) != 3 { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path")) + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path")) return } sh.forUser(w, r, pieces[2]) @@ -400,8 +493,8 @@ func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user stri if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { return } - if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(w, r) + if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { + writeNoAuth(sh.lg, w, r) return } w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) @@ -411,22 +504,22 @@ func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user stri case "GET": u, err := sh.sec.GetUser(user) if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } err = r.ParseForm() if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } uwr := userWithRoles{User: u.User} for _, roleName := range u.Roles { - var role auth.Role + var role v2auth.Role role, err = sh.sec.GetRole(roleName) if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } uwr.Roles = append(uwr.Roles, role) @@ -434,24 +527,33 @@ func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user stri err = json.NewEncoder(w).Encode(uwr) if err != nil { - plog.Warningf("forUser error encoding on %s", r.URL) + if sh.lg != nil { + sh.lg.Warn( + "failed to encode roles", + zap.String("url", r.URL.String()), + zap.Error(err), + ) + } else { + plog.Warningf("forUser error encoding on %s", r.URL) + } return } return + case "PUT": - var u auth.User + var u v2auth.User err := json.NewDecoder(r.Body).Decode(&u) if err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body.")) + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body.")) return } if u.User != user { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON name does not match the name in the URL")) + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON name does not match the name in the URL")) return } var ( - out auth.User + out v2auth.User created bool ) @@ -466,18 +568,18 @@ func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user stri } if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } } else { // update case if len(u.Roles) != 0 { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON contains both roles and grant/revoke")) + writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON contains both roles and grant/revoke")) return } out, err = sh.sec.UpdateUser(u) if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, 
err) return } } @@ -492,14 +594,23 @@ func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user stri err = json.NewEncoder(w).Encode(out) if err != nil { - plog.Warningf("forUser error encoding on %s", r.URL) + if sh.lg != nil { + sh.lg.Warn( + "failed to encode a user", + zap.String("url", r.URL.String()), + zap.Error(err), + ) + } else { + plog.Warningf("forUser error encoding on %s", r.URL) + } return } return + case "DELETE": err := sh.sec.DeleteUser(user) if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } } @@ -513,8 +624,8 @@ func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) { if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { return } - if !hasWriteRootAccess(sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(w, r) + if !hasWriteRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { + writeNoAuth(sh.lg, w, r) return } w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) @@ -525,18 +636,28 @@ func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) { jsonDict := enabled{isEnabled} err := json.NewEncoder(w).Encode(jsonDict) if err != nil { - plog.Warningf("error encoding auth state on %s", r.URL) + if sh.lg != nil { + sh.lg.Warn( + "failed to encode a auth state", + zap.String("url", r.URL.String()), + zap.Error(err), + ) + } else { + plog.Warningf("error encoding auth state on %s", r.URL) + } } + case "PUT": err := sh.sec.EnableAuth() if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } + case "DELETE": err := sh.sec.DisableAuth() if err != nil { - writeError(w, r, err) + writeError(sh.lg, w, r, err) return } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/etcdserver/api/v2http/doc.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2http/doc.go diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/http.go similarity index 59% rename from vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2http/http.go index 589c172db..c6956893e 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/http.go @@ -20,12 +20,13 @@ import ( "strings" "time" - "github.com/coreos/etcd/etcdserver/api/etcdhttp" - "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - "github.com/coreos/etcd/etcdserver/auth" - "github.com/coreos/etcd/pkg/logutil" + "go.etcd.io/etcd/etcdserver/api/etcdhttp" + "go.etcd.io/etcd/etcdserver/api/v2auth" + "go.etcd.io/etcd/etcdserver/api/v2http/httptypes" + "go.etcd.io/etcd/pkg/logutil" "github.com/coreos/pkg/capnslog" + "go.uber.org/zap" ) const ( @@ -34,22 +35,31 @@ const ( ) var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver/api/v2http") mlog = logutil.NewMergeLogger(plog) ) -func writeError(w http.ResponseWriter, r *http.Request, err error) { +func writeError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) { if err == nil { return } - if e, ok := err.(auth.Error); ok { + if e, ok := err.(v2auth.Error); ok { herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error()) if et := herr.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + if lg != nil { 
+ lg.Debug( + "failed to write v2 HTTP error", + zap.String("remote-addr", r.RemoteAddr), + zap.String("v2auth-error", e.Error()), + zap.Error(et), + ) + } else { + plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + } } return } - etcdhttp.WriteError(w, r, err) + etcdhttp.WriteError(lg, w, r, err) } // allowMethod verifies that the given method is one of the allowed methods, @@ -66,9 +76,18 @@ func allowMethod(w http.ResponseWriter, m string, ms ...string) bool { return false } -func requestLogger(handler http.Handler) http.Handler { +func requestLogger(lg *zap.Logger, handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - plog.Debugf("[%s] %s remote:%s", r.Method, r.RequestURI, r.RemoteAddr) + if lg != nil { + lg.Debug( + "handling HTTP request", + zap.String("method", r.Method), + zap.String("request-uri", r.RequestURI), + zap.String("remote-addr", r.RemoteAddr), + ) + } else { + plog.Debugf("[%s] %s remote:%s", r.Method, r.RequestURI, r.RemoteAddr) + } handler.ServeHTTP(w, r) }) } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/httptypes/errors.go similarity index 93% rename from vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2http/httptypes/errors.go index 0657604ca..245c0899e 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/httptypes/errors.go @@ -22,7 +22,7 @@ import ( ) var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http/httptypes") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver/api/v2http/httptypes") ) type HTTPError struct { diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/httptypes/member.go similarity index 97% rename from vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2http/httptypes/member.go index 738d74432..95fd443ff 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/httptypes/member.go @@ -19,7 +19,7 @@ package httptypes import ( "encoding/json" - "github.com/coreos/etcd/pkg/types" + "go.etcd.io/etcd/pkg/types" ) type Member struct { diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/metrics.go similarity index 77% rename from vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2http/metrics.go index fdfb0c607..14f7da0fe 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/metrics.go @@ -20,10 +20,10 @@ import ( "net/http" - etcdErr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - "github.com/coreos/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/etcdserver/api/v2error" + "go.etcd.io/etcd/etcdserver/api/v2http/httptypes" + "go.etcd.io/etcd/etcdserver/etcdserverpb" + "github.com/prometheus/client_golang/prometheus" ) @@ -44,29 +44,32 @@ var ( Help: "Counter of handle failures of requests (non-watches), by method (GET/PUT etc.) 
and code (400, 500 etc.).", }, []string{"method", "code"}) - successfulEventsHandlingTime = prometheus.NewHistogramVec( + successfulEventsHandlingSec = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Namespace: "etcd", Subsystem: "http", Name: "successful_duration_seconds", Help: "Bucketed histogram of processing time (s) of successfully handled requests (non-watches), by method (GET/PUT etc.).", - Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13), + + // lowest bucket start of upper bound 0.0005 sec (0.5 ms) with factor 2 + // highest bucket start of 0.0005 sec * 2^12 == 2.048 sec + Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13), }, []string{"method"}) ) func init() { prometheus.MustRegister(incomingEvents) prometheus.MustRegister(failedEvents) - prometheus.MustRegister(successfulEventsHandlingTime) + prometheus.MustRegister(successfulEventsHandlingSec) } func reportRequestReceived(request etcdserverpb.Request) { incomingEvents.WithLabelValues(methodFromRequest(request)).Inc() } -func reportRequestCompleted(request etcdserverpb.Request, response etcdserver.Response, startTime time.Time) { +func reportRequestCompleted(request etcdserverpb.Request, startTime time.Time) { method := methodFromRequest(request) - successfulEventsHandlingTime.WithLabelValues(method).Observe(time.Since(startTime).Seconds()) + successfulEventsHandlingSec.WithLabelValues(method).Observe(time.Since(startTime).Seconds()) } func reportRequestFailed(request etcdserverpb.Request, err error) { @@ -86,10 +89,10 @@ func codeFromError(err error) int { return http.StatusInternalServerError } switch e := err.(type) { - case *etcdErr.Error: - return (*etcdErr.Error)(e).StatusCode() + case *v2error.Error: + return e.StatusCode() case *httptypes.HTTPError: - return (*httptypes.HTTPError)(e).Code + return e.Code default: return http.StatusInternalServerError } diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/leader.go similarity index 99% rename from vendor/github.com/coreos/etcd/etcdserver/stats/leader.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2stats/leader.go index 8f6a54ff7..ca47f0f37 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/leader.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package stats +package v2stats import ( "encoding/json" diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/queue.go similarity index 99% rename from vendor/github.com/coreos/etcd/etcdserver/stats/queue.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2stats/queue.go index 635074c48..2c3dff3d0 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/queue.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package stats +package v2stats import ( "sync" diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/server.go similarity index 98% rename from vendor/github.com/coreos/etcd/etcdserver/stats/server.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2stats/server.go index b026e4480..c4accc735 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/server.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package stats +package v2stats import ( "encoding/json" @@ -20,7 +20,7 @@ import ( "sync" "time" - "github.com/coreos/etcd/raft" + "go.etcd.io/etcd/raft" ) // ServerStats encapsulates various statistics about an EtcdServer and its diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/stats.go similarity index 84% rename from vendor/github.com/coreos/etcd/etcdserver/stats/stats.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2stats/stats.go index 2b5f7071a..c50a20076 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/stats.go @@ -12,14 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package stats defines a standard interface for etcd cluster statistics. -package stats +// Package v2stats defines a standard interface for etcd cluster statistics. +package v2stats import "github.com/coreos/pkg/capnslog" -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/stats") -) +var plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver/stats") type Stats interface { // SelfStats returns the struct representing statistics of this server diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2store/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/doc.go new file mode 100644 index 000000000..1933e4cd5 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v2store defines etcd's in-memory key/value store in v2 API. +// To be deprecated in favor of v3 storage. +package v2store diff --git a/vendor/github.com/coreos/etcd/store/event.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/event.go similarity index 99% rename from vendor/github.com/coreos/etcd/store/event.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2store/event.go index efcddb0e0..33e901744 100644 --- a/vendor/github.com/coreos/etcd/store/event.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/event.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package store +package v2store const ( Get = "get" diff --git a/vendor/github.com/coreos/etcd/store/event_history.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/event_history.go similarity index 94% rename from vendor/github.com/coreos/etcd/store/event_history.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2store/event_history.go index 235d87a26..e4a969f37 100644 --- a/vendor/github.com/coreos/etcd/store/event_history.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/event_history.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package store +package v2store import ( "fmt" @@ -20,7 +20,7 @@ import ( "strings" "sync" - etcdErr "github.com/coreos/etcd/error" + "go.etcd.io/etcd/etcdserver/api/v2error" ) type EventHistory struct { @@ -55,14 +55,14 @@ func (eh *EventHistory) addEvent(e *Event) *Event { // scan enumerates events from the index history and stops at the first point // where the key matches. -func (eh *EventHistory) scan(key string, recursive bool, index uint64) (*Event, *etcdErr.Error) { +func (eh *EventHistory) scan(key string, recursive bool, index uint64) (*Event, *v2error.Error) { eh.rwl.RLock() defer eh.rwl.RUnlock() // index should be after the event history's StartIndex if index < eh.StartIndex { return nil, - etcdErr.NewError(etcdErr.EcodeEventIndexCleared, + v2error.NewError(v2error.EcodeEventIndexCleared, fmt.Sprintf("the requested history has been cleared [%v/%v]", eh.StartIndex, index), 0) } @@ -79,7 +79,7 @@ func (eh *EventHistory) scan(key string, recursive bool, index uint64) (*Event, e := eh.Queue.Events[i] if !e.Refresh { - ok := (e.Node.Key == key) + ok := e.Node.Key == key if recursive { // add tailing slash diff --git a/vendor/github.com/coreos/etcd/store/event_queue.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/event_queue.go similarity index 98% rename from vendor/github.com/coreos/etcd/store/event_queue.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2store/event_queue.go index 767b83591..7ea03de8c 100644 --- a/vendor/github.com/coreos/etcd/store/event_queue.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/event_queue.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package store +package v2store type eventQueue struct { Events []*Event diff --git a/vendor/github.com/coreos/etcd/store/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/metrics.go similarity index 84% rename from vendor/github.com/coreos/etcd/store/metrics.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2store/metrics.go index 077c0fa23..5adea1efd 100644 --- a/vendor/github.com/coreos/etcd/store/metrics.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/metrics.go @@ -12,11 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -package store +package v2store -import ( - "github.com/prometheus/client_golang/prometheus" -) +import "github.com/prometheus/client_golang/prometheus" // Set of raw Prometheus metrics. 
// Labels @@ -97,22 +95,22 @@ func init() { prometheus.MustRegister(watcherCount) } -func reportReadSuccess(read_action string) { - readCounter.WithLabelValues(read_action).Inc() +func reportReadSuccess(readAction string) { + readCounter.WithLabelValues(readAction).Inc() } -func reportReadFailure(read_action string) { - readCounter.WithLabelValues(read_action).Inc() - readFailedCounter.WithLabelValues(read_action).Inc() +func reportReadFailure(readAction string) { + readCounter.WithLabelValues(readAction).Inc() + readFailedCounter.WithLabelValues(readAction).Inc() } -func reportWriteSuccess(write_action string) { - writeCounter.WithLabelValues(write_action).Inc() +func reportWriteSuccess(writeAction string) { + writeCounter.WithLabelValues(writeAction).Inc() } -func reportWriteFailure(write_action string) { - writeCounter.WithLabelValues(write_action).Inc() - writeFailedCounter.WithLabelValues(write_action).Inc() +func reportWriteFailure(writeAction string) { + writeCounter.WithLabelValues(writeAction).Inc() + writeFailedCounter.WithLabelValues(writeAction).Inc() } func reportExpiredKey() { diff --git a/vendor/github.com/coreos/etcd/store/node.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/node.go similarity index 90% rename from vendor/github.com/coreos/etcd/store/node.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2store/node.go index c3c87431c..38a6984fb 100644 --- a/vendor/github.com/coreos/etcd/store/node.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/node.go @@ -12,14 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -package store +package v2store import ( "path" "sort" "time" - etcdErr "github.com/coreos/etcd/error" + "go.etcd.io/etcd/etcdserver/api/v2error" + "github.com/jonboulle/clockwork" ) @@ -106,9 +107,9 @@ func (n *node) IsDir() bool { // Read function gets the value of the node. // If the receiver node is not a key-value pair, a "Not A File" error will be returned. -func (n *node) Read() (string, *etcdErr.Error) { +func (n *node) Read() (string, *v2error.Error) { if n.IsDir() { - return "", etcdErr.NewError(etcdErr.EcodeNotFile, "", n.store.CurrentIndex) + return "", v2error.NewError(v2error.EcodeNotFile, "", n.store.CurrentIndex) } return n.Value, nil @@ -116,9 +117,9 @@ func (n *node) Read() (string, *etcdErr.Error) { // Write function set the value of the node to the given value. // If the receiver node is a directory, a "Not A File" error will be returned. -func (n *node) Write(value string, index uint64) *etcdErr.Error { +func (n *node) Write(value string, index uint64) *v2error.Error { if n.IsDir() { - return etcdErr.NewError(etcdErr.EcodeNotFile, "", n.store.CurrentIndex) + return v2error.NewError(v2error.EcodeNotFile, "", n.store.CurrentIndex) } n.Value = value @@ -149,9 +150,9 @@ func (n *node) expirationAndTTL(clock clockwork.Clock) (*time.Time, int64) { // List function return a slice of nodes under the receiver node. // If the receiver node is not a directory, a "Not A Directory" error will be returned. -func (n *node) List() ([]*node, *etcdErr.Error) { +func (n *node) List() ([]*node, *v2error.Error) { if !n.IsDir() { - return nil, etcdErr.NewError(etcdErr.EcodeNotDir, "", n.store.CurrentIndex) + return nil, v2error.NewError(v2error.EcodeNotDir, "", n.store.CurrentIndex) } nodes := make([]*node, len(n.Children)) @@ -167,9 +168,9 @@ func (n *node) List() ([]*node, *etcdErr.Error) { // GetChild function returns the child node under the directory node. 
// On success, it returns the file node -func (n *node) GetChild(name string) (*node, *etcdErr.Error) { +func (n *node) GetChild(name string) (*node, *v2error.Error) { if !n.IsDir() { - return nil, etcdErr.NewError(etcdErr.EcodeNotDir, n.Path, n.store.CurrentIndex) + return nil, v2error.NewError(v2error.EcodeNotDir, n.Path, n.store.CurrentIndex) } child, ok := n.Children[name] @@ -185,15 +186,15 @@ func (n *node) GetChild(name string) (*node, *etcdErr.Error) { // If the receiver is not a directory, a "Not A Directory" error will be returned. // If there is an existing node with the same name under the directory, a "Already Exist" // error will be returned -func (n *node) Add(child *node) *etcdErr.Error { +func (n *node) Add(child *node) *v2error.Error { if !n.IsDir() { - return etcdErr.NewError(etcdErr.EcodeNotDir, "", n.store.CurrentIndex) + return v2error.NewError(v2error.EcodeNotDir, "", n.store.CurrentIndex) } _, name := path.Split(child.Path) if _, ok := n.Children[name]; ok { - return etcdErr.NewError(etcdErr.EcodeNodeExist, "", n.store.CurrentIndex) + return v2error.NewError(v2error.EcodeNodeExist, "", n.store.CurrentIndex) } n.Children[name] = child @@ -202,7 +203,7 @@ func (n *node) Add(child *node) *etcdErr.Error { } // Remove function remove the node. -func (n *node) Remove(dir, recursive bool, callback func(path string)) *etcdErr.Error { +func (n *node) Remove(dir, recursive bool, callback func(path string)) *v2error.Error { if !n.IsDir() { // key-value pair _, name := path.Split(n.Path) @@ -224,13 +225,13 @@ func (n *node) Remove(dir, recursive bool, callback func(path string)) *etcdErr. if !dir { // cannot delete a directory without dir set to true - return etcdErr.NewError(etcdErr.EcodeNotFile, n.Path, n.store.CurrentIndex) + return v2error.NewError(v2error.EcodeNotFile, n.Path, n.store.CurrentIndex) } if len(n.Children) != 0 && !recursive { // cannot delete a directory if it is not empty and the operation // is not recursive - return etcdErr.NewError(etcdErr.EcodeDirNotEmpty, n.Path, n.store.CurrentIndex) + return v2error.NewError(v2error.EcodeDirNotEmpty, n.Path, n.store.CurrentIndex) } for _, child := range n.Children { // delete all children @@ -337,8 +338,8 @@ func (n *node) UpdateTTL(expireTime time.Time) { // Compare function compares node index and value with provided ones. // second result value explains result and equals to one of Compare.. constants func (n *node) Compare(prevValue string, prevIndex uint64) (ok bool, which int) { - indexMatch := (prevIndex == 0 || n.ModifiedIndex == prevIndex) - valueMatch := (prevValue == "" || n.Value == prevValue) + indexMatch := prevIndex == 0 || n.ModifiedIndex == prevIndex + valueMatch := prevValue == "" || n.Value == prevValue ok = valueMatch && indexMatch switch { case valueMatch && indexMatch: diff --git a/vendor/github.com/coreos/etcd/store/node_extern.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/node_extern.go similarity index 99% rename from vendor/github.com/coreos/etcd/store/node_extern.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2store/node_extern.go index 7ba870cbe..b3bf5f3c9 100644 --- a/vendor/github.com/coreos/etcd/store/node_extern.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/node_extern.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package store +package v2store import ( "sort" diff --git a/vendor/github.com/coreos/etcd/store/stats.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/stats.go similarity index 99% rename from vendor/github.com/coreos/etcd/store/stats.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2store/stats.go index ce464dda6..45bc97f01 100644 --- a/vendor/github.com/coreos/etcd/store/stats.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/stats.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package store +package v2store import ( "encoding/json" diff --git a/vendor/github.com/coreos/etcd/store/store.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/store.go similarity index 92% rename from vendor/github.com/coreos/etcd/store/store.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2store/store.go index edf7f2194..ce940436e 100644 --- a/vendor/github.com/coreos/etcd/store/store.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/store.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package store +package v2store import ( "encoding/json" @@ -23,8 +23,9 @@ import ( "sync" "time" - etcdErr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/pkg/types" + "go.etcd.io/etcd/etcdserver/api/v2error" + "go.etcd.io/etcd/pkg/types" + "github.com/jonboulle/clockwork" ) @@ -119,7 +120,7 @@ func (s *store) Index() uint64 { // If recursive is true, it will return all the content under the node path. // If sorted is true, it will sort the content by keys. func (s *store) Get(nodePath string, recursive, sorted bool) (*Event, error) { - var err *etcdErr.Error + var err *v2error.Error s.worldLock.RLock() defer s.worldLock.RUnlock() @@ -159,7 +160,7 @@ func (s *store) Get(nodePath string, recursive, sorted bool) (*Event, error) { // If the node has already existed, create will fail. // If any node on the path is a file, create will fail. func (s *store) Create(nodePath string, dir bool, value string, unique bool, expireOpts TTLOptionSet) (*Event, error) { - var err *etcdErr.Error + var err *v2error.Error s.worldLock.Lock() defer s.worldLock.Unlock() @@ -188,7 +189,7 @@ func (s *store) Create(nodePath string, dir bool, value string, unique bool, exp // Set creates or replace the node at nodePath. 
func (s *store) Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error) { - var err *etcdErr.Error + var err *v2error.Error s.worldLock.Lock() defer s.worldLock.Unlock() @@ -206,7 +207,7 @@ func (s *store) Set(nodePath string, dir bool, value string, expireOpts TTLOptio // Get prevNode value n, getErr := s.internalGet(nodePath) - if getErr != nil && getErr.ErrorCode != etcdErr.EcodeKeyNotFound { + if getErr != nil && getErr.ErrorCode != v2error.EcodeKeyNotFound { err = getErr return nil, err } @@ -215,9 +216,8 @@ func (s *store) Set(nodePath string, dir bool, value string, expireOpts TTLOptio if getErr != nil { err = getErr return nil, err - } else { - value = n.Value } + value = n.Value } // Set new value @@ -259,7 +259,7 @@ func getCompareFailCause(n *node, which int, prevValue string, prevIndex uint64) func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint64, value string, expireOpts TTLOptionSet) (*Event, error) { - var err *etcdErr.Error + var err *v2error.Error s.worldLock.Lock() defer s.worldLock.Unlock() @@ -278,7 +278,7 @@ func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint nodePath = path.Clean(path.Join("/", nodePath)) // we do not allow the user to change "/" if s.readonlySet.Contains(nodePath) { - return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", s.CurrentIndex) + return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex) } n, err := s.internalGet(nodePath) @@ -286,7 +286,7 @@ func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint return nil, err } if n.IsDir() { // can only compare and swap file - err = etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, s.CurrentIndex) + err = v2error.NewError(v2error.EcodeNotFile, nodePath, s.CurrentIndex) return nil, err } @@ -294,7 +294,7 @@ func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint // Command will be executed, only if both of the tests are successful. if ok, which := n.Compare(prevValue, prevIndex); !ok { cause := getCompareFailCause(n, which, prevValue, prevIndex) - err = etcdErr.NewError(etcdErr.EcodeTestFailed, cause, s.CurrentIndex) + err = v2error.NewError(v2error.EcodeTestFailed, cause, s.CurrentIndex) return nil, err } @@ -332,7 +332,7 @@ func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint // Delete deletes the node at the given path. // If the node is a directory, recursive must be true to delete it. 
func (s *store) Delete(nodePath string, dir, recursive bool) (*Event, error) { - var err *etcdErr.Error + var err *v2error.Error s.worldLock.Lock() defer s.worldLock.Unlock() @@ -351,7 +351,7 @@ func (s *store) Delete(nodePath string, dir, recursive bool) (*Event, error) { nodePath = path.Clean(path.Join("/", nodePath)) // we do not allow the user to change "/" if s.readonlySet.Contains(nodePath) { - return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", s.CurrentIndex) + return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex) } // recursive implies dir @@ -393,7 +393,7 @@ func (s *store) Delete(nodePath string, dir, recursive bool) (*Event, error) { } func (s *store) CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error) { - var err *etcdErr.Error + var err *v2error.Error s.worldLock.Lock() defer s.worldLock.Unlock() @@ -416,14 +416,14 @@ func (s *store) CompareAndDelete(nodePath string, prevValue string, prevIndex ui return nil, err } if n.IsDir() { // can only compare and delete file - return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, s.CurrentIndex) + return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, s.CurrentIndex) } // If both of the prevValue and prevIndex are given, we will test both of them. // Command will be executed, only if both of the tests are successful. if ok, which := n.Compare(prevValue, prevIndex); !ok { cause := getCompareFailCause(n, which, prevValue, prevIndex) - return nil, etcdErr.NewError(etcdErr.EcodeTestFailed, cause, s.CurrentIndex) + return nil, v2error.NewError(v2error.EcodeTestFailed, cause, s.CurrentIndex) } // update etcd index @@ -466,11 +466,11 @@ func (s *store) Watch(key string, recursive, stream bool, sinceIndex uint64) (Wa } // walk walks all the nodePath and apply the walkFunc on each directory -func (s *store) walk(nodePath string, walkFunc func(prev *node, component string) (*node, *etcdErr.Error)) (*node, *etcdErr.Error) { +func (s *store) walk(nodePath string, walkFunc func(prev *node, component string) (*node, *v2error.Error)) (*node, *v2error.Error) { components := strings.Split(nodePath, "/") curr := s.Root - var err *etcdErr.Error + var err *v2error.Error for i := 1; i < len(components); i++ { if len(components[i]) == 0 { // ignore empty string @@ -490,7 +490,7 @@ func (s *store) walk(nodePath string, walkFunc func(prev *node, component string // If the node is a file, the value and the ttl can be updated. // If the node is a directory, only the ttl can be updated. 
func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error) { - var err *etcdErr.Error + var err *v2error.Error s.worldLock.Lock() defer s.worldLock.Unlock() @@ -509,7 +509,7 @@ func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet nodePath = path.Clean(path.Join("/", nodePath)) // we do not allow the user to change "/" if s.readonlySet.Contains(nodePath) { - return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", s.CurrentIndex) + return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex) } currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1 @@ -520,7 +520,7 @@ func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet } if n.IsDir() && len(newValue) != 0 { // if the node is a directory, we cannot update value to non-empty - return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, currIndex) + return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, currIndex) } if expireOpts.Refresh { @@ -560,7 +560,7 @@ func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet } func (s *store) internalCreate(nodePath string, dir bool, value string, unique, replace bool, - expireTime time.Time, action string) (*Event, *etcdErr.Error) { + expireTime time.Time, action string) (*Event, *v2error.Error) { currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1 @@ -572,7 +572,7 @@ func (s *store) internalCreate(nodePath string, dir bool, value string, unique, // we do not allow the user to change "/" if s.readonlySet.Contains(nodePath) { - return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", currIndex) + return nil, v2error.NewError(v2error.EcodeRootROnly, "/", currIndex) } // Assume expire times that are way in the past are @@ -602,13 +602,13 @@ func (s *store) internalCreate(nodePath string, dir bool, value string, unique, if n != nil { if replace { if n.IsDir() { - return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, currIndex) + return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, currIndex) } e.PrevNode = n.Repr(false, false, s.clock) n.Remove(false, false, nil) } else { - return nil, etcdErr.NewError(etcdErr.EcodeNodeExist, nodePath, currIndex) + return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, currIndex) } } @@ -641,13 +641,13 @@ func (s *store) internalCreate(nodePath string, dir bool, value string, unique, } // InternalGet gets the node of the given nodePath. -func (s *store) internalGet(nodePath string) (*node, *etcdErr.Error) { +func (s *store) internalGet(nodePath string) (*node, *v2error.Error) { nodePath = path.Clean(path.Join("/", nodePath)) - walkFunc := func(parent *node, name string) (*node, *etcdErr.Error) { + walkFunc := func(parent *node, name string) (*node, *v2error.Error) { if !parent.IsDir() { - err := etcdErr.NewError(etcdErr.EcodeNotDir, parent.Path, s.CurrentIndex) + err := v2error.NewError(v2error.EcodeNotDir, parent.Path, s.CurrentIndex) return nil, err } @@ -656,7 +656,7 @@ func (s *store) internalGet(nodePath string) (*node, *etcdErr.Error) { return child, nil } - return nil, etcdErr.NewError(etcdErr.EcodeKeyNotFound, path.Join(parent.Path, name), s.CurrentIndex) + return nil, v2error.NewError(v2error.EcodeKeyNotFound, path.Join(parent.Path, name), s.CurrentIndex) } f, err := s.walk(nodePath, walkFunc) @@ -706,7 +706,7 @@ func (s *store) DeleteExpiredKeys(cutoff time.Time) { // If it is a directory, this function will return the pointer to that node. 
// If it does not exist, this function will create a new directory and return the pointer to that node. // If it is a file, this function will return error. -func (s *store) checkDir(parent *node, dirName string) (*node, *etcdErr.Error) { +func (s *store) checkDir(parent *node, dirName string) (*node, *v2error.Error) { node, ok := parent.Children[dirName] if ok { @@ -714,7 +714,7 @@ func (s *store) checkDir(parent *node, dirName string) (*node, *etcdErr.Error) { return node, nil } - return nil, etcdErr.NewError(etcdErr.EcodeNotDir, node.Path, s.CurrentIndex) + return nil, v2error.NewError(v2error.EcodeNotDir, node.Path, s.CurrentIndex) } n := newDir(s, path.Join(parent.Path, dirName), s.CurrentIndex+1, parent, Permanent) diff --git a/vendor/github.com/coreos/etcd/store/ttl_key_heap.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/ttl_key_heap.go similarity index 97% rename from vendor/github.com/coreos/etcd/store/ttl_key_heap.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2store/ttl_key_heap.go index 21ae9b7c6..477d2b9f3 100644 --- a/vendor/github.com/coreos/etcd/store/ttl_key_heap.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/ttl_key_heap.go @@ -12,11 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -package store +package v2store -import ( - "container/heap" -) +import "container/heap" // An TTLKeyHeap is a min-heap of TTLKeys order by expiration time type ttlKeyHeap struct { diff --git a/vendor/github.com/coreos/etcd/store/watcher.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/watcher.go similarity index 99% rename from vendor/github.com/coreos/etcd/store/watcher.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2store/watcher.go index a236ec777..4b1e846a2 100644 --- a/vendor/github.com/coreos/etcd/store/watcher.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/watcher.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package store +package v2store type Watcher interface { EventChan() chan *Event diff --git a/vendor/github.com/coreos/etcd/store/watcher_hub.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/watcher_hub.go similarity index 97% rename from vendor/github.com/coreos/etcd/store/watcher_hub.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2store/watcher_hub.go index 13c23e391..a452e7e95 100644 --- a/vendor/github.com/coreos/etcd/store/watcher_hub.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/watcher_hub.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package store +package v2store import ( "container/list" @@ -21,7 +21,7 @@ import ( "sync" "sync/atomic" - etcdErr "github.com/coreos/etcd/error" + "go.etcd.io/etcd/etcdserver/api/v2error" ) // A watcherHub contains all subscribed watchers @@ -56,7 +56,7 @@ func newWatchHub(capacity int) *watcherHub { // If recursive is true, the first change after index under key will be sent to the event channel of the watcher. // If recursive is false, the first change after index at key will be sent to the event channel of the watcher. // If index is zero, watch will start from the current index + 1. 
-func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeIndex uint64) (Watcher, *etcdErr.Error) { +func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeIndex uint64) (Watcher, *v2error.Error) { reportWatchRequest() event, err := wh.EventHistory.scan(key, recursive, index) @@ -151,7 +151,7 @@ func (wh *watcherHub) notifyWatchers(e *Event, nodePath string, deleted bool) { w, _ := curr.Value.(*watcher) - originalPath := (e.Node.Key == nodePath) + originalPath := e.Node.Key == nodePath if (originalPath || !isHidden(nodePath, e.Node.Key)) && w.notify(e, originalPath, deleted) { if !w.stream { // do not remove the stream watcher // if we successfully notify a watcher diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/cluster.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/cluster.go similarity index 92% rename from vendor/github.com/coreos/etcd/etcdserver/api/v2v3/cluster.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2v3/cluster.go index b53e6d7c8..a22e4afad 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/cluster.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/cluster.go @@ -15,8 +15,8 @@ package v2v3 import ( - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" + "go.etcd.io/etcd/etcdserver/api/membership" + "go.etcd.io/etcd/pkg/types" "github.com/coreos/go-semver/semver" ) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/etcdserver/api/v2v3/doc.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2v3/doc.go diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/server.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/server.go similarity index 79% rename from vendor/github.com/coreos/etcd/etcdserver/api/v2v3/server.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2v3/server.go index 2ef63ce68..5ff9b96c0 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/server.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/server.go @@ -19,14 +19,15 @@ import ( "net/http" "time" - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api" + "go.etcd.io/etcd/etcdserver/api/membership" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/pkg/types" "github.com/coreos/go-semver/semver" + "go.uber.org/zap" ) type fakeStats struct{} @@ -36,13 +37,14 @@ func (s *fakeStats) LeaderStats() []byte { return nil } func (s *fakeStats) StoreStats() []byte { return nil } type v2v3Server struct { + lg *zap.Logger c *clientv3.Client store *v2v3Store fakeStats } -func NewServer(c *clientv3.Client, pfx string) etcdserver.ServerPeer { - return &v2v3Server{c: c, store: newStore(c, pfx)} +func NewServer(lg *zap.Logger, c *clientv3.Client, pfx string) etcdserver.ServerPeer { + return &v2v3Server{lg: lg, c: c, store: newStore(c, pfx)} } func (s *v2v3Server) ClientCertAuthEnabled() bool { return false } @@ -61,6 +63,7 @@ func (s *v2v3Server) Leader() types.ID { } func (s *v2v3Server) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { + // adding member as learner is not supported by V2 Server. 
resp, err := s.c.MemberAdd(ctx, memb.PeerURLs) if err != nil { return nil, err @@ -76,6 +79,14 @@ func (s *v2v3Server) RemoveMember(ctx context.Context, id uint64) ([]*membership return v3MembersToMembership(resp.Members), nil } +func (s *v2v3Server) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) { + resp, err := s.c.MemberPromote(ctx, id) + if err != nil { + return nil, err + } + return v3MembersToMembership(resp.Members), nil +} + func (s *v2v3Server) UpdateMember(ctx context.Context, m membership.Member) ([]*membership.Member, error) { resp, err := s.c.MemberUpdate(ctx, uint64(m.ID), m.PeerURLs) if err != nil { @@ -90,7 +101,8 @@ func v3MembersToMembership(v3membs []*pb.Member) []*membership.Member { membs[i] = &membership.Member{ ID: types.ID(m.ID), RaftAttributes: membership.RaftAttributes{ - PeerURLs: m.PeerURLs, + PeerURLs: m.PeerURLs, + IsLearner: m.IsLearner, }, Attributes: membership.Attributes{ Name: m.Name, @@ -106,7 +118,7 @@ func (s *v2v3Server) Cluster() api.Cluster { return s } func (s *v2v3Server) Alarms() []*pb.AlarmMember { return nil } func (s *v2v3Server) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) { - applier := etcdserver.NewApplierV2(s.store, nil) + applier := etcdserver.NewApplierV2(s.lg, s.store, nil) reqHandler := etcdserver.NewStoreRequestV2Handler(s.store, applier) req := (*etcdserver.RequestV2)(&r) resp, err := req.Handle(ctx, reqHandler) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/store.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/store.go similarity index 75% rename from vendor/github.com/coreos/etcd/etcdserver/api/v2v3/store.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2v3/store.go index 444f93f3a..f1c7ab378 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/store.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/store.go @@ -18,14 +18,15 @@ import ( "context" "fmt" "path" + "sort" "strings" "time" - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/clientv3/concurrency" - etcdErr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/mvcc/mvccpb" - "github.com/coreos/etcd/store" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/clientv3/concurrency" + "go.etcd.io/etcd/etcdserver/api/v2error" + "go.etcd.io/etcd/etcdserver/api/v2store" + "go.etcd.io/etcd/mvcc/mvccpb" ) // store implements the Store interface for V2 using @@ -41,13 +42,13 @@ const maxPathDepth = 63 var errUnsupported = fmt.Errorf("TTLs are unsupported") -func NewStore(c *clientv3.Client, pfx string) store.Store { return newStore(c, pfx) } +func NewStore(c *clientv3.Client, pfx string) v2store.Store { return newStore(c, pfx) } func newStore(c *clientv3.Client, pfx string) *v2v3Store { return &v2v3Store{c, pfx, c.Ctx()} } func (s *v2v3Store) Index() uint64 { panic("STUB") } -func (s *v2v3Store) Get(nodePath string, recursive, sorted bool) (*store.Event, error) { +func (s *v2v3Store) Get(nodePath string, recursive, sorted bool) (*v2store.Event, error) { key := s.mkPath(nodePath) resp, err := s.c.Txn(s.ctx).Then( clientv3.OpGet(key+"/"), @@ -66,9 +67,9 @@ func (s *v2v3Store) Get(nodePath string, recursive, sorted bool) (*store.Event, if len(kvs) > 0 { cidx, midx = mkV2Rev(kvs[0].CreateRevision), mkV2Rev(kvs[0].ModRevision) } - return &store.Event{ - Action: store.Get, - Node: &store.NodeExtern{ + return &v2store.Event{ + Action: v2store.Get, + Node: &v2store.NodeExtern{ Key: nodePath, Dir: true, Nodes: nodes, @@ -81,23 +82,26 @@ func (s *v2v3Store) Get(nodePath string, recursive, sorted bool) 
(*store.Event, kvs := resp.Responses[1].GetResponseRange().Kvs if len(kvs) == 0 { - return nil, etcdErr.NewError(etcdErr.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision)) + return nil, v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision)) } - return &store.Event{ - Action: store.Get, + return &v2store.Event{ + Action: v2store.Get, Node: s.mkV2Node(kvs[0]), EtcdIndex: mkV2Rev(resp.Header.Revision), }, nil } -func (s *v2v3Store) getDir(nodePath string, recursive, sorted bool, rev int64) ([]*store.NodeExtern, error) { +func (s *v2v3Store) getDir(nodePath string, recursive, sorted bool, rev int64) ([]*v2store.NodeExtern, error) { rootNodes, err := s.getDirDepth(nodePath, 1, rev) if err != nil || !recursive { + if sorted { + sort.Sort(v2store.NodeExterns(rootNodes)) + } return rootNodes, err } nextNodes := rootNodes - nodes := make(map[string]*store.NodeExtern) + nodes := make(map[string]*v2store.NodeExtern) // Breadth walk the subdirectories for i := 2; len(nextNodes) > 0; i++ { for _, n := range nextNodes { @@ -110,17 +114,21 @@ func (s *v2v3Store) getDir(nodePath string, recursive, sorted bool, rev int64) ( return nil, err } } + + if sorted { + sort.Sort(v2store.NodeExterns(rootNodes)) + } return rootNodes, nil } -func (s *v2v3Store) getDirDepth(nodePath string, depth int, rev int64) ([]*store.NodeExtern, error) { +func (s *v2v3Store) getDirDepth(nodePath string, depth int, rev int64) ([]*v2store.NodeExtern, error) { pd := s.mkPathDepth(nodePath, depth) resp, err := s.c.Get(s.ctx, pd, clientv3.WithPrefix(), clientv3.WithRev(rev)) if err != nil { return nil, err } - nodes := make([]*store.NodeExtern, len(resp.Kvs)) + nodes := make([]*v2store.NodeExtern, len(resp.Kvs)) for i, kv := range resp.Kvs { nodes[i] = s.mkV2Node(kv) } @@ -131,38 +139,48 @@ func (s *v2v3Store) Set( nodePath string, dir bool, value string, - expireOpts store.TTLOptionSet, -) (*store.Event, error) { + expireOpts v2store.TTLOptionSet, +) (*v2store.Event, error) { if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { return nil, errUnsupported } if isRoot(nodePath) { - return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0) + return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) } ecode := 0 applyf := func(stm concurrency.STM) error { - parent := path.Dir(nodePath) - if !isRoot(parent) && stm.Rev(s.mkPath(parent)+"/") == 0 { - ecode = etcdErr.EcodeKeyNotFound - return nil + // build path if any directories in path do not exist + dirs := []string{} + for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) { + pp := s.mkPath(p) + if stm.Rev(pp) > 0 { + ecode = v2error.EcodeNotDir + return nil + } + if stm.Rev(pp+"/") == 0 { + dirs = append(dirs, pp+"/") + } + } + for _, d := range dirs { + stm.Put(d, "") } key := s.mkPath(nodePath) if dir { if stm.Rev(key) != 0 { // exists as non-dir - ecode = etcdErr.EcodeNotDir + ecode = v2error.EcodeNotDir return nil } key = key + "/" } else if stm.Rev(key+"/") != 0 { - ecode = etcdErr.EcodeNotFile + ecode = v2error.EcodeNotFile return nil } stm.Put(key, value, clientv3.WithPrevKV()) - stm.Put(s.mkActionKey(), store.Set) + stm.Put(s.mkActionKey(), v2store.Set) return nil } @@ -171,11 +189,11 @@ func (s *v2v3Store) Set( return nil, err } if ecode != 0 { - return nil, etcdErr.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision)) + return nil, v2error.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision)) } createRev := resp.Header.Revision - var pn *store.NodeExtern + var pn *v2store.NodeExtern if pkv := 
prevKeyFromPuts(resp); pkv != nil { pn = s.mkV2Node(pkv) createRev = pkv.CreateRevision @@ -185,9 +203,9 @@ func (s *v2v3Store) Set( if dir { vp = nil } - return &store.Event{ - Action: store.Set, - Node: &store.NodeExtern{ + return &v2store.Event{ + Action: v2store.Set, + Node: &v2store.NodeExtern{ Key: nodePath, Value: vp, Dir: dir, @@ -199,9 +217,9 @@ func (s *v2v3Store) Set( }, nil } -func (s *v2v3Store) Update(nodePath, newValue string, expireOpts store.TTLOptionSet) (*store.Event, error) { +func (s *v2v3Store) Update(nodePath, newValue string, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) { if isRoot(nodePath) { - return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0) + return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) } if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { @@ -212,15 +230,15 @@ func (s *v2v3Store) Update(nodePath, newValue string, expireOpts store.TTLOption ecode := 0 applyf := func(stm concurrency.STM) error { if rev := stm.Rev(key + "/"); rev != 0 { - ecode = etcdErr.EcodeNotFile + ecode = v2error.EcodeNotFile return nil } if rev := stm.Rev(key); rev == 0 { - ecode = etcdErr.EcodeKeyNotFound + ecode = v2error.EcodeKeyNotFound return nil } stm.Put(key, newValue, clientv3.WithPrevKV()) - stm.Put(s.mkActionKey(), store.Update) + stm.Put(s.mkActionKey(), v2store.Update) return nil } @@ -229,13 +247,13 @@ func (s *v2v3Store) Update(nodePath, newValue string, expireOpts store.TTLOption return nil, err } if ecode != 0 { - return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision)) + return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision)) } pkv := prevKeyFromPuts(resp) - return &store.Event{ - Action: store.Update, - Node: &store.NodeExtern{ + return &v2store.Event{ + Action: v2store.Update, + Node: &v2store.NodeExtern{ Key: nodePath, Value: &newValue, ModifiedIndex: mkV2Rev(resp.Header.Revision), @@ -251,10 +269,10 @@ func (s *v2v3Store) Create( dir bool, value string, unique bool, - expireOpts store.TTLOptionSet, -) (*store.Event, error) { + expireOpts v2store.TTLOptionSet, +) (*v2store.Event, error) { if isRoot(nodePath) { - return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0) + return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) } if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { return nil, errUnsupported @@ -275,7 +293,7 @@ func (s *v2v3Store) Create( } } if stm.Rev(key) > 0 || stm.Rev(key+"/") > 0 { - ecode = etcdErr.EcodeNodeExist + ecode = v2error.EcodeNodeExist return nil } // build path if any directories in path do not exist @@ -283,7 +301,7 @@ func (s *v2v3Store) Create( for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) { pp := s.mkPath(p) if stm.Rev(pp) > 0 { - ecode = etcdErr.EcodeNotDir + ecode = v2error.EcodeNotDir return nil } if stm.Rev(pp+"/") == 0 { @@ -299,7 +317,7 @@ func (s *v2v3Store) Create( key += "/" } stm.Put(key, value) - stm.Put(s.mkActionKey(), store.Create) + stm.Put(s.mkActionKey(), v2store.Create) return nil } @@ -308,7 +326,7 @@ func (s *v2v3Store) Create( return nil, err } if ecode != 0 { - return nil, etcdErr.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision)) + return nil, v2error.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision)) } var v *string @@ -316,9 +334,9 @@ func (s *v2v3Store) Create( v = &value } - return &store.Event{ - Action: store.Create, - Node: &store.NodeExtern{ + return &v2store.Event{ + Action: v2store.Create, + Node: &v2store.NodeExtern{ Key: nodePath, 
Value: v, Dir: dir, @@ -334,10 +352,10 @@ func (s *v2v3Store) CompareAndSwap( prevValue string, prevIndex uint64, value string, - expireOpts store.TTLOptionSet, -) (*store.Event, error) { + expireOpts v2store.TTLOptionSet, +) (*v2store.Event, error) { if isRoot(nodePath) { - return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0) + return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) } if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { return nil, errUnsupported @@ -348,7 +366,7 @@ func (s *v2v3Store) CompareAndSwap( s.mkCompare(nodePath, prevValue, prevIndex)..., ).Then( clientv3.OpPut(key, value, clientv3.WithPrevKV()), - clientv3.OpPut(s.mkActionKey(), store.CompareAndSwap), + clientv3.OpPut(s.mkActionKey(), v2store.CompareAndSwap), ).Else( clientv3.OpGet(key), clientv3.OpGet(key+"/"), @@ -362,9 +380,9 @@ func (s *v2v3Store) CompareAndSwap( } pkv := resp.Responses[0].GetResponsePut().PrevKv - return &store.Event{ - Action: store.CompareAndSwap, - Node: &store.NodeExtern{ + return &v2store.Event{ + Action: v2store.CompareAndSwap, + Node: &v2store.NodeExtern{ Key: nodePath, Value: &value, CreatedIndex: mkV2Rev(pkv.CreateRevision), @@ -375,9 +393,9 @@ func (s *v2v3Store) CompareAndSwap( }, nil } -func (s *v2v3Store) Delete(nodePath string, dir, recursive bool) (*store.Event, error) { +func (s *v2v3Store) Delete(nodePath string, dir, recursive bool) (*v2store.Event, error) { if isRoot(nodePath) { - return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0) + return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) } if !dir && !recursive { return s.deleteNode(nodePath) @@ -391,7 +409,7 @@ func (s *v2v3Store) Delete(nodePath string, dir, recursive bool) (*store.Event, for i := 1; i < maxPathDepth; i++ { dels[i] = clientv3.OpDelete(s.mkPathDepth(nodePath, i), clientv3.WithPrefix()) } - dels[maxPathDepth] = clientv3.OpPut(s.mkActionKey(), store.Delete) + dels[maxPathDepth] = clientv3.OpPut(s.mkActionKey(), v2store.Delete) resp, err := s.c.Txn(s.ctx).If( clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), ">", 0), @@ -403,61 +421,61 @@ func (s *v2v3Store) Delete(nodePath string, dir, recursive bool) (*store.Event, return nil, err } if !resp.Succeeded { - return nil, etcdErr.NewError(etcdErr.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision)) + return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision)) } dresp := resp.Responses[0].GetResponseDeleteRange() - return &store.Event{ - Action: store.Delete, + return &v2store.Event{ + Action: v2store.Delete, PrevNode: s.mkV2Node(dresp.PrevKvs[0]), EtcdIndex: mkV2Rev(resp.Header.Revision), }, nil } -func (s *v2v3Store) deleteEmptyDir(nodePath string) (*store.Event, error) { +func (s *v2v3Store) deleteEmptyDir(nodePath string) (*v2store.Event, error) { resp, err := s.c.Txn(s.ctx).If( clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, 1)), "=", 0).WithPrefix(), ).Then( clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV()), - clientv3.OpPut(s.mkActionKey(), store.Delete), + clientv3.OpPut(s.mkActionKey(), v2store.Delete), ).Commit() if err != nil { return nil, err } if !resp.Succeeded { - return nil, etcdErr.NewError(etcdErr.EcodeDirNotEmpty, nodePath, mkV2Rev(resp.Header.Revision)) + return nil, v2error.NewError(v2error.EcodeDirNotEmpty, nodePath, mkV2Rev(resp.Header.Revision)) } dresp := resp.Responses[0].GetResponseDeleteRange() if len(dresp.PrevKvs) == 0 { - return nil, etcdErr.NewError(etcdErr.EcodeNodeExist, nodePath, 
mkV2Rev(resp.Header.Revision)) + return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision)) } - return &store.Event{ - Action: store.Delete, + return &v2store.Event{ + Action: v2store.Delete, PrevNode: s.mkV2Node(dresp.PrevKvs[0]), EtcdIndex: mkV2Rev(resp.Header.Revision), }, nil } -func (s *v2v3Store) deleteNode(nodePath string) (*store.Event, error) { +func (s *v2v3Store) deleteNode(nodePath string) (*v2store.Event, error) { resp, err := s.c.Txn(s.ctx).If( clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), "=", 0), ).Then( clientv3.OpDelete(s.mkPath(nodePath), clientv3.WithPrevKV()), - clientv3.OpPut(s.mkActionKey(), store.Delete), + clientv3.OpPut(s.mkActionKey(), v2store.Delete), ).Commit() if err != nil { return nil, err } if !resp.Succeeded { - return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision)) + return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision)) } pkvs := resp.Responses[0].GetResponseDeleteRange().PrevKvs if len(pkvs) == 0 { - return nil, etcdErr.NewError(etcdErr.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision)) + return nil, v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision)) } pkv := pkvs[0] - return &store.Event{ - Action: store.Delete, - Node: &store.NodeExtern{ + return &v2store.Event{ + Action: v2store.Delete, + Node: &v2store.NodeExtern{ Key: nodePath, CreatedIndex: mkV2Rev(pkv.CreateRevision), ModifiedIndex: mkV2Rev(resp.Header.Revision), @@ -467,9 +485,9 @@ func (s *v2v3Store) deleteNode(nodePath string) (*store.Event, error) { }, nil } -func (s *v2v3Store) CompareAndDelete(nodePath, prevValue string, prevIndex uint64) (*store.Event, error) { +func (s *v2v3Store) CompareAndDelete(nodePath, prevValue string, prevIndex uint64) (*v2store.Event, error) { if isRoot(nodePath) { - return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0) + return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) } key := s.mkPath(nodePath) @@ -477,7 +495,7 @@ func (s *v2v3Store) CompareAndDelete(nodePath, prevValue string, prevIndex uint6 s.mkCompare(nodePath, prevValue, prevIndex)..., ).Then( clientv3.OpDelete(key, clientv3.WithPrevKV()), - clientv3.OpPut(s.mkActionKey(), store.CompareAndDelete), + clientv3.OpPut(s.mkActionKey(), v2store.CompareAndDelete), ).Else( clientv3.OpGet(key), clientv3.OpGet(key+"/"), @@ -492,9 +510,9 @@ func (s *v2v3Store) CompareAndDelete(nodePath, prevValue string, prevIndex uint6 // len(pkvs) > 1 since txn only succeeds when key exists pkv := resp.Responses[0].GetResponseDeleteRange().PrevKvs[0] - return &store.Event{ - Action: store.CompareAndDelete, - Node: &store.NodeExtern{ + return &v2store.Event{ + Action: v2store.CompareAndDelete, + Node: &v2store.NodeExtern{ Key: nodePath, CreatedIndex: mkV2Rev(pkv.CreateRevision), ModifiedIndex: mkV2Rev(resp.Header.Revision), @@ -506,15 +524,15 @@ func (s *v2v3Store) CompareAndDelete(nodePath, prevValue string, prevIndex uint6 func compareFail(nodePath, prevValue string, prevIndex uint64, resp *clientv3.TxnResponse) error { if dkvs := resp.Responses[1].GetResponseRange().Kvs; len(dkvs) > 0 { - return etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision)) + return v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision)) } kvs := resp.Responses[0].GetResponseRange().Kvs if len(kvs) == 0 { - return etcdErr.NewError(etcdErr.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision)) + return 
v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision)) } kv := kvs[0] - indexMatch := (prevIndex == 0 || kv.ModRevision == int64(prevIndex)) - valueMatch := (prevValue == "" || string(kv.Value) == prevValue) + indexMatch := prevIndex == 0 || kv.ModRevision == int64(prevIndex) + valueMatch := prevValue == "" || string(kv.Value) == prevValue var cause string switch { case indexMatch && !valueMatch: @@ -524,7 +542,7 @@ func compareFail(nodePath, prevValue string, prevIndex uint64, resp *clientv3.Tx default: cause = fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, string(kv.Value), prevIndex, kv.ModRevision) } - return etcdErr.NewError(etcdErr.EcodeTestFailed, cause, mkV2Rev(resp.Header.Revision)) + return v2error.NewError(v2error.EcodeTestFailed, cause, mkV2Rev(resp.Header.Revision)) } func (s *v2v3Store) mkCompare(nodePath, prevValue string, prevIndex uint64) []clientv3.Cmp { @@ -548,7 +566,7 @@ func (s *v2v3Store) Version() int { return 2 } func (s *v2v3Store) Save() ([]byte, error) { panic("STUB") } func (s *v2v3Store) Recovery(state []byte) error { panic("STUB") } -func (s *v2v3Store) Clone() store.Store { panic("STUB") } +func (s *v2v3Store) Clone() v2store.Store { panic("STUB") } func (s *v2v3Store) SaveNoCopy() ([]byte, error) { panic("STUB") } func (s *v2v3Store) HasTTLKeys() bool { panic("STUB") } @@ -586,12 +604,12 @@ func mkV3Rev(v2Rev uint64) int64 { } // mkV2Node creates a V2 NodeExtern from a V3 KeyValue -func (s *v2v3Store) mkV2Node(kv *mvccpb.KeyValue) *store.NodeExtern { +func (s *v2v3Store) mkV2Node(kv *mvccpb.KeyValue) *v2store.NodeExtern { if kv == nil { return nil } - n := &store.NodeExtern{ - Key: string(s.mkNodePath(string(kv.Key))), + n := &v2store.NodeExtern{ + Key: s.mkNodePath(string(kv.Key)), Dir: kv.Key[len(kv.Key)-1] == '/', CreatedIndex: mkV2Rev(kv.CreateRevision), ModifiedIndex: mkV2Rev(kv.ModRevision), diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/watcher.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/watcher.go similarity index 87% rename from vendor/github.com/coreos/etcd/etcdserver/api/v2v3/watcher.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v2v3/watcher.go index 1c2680e74..e8a3557c1 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/watcher.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/watcher.go @@ -18,12 +18,12 @@ import ( "context" "strings" - "github.com/coreos/etcd/clientv3" - etcdErr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/store" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/etcdserver/api/v2error" + "go.etcd.io/etcd/etcdserver/api/v2store" ) -func (s *v2v3Store) Watch(prefix string, recursive, stream bool, sinceIndex uint64) (store.Watcher, error) { +func (s *v2v3Store) Watch(prefix string, recursive, stream bool, sinceIndex uint64) (v2store.Watcher, error) { ctx, cancel := context.WithCancel(s.ctx) wch := s.c.Watch( ctx, @@ -36,10 +36,10 @@ func (s *v2v3Store) Watch(prefix string, recursive, stream bool, sinceIndex uint resp, ok := <-wch if err := resp.Err(); err != nil || !ok { cancel() - return nil, etcdErr.NewError(etcdErr.EcodeRaftInternal, prefix, 0) + return nil, v2error.NewError(v2error.EcodeRaftInternal, prefix, 0) } - evc, donec := make(chan *store.Event), make(chan struct{}) + evc, donec := make(chan *v2store.Event), make(chan struct{}) go func() { defer func() { close(evc) @@ -82,7 +82,7 @@ func (s *v2v3Store) Watch(prefix string, recursive, stream bool, sinceIndex uint }, nil } -func (s *v2v3Store) mkV2Events(wr clientv3.WatchResponse) (evs 
[]*store.Event) { +func (s *v2v3Store) mkV2Events(wr clientv3.WatchResponse) (evs []*v2store.Event) { ak := s.mkActionKey() for _, rev := range mkRevs(wr) { var act, key *clientv3.Event @@ -97,7 +97,7 @@ func (s *v2v3Store) mkV2Events(wr clientv3.WatchResponse) (evs []*store.Event) { key = ev } } - v2ev := &store.Event{ + v2ev := &v2store.Event{ Action: string(act.Kv.Value), Node: s.mkV2Node(key.Kv), PrevNode: s.mkV2Node(key.PrevKv), @@ -125,7 +125,7 @@ func mkRevs(wr clientv3.WatchResponse) (revs [][]*clientv3.Event) { type v2v3Watcher struct { startRev int64 - evc chan *store.Event + evc chan *v2store.Event donec chan struct{} cancel context.CancelFunc } @@ -137,4 +137,4 @@ func (w *v2v3Watcher) Remove() { <-w.donec } -func (w *v2v3Watcher) EventChan() chan *store.Event { return w.evc } +func (w *v2v3Watcher) EventChan() chan *v2store.Event { return w.evc } diff --git a/vendor/github.com/coreos/etcd/alarm/alarms.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3alarm/alarms.go similarity index 91% rename from vendor/github.com/coreos/etcd/alarm/alarms.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3alarm/alarms.go index 4f0ebe93f..2b085a8e7 100644 --- a/vendor/github.com/coreos/etcd/alarm/alarms.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3alarm/alarms.go @@ -12,21 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package alarm manages health status alarms in etcd. -package alarm +// Package v3alarm manages health status alarms in etcd. +package v3alarm import ( "sync" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/types" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/mvcc/backend" + "go.etcd.io/etcd/pkg/types" + "github.com/coreos/pkg/capnslog" ) var ( alarmBucketName = []byte("alarm") - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "alarm") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "alarm") ) type BackendGetter interface { diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3client/doc.go similarity index 93% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3client/doc.go index 310715f5c..47922c433 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3client/doc.go @@ -19,8 +19,8 @@ // import ( // "context" // -// "github.com/coreos/etcd/embed" -// "github.com/coreos/etcd/etcdserver/api/v3client" +// "go.etcd.io/etcd/embed" +// "go.etcd.io/etcd/etcdserver/api/v3client" // ) // // ... 
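For reference, a minimal sketch (not part of the patch) of the in-process client usage that the v3client doc.go hunk above describes, written against the renamed go.etcd.io import paths; the data directory and key/value below are placeholders, and the surrounding setup is only illustrative of how v3client.New wraps an embedded EtcdServer without a network round trip.

package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/embed"
	"go.etcd.io/etcd/etcdserver/api/v3client"
)

func main() {
	// Start an in-process etcd server (placeholder data dir).
	cfg := embed.NewConfig()
	cfg.Dir = "/tmp/v3client-example"
	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()

	// Wait until the embedded server is ready to serve requests.
	select {
	case <-e.Server.ReadyNotify():
	case <-time.After(10 * time.Second):
		log.Fatal("embedded etcd took too long to start")
	}

	// Wrap the in-process EtcdServer in a clientv3-compatible client.
	cli := v3client.New(e.Server)
	defer cli.Close()

	if _, err := cli.Put(context.TODO(), "example-key", "example-value"); err != nil {
		log.Fatal(err)
	}
}
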
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3client/v3client.go similarity index 93% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3client/v3client.go index ab48ea75b..d2031213c 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3client/v3client.go @@ -18,10 +18,10 @@ import ( "context" "time" - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc" - "github.com/coreos/etcd/proxy/grpcproxy/adapter" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api/v3rpc" + "go.etcd.io/etcd/proxy/grpcproxy/adapter" ) // New creates a clientv3 client that wraps an in-process EtcdServer. Instead diff --git a/vendor/github.com/coreos/etcd/compactor/compactor.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/compactor.go similarity index 74% rename from vendor/github.com/coreos/etcd/compactor/compactor.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/compactor.go index 8100b6938..73a96842d 100644 --- a/vendor/github.com/coreos/etcd/compactor/compactor.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/compactor.go @@ -12,20 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. -package compactor +package v3compactor import ( "context" "fmt" "time" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "github.com/coreos/pkg/capnslog" + "github.com/jonboulle/clockwork" + "go.uber.org/zap" ) var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "compactor") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "compactor") ) const ( @@ -54,12 +56,19 @@ type RevGetter interface { Rev() int64 } -func New(mode string, retention time.Duration, rg RevGetter, c Compactable) (Compactor, error) { +// New returns a new Compactor based on given "mode". +func New( + lg *zap.Logger, + mode string, + retention time.Duration, + rg RevGetter, + c Compactable, +) (Compactor, error) { switch mode { case ModePeriodic: - return NewPeriodic(retention, rg, c), nil + return newPeriodic(lg, clockwork.NewRealClock(), retention, rg, c), nil case ModeRevision: - return NewRevision(int64(retention), rg, c), nil + return newRevision(lg, clockwork.NewRealClock(), int64(retention), rg, c), nil default: return nil, fmt.Errorf("unsupported compaction mode %s", mode) } diff --git a/vendor/github.com/coreos/etcd/compactor/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/doc.go similarity index 84% rename from vendor/github.com/coreos/etcd/compactor/doc.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/doc.go index cb158340e..bb28046ce 100644 --- a/vendor/github.com/coreos/etcd/compactor/doc.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/doc.go @@ -12,5 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package compactor implements automated policies for compacting etcd's mvcc storage. -package compactor +// Package v3compactor implements automated policies for compacting etcd's mvcc storage. 
+package v3compactor diff --git a/vendor/github.com/coreos/etcd/compactor/periodic.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/periodic.go similarity index 58% rename from vendor/github.com/coreos/etcd/compactor/periodic.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/periodic.go index 9d9164e9c..ab64cb706 100644 --- a/vendor/github.com/coreos/etcd/compactor/periodic.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/periodic.go @@ -12,22 +12,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -package compactor +package v3compactor import ( "context" "sync" "time" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/mvcc" "github.com/jonboulle/clockwork" + "go.uber.org/zap" ) // Periodic compacts the log by purging revisions older than // the configured retention time. type Periodic struct { + lg *zap.Logger clock clockwork.Clock period time.Duration @@ -43,22 +45,19 @@ type Periodic struct { paused bool } -// NewPeriodic creates a new instance of Periodic compactor that purges +// newPeriodic creates a new instance of Periodic compactor that purges // the log older than h Duration. -func NewPeriodic(h time.Duration, rg RevGetter, c Compactable) *Periodic { - return newPeriodic(clockwork.NewRealClock(), h, rg, c) -} - -func newPeriodic(clock clockwork.Clock, h time.Duration, rg RevGetter, c Compactable) *Periodic { - t := &Periodic{ +func newPeriodic(lg *zap.Logger, clock clockwork.Clock, h time.Duration, rg RevGetter, c Compactable) *Periodic { + pc := &Periodic{ + lg: lg, clock: clock, period: h, rg: rg, c: c, revs: make([]int64, 0), } - t.ctx, t.cancel = context.WithCancel(context.Background()) - return t + pc.ctx, pc.cancel = context.WithCancel(context.Background()) + return pc } /* @@ -96,50 +95,77 @@ Compaction period 5-sec: */ // Run runs periodic compactor. 
-func (t *Periodic) Run() { - compactInterval := t.getCompactInterval() - retryInterval := t.getRetryInterval() - retentions := t.getRetentions() +func (pc *Periodic) Run() { + compactInterval := pc.getCompactInterval() + retryInterval := pc.getRetryInterval() + retentions := pc.getRetentions() go func() { - lastSuccess := t.clock.Now() - baseInterval := t.period + lastSuccess := pc.clock.Now() + baseInterval := pc.period for { - t.revs = append(t.revs, t.rg.Rev()) - if len(t.revs) > retentions { - t.revs = t.revs[1:] // t.revs[0] is always the rev at t.period ago + pc.revs = append(pc.revs, pc.rg.Rev()) + if len(pc.revs) > retentions { + pc.revs = pc.revs[1:] // pc.revs[0] is always the rev at pc.period ago } select { - case <-t.ctx.Done(): + case <-pc.ctx.Done(): return - case <-t.clock.After(retryInterval): - t.mu.Lock() - p := t.paused - t.mu.Unlock() + case <-pc.clock.After(retryInterval): + pc.mu.Lock() + p := pc.paused + pc.mu.Unlock() if p { continue } } - if t.clock.Now().Sub(lastSuccess) < baseInterval { + if pc.clock.Now().Sub(lastSuccess) < baseInterval { continue } // wait up to initial given period - if baseInterval == t.period { + if baseInterval == pc.period { baseInterval = compactInterval } - rev := t.revs[0] - - plog.Noticef("Starting auto-compaction at revision %d (retention: %v)", rev, t.period) - _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev}) + rev := pc.revs[0] + + if pc.lg != nil { + pc.lg.Info( + "starting auto periodic compaction", + zap.Int64("revision", rev), + zap.Duration("compact-period", pc.period), + ) + } else { + plog.Noticef("Starting auto-compaction at revision %d (retention: %v)", rev, pc.period) + } + _, err := pc.c.Compact(pc.ctx, &pb.CompactionRequest{Revision: rev}) if err == nil || err == mvcc.ErrCompacted { - lastSuccess = t.clock.Now() - plog.Noticef("Finished auto-compaction at revision %d", rev) + if pc.lg != nil { + pc.lg.Info( + "completed auto periodic compaction", + zap.Int64("revision", rev), + zap.Duration("compact-period", pc.period), + zap.Duration("took", time.Since(lastSuccess)), + ) + } else { + plog.Noticef("Finished auto-compaction at revision %d", rev) + } + lastSuccess = pc.clock.Now() } else { - plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err) - plog.Noticef("Retry after %v", retryInterval) + if pc.lg != nil { + pc.lg.Warn( + "failed auto periodic compaction", + zap.Int64("revision", rev), + zap.Duration("compact-period", pc.period), + zap.Duration("retry-interval", retryInterval), + zap.Error(err), + ) + } else { + plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err) + plog.Noticef("Retry after %v", retryInterval) + } } } }() @@ -149,22 +175,22 @@ func (t *Periodic) Run() { // (e.g. --auto-compaction-mode 'periodic' --auto-compaction-retention='10m', then compact every 10-minute) // if given compaction period x is >1-hour, compact every hour. // (e.g. 
--auto-compaction-mode 'periodic' --auto-compaction-retention='2h', then compact every 1-hour) -func (t *Periodic) getCompactInterval() time.Duration { - itv := t.period +func (pc *Periodic) getCompactInterval() time.Duration { + itv := pc.period if itv > time.Hour { itv = time.Hour } return itv } -func (t *Periodic) getRetentions() int { - return int(t.period/t.getRetryInterval()) + 1 +func (pc *Periodic) getRetentions() int { + return int(pc.period/pc.getRetryInterval()) + 1 } const retryDivisor = 10 -func (t *Periodic) getRetryInterval() time.Duration { - itv := t.period +func (pc *Periodic) getRetryInterval() time.Duration { + itv := pc.period if itv > time.Hour { itv = time.Hour } @@ -172,20 +198,20 @@ func (t *Periodic) getRetryInterval() time.Duration { } // Stop stops periodic compactor. -func (t *Periodic) Stop() { - t.cancel() +func (pc *Periodic) Stop() { + pc.cancel() } // Pause pauses periodic compactor. -func (t *Periodic) Pause() { - t.mu.Lock() - defer t.mu.Unlock() - t.paused = true +func (pc *Periodic) Pause() { + pc.mu.Lock() + pc.paused = true + pc.mu.Unlock() } // Resume resumes periodic compactor. -func (t *Periodic) Resume() { - t.mu.Lock() - defer t.mu.Unlock() - t.paused = false +func (pc *Periodic) Resume() { + pc.mu.Lock() + pc.paused = false + pc.mu.Unlock() } diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/revision.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/revision.go new file mode 100644 index 000000000..cf8ac4301 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/revision.go @@ -0,0 +1,143 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3compactor + +import ( + "context" + "sync" + "time" + + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/mvcc" + + "github.com/jonboulle/clockwork" + "go.uber.org/zap" +) + +// Revision compacts the log by purging revisions older than +// the configured reivison number. Compaction happens every 5 minutes. +type Revision struct { + lg *zap.Logger + + clock clockwork.Clock + retention int64 + + rg RevGetter + c Compactable + + ctx context.Context + cancel context.CancelFunc + + mu sync.Mutex + paused bool +} + +// newRevision creates a new instance of Revisonal compactor that purges +// the log older than retention revisions from the current revision. +func newRevision(lg *zap.Logger, clock clockwork.Clock, retention int64, rg RevGetter, c Compactable) *Revision { + rc := &Revision{ + lg: lg, + clock: clock, + retention: retention, + rg: rg, + c: c, + } + rc.ctx, rc.cancel = context.WithCancel(context.Background()) + return rc +} + +const revInterval = 5 * time.Minute + +// Run runs revision-based compactor. 
+func (rc *Revision) Run() { + prev := int64(0) + go func() { + for { + select { + case <-rc.ctx.Done(): + return + case <-rc.clock.After(revInterval): + rc.mu.Lock() + p := rc.paused + rc.mu.Unlock() + if p { + continue + } + } + + rev := rc.rg.Rev() - rc.retention + if rev <= 0 || rev == prev { + continue + } + + now := time.Now() + if rc.lg != nil { + rc.lg.Info( + "starting auto revision compaction", + zap.Int64("revision", rev), + zap.Int64("revision-compaction-retention", rc.retention), + ) + } else { + plog.Noticef("Starting auto-compaction at revision %d (retention: %d revisions)", rev, rc.retention) + } + _, err := rc.c.Compact(rc.ctx, &pb.CompactionRequest{Revision: rev}) + if err == nil || err == mvcc.ErrCompacted { + prev = rev + if rc.lg != nil { + rc.lg.Info( + "completed auto revision compaction", + zap.Int64("revision", rev), + zap.Int64("revision-compaction-retention", rc.retention), + zap.Duration("took", time.Since(now)), + ) + } else { + plog.Noticef("Finished auto-compaction at revision %d", rev) + } + } else { + if rc.lg != nil { + rc.lg.Warn( + "failed auto revision compaction", + zap.Int64("revision", rev), + zap.Int64("revision-compaction-retention", rc.retention), + zap.Duration("retry-interval", revInterval), + zap.Error(err), + ) + } else { + plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err) + plog.Noticef("Retry after %v", revInterval) + } + } + } + }() +} + +// Stop stops revision-based compactor. +func (rc *Revision) Stop() { + rc.cancel() +} + +// Pause pauses revision-based compactor. +func (rc *Revision) Pause() { + rc.mu.Lock() + rc.paused = true + rc.mu.Unlock() +} + +// Resume resumes revision-based compactor. +func (rc *Revision) Resume() { + rc.mu.Lock() + rc.paused = false + rc.mu.Unlock() +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3election/doc.go diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/election.go similarity index 96% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3election/election.go index c66d7a382..f5a3be3b2 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/election.go @@ -18,9 +18,9 @@ import ( "context" "errors" - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/clientv3/concurrency" - epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/clientv3/concurrency" + epb "go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb" ) // ErrMissingLeaderKey is returned when election API request diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go similarity index 95% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go index 58368bbfb..23551b54b 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go +++ 
b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go @@ -9,7 +9,7 @@ It translates gRPC into RESTful JSON APIs. package gw import ( - "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + "go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb" "io" "net/http" @@ -33,7 +33,7 @@ func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshale var protoReq v3electionpb.CampaignRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -46,7 +46,7 @@ func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshale var protoReq v3electionpb.ProclaimRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -59,7 +59,7 @@ func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, var protoReq v3electionpb.LeaderRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -72,7 +72,7 @@ func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler var protoReq v3electionpb.LeaderRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -93,7 +93,7 @@ func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, var protoReq v3electionpb.ResignRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -289,15 +289,15 @@ func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, c } var ( - pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "campaign"}, "")) + pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "campaign"}, "")) - pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "proclaim"}, "")) + pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "proclaim"}, "")) - pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "leader"}, "")) + pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "leader"}, "")) - pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "observe"}, "")) + pattern_Election_Observe_0 = 
runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "observe"}, "")) - pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "resign"}, "")) + pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "resign"}, "")) ) var ( diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go similarity index 61% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go index a31b04c84..1fc1bce44 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go @@ -1,23 +1,43 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: v3election.proto +/* + Package v3electionpb is a generated protocol buffer package. + + It is generated from these files: + v3election.proto + + It has these top-level messages: + CampaignRequest + CampaignResponse + LeaderKey + LeaderRequest + LeaderResponse + ResignRequest + ResignResponse + ProclaimRequest + ProclaimResponse +*/ package v3electionpb import ( - context "context" - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" - etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb" - mvccpb "github.com/coreos/etcd/mvcc/mvccpb" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" + + etcdserverpb "go.etcd.io/etcd/etcdserver/etcdserverpb" + + mvccpb "go.etcd.io/etcd/mvcc/mvccpb" + + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -40,44 +60,13 @@ type CampaignRequest struct { Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"` // value is the initial proclaimed value set when the campaigner wins the // election. 
- Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CampaignRequest) Reset() { *m = CampaignRequest{} } -func (m *CampaignRequest) String() string { return proto.CompactTextString(m) } -func (*CampaignRequest) ProtoMessage() {} -func (*CampaignRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{0} -} -func (m *CampaignRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CampaignRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CampaignRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CampaignRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CampaignRequest.Merge(m, src) -} -func (m *CampaignRequest) XXX_Size() int { - return m.Size() -} -func (m *CampaignRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CampaignRequest.DiscardUnknown(m) + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` } -var xxx_messageInfo_CampaignRequest proto.InternalMessageInfo +func (m *CampaignRequest) Reset() { *m = CampaignRequest{} } +func (m *CampaignRequest) String() string { return proto.CompactTextString(m) } +func (*CampaignRequest) ProtoMessage() {} +func (*CampaignRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{0} } func (m *CampaignRequest) GetName() []byte { if m != nil { @@ -101,46 +90,15 @@ func (m *CampaignRequest) GetValue() []byte { } type CampaignResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // leader describes the resources used for holding leadereship of the election. 
- Leader *LeaderKey `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CampaignResponse) Reset() { *m = CampaignResponse{} } -func (m *CampaignResponse) String() string { return proto.CompactTextString(m) } -func (*CampaignResponse) ProtoMessage() {} -func (*CampaignResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{1} -} -func (m *CampaignResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CampaignResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CampaignResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CampaignResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CampaignResponse.Merge(m, src) -} -func (m *CampaignResponse) XXX_Size() int { - return m.Size() -} -func (m *CampaignResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CampaignResponse.DiscardUnknown(m) + Leader *LeaderKey `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"` } -var xxx_messageInfo_CampaignResponse proto.InternalMessageInfo +func (m *CampaignResponse) Reset() { *m = CampaignResponse{} } +func (m *CampaignResponse) String() string { return proto.CompactTextString(m) } +func (*CampaignResponse) ProtoMessage() {} +func (*CampaignResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{1} } func (m *CampaignResponse) GetHeader() *etcdserverpb.ResponseHeader { if m != nil { @@ -167,44 +125,13 @@ type LeaderKey struct { // matches rev. Rev int64 `protobuf:"varint,3,opt,name=rev,proto3" json:"rev,omitempty"` // lease is the lease ID of the election leader. 
- Lease int64 `protobuf:"varint,4,opt,name=lease,proto3" json:"lease,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaderKey) Reset() { *m = LeaderKey{} } -func (m *LeaderKey) String() string { return proto.CompactTextString(m) } -func (*LeaderKey) ProtoMessage() {} -func (*LeaderKey) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{2} -} -func (m *LeaderKey) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaderKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaderKey.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaderKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaderKey.Merge(m, src) -} -func (m *LeaderKey) XXX_Size() int { - return m.Size() -} -func (m *LeaderKey) XXX_DiscardUnknown() { - xxx_messageInfo_LeaderKey.DiscardUnknown(m) + Lease int64 `protobuf:"varint,4,opt,name=lease,proto3" json:"lease,omitempty"` } -var xxx_messageInfo_LeaderKey proto.InternalMessageInfo +func (m *LeaderKey) Reset() { *m = LeaderKey{} } +func (m *LeaderKey) String() string { return proto.CompactTextString(m) } +func (*LeaderKey) ProtoMessage() {} +func (*LeaderKey) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{2} } func (m *LeaderKey) GetName() []byte { if m != nil { @@ -236,44 +163,13 @@ func (m *LeaderKey) GetLease() int64 { type LeaderRequest struct { // name is the election identifier for the leadership information. - Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaderRequest) Reset() { *m = LeaderRequest{} } -func (m *LeaderRequest) String() string { return proto.CompactTextString(m) } -func (*LeaderRequest) ProtoMessage() {} -func (*LeaderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{3} -} -func (m *LeaderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaderRequest.Merge(m, src) -} -func (m *LeaderRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaderRequest.DiscardUnknown(m) + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -var xxx_messageInfo_LeaderRequest proto.InternalMessageInfo +func (m *LeaderRequest) Reset() { *m = LeaderRequest{} } +func (m *LeaderRequest) String() string { return proto.CompactTextString(m) } +func (*LeaderRequest) ProtoMessage() {} +func (*LeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{3} } func (m *LeaderRequest) GetName() []byte { if m != nil { @@ -283,46 +179,15 @@ func (m *LeaderRequest) GetName() []byte { } type LeaderResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *etcdserverpb.ResponseHeader 
`protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // kv is the key-value pair representing the latest leader update. - Kv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaderResponse) Reset() { *m = LeaderResponse{} } -func (m *LeaderResponse) String() string { return proto.CompactTextString(m) } -func (*LeaderResponse) ProtoMessage() {} -func (*LeaderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{4} -} -func (m *LeaderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaderResponse.Merge(m, src) -} -func (m *LeaderResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaderResponse.DiscardUnknown(m) + Kv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"` } -var xxx_messageInfo_LeaderResponse proto.InternalMessageInfo +func (m *LeaderResponse) Reset() { *m = LeaderResponse{} } +func (m *LeaderResponse) String() string { return proto.CompactTextString(m) } +func (*LeaderResponse) ProtoMessage() {} +func (*LeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{4} } func (m *LeaderResponse) GetHeader() *etcdserverpb.ResponseHeader { if m != nil { @@ -340,44 +205,13 @@ func (m *LeaderResponse) GetKv() *mvccpb.KeyValue { type ResignRequest struct { // leader is the leadership to relinquish by resignation. 
- Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader,proto3" json:"leader,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResignRequest) Reset() { *m = ResignRequest{} } -func (m *ResignRequest) String() string { return proto.CompactTextString(m) } -func (*ResignRequest) ProtoMessage() {} -func (*ResignRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{5} -} -func (m *ResignRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResignRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResignRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResignRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResignRequest.Merge(m, src) -} -func (m *ResignRequest) XXX_Size() int { - return m.Size() -} -func (m *ResignRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResignRequest.DiscardUnknown(m) + Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader" json:"leader,omitempty"` } -var xxx_messageInfo_ResignRequest proto.InternalMessageInfo +func (m *ResignRequest) Reset() { *m = ResignRequest{} } +func (m *ResignRequest) String() string { return proto.CompactTextString(m) } +func (*ResignRequest) ProtoMessage() {} +func (*ResignRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{5} } func (m *ResignRequest) GetLeader() *LeaderKey { if m != nil { @@ -387,44 +221,13 @@ func (m *ResignRequest) GetLeader() *LeaderKey { } type ResignResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResignResponse) Reset() { *m = ResignResponse{} } -func (m *ResignResponse) String() string { return proto.CompactTextString(m) } -func (*ResignResponse) ProtoMessage() {} -func (*ResignResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{6} -} -func (m *ResignResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResignResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResignResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResignResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResignResponse.Merge(m, src) -} -func (m *ResignResponse) XXX_Size() int { - return m.Size() -} -func (m *ResignResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ResignResponse.DiscardUnknown(m) + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_ResignResponse proto.InternalMessageInfo +func (m *ResignResponse) Reset() { *m = ResignResponse{} } +func (m *ResignResponse) String() string { return proto.CompactTextString(m) } +func (*ResignResponse) ProtoMessage() {} +func (*ResignResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{6} } func (m *ResignResponse) GetHeader() *etcdserverpb.ResponseHeader { if m != nil { @@ -435,46 +238,15 @@ func (m *ResignResponse) GetHeader() *etcdserverpb.ResponseHeader { type ProclaimRequest 
struct { // leader is the leadership hold on the election. - Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader,proto3" json:"leader,omitempty"` + Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader" json:"leader,omitempty"` // value is an update meant to overwrite the leader's current value. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProclaimRequest) Reset() { *m = ProclaimRequest{} } -func (m *ProclaimRequest) String() string { return proto.CompactTextString(m) } -func (*ProclaimRequest) ProtoMessage() {} -func (*ProclaimRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{7} -} -func (m *ProclaimRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProclaimRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProclaimRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProclaimRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProclaimRequest.Merge(m, src) -} -func (m *ProclaimRequest) XXX_Size() int { - return m.Size() -} -func (m *ProclaimRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ProclaimRequest.DiscardUnknown(m) + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } -var xxx_messageInfo_ProclaimRequest proto.InternalMessageInfo +func (m *ProclaimRequest) Reset() { *m = ProclaimRequest{} } +func (m *ProclaimRequest) String() string { return proto.CompactTextString(m) } +func (*ProclaimRequest) ProtoMessage() {} +func (*ProclaimRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{7} } func (m *ProclaimRequest) GetLeader() *LeaderKey { if m != nil { @@ -491,44 +263,13 @@ func (m *ProclaimRequest) GetValue() []byte { } type ProclaimResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProclaimResponse) Reset() { *m = ProclaimResponse{} } -func (m *ProclaimResponse) String() string { return proto.CompactTextString(m) } -func (*ProclaimResponse) ProtoMessage() {} -func (*ProclaimResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c9b1f26cc432a035, []int{8} -} -func (m *ProclaimResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProclaimResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProclaimResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProclaimResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProclaimResponse.Merge(m, src) -} -func (m *ProclaimResponse) XXX_Size() int { - return m.Size() -} -func (m *ProclaimResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ProclaimResponse.DiscardUnknown(m) + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_ProclaimResponse proto.InternalMessageInfo +func (m *ProclaimResponse) Reset() { *m = ProclaimResponse{} } +func (m *ProclaimResponse) String() string { return 
proto.CompactTextString(m) } +func (*ProclaimResponse) ProtoMessage() {} +func (*ProclaimResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{8} } func (m *ProclaimResponse) GetHeader() *etcdserverpb.ResponseHeader { if m != nil { @@ -549,46 +290,6 @@ func init() { proto.RegisterType((*ProclaimResponse)(nil), "v3electionpb.ProclaimResponse") } -func init() { proto.RegisterFile("v3election.proto", fileDescriptor_c9b1f26cc432a035) } - -var fileDescriptor_c9b1f26cc432a035 = []byte{ - // 538 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x65, 0x9d, 0x10, 0xca, 0x90, 0xb6, 0x96, 0x55, 0xa9, 0x69, 0x48, 0xad, 0x68, 0x8b, 0x50, - 0x95, 0x83, 0x17, 0x35, 0x9c, 0x72, 0x42, 0x20, 0x50, 0xa5, 0x22, 0x01, 0x3e, 0x20, 0x38, 0xae, - 0xdd, 0x91, 0x1b, 0xc5, 0xf1, 0x1a, 0xdb, 0xb5, 0x94, 0x2b, 0xbf, 0xc0, 0x85, 0x7f, 0xe0, 0x47, - 0x38, 0x22, 0xf1, 0x03, 0x28, 0xf0, 0x21, 0x68, 0x77, 0x6d, 0xec, 0xb8, 0x21, 0x42, 0xe4, 0x62, - 0x8d, 0x67, 0x9e, 0xe7, 0xcd, 0x7b, 0x3b, 0x6b, 0x30, 0xf3, 0x31, 0x86, 0xe8, 0x67, 0x53, 0x11, - 0x39, 0x71, 0x22, 0x32, 0x61, 0x75, 0xab, 0x4c, 0xec, 0xf5, 0x0f, 0x02, 0x11, 0x08, 0x55, 0x60, - 0x32, 0xd2, 0x98, 0xfe, 0x43, 0xcc, 0xfc, 0x4b, 0x26, 0x1f, 0x29, 0x26, 0x39, 0x26, 0xb5, 0x30, - 0xf6, 0x58, 0x12, 0xfb, 0x05, 0xee, 0x48, 0xe1, 0xe6, 0xb9, 0xef, 0xab, 0x47, 0xec, 0xb1, 0x59, - 0x5e, 0x94, 0x06, 0x81, 0x10, 0x41, 0x88, 0x8c, 0xc7, 0x53, 0xc6, 0xa3, 0x48, 0x64, 0x5c, 0x32, - 0xa6, 0xba, 0x4a, 0xdf, 0xc0, 0xfe, 0x33, 0x3e, 0x8f, 0xf9, 0x34, 0x88, 0x5c, 0xfc, 0x70, 0x8d, - 0x69, 0x66, 0x59, 0xd0, 0x8e, 0xf8, 0x1c, 0x7b, 0x64, 0x48, 0x4e, 0xbb, 0xae, 0x8a, 0xad, 0x03, - 0xb8, 0x1d, 0x22, 0x4f, 0xb1, 0x67, 0x0c, 0xc9, 0x69, 0xcb, 0xd5, 0x2f, 0x32, 0x9b, 0xf3, 0xf0, - 0x1a, 0x7b, 0x2d, 0x05, 0xd5, 0x2f, 0x74, 0x01, 0x66, 0xd5, 0x32, 0x8d, 0x45, 0x94, 0xa2, 0xf5, - 0x18, 0x3a, 0x57, 0xc8, 0x2f, 0x31, 0x51, 0x5d, 0xef, 0x9d, 0x0d, 0x9c, 0xba, 0x10, 0xa7, 0xc4, - 0x9d, 0x2b, 0x8c, 0x5b, 0x60, 0x2d, 0x06, 0x9d, 0x50, 0x7f, 0x65, 0xa8, 0xaf, 0x0e, 0x9d, 0xba, - 0x65, 0xce, 0x4b, 0x55, 0xbb, 0xc0, 0x85, 0x5b, 0xc0, 0xe8, 0x7b, 0xb8, 0xfb, 0x27, 0xb9, 0x56, - 0x87, 0x09, 0xad, 0x19, 0x2e, 0x54, 0xbb, 0xae, 0x2b, 0x43, 0x99, 0x49, 0x30, 0x57, 0x0a, 0x5a, - 0xae, 0x0c, 0x2b, 0xad, 0xed, 0x9a, 0x56, 0x7a, 0x02, 0xbb, 0xba, 0xf5, 0x06, 0x9b, 0xe8, 0x15, - 0xec, 0x95, 0xa0, 0xad, 0x84, 0x0f, 0xc1, 0x98, 0xe5, 0x85, 0x68, 0xd3, 0xd1, 0x27, 0xea, 0x5c, - 0xe0, 0xe2, 0xad, 0x34, 0xd8, 0x35, 0x66, 0x39, 0x7d, 0x02, 0xbb, 0x2e, 0xa6, 0xb5, 0x53, 0xab, - 0xbc, 0x22, 0xff, 0xe6, 0xd5, 0x0b, 0xd8, 0x2b, 0x3b, 0x6c, 0x33, 0x2b, 0x7d, 0x07, 0xfb, 0xaf, - 0x13, 0xe1, 0x87, 0x7c, 0x3a, 0xff, 0xdf, 0x59, 0xaa, 0x45, 0x32, 0xea, 0x8b, 0x74, 0x0e, 0x66, - 0xd5, 0x79, 0x9b, 0x19, 0xcf, 0xbe, 0xb4, 0x61, 0xe7, 0x79, 0x31, 0x80, 0x25, 0x60, 0xa7, 0xdc, - 0x4f, 0xeb, 0x78, 0x75, 0xb2, 0xc6, 0x55, 0xe8, 0xdb, 0x7f, 0x2b, 0x6b, 0x16, 0xfa, 0xe0, 0xe3, - 0xf7, 0x5f, 0x9f, 0x0c, 0x9b, 0x1e, 0xb1, 0x7c, 0xec, 0x61, 0xc6, 0x59, 0x09, 0x66, 0x7e, 0x01, - 0x9d, 0x90, 0x91, 0x24, 0x2c, 0x75, 0x34, 0x09, 0x1b, 0xce, 0x35, 0x09, 0x9b, 0xf2, 0x37, 0x10, - 0xc6, 0x05, 0x54, 0x12, 0x06, 0xd0, 0xd1, 0x1e, 0x5b, 0xf7, 0xd7, 0x39, 0x5f, 0x92, 0x0d, 0xd6, - 0x17, 0x0b, 0x2a, 0xaa, 0xa8, 0x06, 0xf4, 0xf0, 0x06, 0x95, 0x3e, 0x34, 0x49, 0x34, 0x83, 0x3b, - 0xaf, 0x3c, 0x65, 0xfe, 0x36, 0x4c, 0x27, 0x8a, 0xe9, 0x98, 0xf6, 0x6e, 0x30, 0x09, 0xdd, 0x7c, - 0x42, 0x46, 0x8f, 0x88, 0x54, 0xa5, 0x17, 0xb6, 0xc9, 
0xb5, 0x72, 0x11, 0x9a, 0x5c, 0xab, 0x3b, - 0xbe, 0x41, 0x55, 0xa2, 0x80, 0x13, 0x32, 0x7a, 0x6a, 0x7e, 0x5d, 0xda, 0xe4, 0xdb, 0xd2, 0x26, - 0x3f, 0x96, 0x36, 0xf9, 0xfc, 0xd3, 0xbe, 0xe5, 0x75, 0xd4, 0xcf, 0x72, 0xfc, 0x3b, 0x00, 0x00, - 0xff, 0xff, 0xdc, 0xa9, 0x0e, 0xdf, 0xc5, 0x05, 0x00, 0x00, -} - // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn @@ -597,9 +298,8 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// ElectionClient is the client API for Election service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for Election service + type ElectionClient interface { // Campaign waits to acquire leadership in an election, returning a LeaderKey // representing the leadership if successful. The LeaderKey can then be used @@ -628,7 +328,7 @@ func NewElectionClient(cc *grpc.ClientConn) ElectionClient { func (c *electionClient) Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) { out := new(CampaignResponse) - err := c.cc.Invoke(ctx, "/v3electionpb.Election/Campaign", in, out, opts...) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Campaign", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -637,7 +337,7 @@ func (c *electionClient) Campaign(ctx context.Context, in *CampaignRequest, opts func (c *electionClient) Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) { out := new(ProclaimResponse) - err := c.cc.Invoke(ctx, "/v3electionpb.Election/Proclaim", in, out, opts...) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Proclaim", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -646,7 +346,7 @@ func (c *electionClient) Proclaim(ctx context.Context, in *ProclaimRequest, opts func (c *electionClient) Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) { out := new(LeaderResponse) - err := c.cc.Invoke(ctx, "/v3electionpb.Election/Leader", in, out, opts...) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Leader", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -654,7 +354,7 @@ func (c *electionClient) Leader(ctx context.Context, in *LeaderRequest, opts ... } func (c *electionClient) Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) { - stream, err := c.cc.NewStream(ctx, &_Election_serviceDesc.Streams[0], "/v3electionpb.Election/Observe", opts...) + stream, err := grpc.NewClientStream(ctx, &_Election_serviceDesc.Streams[0], c.cc, "/v3electionpb.Election/Observe", opts...) if err != nil { return nil, err } @@ -687,14 +387,15 @@ func (x *electionObserveClient) Recv() (*LeaderResponse, error) { func (c *electionClient) Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) { out := new(ResignResponse) - err := c.cc.Invoke(ctx, "/v3electionpb.Election/Resign", in, out, opts...) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Resign", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } -// ElectionServer is the server API for Election service. 
+// Server API for Election service + type ElectionServer interface { // Campaign waits to acquire leadership in an election, returning a LeaderKey // representing the leadership if successful. The LeaderKey can then be used @@ -713,26 +414,6 @@ type ElectionServer interface { Resign(context.Context, *ResignRequest) (*ResignResponse, error) } -// UnimplementedElectionServer can be embedded to have forward compatible implementations. -type UnimplementedElectionServer struct { -} - -func (*UnimplementedElectionServer) Campaign(ctx context.Context, req *CampaignRequest) (*CampaignResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Campaign not implemented") -} -func (*UnimplementedElectionServer) Proclaim(ctx context.Context, req *ProclaimRequest) (*ProclaimResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Proclaim not implemented") -} -func (*UnimplementedElectionServer) Leader(ctx context.Context, req *LeaderRequest) (*LeaderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Leader not implemented") -} -func (*UnimplementedElectionServer) Observe(req *LeaderRequest, srv Election_ObserveServer) error { - return status.Errorf(codes.Unimplemented, "method Observe not implemented") -} -func (*UnimplementedElectionServer) Resign(ctx context.Context, req *ResignRequest) (*ResignResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Resign not implemented") -} - func RegisterElectionServer(s *grpc.Server, srv ElectionServer) { s.RegisterService(&_Election_serviceDesc, srv) } @@ -864,7 +545,7 @@ var _Election_serviceDesc = grpc.ServiceDesc{ func (m *CampaignRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -872,45 +553,34 @@ func (m *CampaignRequest) Marshal() (dAtA []byte, err error) { } func (m *CampaignRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CampaignRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x1a + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if m.Lease != 0 { - i = encodeVarintV3Election(dAtA, i, uint64(m.Lease)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Lease)) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.Value) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) } - return len(dAtA) - i, nil + return i, nil } func (m *CampaignResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -918,50 +588,37 @@ func (m *CampaignResponse) Marshal() (dAtA []byte, err error) { } func (m *CampaignResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*CampaignResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n1, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 } if m.Leader != nil { - { - size, err := m.Leader.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size())) + n2, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n2 } - return len(dAtA) - i, nil + return i, nil } func (m *LeaderKey) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -969,50 +626,39 @@ func (m *LeaderKey) Marshal() (dAtA []byte, err error) { } func (m *LeaderKey) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaderKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } - if m.Lease != 0 { - i = encodeVarintV3Election(dAtA, i, uint64(m.Lease)) - i-- - dAtA[i] = 0x20 + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } if m.Rev != 0 { - i = encodeVarintV3Election(dAtA, i, uint64(m.Rev)) - i-- dAtA[i] = 0x18 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Rev)) } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if m.Lease != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Lease)) } - return len(dAtA) - i, nil + return i, nil } func (m *LeaderRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1020,33 +666,23 @@ func (m *LeaderRequest) Marshal() (dAtA []byte, err error) { } func (m *LeaderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } - 
return len(dAtA) - i, nil + return i, nil } func (m *LeaderResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1054,50 +690,37 @@ func (m *LeaderResponse) Marshal() (dAtA []byte, err error) { } func (m *LeaderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n3, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 } if m.Kv != nil { - { - size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Kv.Size())) + n4, err := m.Kv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n4 } - return len(dAtA) - i, nil + return i, nil } func (m *ResignRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1105,38 +728,27 @@ func (m *ResignRequest) Marshal() (dAtA []byte, err error) { } func (m *ResignRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResignRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Leader != nil { - { - size, err := m.Leader.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size())) + n5, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 } - return len(dAtA) - i, nil + return i, nil } func (m *ResignResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1144,38 +756,27 @@ func (m *ResignResponse) Marshal() (dAtA []byte, err error) { } func (m *ResignResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResignResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n6, err := 
m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 } - return len(dAtA) - i, nil + return i, nil } func (m *ProclaimRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1183,45 +784,33 @@ func (m *ProclaimRequest) Marshal() (dAtA []byte, err error) { } func (m *ProclaimRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProclaimRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Leader != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size())) + n7, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 } if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) } - if m.Leader != nil { - { - size, err := m.Leader.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *ProclaimResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1229,49 +818,33 @@ func (m *ProclaimResponse) Marshal() (dAtA []byte, err error) { } func (m *ProclaimResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProclaimResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Election(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n8, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 } - return len(dAtA) - i, nil + return i, nil } func encodeVarintV3Election(dAtA []byte, offset int, v uint64) int { - offset -= sovV3Election(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *CampaignRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -1285,16 +858,10 @@ func (m *CampaignRequest) Size() (n int) { if l > 0 { n += 1 + l + sovV3Election(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *CampaignResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -1305,16 +872,10 @@ func (m *CampaignResponse) Size() (n int) { l = m.Leader.Size() n += 1 + l + sovV3Election(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaderKey) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) 
@@ -1331,32 +892,20 @@ func (m *LeaderKey) Size() (n int) { if m.Lease != 0 { n += 1 + sovV3Election(uint64(m.Lease)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaderRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovV3Election(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaderResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -1367,48 +916,30 @@ func (m *LeaderResponse) Size() (n int) { l = m.Kv.Size() n += 1 + l + sovV3Election(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *ResignRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Leader != nil { l = m.Leader.Size() n += 1 + l + sovV3Election(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *ResignResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovV3Election(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *ProclaimRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Leader != nil { @@ -1419,30 +950,28 @@ func (m *ProclaimRequest) Size() (n int) { if l > 0 { n += 1 + l + sovV3Election(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *ProclaimResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovV3Election(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func sovV3Election(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozV3Election(x uint64) (n int) { return sovV3Election(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1462,7 +991,7 @@ func (m *CampaignRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1490,7 +1019,7 @@ func (m *CampaignRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1499,9 +1028,6 @@ func (m *CampaignRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Election } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1524,7 +1050,7 @@ func (m *CampaignRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lease |= int64(b&0x7F) << shift + m.Lease |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1543,7 +1069,7 @@ func (m *CampaignRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1552,9 +1078,6 @@ func (m *CampaignRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Election } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1572,13 +1095,9 @@ func (m *CampaignRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthV3Election } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthV3Election 
- } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1603,7 +1122,7 @@ func (m *CampaignResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1631,7 +1150,7 @@ func (m *CampaignResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1640,9 +1159,6 @@ func (m *CampaignResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Election } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1667,7 +1183,7 @@ func (m *CampaignResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1676,9 +1192,6 @@ func (m *CampaignResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Election } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1698,13 +1211,9 @@ func (m *CampaignResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthV3Election } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthV3Election - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1729,7 +1238,7 @@ func (m *LeaderKey) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1757,7 +1266,7 @@ func (m *LeaderKey) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1766,9 +1275,6 @@ func (m *LeaderKey) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Election } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1791,7 +1297,7 @@ func (m *LeaderKey) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1800,9 +1306,6 @@ func (m *LeaderKey) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Election } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1825,7 +1328,7 @@ func (m *LeaderKey) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Rev |= int64(b&0x7F) << shift + m.Rev |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1844,7 +1347,7 @@ func (m *LeaderKey) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lease |= int64(b&0x7F) << shift + m.Lease |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1858,13 +1361,9 @@ func (m *LeaderKey) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthV3Election } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthV3Election - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -1889,7 +1388,7 @@ func (m *LeaderRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1917,7 +1416,7 @@ func (m *LeaderRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1926,9 +1425,6 @@ func (m *LeaderRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Election } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1946,13 +1442,9 @@ func (m *LeaderRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthV3Election } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthV3Election - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1977,7 +1469,7 @@ func (m *LeaderResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2005,7 +1497,7 @@ func (m *LeaderResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2014,9 +1506,6 @@ func (m *LeaderResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Election } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2041,7 +1530,7 @@ func (m *LeaderResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2050,9 +1539,6 @@ func (m *LeaderResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Election } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2072,13 +1558,9 @@ func (m *LeaderResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthV3Election } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthV3Election - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -2103,7 +1585,7 @@ func (m *ResignRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2131,7 +1613,7 @@ func (m *ResignRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2140,9 +1622,6 @@ func (m *ResignRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Election } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2162,13 +1641,9 @@ func (m *ResignRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthV3Election } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthV3Election - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -2193,7 +1668,7 @@ func (m *ResignResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2221,7 +1696,7 @@ func (m *ResignResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2230,9 +1705,6 @@ func (m *ResignResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Election } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2252,13 +1724,9 @@ func (m *ResignResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthV3Election } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthV3Election - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -2283,7 +1751,7 @@ func (m *ProclaimRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2311,7 +1779,7 @@ func (m *ProclaimRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2320,9 +1788,6 @@ func (m *ProclaimRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Election } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2347,7 +1812,7 @@ func (m *ProclaimRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2356,9 +1821,6 @@ func (m *ProclaimRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Election } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2376,13 +1838,9 @@ func (m *ProclaimRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthV3Election } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthV3Election - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -2407,7 +1865,7 @@ func (m *ProclaimResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2435,7 +1893,7 @@ func (m *ProclaimResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2444,9 +1902,6 @@ func (m *ProclaimResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Election } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Election - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2466,13 +1921,9 @@ func (m *ProclaimResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthV3Election } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthV3Election - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -2536,11 +1987,8 @@ func skipV3Election(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthV3Election - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthV3Election } return iNdEx, nil @@ -2571,9 +2019,6 @@ func skipV3Election(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthV3Election - } } return iNdEx, nil case 4: @@ -2592,3 +2037,43 @@ var ( ErrInvalidLengthV3Election = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowV3Election = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("v3election.proto", fileDescriptorV3Election) } + +var fileDescriptorV3Election = []byte{ + // 535 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xcf, 0x6e, 0xd3, 0x40, + 0x10, 0xc6, 0x59, 0x27, 0x84, 0x32, 0xa4, 0xad, 0x65, 0x82, 0x48, 0x43, 0x30, 0xd1, 0x22, 0xa1, + 0x2a, 0x07, 0x2f, 0x6a, 0x38, 0xe5, 0x84, 0x40, 0xa0, 0x4a, 0x45, 0x02, 0x7c, 0x40, 0x70, 0xdc, + 0xb8, 0x23, 0x37, 0x8a, 0xe3, 0x35, 0xb6, 0x6b, 0x29, 0x57, 0x5e, 0x81, 0x03, 0x3c, 0x12, 0x47, + 0x24, 0x5e, 0x00, 0x05, 0x1e, 0x04, 0xed, 0xae, 0x8d, 0xff, 0x28, 0x41, 0xa8, 0xb9, 0x58, 0xe3, + 0x9d, 0xcf, 0xf3, 0x9b, 0x6f, 0x76, 0x12, 0x30, 0xb3, 0x09, 0x06, 0xe8, 0xa5, 0x73, 0x11, 0x3a, + 0x51, 0x2c, 0x52, 0x61, 0x75, 0xcb, 0x93, 0x68, 0x36, 0xe8, 0xf9, 0xc2, 0x17, 0x2a, 0xc1, 0x64, + 0xa4, 0x35, 0x83, 0x47, 0x98, 0x7a, 0xe7, 0x4c, 0x3e, 0x12, 0x8c, 0x33, 0x8c, 0x2b, 0x61, 0x34, + 0x63, 0x71, 0xe4, 0xe5, 0xba, 0x23, 0xa5, 0x5b, 0x66, 0x9e, 0xa7, 0x1e, 0xd1, 0x8c, 0x2d, 0xb2, + 0x3c, 0x35, 0xf4, 0x85, 0xf0, 0x03, 0x64, 0x3c, 0x9a, 0x33, 0x1e, 0x86, 0x22, 0xe5, 0x92, 0x98, + 0xe8, 0x2c, 0x7d, 0x0b, 0x87, 0xcf, 0xf9, 0x32, 0xe2, 0x73, 0x3f, 0x74, 0xf1, 0xe3, 0x25, 0x26, + 0xa9, 0x65, 0x41, 0x3b, 0xe4, 0x4b, 0xec, 0x93, 0x11, 0x39, 0xee, 0xba, 0x2a, 0xb6, 0x7a, 0x70, + 0x3d, 0x40, 0x9e, 0x60, 0xdf, 0x18, 0x91, 0xe3, 0x96, 0xab, 0x5f, 0xe4, 0x69, 0xc6, 0x83, 0x4b, + 0xec, 0xb7, 0x94, 0x54, 0xbf, 0xd0, 0x15, 0x98, 0x65, 0xc9, 0x24, 0x12, 0x61, 0x82, 0xd6, 0x13, + 0xe8, 0x5c, 0x20, 0x3f, 0xc7, 0x58, 0x55, 0xbd, 0x75, 0x32, 0x74, 0xaa, 0x46, 0x9c, 0x42, 0x77, + 0xaa, 0x34, 0x6e, 0xae, 0xb5, 0x18, 0x74, 0x02, 0xfd, 0x95, 0xa1, 0xbe, 0xba, 0xeb, 0x54, 0x47, + 0xe6, 0xbc, 0x52, 0xb9, 0x33, 0x5c, 0xb9, 0xb9, 0x8c, 0x7e, 0x80, 0x9b, 0x7f, 0x0f, 0x37, 0xfa, + 0x30, 0xa1, 0xb5, 0xc0, 0x95, 0x2a, 0xd7, 0x75, 0x65, 0x28, 0x4f, 0x62, 0xcc, 0x94, 0x83, 0x96, + 0x2b, 0xc3, 0xd2, 0x6b, 0xbb, 0xe2, 0x95, 0x3e, 0x84, 0x7d, 0x5d, 0xfa, 0x1f, 0x63, 0xa2, 0x17, + 0x70, 0x50, 0x88, 0x76, 0x32, 0x3e, 0x02, 0x63, 0x91, 0xe5, 0xa6, 0x4d, 0x47, 0xdf, 0xa8, 0x73, + 0x86, 0xab, 0x77, 0x72, 0xc0, 0xae, 0xb1, 0xc8, 0xe8, 0x53, 0xd8, 0x77, 0x31, 0xa9, 0xdc, 0x5a, + 0x39, 0x2b, 0xf2, 0x7f, 0xb3, 0x7a, 0x09, 0x07, 0x45, 0x85, 0x5d, 0x7a, 0xa5, 0xef, 0xe1, 0xf0, + 0x4d, 0x2c, 0xbc, 0x80, 0xcf, 0x97, 0x57, 0xed, 0xa5, 0x5c, 0x24, 0xa3, 0xba, 0x48, 0xa7, 0x60, + 0x96, 0x95, 0x77, 0xe9, 0xf1, 0xe4, 0x4b, 0x1b, 0xf6, 0x5e, 0xe4, 0x0d, 0x58, 0x0b, 0xd8, 0x2b, + 0xf6, 0xd3, 0xba, 0x5f, 0xef, 0xac, 0xf1, 0x53, 0x18, 0xd8, 0xdb, 0xd2, 0x9a, 0x42, 0x47, 0x9f, + 0x7e, 0xfc, 0xfe, 0x6c, 0x0c, 0xe8, 0x1d, 0x96, 0x4d, 0x58, 0x21, 0x64, 0x5e, 0x2e, 0x9b, 0x92, + 0xb1, 0x84, 0x15, 0x1e, 0x9a, 0xb0, 0xc6, 0xd4, 0x9a, 0xb0, 0xa6, 0xf5, 0x2d, 0xb0, 0x28, 0x97, + 0x49, 0x98, 0x07, 0x1d, 0x3d, 0x5b, 0xeb, 0xde, 0xa6, 0x89, 0x17, 
0xa0, 0xe1, 0xe6, 0x64, 0x8e, + 0xb1, 0x15, 0xa6, 0x4f, 0x6f, 0xd7, 0x30, 0xfa, 0xa2, 0x24, 0xc4, 0x87, 0x1b, 0xaf, 0x67, 0x6a, + 0xe0, 0xbb, 0x50, 0x1e, 0x28, 0xca, 0x11, 0xed, 0xd5, 0x28, 0x42, 0x17, 0x9e, 0x92, 0xf1, 0x63, + 0x22, 0xdd, 0xe8, 0x05, 0x6d, 0x72, 0x6a, 0x8b, 0xdf, 0xe4, 0xd4, 0x77, 0x7a, 0x8b, 0x9b, 0x58, + 0x89, 0xa6, 0x64, 0xfc, 0xcc, 0xfc, 0xb6, 0xb6, 0xc9, 0xf7, 0xb5, 0x4d, 0x7e, 0xae, 0x6d, 0xf2, + 0xf5, 0x97, 0x7d, 0x6d, 0xd6, 0x51, 0x7f, 0x8c, 0x93, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x2f, + 0x1d, 0xfa, 0x11, 0xb1, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto similarity index 94% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto rename to vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto index cb475b820..918f39fa8 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto @@ -19,21 +19,21 @@ service Election { // leadership still being held, and resign from the election. rpc Campaign(CampaignRequest) returns (CampaignResponse) { option (google.api.http) = { - post: "/v3beta/election/campaign" + post: "/v3/election/campaign" body: "*" }; } // Proclaim updates the leader's posted value with a new value. rpc Proclaim(ProclaimRequest) returns (ProclaimResponse) { option (google.api.http) = { - post: "/v3beta/election/proclaim" + post: "/v3/election/proclaim" body: "*" }; } // Leader returns the current election proclamation, if any. rpc Leader(LeaderRequest) returns (LeaderResponse) { option (google.api.http) = { - post: "/v3beta/election/leader" + post: "/v3/election/leader" body: "*" }; } @@ -41,7 +41,7 @@ service Election { // elected leaders. rpc Observe(LeaderRequest) returns (stream LeaderResponse) { option (google.api.http) = { - post: "/v3beta/election/observe" + post: "/v3/election/observe" body: "*" }; } @@ -49,7 +49,7 @@ service Election { // leadership on the election. 
rpc Resign(ResignRequest) returns (ResignResponse) { option (google.api.http) = { - post: "/v3beta/election/resign" + post: "/v3/election/resign" body: "*" }; } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3lock/doc.go diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/lock.go similarity index 91% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3lock/lock.go index a5efcbab5..5a17c86fc 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/lock.go @@ -17,9 +17,9 @@ package v3lock import ( "context" - "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/clientv3/concurrency" - "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/clientv3/concurrency" + "go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb" ) type lockServer struct { diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go similarity index 96% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go index efecc45db..1eeeff185 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go @@ -9,7 +9,7 @@ It translates gRPC into RESTful JSON APIs. 
package gw import ( - "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" + "go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb" "io" "net/http" @@ -33,7 +33,7 @@ func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, clien var protoReq v3lockpb.LockRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -46,7 +46,7 @@ func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, cli var protoReq v3lockpb.UnlockRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -155,9 +155,9 @@ func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } var ( - pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3beta", "lock"}, "")) + pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3", "lock"}, "")) - pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "lock", "unlock"}, "")) + pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lock", "unlock"}, "")) ) var ( diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go similarity index 65% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go index 1ece90bef..36ebdd90f 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go @@ -1,22 +1,36 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: v3lock.proto +/* + Package v3lockpb is a generated protocol buffer package. + + It is generated from these files: + v3lock.proto + + It has these top-level messages: + LockRequest + LockResponse + UnlockRequest + UnlockResponse +*/ package v3lockpb import ( - context "context" - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" - etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" + + etcdserverpb "go.etcd.io/etcd/etcdserver/etcdserverpb" + + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -36,46 +50,15 @@ type LockRequest struct { // lease is the ID of the lease that will be attached to ownership of the // lock. If the lease expires or is revoked and currently holds the lock, // the lock is automatically released. Calls to Lock with the same lease will - // be treated as a single acquistion; locking twice with the same lease is a + // be treated as a single acquisition; locking twice with the same lease is a // no-op. 
- Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LockRequest) Reset() { *m = LockRequest{} } -func (m *LockRequest) String() string { return proto.CompactTextString(m) } -func (*LockRequest) ProtoMessage() {} -func (*LockRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_52389b3e2f253201, []int{0} -} -func (m *LockRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LockRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LockRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LockRequest.Merge(m, src) -} -func (m *LockRequest) XXX_Size() int { - return m.Size() -} -func (m *LockRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LockRequest.DiscardUnknown(m) + Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"` } -var xxx_messageInfo_LockRequest proto.InternalMessageInfo +func (m *LockRequest) Reset() { *m = LockRequest{} } +func (m *LockRequest) String() string { return proto.CompactTextString(m) } +func (*LockRequest) ProtoMessage() {} +func (*LockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{0} } func (m *LockRequest) GetName() []byte { if m != nil { @@ -92,48 +75,17 @@ func (m *LockRequest) GetLease() int64 { } type LockResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // key is a key that will exist on etcd for the duration that the Lock caller // owns the lock. Users should not modify this key or the lock may exhibit // undefined behavior. 
- Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` } -func (m *LockResponse) Reset() { *m = LockResponse{} } -func (m *LockResponse) String() string { return proto.CompactTextString(m) } -func (*LockResponse) ProtoMessage() {} -func (*LockResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_52389b3e2f253201, []int{1} -} -func (m *LockResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LockResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LockResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LockResponse.Merge(m, src) -} -func (m *LockResponse) XXX_Size() int { - return m.Size() -} -func (m *LockResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LockResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LockResponse proto.InternalMessageInfo +func (m *LockResponse) Reset() { *m = LockResponse{} } +func (m *LockResponse) String() string { return proto.CompactTextString(m) } +func (*LockResponse) ProtoMessage() {} +func (*LockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{1} } func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader { if m != nil { @@ -151,44 +103,13 @@ func (m *LockResponse) GetKey() []byte { type UnlockRequest struct { // key is the lock ownership key granted by Lock. - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` } -func (m *UnlockRequest) Reset() { *m = UnlockRequest{} } -func (m *UnlockRequest) String() string { return proto.CompactTextString(m) } -func (*UnlockRequest) ProtoMessage() {} -func (*UnlockRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_52389b3e2f253201, []int{2} -} -func (m *UnlockRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UnlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UnlockRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UnlockRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UnlockRequest.Merge(m, src) -} -func (m *UnlockRequest) XXX_Size() int { - return m.Size() -} -func (m *UnlockRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UnlockRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UnlockRequest proto.InternalMessageInfo +func (m *UnlockRequest) Reset() { *m = UnlockRequest{} } +func (m *UnlockRequest) String() string { return proto.CompactTextString(m) } +func (*UnlockRequest) ProtoMessage() {} +func (*UnlockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{2} } func (m *UnlockRequest) GetKey() []byte { if m != nil { @@ -198,44 +119,13 @@ func (m *UnlockRequest) GetKey() []byte { } type UnlockResponse struct { - Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - 
XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UnlockResponse) Reset() { *m = UnlockResponse{} } -func (m *UnlockResponse) String() string { return proto.CompactTextString(m) } -func (*UnlockResponse) ProtoMessage() {} -func (*UnlockResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_52389b3e2f253201, []int{3} -} -func (m *UnlockResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UnlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UnlockResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UnlockResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UnlockResponse.Merge(m, src) -} -func (m *UnlockResponse) XXX_Size() int { - return m.Size() -} -func (m *UnlockResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UnlockResponse.DiscardUnknown(m) + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_UnlockResponse proto.InternalMessageInfo +func (m *UnlockResponse) Reset() { *m = UnlockResponse{} } +func (m *UnlockResponse) String() string { return proto.CompactTextString(m) } +func (*UnlockResponse) ProtoMessage() {} +func (*UnlockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{3} } func (m *UnlockResponse) GetHeader() *etcdserverpb.ResponseHeader { if m != nil { @@ -251,33 +141,6 @@ func init() { proto.RegisterType((*UnlockResponse)(nil), "v3lockpb.UnlockResponse") } -func init() { proto.RegisterFile("v3lock.proto", fileDescriptor_52389b3e2f253201) } - -var fileDescriptor_52389b3e2f253201 = []byte{ - // 335 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9, - 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44, - 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x2d, 0xb5, 0x24, 0x39, - 0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, - 0x92, 0xa1, 0xea, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13, - 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0xb2, 0x4a, 0xe6, 0x5c, 0xdc, - 0x3e, 0xf9, 0xc9, 0xd9, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79, - 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x08, 0x17, 0x6b, - 0x4e, 0x6a, 0x62, 0x71, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x73, 0x10, 0x84, 0xa3, 0x14, 0xc6, - 0xc5, 0x03, 0xd1, 0x58, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x64, 0xc2, 0xc5, 0x96, 0x91, 0x9a, - 0x98, 0x92, 0x5a, 0x04, 0xd6, 0xcb, 0x6d, 0x24, 0xa3, 0x87, 0xec, 0x1e, 0x3d, 0x98, 0x3a, 0x0f, - 0xb0, 0x9a, 0x20, 0xa8, 0x5a, 0x21, 0x01, 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0xb0, 0xc9, 0x3c, 0x41, - 0x20, 0xa6, 0x92, 0x22, 0x17, 0x6f, 0x68, 0x5e, 0x0e, 0x92, 0x93, 0xa0, 0x4a, 0x18, 0x11, 0x4a, - 0xdc, 0xb8, 0xf8, 0x60, 0x4a, 0x28, 0xb1, 0xdc, 0x68, 0x07, 0x23, 0x17, 0x0b, 0xc8, 0x0f, 0x42, - 0xc1, 0x50, 0x5a, 0x54, 0x0f, 0x16, 0xe6, 0x7a, 0x48, 0x81, 0x22, 0x25, 0x86, 0x2e, 0x0c, 0x31, - 0x4d, 0x49, 0xa6, 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0x62, 0x4a, 0x82, 0xfa, 0x65, 0xc6, 0x49, 0xa9, - 0x25, 0x89, 0xfa, 0x20, 0x45, 0x60, 0xc2, 0x8a, 0x51, 0x4b, 0x28, 0x9a, 0x8b, 0x0d, 0xe2, 0x4a, - 
0x21, 0x71, 0x84, 0x7e, 0x14, 0xaf, 0x49, 0x49, 0x60, 0x4a, 0x40, 0x8d, 0x96, 0x03, 0x1b, 0x2d, - 0xa1, 0x24, 0x8c, 0x62, 0x74, 0x69, 0x1e, 0xd4, 0x70, 0x27, 0x81, 0x13, 0x8f, 0xe4, 0x18, 0x2f, - 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36, 0x70, 0x7c, - 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x10, 0x82, 0x89, 0xf0, 0x45, 0x02, 0x00, 0x00, -} - // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn @@ -286,9 +149,8 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// LockClient is the client API for Lock service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for Lock service + type LockClient interface { // Lock acquires a distributed shared lock on a given named lock. // On success, it will return a unique key that exists so long as the @@ -313,7 +175,7 @@ func NewLockClient(cc *grpc.ClientConn) LockClient { func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) { out := new(LockResponse) - err := c.cc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, opts...) + err := grpc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -322,14 +184,15 @@ func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.Cal func (c *lockClient) Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) { out := new(UnlockResponse) - err := c.cc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, opts...) + err := grpc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } -// LockServer is the server API for Lock service. +// Server API for Lock service + type LockServer interface { // Lock acquires a distributed shared lock on a given named lock. // On success, it will return a unique key that exists so long as the @@ -344,17 +207,6 @@ type LockServer interface { Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error) } -// UnimplementedLockServer can be embedded to have forward compatible implementations. 
-type UnimplementedLockServer struct { -} - -func (*UnimplementedLockServer) Lock(ctx context.Context, req *LockRequest) (*LockResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Lock not implemented") -} -func (*UnimplementedLockServer) Unlock(ctx context.Context, req *UnlockRequest) (*UnlockResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Unlock not implemented") -} - func RegisterLockServer(s *grpc.Server, srv LockServer) { s.RegisterService(&_Lock_serviceDesc, srv) } @@ -415,7 +267,7 @@ var _Lock_serviceDesc = grpc.ServiceDesc{ func (m *LockRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -423,38 +275,28 @@ func (m *LockRequest) Marshal() (dAtA []byte, err error) { } func (m *LockRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if m.Lease != 0 { - i = encodeVarintV3Lock(dAtA, i, uint64(m.Lease)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(m.Lease)) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *LockResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -462,45 +304,33 @@ func (m *LockResponse) Marshal() (dAtA []byte, err error) { } func (m *LockResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size())) + n1, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 } if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Lock(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *UnlockRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -508,33 +338,23 @@ func (m *UnlockRequest) Marshal() (dAtA []byte, err error) { } func (m *UnlockRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UnlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { 
- i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - return len(dAtA) - i, nil + return i, nil } func (m *UnlockResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -542,49 +362,33 @@ func (m *UnlockResponse) Marshal() (dAtA []byte, err error) { } func (m *UnlockResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UnlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintV3Lock(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size())) + n2, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 } - return len(dAtA) - i, nil + return i, nil } func encodeVarintV3Lock(dAtA []byte, offset int, v uint64) int { - offset -= sovV3Lock(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *LockRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -594,16 +398,10 @@ func (m *LockRequest) Size() (n int) { if m.Lease != 0 { n += 1 + sovV3Lock(uint64(m.Lease)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LockResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -614,46 +412,38 @@ func (m *LockResponse) Size() (n int) { if l > 0 { n += 1 + l + sovV3Lock(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *UnlockRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Key) if l > 0 { n += 1 + l + sovV3Lock(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *UnlockResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovV3Lock(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func sovV3Lock(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozV3Lock(x uint64) (n int) { return sovV3Lock(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -673,7 +463,7 @@ func (m *LockRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -701,7 +491,7 @@ func (m *LockRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -710,9 +500,6 @@ func (m *LockRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Lock } postIndex := 
iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Lock - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -735,7 +522,7 @@ func (m *LockRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lease |= int64(b&0x7F) << shift + m.Lease |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -749,13 +536,9 @@ func (m *LockRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthV3Lock } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthV3Lock - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -780,7 +563,7 @@ func (m *LockResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -808,7 +591,7 @@ func (m *LockResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -817,9 +600,6 @@ func (m *LockResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Lock } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Lock - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -844,7 +624,7 @@ func (m *LockResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -853,9 +633,6 @@ func (m *LockResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Lock } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Lock - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -873,13 +650,9 @@ func (m *LockResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthV3Lock } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthV3Lock - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -904,7 +677,7 @@ func (m *UnlockRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -932,7 +705,7 @@ func (m *UnlockRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -941,9 +714,6 @@ func (m *UnlockRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Lock } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthV3Lock - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -961,13 +731,9 @@ func (m *UnlockRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthV3Lock } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthV3Lock - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -992,7 +758,7 @@ func (m *UnlockResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1020,7 +786,7 @@ func (m *UnlockResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1029,9 +795,6 @@ func (m *UnlockResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthV3Lock } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthV3Lock - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1051,13 +814,9 @@ func (m *UnlockResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthV3Lock } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthV3Lock - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1121,11 +880,8 @@ func skipV3Lock(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthV3Lock - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthV3Lock } return iNdEx, nil @@ -1156,9 +912,6 @@ func skipV3Lock(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthV3Lock - } } return iNdEx, nil case 4: @@ -1177,3 +930,30 @@ var ( ErrInvalidLengthV3Lock = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowV3Lock = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("v3lock.proto", fileDescriptorV3Lock) } + +var fileDescriptorV3Lock = []byte{ + // 331 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9, + 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44, + 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x2d, 0xb5, 0x24, 0x39, + 0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, + 0x92, 0xa1, 0xea, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13, + 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0xb2, 0x4a, 0xe6, 0x5c, 0xdc, + 0x3e, 0xf9, 0xc9, 0xd9, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79, + 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x08, 0x17, 0x6b, + 0x4e, 0x6a, 0x62, 0x71, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x73, 0x10, 0x84, 0xa3, 0x14, 0xc6, + 0xc5, 0x03, 0xd1, 0x58, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x64, 0xc2, 0xc5, 0x96, 0x91, 0x9a, + 0x98, 0x92, 0x5a, 0x04, 0xd6, 0xcb, 0x6d, 0x24, 0xa3, 0x87, 0xec, 0x1e, 0x3d, 0x98, 0x3a, 0x0f, + 0xb0, 0x9a, 0x20, 0xa8, 0x5a, 0x21, 0x01, 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0xb0, 0xc9, 0x3c, 0x41, + 0x20, 0xa6, 0x92, 0x22, 0x17, 0x6f, 0x68, 0x5e, 0x0e, 0x92, 0x93, 0xa0, 0x4a, 0x18, 0x11, 0x4a, + 0xdc, 0xb8, 0xf8, 0x60, 0x4a, 0x28, 0xb1, 0xdc, 0x68, 0x03, 0x23, 0x17, 0x0b, 0xc8, 0x0f, 0x42, + 0xfe, 0x50, 0x5a, 0x54, 0x0f, 0x16, 0xe6, 0x7a, 0x48, 0x81, 0x22, 0x25, 0x86, 0x2e, 0x0c, 0x31, + 0x4d, 0x49, 0xa2, 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0x42, 0x4a, 0xbc, 0xfa, 0x65, 0xc6, 0xfa, 0x20, + 0x05, 0x60, 0xc2, 0x8a, 0x51, 0x4b, 0x28, 0x9c, 0x8b, 0x0d, 0xe2, 0x42, 0x21, 0x71, 0x84, 0x5e, + 0x14, 0x6f, 0x49, 0x49, 0x60, 0x4a, 0x40, 0x8d, 0x95, 0x02, 0x1b, 0x2b, 0xa2, 0xc4, 0x0f, 0x37, + 0xb6, 0x34, 
0x0f, 0x6a, 0xb0, 0x93, 0xc0, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, + 0x78, 0x24, 0xc7, 0x38, 0xe3, 0xb1, 0x1c, 0x43, 0x12, 0x1b, 0x38, 0x1e, 0x8d, 0x01, 0x01, 0x00, + 0x00, 0xff, 0xff, 0x65, 0xa8, 0x61, 0xb1, 0x3d, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto similarity index 93% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto rename to vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto index 44b698d66..7220c7f0a 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto @@ -20,7 +20,7 @@ service Lock { // lease associate with the owner expires. rpc Lock(LockRequest) returns (LockResponse) { option (google.api.http) = { - post: "/v3beta/lock/lock" + post: "/v3/lock/lock" body: "*" }; } @@ -30,7 +30,7 @@ service Lock { // ownership of the lock. rpc Unlock(UnlockRequest) returns (UnlockResponse) { option (google.api.http) = { - post: "/v3beta/lock/unlock" + post: "/v3/lock/unlock" body: "*" }; } @@ -42,7 +42,7 @@ message LockRequest { // lease is the ID of the lease that will be attached to ownership of the // lock. If the lease expires or is revoked and currently holds the lock, // the lock is automatically released. Calls to Lock with the same lease will - // be treated as a single acquistion; locking twice with the same lease is a + // be treated as a single acquisition; locking twice with the same lease is a // no-op. int64 lease = 2; } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/auth.go similarity index 98% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/auth.go index ca8e53ad0..62ce757be 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/auth.go @@ -17,8 +17,8 @@ package v3rpc import ( "context" - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/etcdserver" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" ) type AuthServer struct { diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/codec.go similarity index 100% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/codec.go diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/grpc.go similarity index 86% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/grpc.go index c97e74662..333201661 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/grpc.go @@ -18,13 +18,13 @@ import ( "crypto/tls" "math" - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/etcdserver" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" - "github.com/grpc-ecosystem/go-grpc-middleware" - "github.com/grpc-ecosystem/go-grpc-prometheus" + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + 
"go.etcd.io/etcd/clientv3/credentials" "google.golang.org/grpc" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" ) @@ -39,7 +39,8 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOptio var opts []grpc.ServerOption opts = append(opts, grpc.CustomCodec(&codec{})) if tls != nil { - opts = append(opts, grpc.Creds(credentials.NewTLS(tls))) + bundle := credentials.NewBundle(credentials.Config{TLSConfig: tls}) + opts = append(opts, grpc.Creds(bundle.TransportCredentials())) } opts = append(opts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( newLogUnaryInterceptor(s), diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/header.go similarity index 87% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/header.go index 75da52fb8..f23b6a738 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/header.go @@ -15,14 +15,14 @@ package v3rpc import ( - "github.com/coreos/etcd/etcdserver" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/etcdserver" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" ) type header struct { clusterID int64 memberID int64 - raftTimer etcdserver.RaftTimer + sg etcdserver.RaftStatusGetter rev func() int64 } @@ -30,7 +30,7 @@ func newHeader(s *etcdserver.EtcdServer) header { return header{ clusterID: int64(s.Cluster().ID()), memberID: int64(s.ID()), - raftTimer: s, + sg: s, rev: func() int64 { return s.KV().Rev() }, } } @@ -42,7 +42,7 @@ func (h *header) fill(rh *pb.ResponseHeader) { } rh.ClusterId = uint64(h.clusterID) rh.MemberId = uint64(h.memberID) - rh.RaftTerm = h.raftTimer.Term() + rh.RaftTerm = h.sg.Term() if rh.Revision == 0 { rh.Revision = h.rev() } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/interceptor.go similarity index 89% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/interceptor.go index fbc2ba3ed..ce9047e80 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/interceptor.go @@ -16,16 +16,17 @@ package v3rpc import ( "context" - "strings" "sync" "time" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + "go.etcd.io/etcd/pkg/types" + "go.etcd.io/etcd/raft" + + "github.com/coreos/pkg/capnslog" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/metadata" @@ -47,14 +48,12 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { return nil, rpctypes.ErrGRPCNotCapable } + if s.IsMemberExist(s.ID()) && s.IsLearner() && !isRPCSupportedForLearner(req) { + return nil, rpctypes.ErrGPRCNotSupportedForLearner + } + md, ok := metadata.FromIncomingContext(ctx) if ok { - ver, vs := "unknown", metadataGet(md, rpctypes.MetadataClientAPIVersionKey) - if len(vs) > 0 { - ver = vs[0] - } - 
clientRequests.WithLabelValues("unary", ver).Inc() - if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { if s.Leader() == types.ID(raft.None) { return nil, rpctypes.ErrGRPCNoLeader @@ -70,7 +69,11 @@ func newLogUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerIntercepto return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { startTime := time.Now() resp, err := handler(ctx, req) - defer logUnaryRequestStats(ctx, nil, info, startTime, req, resp) + lg := s.Logger() + if (lg != nil && lg.Core().Enabled(zap.DebugLevel)) || // using zap logger and debug level is enabled + (lg == nil && plog.LevelAt(capnslog.DEBUG)) { // or, using capnslog and debug level is enabled + defer logUnaryRequestStats(ctx, lg, info, startTime, req, resp) + } return resp, err } } @@ -82,7 +85,7 @@ func logUnaryRequestStats(ctx context.Context, lg *zap.Logger, info *grpc.UnaryS if ok { remote = peerInfo.Addr.String() } - var responseType string = info.FullMethod + responseType := info.FullMethod var reqCount, respCount int64 var reqSize, respSize int var reqContent string @@ -191,14 +194,12 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor return rpctypes.ErrGRPCNotCapable } + if s.IsMemberExist(s.ID()) && s.IsLearner() { // learner does not support stream RPC + return rpctypes.ErrGPRCNotSupportedForLearner + } + md, ok := metadata.FromIncomingContext(ss.Context()) if ok { - ver, vs := "unknown", metadataGet(md, rpctypes.MetadataClientAPIVersionKey) - if len(vs) > 0 { - ver = vs[0] - } - clientRequests.WithLabelValues("stream", ver).Inc() - if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { if s.Leader() == types.ID(raft.None) { return rpctypes.ErrGRPCNoLeader @@ -217,6 +218,7 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor smap.mu.Unlock() cancel() }() + } } @@ -272,8 +274,3 @@ func monitorLeader(s *etcdserver.EtcdServer) *streamsMap { return smap } - -func metadataGet(md metadata.MD, k string) []string { - k = strings.ToLower(k) - return md[k] -} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/key.go similarity index 96% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/key.go index 5e4fbcf59..ff59bac34 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/key.go @@ -18,16 +18,16 @@ package v3rpc import ( "context" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/adt" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/pkg/adt" "github.com/coreos/pkg/capnslog" ) var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v3rpc") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver/api/v3rpc") ) type kvServer struct { diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/lease.go similarity index 73% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/lease.go index 
5b4f2b142..7441beedf 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/lease.go @@ -18,19 +18,22 @@ import ( "context" "io" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/lease" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/lease" + + "go.uber.org/zap" ) type LeaseServer struct { + lg *zap.Logger hdr header le etcdserver.Lessor } func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { - return &LeaseServer{le: s, hdr: newHeader(s)} + return &LeaseServer{lg: s.Cfg.Logger, le: s, hdr: newHeader(s)} } func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { @@ -108,9 +111,18 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro } if err != nil { if isClientCtxErr(stream.Context().Err(), err) { - plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) + if ls.lg != nil { + ls.lg.Debug("failed to receive lease keepalive request from gRPC stream", zap.Error(err)) + } else { + plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) + } } else { - plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) + if ls.lg != nil { + ls.lg.Warn("failed to receive lease keepalive request from gRPC stream", zap.Error(err)) + } else { + plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) + } + streamFailures.WithLabelValues("receive", "lease-keepalive").Inc() } return err } @@ -138,9 +150,18 @@ func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro err = stream.Send(resp) if err != nil { if isClientCtxErr(stream.Context().Err(), err) { - plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) + if ls.lg != nil { + ls.lg.Debug("failed to send lease keepalive response to gRPC stream", zap.Error(err)) + } else { + plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) + } } else { - plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) + if ls.lg != nil { + ls.lg.Warn("failed to send lease keepalive response to gRPC stream", zap.Error(err)) + } else { + plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) + } + streamFailures.WithLabelValues("send", "lease-keepalive").Inc() } return err } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/maintenance.go similarity index 69% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/maintenance.go index 9c168d78b..c51271ac0 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/maintenance.go @@ -18,17 +18,17 @@ import ( "context" "crypto/sha256" "io" - "time" - - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/types" - 
"github.com/coreos/etcd/version" - "github.com/dustin/go-humanize" + + "go.etcd.io/etcd/auth" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/mvcc" + "go.etcd.io/etcd/mvcc/backend" + "go.etcd.io/etcd/raft" + "go.etcd.io/etcd/version" + + "go.uber.org/zap" ) type KVGetter interface { @@ -40,6 +40,9 @@ type BackendGetter interface { } type Alarmer interface { + // Alarms is implemented in Server interface located in etcdserver/server.go + // It returns a list of alarms present in the AlarmStore + Alarms() []*pb.AlarmMember Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) } @@ -47,45 +50,54 @@ type LeaderTransferrer interface { MoveLeader(ctx context.Context, lead, target uint64) error } -type RaftStatusGetter interface { - etcdserver.RaftTimer - ID() types.ID - Leader() types.ID -} - type AuthGetter interface { AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) AuthStore() auth.AuthStore } +type ClusterStatusGetter interface { + IsLearner() bool +} + type maintenanceServer struct { - rg RaftStatusGetter + lg *zap.Logger + rg etcdserver.RaftStatusGetter kg KVGetter bg BackendGetter a Alarmer lt LeaderTransferrer hdr header + cs ClusterStatusGetter } func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer { - srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, lt: s, hdr: newHeader(s)} + srv := &maintenanceServer{lg: s.Cfg.Logger, rg: s, kg: s, bg: s, a: s, lt: s, hdr: newHeader(s), cs: s} return &authMaintenanceServer{srv, s} } func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { - plog.Noticef("starting to defragment the storage backend...") + if ms.lg != nil { + ms.lg.Info("starting defragment") + } else { + plog.Noticef("starting to defragment the storage backend...") + } err := ms.bg.Backend().Defrag() if err != nil { - plog.Errorf("failed to defragment the storage backend (%v)", err) + if ms.lg != nil { + ms.lg.Warn("failed to defragment", zap.Error(err)) + } else { + plog.Errorf("failed to defragment the storage backend (%v)", err) + } return nil, err } - plog.Noticef("finished defragmenting the storage backend") + if ms.lg != nil { + ms.lg.Info("finished defragment") + } else { + plog.Noticef("finished defragmenting the storage backend") + } return &pb.DefragmentResponse{}, nil } -// big enough size to hold >1 OS pages in the buffer -const snapshotSendBufferSize = 32 * 1024 - func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { snap := ms.bg.Backend().Snapshot() pr, pw := io.Pipe() @@ -95,44 +107,28 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance go func() { snap.WriteTo(pw) if err := snap.Close(); err != nil { - plog.Errorf("error closing snapshot (%v)", err) + if ms.lg != nil { + ms.lg.Warn("failed to close snapshot", zap.Error(err)) + } else { + plog.Errorf("error closing snapshot (%v)", err) + } } pw.Close() }() - // record SHA digest of snapshot data - // used for integrity checks during snapshot restore operation + // send file data h := sha256.New() - - // buffer just holds read bytes from stream - // response size is multiple of OS page size, fetched in boltdb - // e.g. 
4*1024 - buf := make([]byte, snapshotSendBufferSize) - - sent := int64(0) - total := snap.Size() - size := humanize.Bytes(uint64(total)) - - start := time.Now() - plog.Infof("sending database snapshot to client %s [%d bytes]", size, total) - for total-sent > 0 { + br := int64(0) + buf := make([]byte, 32*1024) + sz := snap.Size() + for br < sz { n, err := io.ReadFull(pr, buf) if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { return togRPCError(err) } - sent += int64(n) - - // if total is x * snapshotSendBufferSize. it is possible that - // resp.RemainingBytes == 0 - // resp.Blob == zero byte but not nil - // does this make server response sent to client nil in proto - // and client stops receiving from snapshot stream before - // server sends snapshot SHA? - // No, the client will still receive non-nil response - // until server closes the stream with EOF - + br += int64(n) resp := &pb.SnapshotResponse{ - RemainingBytes: uint64(total - sent), + RemainingBytes: uint64(sz - br), Blob: buf[:n], } if err = srv.Send(resp); err != nil { @@ -141,17 +137,13 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance h.Write(buf[:n]) } - // send SHA digest for integrity checks - // during snapshot restore operation + // send sha sha := h.Sum(nil) - - plog.Infof("sending database sha256 checksum to client [%d bytes]", len(sha)) hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha} if err := srv.Send(hresp); err != nil { return togRPCError(err) } - plog.Infof("successfully sent database snapshot to client %s [%d bytes, took %s]", size, total, humanize.Time(start)) return nil } @@ -181,15 +173,25 @@ func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*p } func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { + hdr := &pb.ResponseHeader{} + ms.hdr.fill(hdr) resp := &pb.StatusResponse{ - Header: &pb.ResponseHeader{Revision: ms.hdr.rev()}, - Version: version.Version, - DbSize: ms.bg.Backend().Size(), - Leader: uint64(ms.rg.Leader()), - RaftIndex: ms.rg.Index(), - RaftTerm: ms.rg.Term(), + Header: hdr, + Version: version.Version, + Leader: uint64(ms.rg.Leader()), + RaftIndex: ms.rg.CommittedIndex(), + RaftAppliedIndex: ms.rg.AppliedIndex(), + RaftTerm: ms.rg.Term(), + DbSize: ms.bg.Backend().Size(), + DbSizeInUse: ms.bg.Backend().SizeInUse(), + IsLearner: ms.cs.IsLearner(), + } + if resp.Leader == raft.None { + resp.Errors = append(resp.Errors, etcdserver.ErrNoLeader.Error()) + } + for _, a := range ms.a.Alarms() { + resp.Errors = append(resp.Errors, a.String()) } - ms.hdr.fill(resp.Header) return resp, nil } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/member.go similarity index 75% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/member.go index cbe7b470a..b2ebc9898 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/member.go @@ -18,12 +18,12 @@ import ( "context" "time" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api" + "go.etcd.io/etcd/etcdserver/api/membership" + 
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/pkg/types" ) type ClusterServer struct { @@ -45,15 +45,24 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) } now := time.Now() - m := membership.NewMember("", urls, "", &now) + var m *membership.Member + if r.IsLearner { + m = membership.NewMemberAsLearner("", urls, "", &now) + } else { + m = membership.NewMember("", urls, "", &now) + } membs, merr := cs.server.AddMember(ctx, *m) if merr != nil { return nil, togRPCError(merr) } return &pb.MemberAddResponse{ - Header: cs.header(), - Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs}, + Header: cs.header(), + Member: &pb.Member{ + ID: uint64(m.ID), + PeerURLs: m.PeerURLs, + IsLearner: m.IsLearner, + }, Members: membersToProtoMembers(membs), }, nil } @@ -83,6 +92,14 @@ func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil } +func (cs *ClusterServer) MemberPromote(ctx context.Context, r *pb.MemberPromoteRequest) (*pb.MemberPromoteResponse, error) { + membs, err := cs.server.PromoteMember(ctx, r.ID) + if err != nil { + return nil, togRPCError(err) + } + return &pb.MemberPromoteResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil +} + func (cs *ClusterServer) header() *pb.ResponseHeader { return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.server.Term()} } @@ -95,6 +112,7 @@ func membersToProtoMembers(membs []*membership.Member) []*pb.Member { ID: uint64(membs[i].ID), PeerURLs: membs[i].PeerURLs, ClientURLs: membs[i].ClientURLs, + IsLearner: membs[i].IsLearner, } } return protoMembs diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/metrics.go similarity index 81% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/metrics.go index 46db8649c..d633d27c2 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/metrics.go @@ -31,18 +31,18 @@ var ( Help: "The total number of bytes received from grpc clients.", }) - clientRequests = prometheus.NewCounterVec(prometheus.CounterOpts{ + streamFailures = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "etcd", - Subsystem: "server", - Name: "client_requests_total", - Help: "The total number of client requests per client version.", + Subsystem: "network", + Name: "server_stream_failures_total", + Help: "The total number of stream failures from the local server.", }, - []string{"type", "client_api_version"}, + []string{"Type", "API"}, ) ) func init() { prometheus.MustRegister(sentBytes) prometheus.MustRegister(receivedBytes) - prometheus.MustRegister(clientRequests) + prometheus.MustRegister(streamFailures) } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/quota.go similarity index 87% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/quota.go index 02d99609d..a145b8b09 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/quota.go @@ -17,10 +17,10 @@ package v3rpc import ( "context" - "github.com/coreos/etcd/etcdserver" - 
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/types" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/pkg/types" ) type quotaKVServer struct { @@ -52,7 +52,7 @@ func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error { func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer { return "aKVServer{ NewKVServer(s), - quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()}, + quotaAlarmer{etcdserver.NewBackendQuota(s, "kv"), s, s.ID()}, } } @@ -85,6 +85,6 @@ func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequ func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { return "aLeaseServer{ NewLeaseServer(s), - quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()}, + quotaAlarmer{etcdserver.NewBackendQuota(s, "lease"), s, s.ID()}, } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go similarity index 88% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go index bc1ad7bbd..e6a281460 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -40,6 +40,9 @@ var ( ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err() ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err() ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err() + ErrGRPCMemberNotLearner = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member").Err() + ErrGRPCLearnerNotReady = status.New(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader").Err() + ErrGRPCTooManyLearners = status.New(codes.FailedPrecondition, "etcdserver: too many learner members in cluster").Err() ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err() ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err() @@ -51,6 +54,7 @@ var ( ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err() ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err() ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err() + ErrGRPCRoleEmpty = status.New(codes.InvalidArgument, "etcdserver: role name is empty").Err() ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err() ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err() ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err() @@ -69,6 +73,8 @@ var ( 
ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err() ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err() ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err() + ErrGPRCNotSupportedForLearner = status.New(codes.Unavailable, "etcdserver: rpc not supported for learner").Err() + ErrGRPCBadLeaderTransferee = status.New(codes.FailedPrecondition, "etcdserver: bad leader transferee").Err() errStringToError = map[string]error{ ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, @@ -91,6 +97,9 @@ var ( ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted, ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs, ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound, + ErrorDesc(ErrGRPCMemberNotLearner): ErrGRPCMemberNotLearner, + ErrorDesc(ErrGRPCLearnerNotReady): ErrGRPCLearnerNotReady, + ErrorDesc(ErrGRPCTooManyLearners): ErrGRPCTooManyLearners, ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge, ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests, @@ -102,6 +111,7 @@ var ( ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound, ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist, ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound, + ErrorDesc(ErrGRPCRoleEmpty): ErrGRPCRoleEmpty, ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed, ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied, ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted, @@ -112,6 +122,7 @@ var ( ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader, + ErrorDesc(ErrGRPCLeaderChanged): ErrGRPCLeaderChanged, ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, @@ -119,6 +130,8 @@ var ( ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost, ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy, ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt, + ErrorDesc(ErrGPRCNotSupportedForLearner): ErrGPRCNotSupportedForLearner, + ErrorDesc(ErrGRPCBadLeaderTransferee): ErrGRPCBadLeaderTransferee, } ) @@ -143,6 +156,9 @@ var ( ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted) ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs) ErrMemberNotFound = Error(ErrGRPCMemberNotFound) + ErrMemberNotLearner = Error(ErrGRPCMemberNotLearner) + ErrMemberLearnerNotReady = Error(ErrGRPCLearnerNotReady) + ErrTooManyLearners = Error(ErrGRPCTooManyLearners) ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge) ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests) @@ -154,6 +170,7 @@ var ( ErrUserNotFound = Error(ErrGRPCUserNotFound) ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist) ErrRoleNotFound = Error(ErrGRPCRoleNotFound) + ErrRoleEmpty = Error(ErrGRPCRoleEmpty) ErrAuthFailed = Error(ErrGRPCAuthFailed) ErrPermissionDenied = Error(ErrGRPCPermissionDenied) ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted) @@ -172,6 +189,7 @@ var ( ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost) ErrUnhealthy = Error(ErrGRPCUnhealthy) ErrCorrupt = Error(ErrGRPCCorrupt) + ErrBadLeaderTransferee = Error(ErrGRPCBadLeaderTransferee) ) // EtcdError defines gRPC server errors. 
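
Note (not part of the vendored diff): the error.go hunk above registers the new learner-related failures (ErrGRPCMemberNotLearner, ErrGRPCLearnerNotReady, ErrGRPCTooManyLearners) in errStringToError and exposes typed client-side counterparts (ErrMemberNotLearner, ErrMemberLearnerNotReady, ErrTooManyLearners). The sketch below is a minimal illustration, under the renamed import path, of how a caller could branch on these sentinels by mapping a returned gRPC error back through rpctypes.Error; the helper name classifyPromoteErr and the messages are purely illustrative and are not defined anywhere in the vendored code.

    package main

    import (
    	"fmt"

    	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
    )

    // classifyPromoteErr is an illustrative helper: it converts a gRPC error
    // returned by an RPC such as MemberPromote into the typed rpctypes
    // sentinel so callers can branch on learner-specific failures.
    func classifyPromoteErr(rpcErr error) string {
    	switch rpctypes.Error(rpcErr) {
    	case rpctypes.ErrMemberNotLearner:
    		return "target member is already a voting member"
    	case rpctypes.ErrMemberLearnerNotReady:
    		return "learner is not yet in sync with the leader; retry later"
    	case rpctypes.ErrTooManyLearners:
    		return "cluster already has the maximum number of learner members"
    	default:
    		return fmt.Sprintf("unrelated error: %v", rpcErr)
    	}
    }

    func main() {
    	// Feeding a server-side status error shows the round trip through the
    	// errStringToError table added in the hunk above.
    	fmt.Println(classifyPromoteErr(rpctypes.ErrGRPCLearnerNotReady))
    }
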
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go similarity index 92% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go index 90b8b835b..5c590e1ae 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go @@ -17,6 +17,4 @@ package rpctypes var ( MetadataRequireLeaderKey = "hasleader" MetadataHasLeader = "true" - - MetadataClientAPIVersionKey = "client-api-version" ) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go similarity index 100% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/util.go similarity index 83% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/util.go index c4a1ce042..281ddc7a0 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/util.go @@ -18,12 +18,13 @@ import ( "context" "strings" - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" + "go.etcd.io/etcd/auth" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api/membership" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/lease" + "go.etcd.io/etcd/mvcc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -34,7 +35,10 @@ var toGRPCErrorMap = map[error]error{ membership.ErrIDNotFound: rpctypes.ErrGRPCMemberNotFound, membership.ErrIDExists: rpctypes.ErrGRPCMemberExist, membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist, + membership.ErrMemberNotLearner: rpctypes.ErrGRPCMemberNotLearner, + membership.ErrTooManyLearners: rpctypes.ErrGRPCTooManyLearners, etcdserver.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted, + etcdserver.ErrLearnerNotReady: rpctypes.ErrGRPCLearnerNotReady, mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted, mvcc.ErrFutureRev: rpctypes.ErrGRPCFutureRev, @@ -52,6 +56,7 @@ var toGRPCErrorMap = map[error]error{ etcdserver.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy, etcdserver.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound, etcdserver.ErrCorrupt: rpctypes.ErrGRPCCorrupt, + etcdserver.ErrBadLeaderTransferee: rpctypes.ErrGRPCBadLeaderTransferee, lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound, lease.ErrLeaseExists: rpctypes.ErrGRPCLeaseExist, @@ -64,6 +69,7 @@ var toGRPCErrorMap = map[error]error{ auth.ErrUserNotFound: rpctypes.ErrGRPCUserNotFound, auth.ErrRoleAlreadyExist: rpctypes.ErrGRPCRoleAlreadyExist, auth.ErrRoleNotFound: rpctypes.ErrGRPCRoleNotFound, + auth.ErrRoleEmpty: rpctypes.ErrGRPCRoleEmpty, auth.ErrAuthFailed: rpctypes.ErrGRPCAuthFailed, auth.ErrPermissionDenied: rpctypes.ErrGRPCPermissionDenied, auth.ErrRoleNotGranted: rpctypes.ErrGRPCRoleNotGranted, @@ -116,3 +122,15 @@ func isClientCtxErr(ctxErr error, err error) bool { } return false } + +// 
in v3.4, learner is allowed to serve serializable read and endpoint status +func isRPCSupportedForLearner(req interface{}) bool { + switch r := req.(type) { + case *pb.StatusRequest: + return true + case *pb.RangeRequest: + return r.Serializable + default: + return false + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/watch.go similarity index 72% rename from vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go rename to vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/watch.go index 9b46b0d75..f41cb6c05 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go +++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/watch.go @@ -17,37 +17,46 @@ package v3rpc import ( "context" "io" + "math/rand" "sync" "time" - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/auth" + "go.etcd.io/etcd/etcdserver" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/mvcc" + "go.etcd.io/etcd/mvcc/mvccpb" + + "go.uber.org/zap" ) type watchServer struct { + lg *zap.Logger + clusterID int64 memberID int64 maxRequestBytes int - raftTimer etcdserver.RaftTimer + sg etcdserver.RaftStatusGetter watchable mvcc.WatchableKV - - ag AuthGetter + ag AuthGetter } +// NewWatchServer returns a new watch server. func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { return &watchServer{ - clusterID: int64(s.Cluster().ID()), - memberID: int64(s.ID()), + lg: s.Cfg.Logger, + + clusterID: int64(s.Cluster().ID()), + memberID: int64(s.ID()), + maxRequestBytes: int(s.Cfg.MaxRequestBytes + grpcOverheadBytes), - raftTimer: s, - watchable: s.Watchable(), - ag: s, + + sg: s, + watchable: s.Watchable(), + ag: s, } } @@ -59,52 +68,61 @@ var ( progressReportIntervalMu sync.RWMutex ) +// GetProgressReportInterval returns the current progress report interval (for testing). func GetProgressReportInterval() time.Duration { progressReportIntervalMu.RLock() - defer progressReportIntervalMu.RUnlock() - return progressReportInterval + interval := progressReportInterval + progressReportIntervalMu.RUnlock() + + // add rand(1/10*progressReportInterval) as jitter so that etcdserver will not + // send progress notifications to watchers around the same time even when watchers + // are created around the same time (which is common when a client restarts itself). + jitter := time.Duration(rand.Int63n(int64(interval) / 10)) + + return interval + jitter } +// SetProgressReportInterval updates the current progress report interval (for testing). func SetProgressReportInterval(newTimeout time.Duration) { progressReportIntervalMu.Lock() - defer progressReportIntervalMu.Unlock() progressReportInterval = newTimeout + progressReportIntervalMu.Unlock() } -const ( - // We send ctrl response inside the read loop. We do not want - // send to block read, but we still want ctrl response we sent to - // be serialized. Thus we use a buffered chan to solve the problem. - // A small buffer should be OK for most cases, since we expect the - // ctrl requests are infrequent. - ctrlStreamBufLen = 16 -) +// We send ctrl response inside the read loop. We do not want +// send to block read, but we still want ctrl response we sent to +// be serialized. Thus we use a buffered chan to solve the problem. 
+// A small buffer should be OK for most cases, since we expect the +// ctrl requests are infrequent. +const ctrlStreamBufLen = 16 // serverWatchStream is an etcd server side stream. It receives requests // from client side gRPC stream. It receives watch events from mvcc.WatchStream, // and creates responses that forwarded to gRPC stream. // It also forwards control message like watch created and canceled. type serverWatchStream struct { + lg *zap.Logger + clusterID int64 memberID int64 maxRequestBytes int - raftTimer etcdserver.RaftTimer - + sg etcdserver.RaftStatusGetter watchable mvcc.WatchableKV + ag AuthGetter gRPCStream pb.Watch_WatchServer watchStream mvcc.WatchStream ctrlStream chan *pb.WatchResponse - // mu protects progress, prevKV + // mu protects progress, prevKV, fragment mu sync.RWMutex - // progress tracks the watchID that stream might need to send - // progress to. + // tracks the watchID that stream might need to send progress to // TODO: combine progress and prevKV into a single struct? progress map[mvcc.WatchID]bool - prevKV map[mvcc.WatchID]bool + // record watch IDs that need return previous key-value pair + prevKV map[mvcc.WatchID]bool // records fragmented watch IDs fragment map[mvcc.WatchID]bool @@ -113,31 +131,31 @@ type serverWatchStream struct { // wg waits for the send loop to complete wg sync.WaitGroup - - ag AuthGetter } func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { sws := serverWatchStream{ + lg: ws.lg, + clusterID: ws.clusterID, memberID: ws.memberID, maxRequestBytes: ws.maxRequestBytes, - raftTimer: ws.raftTimer, - + sg: ws.sg, watchable: ws.watchable, + ag: ws.ag, gRPCStream: stream, watchStream: ws.watchable.NewWatchStream(), // chan for sending control response like watcher created and canceled. ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen), - progress: make(map[mvcc.WatchID]bool), - prevKV: make(map[mvcc.WatchID]bool), - fragment: make(map[mvcc.WatchID]bool), - closec: make(chan struct{}), - ag: ws.ag, + progress: make(map[mvcc.WatchID]bool), + prevKV: make(map[mvcc.WatchID]bool), + fragment: make(map[mvcc.WatchID]bool), + + closec: make(chan struct{}), } sws.wg.Add(1) @@ -154,16 +172,27 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { go func() { if rerr := sws.recvLoop(); rerr != nil { if isClientCtxErr(stream.Context().Err(), rerr) { - plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) + if sws.lg != nil { + sws.lg.Debug("failed to receive watch request from gRPC stream", zap.Error(rerr)) + } else { + plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) + } } else { - plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) + if sws.lg != nil { + sws.lg.Warn("failed to receive watch request from gRPC stream", zap.Error(rerr)) + } else { + plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) + } + streamFailures.WithLabelValues("receive", "watch").Inc() } errc <- rerr } }() + select { case err = <-errc: close(sws.ctrlStream) + case <-stream.Context().Done(): err = stream.Context().Err() // the only server-side cancellation is noleader for now. 
@@ -171,6 +200,7 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { err = rpctypes.ErrGRPCNoLeader } } + sws.close() return err } @@ -184,7 +214,6 @@ func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool // if auth is enabled, IsRangePermitted() can cause an error authInfo = &auth.AuthInfo{} } - return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil } @@ -222,7 +251,7 @@ func (sws *serverWatchStream) recvLoop() error { if !sws.isWatchPermitted(creq) { wr := &pb.WatchResponse{ Header: sws.newResponseHeader(sws.watchStream.Rev()), - WatchId: -1, + WatchId: creq.WatchId, Canceled: true, Created: true, CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(), @@ -242,8 +271,8 @@ func (sws *serverWatchStream) recvLoop() error { if rev == 0 { rev = wsrev + 1 } - id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev, filters...) - if id != -1 { + id, err := sws.watchStream.Watch(mvcc.WatchID(creq.WatchId), creq.Key, creq.RangeEnd, rev, filters...) + if err == nil { sws.mu.Lock() if creq.ProgressNotify { sws.progress[id] = true @@ -260,13 +289,17 @@ func (sws *serverWatchStream) recvLoop() error { Header: sws.newResponseHeader(wsrev), WatchId: int64(id), Created: true, - Canceled: id == -1, + Canceled: err != nil, + } + if err != nil { + wr.CancelReason = err.Error() } select { case sws.ctrlStream <- wr: case <-sws.closec: return nil } + case *pb.WatchRequest_CancelRequest: if uv.CancelRequest != nil { id := uv.CancelRequest.WatchId @@ -339,7 +372,6 @@ func (sws *serverWatchStream) sendLoop() { sws.mu.RUnlock() for i := range evs { events[i] = &evs[i] - if needPrevKV { opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1} r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt) @@ -358,7 +390,7 @@ func (sws *serverWatchStream) sendLoop() { Canceled: canceled, } - if _, hasId := ids[wresp.WatchID]; !hasId { + if _, okID := ids[wresp.WatchID]; !okID { // buffer if id not yet announced wrs := append(pending[wresp.WatchID], wr) pending[wresp.WatchID] = wrs @@ -380,9 +412,18 @@ func (sws *serverWatchStream) sendLoop() { if serr != nil { if isClientCtxErr(sws.gRPCStream.Context().Err(), serr) { - plog.Debugf("failed to send watch response to gRPC stream (%q)", serr.Error()) + if sws.lg != nil { + sws.lg.Debug("failed to send watch response to gRPC stream", zap.Error(serr)) + } else { + plog.Debugf("failed to send watch response to gRPC stream (%q)", serr.Error()) + } } else { - plog.Warningf("failed to send watch response to gRPC stream (%q)", serr.Error()) + if sws.lg != nil { + sws.lg.Warn("failed to send watch response to gRPC stream", zap.Error(serr)) + } else { + plog.Warningf("failed to send watch response to gRPC stream (%q)", serr.Error()) + } + streamFailures.WithLabelValues("send", "watch").Inc() } return } @@ -401,9 +442,18 @@ func (sws *serverWatchStream) sendLoop() { if err := sws.gRPCStream.Send(c); err != nil { if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { - plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error()) + if sws.lg != nil { + sws.lg.Debug("failed to send watch control response to gRPC stream", zap.Error(err)) + } else { + plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error()) + } } else { - plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error()) + if sws.lg != nil { + sws.lg.Warn("failed to send watch control response to gRPC stream", zap.Error(err)) + } else { + plog.Warningf("failed to send watch 
control response to gRPC stream (%q)", err.Error()) + } + streamFailures.WithLabelValues("send", "watch").Inc() } return } @@ -421,15 +471,25 @@ func (sws *serverWatchStream) sendLoop() { mvcc.ReportEventReceived(len(v.Events)) if err := sws.gRPCStream.Send(v); err != nil { if isClientCtxErr(sws.gRPCStream.Context().Err(), err) { - plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error()) + if sws.lg != nil { + sws.lg.Debug("failed to send pending watch response to gRPC stream", zap.Error(err)) + } else { + plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error()) + } } else { - plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error()) + if sws.lg != nil { + sws.lg.Warn("failed to send pending watch response to gRPC stream", zap.Error(err)) + } else { + plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error()) + } + streamFailures.WithLabelValues("send", "watch").Inc() } return } } delete(pending, wid) } + case <-progressTicker.C: sws.mu.Lock() for id, ok := range sws.progress { @@ -439,6 +499,7 @@ func (sws *serverWatchStream) sendLoop() { sws.progress[id] = true } sws.mu.Unlock() + case <-sws.closec: return } @@ -495,7 +556,7 @@ func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader { ClusterId: uint64(sws.clusterID), MemberId: uint64(sws.memberID), Revision: rev, - RaftTerm: sws.raftTimer.Term(), + RaftTerm: sws.sg.Term(), } } @@ -507,6 +568,7 @@ func filterNoPut(e mvccpb.Event) bool { return e.Type == mvccpb.PUT } +// FiltersFromRequest returns "mvcc.FilterFunc" from a given watch create request. func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc { filters := make([]mvcc.FilterFunc, 0, len(creq.Filters)) for _, ft := range creq.Filters { diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply.go b/vendor/go.etcd.io/etcd/etcdserver/apply.go similarity index 85% rename from vendor/github.com/coreos/etcd/etcdserver/apply.go rename to vendor/go.etcd.io/etcd/etcdserver/apply.go index 555338574..822b5e322 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/apply.go +++ b/vendor/go.etcd.io/etcd/etcdserver/apply.go @@ -17,17 +17,20 @@ package etcdserver import ( "bytes" "context" + "fmt" "sort" "time" - "github.com/coreos/etcd/auth" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/mvccpb" - "github.com/coreos/etcd/pkg/types" + "go.etcd.io/etcd/auth" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/lease" + "go.etcd.io/etcd/mvcc" + "go.etcd.io/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/pkg/traceutil" + "go.etcd.io/etcd/pkg/types" "github.com/gogo/protobuf/proto" + "go.uber.org/zap" ) const ( @@ -41,21 +44,24 @@ type applyResult struct { // to being logically reflected by the node. Currently only used for // Compaction requests. 
physc <-chan struct{} + trace *traceutil.Trace } // applierV3 is the interface for processing V3 raft messages type applierV3 interface { Apply(r *pb.InternalRaftRequest) *applyResult - Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) - Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) + Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) + Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) - Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) + Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) + LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) + Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) @@ -109,28 +115,27 @@ func (s *EtcdServer) newApplierV3() applierV3 { func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { ar := &applyResult{} defer func(start time.Time) { - warnOfExpensiveRequest(start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err) - if ar.err != nil { - warnOfFailedRequest(start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err) - } + warnOfExpensiveRequest(a.s.getLogger(), start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err) }(time.Now()) // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls switch { case r.Range != nil: - ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range) + ar.resp, ar.err = a.s.applyV3.Range(context.TODO(), nil, r.Range) case r.Put != nil: - ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put) + ar.resp, ar.trace, ar.err = a.s.applyV3.Put(nil, r.Put) case r.DeleteRange != nil: ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange) case r.Txn != nil: ar.resp, ar.err = a.s.applyV3.Txn(r.Txn) case r.Compaction != nil: - ar.resp, ar.physc, ar.err = a.s.applyV3.Compaction(r.Compaction) + ar.resp, ar.physc, ar.trace, ar.err = a.s.applyV3.Compaction(r.Compaction) case r.LeaseGrant != nil: ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant) case r.LeaseRevoke != nil: ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke) + case r.LeaseCheckpoint != nil: + ar.resp, ar.err = a.s.applyV3.LeaseCheckpoint(r.LeaseCheckpoint) case r.Alarm != nil: ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm) case r.Authenticate != nil: @@ -171,32 +176,39 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { return ar } -func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) { +func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) { resp = &pb.PutResponse{} resp.Header = &pb.ResponseHeader{} - + trace = traceutil.New("put", + a.s.getLogger(), + traceutil.Field{Key: "key", Value: string(p.Key)}, + traceutil.Field{Key: "req_size", Value: proto.Size(p)}, + ) val, leaseID := p.Value, lease.LeaseID(p.Lease) if txn == nil { if leaseID != lease.NoLease { if l := a.s.lessor.Lookup(leaseID); l == nil { - return nil, lease.ErrLeaseNotFound + return nil, nil, 
lease.ErrLeaseNotFound } } - txn = a.s.KV().Write() + txn = a.s.KV().Write(trace) defer txn.End() } var rr *mvcc.RangeResult if p.IgnoreValue || p.IgnoreLease || p.PrevKv { + trace.DisableStep() rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{}) if err != nil { - return nil, err + return nil, nil, err } + trace.EnableStep() + trace.Step("get previous kv pair") } if p.IgnoreValue || p.IgnoreLease { if rr == nil || len(rr.KVs) == 0 { // ignore_{lease,value} flag expects previous key-value pair - return nil, ErrKeyNotFound + return nil, nil, ErrKeyNotFound } } if p.IgnoreValue { @@ -212,7 +224,8 @@ func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.Pu } resp.Header.Revision = txn.Put(p.Key, val, leaseID) - return resp, nil + trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision}) + return resp, trace, nil } func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { @@ -221,7 +234,7 @@ func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequ end := mkGteRange(dr.RangeEnd) if txn == nil { - txn = a.s.kv.Write() + txn = a.s.kv.Write(traceutil.TODO()) defer txn.End() } @@ -242,12 +255,14 @@ func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequ return resp, nil } -func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { +func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { + trace := traceutil.Get(ctx) + resp := &pb.RangeResponse{} resp.Header = &pb.ResponseHeader{} if txn == nil { - txn = a.s.kv.Read() + txn = a.s.kv.Read(trace) defer txn.End() } @@ -324,7 +339,7 @@ func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.Rang rr.KVs = rr.KVs[:r.Limit] resp.More = true } - + trace.Step("filter and sort the key-value pairs") resp.Header.Revision = rr.Rev resp.Count = int64(rr.Count) resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs)) @@ -334,12 +349,13 @@ func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.Rang } resp.Kvs[i] = &rr.KVs[i] } + trace.Step("assemble the response") return resp, nil } func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { isWrite := !isTxnReadonly(rt) - txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read()) + txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read(traceutil.TODO())) txnPath := compareToPath(txn, rt) if isWrite { @@ -361,7 +377,7 @@ func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { // be the revision of the write txn. 
if isWrite { txn.End() - txn = a.s.KV().Write() + txn = a.s.KV().Write(traceutil.TODO()) } a.applyTxn(txn, rt, txnPath, txnResp) rev := txn.Rev() @@ -507,25 +523,39 @@ func (a *applierV3backend) applyTxn(txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPat if !txnPath[0] { reqs = rt.Failure } + + lg := a.s.getLogger() for i, req := range reqs { respi := tresp.Responses[i].Response switch tv := req.Request.(type) { case *pb.RequestOp_RequestRange: - resp, err := a.Range(txn, tv.RequestRange) + resp, err := a.Range(context.TODO(), txn, tv.RequestRange) if err != nil { - plog.Panicf("unexpected error during txn: %v", err) + if lg != nil { + lg.Panic("unexpected error during txn", zap.Error(err)) + } else { + plog.Panicf("unexpected error during txn: %v", err) + } } respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp case *pb.RequestOp_RequestPut: - resp, err := a.Put(txn, tv.RequestPut) + resp, _, err := a.Put(txn, tv.RequestPut) if err != nil { - plog.Panicf("unexpected error during txn: %v", err) + if lg != nil { + lg.Panic("unexpected error during txn", zap.Error(err)) + } else { + plog.Panicf("unexpected error during txn: %v", err) + } } respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp case *pb.RequestOp_RequestDeleteRange: resp, err := a.DeleteRange(txn, tv.RequestDeleteRange) if err != nil { - plog.Panicf("unexpected error during txn: %v", err) + if lg != nil { + lg.Panic("unexpected error during txn", zap.Error(err)) + } else { + plog.Panicf("unexpected error during txn: %v", err) + } } respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp case *pb.RequestOp_RequestTxn: @@ -540,17 +570,22 @@ func (a *applierV3backend) applyTxn(txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPat return txns } -func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) { +func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { resp := &pb.CompactionResponse{} resp.Header = &pb.ResponseHeader{} - ch, err := a.s.KV().Compact(compaction.Revision) + trace := traceutil.New("compact", + a.s.getLogger(), + traceutil.Field{Key: "revision", Value: compaction.Revision}, + ) + + ch, err := a.s.KV().Compact(trace, compaction.Revision) if err != nil { - return nil, ch, err + return nil, ch, nil, err } // get the current revision. which key to get is not important. 
rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{}) resp.Header.Revision = rr.Rev - return resp, ch, err + return resp, ch, trace, err } func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { @@ -569,10 +604,21 @@ func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevo return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err } +func (a *applierV3backend) LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) { + for _, c := range lc.Checkpoints { + err := a.s.lessor.Checkpoint(lease.LeaseID(c.ID), c.Remaining_TTL) + if err != nil { + return &pb.LeaseCheckpointResponse{Header: newHeader(a.s)}, err + } + } + return &pb.LeaseCheckpointResponse{Header: newHeader(a.s)}, nil +} + func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { resp := &pb.AlarmResponse{} oldCount := len(a.s.alarmStore.Get(ar.Alarm)) + lg := a.s.getLogger() switch ar.Action { case pb.AlarmRequest_GET: resp.Alarms = a.s.alarmStore.Get(ar.Alarm) @@ -587,14 +633,22 @@ func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) break } - plog.Warningf("alarm %v raised by peer %s", m.Alarm, types.ID(m.MemberID)) + if lg != nil { + lg.Warn("alarm raised", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String())) + } else { + plog.Warningf("alarm %v raised by peer %s", m.Alarm, types.ID(m.MemberID)) + } switch m.Alarm { case pb.AlarmType_CORRUPT: a.s.applyV3 = newApplierV3Corrupt(a) case pb.AlarmType_NOSPACE: a.s.applyV3 = newApplierV3Capped(a) default: - plog.Errorf("unimplemented alarm activation (%+v)", m) + if lg != nil { + lg.Warn("unimplemented alarm activation", zap.String("alarm", fmt.Sprintf("%+v", m))) + } else { + plog.Errorf("unimplemented alarm activation (%+v)", m) + } } case pb.AlarmRequest_DEACTIVATE: m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm) @@ -610,10 +664,18 @@ func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) switch m.Alarm { case pb.AlarmType_NOSPACE, pb.AlarmType_CORRUPT: // TODO: check kv hash before deactivating CORRUPT? - plog.Infof("alarm disarmed %+v", ar) + if lg != nil { + lg.Warn("alarm disarmed", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String())) + } else { + plog.Infof("alarm disarmed %+v", ar) + } a.s.applyV3 = a.s.newApplierV3() default: - plog.Errorf("unimplemented alarm deactivation (%+v)", m) + if lg != nil { + lg.Warn("unimplemented alarm deactivation", zap.String("alarm", fmt.Sprintf("%+v", m))) + } else { + plog.Errorf("unimplemented alarm deactivation (%+v)", m) + } } default: return nil, nil @@ -630,8 +692,8 @@ type applierV3Capped struct { // with Puts so that the number of keys in the store is capped. 
func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } -func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { - return nil, ErrNoSpace +func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { + return nil, nil, ErrNoSpace } func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, error) { @@ -777,16 +839,16 @@ type quotaApplierV3 struct { } func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 { - return "aApplierV3{app, NewBackendQuota(s)} + return "aApplierV3{app, NewBackendQuota(s, "v3-applier")} } -func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { +func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { ok := a.q.Available(p) - resp, err := a.applierV3.Put(txn, p) + resp, trace, err := a.applierV3.Put(txn, p) if err == nil && !ok { err = ErrNoSpace } - return resp, err + return resp, trace, err } func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go b/vendor/go.etcd.io/etcd/etcdserver/apply_auth.go similarity index 92% rename from vendor/github.com/coreos/etcd/etcdserver/apply_auth.go rename to vendor/go.etcd.io/etcd/etcdserver/apply_auth.go index ec9391435..269af4758 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go +++ b/vendor/go.etcd.io/etcd/etcdserver/apply_auth.go @@ -15,12 +15,14 @@ package etcdserver import ( + "context" "sync" - "github.com/coreos/etcd/auth" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" + "go.etcd.io/etcd/auth" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/lease" + "go.etcd.io/etcd/mvcc" + "go.etcd.io/etcd/pkg/traceutil" ) type authApplierV3 struct { @@ -61,9 +63,9 @@ func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult { return ret } -func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) { +func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil { - return nil, err + return nil, nil, err } if err := aa.checkLeasePuts(lease.LeaseID(r.Lease)); err != nil { @@ -71,23 +73,23 @@ func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutRespon // be written by this user. It means the user cannot revoke the // lease so attaching the lease to the newly written key should // be forbidden. 
- return nil, err + return nil, nil, err } if r.PrevKv { err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil) if err != nil { - return nil, err + return nil, nil, err } } return aa.applierV3.Put(txn, r) } -func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { +func (aa *authApplierV3) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { return nil, err } - return aa.applierV3.Range(txn, r) + return aa.applierV3.Range(ctx, txn, r) } func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { @@ -156,10 +158,7 @@ func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil { return err } - if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil { - return err - } - return nil + return checkTxnReqsPermission(as, ai, rt.Failure) } func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go b/vendor/go.etcd.io/etcd/etcdserver/apply_v2.go similarity index 79% rename from vendor/github.com/coreos/etcd/etcdserver/apply_v2.go rename to vendor/go.etcd.io/etcd/etcdserver/apply_v2.go index a49b6823d..c77df1970 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go +++ b/vendor/go.etcd.io/etcd/etcdserver/apply_v2.go @@ -19,11 +19,13 @@ import ( "path" "time" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/store" + "go.etcd.io/etcd/etcdserver/api" + "go.etcd.io/etcd/etcdserver/api/membership" + "go.etcd.io/etcd/etcdserver/api/v2store" + "go.etcd.io/etcd/pkg/pbutil" + "github.com/coreos/go-semver/semver" + "go.uber.org/zap" ) // ApplierV2 is the interface for processing V2 raft messages @@ -35,12 +37,13 @@ type ApplierV2 interface { Sync(r *RequestV2) Response } -func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 { - return &applierV2store{store: s, cluster: c} +func NewApplierV2(lg *zap.Logger, s v2store.Store, c *membership.RaftCluster) ApplierV2 { + return &applierV2store{lg: lg, store: s, cluster: c} } type applierV2store struct { - store store.Store + lg *zap.Logger + store v2store.Store cluster *membership.RaftCluster } @@ -76,7 +79,11 @@ func (a *applierV2store) Put(r *RequestV2) Response { id := membership.MustParseMemberIDFromKey(path.Dir(r.Path)) var attr membership.Attributes if err := json.Unmarshal([]byte(r.Val), &attr); err != nil { - plog.Panicf("unmarshal %s should never fail: %v", r.Val, err) + if a.lg != nil { + a.lg.Panic("failed to unmarshal", zap.String("value", r.Val), zap.Error(err)) + } else { + plog.Panicf("unmarshal %s should never fail: %v", r.Val, err) + } } if a.cluster != nil { a.cluster.UpdateAttributes(id, attr) @@ -104,10 +111,10 @@ func (a *applierV2store) Sync(r *RequestV2) Response { return Response{} } -// applyV2Request interprets r as a call to store.X and returns a Response interpreted -// from store.Event +// applyV2Request interprets r as a call to v2store.X +// and returns a Response interpreted from v2store.Event func (s *EtcdServer) applyV2Request(r *RequestV2) Response { - defer warnOfExpensiveRequest(time.Now(), r, nil, nil) + defer warnOfExpensiveRequest(s.getLogger(), time.Now(), r, nil, nil) switch r.Method { case "POST": @@ 
-126,15 +133,15 @@ func (s *EtcdServer) applyV2Request(r *RequestV2) Response { } } -func (r *RequestV2) TTLOptions() store.TTLOptionSet { +func (r *RequestV2) TTLOptions() v2store.TTLOptionSet { refresh, _ := pbutil.GetBool(r.Refresh) - ttlOptions := store.TTLOptionSet{Refresh: refresh} + ttlOptions := v2store.TTLOptionSet{Refresh: refresh} if r.Expiration != 0 { ttlOptions.ExpireTime = time.Unix(0, r.Expiration) } return ttlOptions } -func toResponse(ev *store.Event, err error) Response { +func toResponse(ev *v2store.Event, err error) Response { return Response{Event: ev, Err: err} } diff --git a/vendor/github.com/coreos/etcd/etcdserver/backend.go b/vendor/go.etcd.io/etcd/etcdserver/backend.go similarity index 57% rename from vendor/github.com/coreos/etcd/etcdserver/backend.go rename to vendor/go.etcd.io/etcd/etcdserver/backend.go index fe2f86503..01ba19256 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/backend.go +++ b/vendor/go.etcd.io/etcd/etcdserver/backend.go @@ -19,16 +19,32 @@ import ( "os" "time" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" + "go.etcd.io/etcd/etcdserver/api/snap" + "go.etcd.io/etcd/lease" + "go.etcd.io/etcd/mvcc" + "go.etcd.io/etcd/mvcc/backend" + "go.etcd.io/etcd/raft/raftpb" + + "go.uber.org/zap" ) func newBackend(cfg ServerConfig) backend.Backend { bcfg := backend.DefaultBackendConfig() bcfg.Path = cfg.backendPath() + if cfg.BackendBatchLimit != 0 { + bcfg.BatchLimit = cfg.BackendBatchLimit + if cfg.Logger != nil { + cfg.Logger.Info("setting backend batch limit", zap.Int("batch limit", cfg.BackendBatchLimit)) + } + } + if cfg.BackendBatchInterval != 0 { + bcfg.BatchInterval = cfg.BackendBatchInterval + if cfg.Logger != nil { + cfg.Logger.Info("setting backend batch interval", zap.Duration("batch interval", cfg.BackendBatchInterval)) + } + } + bcfg.BackendFreelistType = cfg.BackendFreelistType + bcfg.Logger = cfg.Logger if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes { // permit 10% excess over quota for disarm bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10) @@ -40,10 +56,10 @@ func newBackend(cfg ServerConfig) backend.Backend { func openSnapshotBackend(cfg ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) { snapPath, err := ss.DBFilePath(snapshot.Metadata.Index) if err != nil { - return nil, fmt.Errorf("database snapshot file path error: %v", err) + return nil, fmt.Errorf("failed to find database snapshot file (%v)", err) } if err := os.Rename(snapPath, cfg.backendPath()); err != nil { - return nil, fmt.Errorf("rename snapshot file error: %v", err) + return nil, fmt.Errorf("failed to rename database snapshot file (%v)", err) } return openBackend(cfg), nil } @@ -51,17 +67,32 @@ func openSnapshotBackend(cfg ServerConfig, ss *snap.Snapshotter, snapshot raftpb // openBackend returns a backend using the current etcd db. 
func openBackend(cfg ServerConfig) backend.Backend { fn := cfg.backendPath() - beOpened := make(chan backend.Backend) + + now, beOpened := time.Now(), make(chan backend.Backend) go func() { beOpened <- newBackend(cfg) }() + select { case be := <-beOpened: + if cfg.Logger != nil { + cfg.Logger.Info("opened backend db", zap.String("path", fn), zap.Duration("took", time.Since(now))) + } return be + case <-time.After(10 * time.Second): - plog.Warningf("another etcd process is using %q and holds the file lock, or loading backend file is taking >10 seconds", fn) - plog.Warningf("waiting for it to exit before starting...") + if cfg.Logger != nil { + cfg.Logger.Info( + "db file is flocked by another process, or taking too long", + zap.String("path", fn), + zap.Duration("took", time.Since(now)), + ) + } else { + plog.Warningf("another etcd process is using %q and holds the file lock, or loading backend file is taking >10 seconds", fn) + plog.Warningf("waiting for it to exit before starting...") + } } + return <-beOpened } @@ -71,11 +102,11 @@ func openBackend(cfg ServerConfig) backend.Backend { // case, replace the db with the snapshot db sent by the leader. func recoverSnapshotBackend(cfg ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) { var cIndex consistentIndex - kv := mvcc.New(oldbe, &lease.FakeLessor{}, nil, &cIndex) + kv := mvcc.New(cfg.Logger, oldbe, &lease.FakeLessor{}, &cIndex, mvcc.StoreConfig{CompactionBatchLimit: cfg.CompactionBatchLimit}) defer kv.Close() if snapshot.Metadata.Index <= kv.ConsistentIndex() { return oldbe, nil } oldbe.Close() - return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot) + return openSnapshotBackend(cfg, snap.New(cfg.Logger, cfg.SnapDir()), snapshot) } diff --git a/vendor/go.etcd.io/etcd/etcdserver/cluster_util.go b/vendor/go.etcd.io/etcd/etcdserver/cluster_util.go new file mode 100644 index 000000000..f92706cb7 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/cluster_util.go @@ -0,0 +1,407 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "sort" + "strings" + "time" + + "go.etcd.io/etcd/etcdserver/api/membership" + "go.etcd.io/etcd/pkg/types" + "go.etcd.io/etcd/version" + + "github.com/coreos/go-semver/semver" + "go.uber.org/zap" +) + +// isMemberBootstrapped tries to check if the given member has been bootstrapped +// in the given cluster. 
+func isMemberBootstrapped(lg *zap.Logger, cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool { + rcl, err := getClusterFromRemotePeers(lg, getRemotePeerURLs(cl, member), timeout, false, rt) + if err != nil { + return false + } + id := cl.MemberByName(member).ID + m := rcl.Member(id) + if m == nil { + return false + } + if len(m.ClientURLs) > 0 { + return true + } + return false +} + +// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and +// attempts to construct a Cluster by accessing the members endpoint on one of +// these URLs. The first URL to provide a response is used. If no URLs provide +// a response, or a Cluster cannot be successfully created from a received +// response, an error is returned. +// Each request has a 10-second timeout. Because the upper limit of TTL is 5s, +// 10 second is enough for building connection and finishing request. +func GetClusterFromRemotePeers(lg *zap.Logger, urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) { + return getClusterFromRemotePeers(lg, urls, 10*time.Second, true, rt) +} + +// If logerr is true, it prints out more error messages. +func getClusterFromRemotePeers(lg *zap.Logger, urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) { + cc := &http.Client{ + Transport: rt, + Timeout: timeout, + } + for _, u := range urls { + addr := u + "/members" + resp, err := cc.Get(addr) + if err != nil { + if logerr { + if lg != nil { + lg.Warn("failed to get cluster response", zap.String("address", addr), zap.Error(err)) + } else { + plog.Warningf("could not get cluster response from %s: %v", u, err) + } + } + continue + } + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + if logerr { + if lg != nil { + lg.Warn("failed to read body of cluster response", zap.String("address", addr), zap.Error(err)) + } else { + plog.Warningf("could not read the body of cluster response: %v", err) + } + } + continue + } + var membs []*membership.Member + if err = json.Unmarshal(b, &membs); err != nil { + if logerr { + if lg != nil { + lg.Warn("failed to unmarshal cluster response", zap.String("address", addr), zap.Error(err)) + } else { + plog.Warningf("could not unmarshal cluster response: %v", err) + } + } + continue + } + id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID")) + if err != nil { + if logerr { + if lg != nil { + lg.Warn( + "failed to parse cluster ID", + zap.String("address", addr), + zap.String("header", resp.Header.Get("X-Etcd-Cluster-ID")), + zap.Error(err), + ) + } else { + plog.Warningf("could not parse the cluster ID from cluster res: %v", err) + } + } + continue + } + + // check the length of membership members + // if the membership members are present then prepare and return raft cluster + // if membership members are not present then the raft cluster formed will be + // an invalid empty cluster hence return failed to get raft cluster member(s) from the given urls error + if len(membs) > 0 { + return membership.NewClusterFromMembers(lg, "", id, membs), nil + } + return nil, fmt.Errorf("failed to get raft cluster member(s) from the given URLs") + } + return nil, fmt.Errorf("could not retrieve cluster information from the given URLs") +} + +// getRemotePeerURLs returns peer urls of remote members in the cluster. The +// returned list is sorted in ascending lexicographical order. 
+func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string { + us := make([]string, 0) + for _, m := range cl.Members() { + if m.Name == local { + continue + } + us = append(us, m.PeerURLs...) + } + sort.Strings(us) + return us +} + +// getVersions returns the versions of the members in the given cluster. +// The key of the returned map is the member's ID. The value of the returned map +// is the semver versions string, including server and cluster. +// If it fails to get the version of a member, the key will be nil. +func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions { + members := cl.Members() + vers := make(map[string]*version.Versions) + for _, m := range members { + if m.ID == local { + cv := "not_decided" + if cl.Version() != nil { + cv = cl.Version().String() + } + vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv} + continue + } + ver, err := getVersion(lg, m, rt) + if err != nil { + if lg != nil { + lg.Warn("failed to get version", zap.String("remote-member-id", m.ID.String()), zap.Error(err)) + } else { + plog.Warningf("cannot get the version of member %s (%v)", m.ID, err) + } + vers[m.ID.String()] = nil + } else { + vers[m.ID.String()] = ver + } + } + return vers +} + +// decideClusterVersion decides the cluster version based on the versions map. +// The returned version is the min server version in the map, or nil if the min +// version in unknown. +func decideClusterVersion(lg *zap.Logger, vers map[string]*version.Versions) *semver.Version { + var cv *semver.Version + lv := semver.Must(semver.NewVersion(version.Version)) + + for mid, ver := range vers { + if ver == nil { + return nil + } + v, err := semver.NewVersion(ver.Server) + if err != nil { + if lg != nil { + lg.Warn( + "failed to parse server version of remote member", + zap.String("remote-member-id", mid), + zap.String("remote-member-version", ver.Server), + zap.Error(err), + ) + } else { + plog.Errorf("cannot understand the version of member %s (%v)", mid, err) + } + return nil + } + if lv.LessThan(*v) { + if lg != nil { + lg.Warn( + "leader found higher-versioned member", + zap.String("local-member-version", lv.String()), + zap.String("remote-member-id", mid), + zap.String("remote-member-version", ver.Server), + ) + } else { + plog.Warningf("the local etcd version %s is not up-to-date", lv.String()) + plog.Warningf("member %s has a higher version %s", mid, ver.Server) + } + } + if cv == nil { + cv = v + } else if v.LessThan(*cv) { + cv = v + } + } + return cv +} + +// isCompatibleWithCluster return true if the local member has a compatible version with +// the current running cluster. +// The version is considered as compatible when at least one of the other members in the cluster has a +// cluster version in the range of [MinClusterVersion, Version] and no known members has a cluster version +// out of the range. +// We set this rule since when the local member joins, another member might be offline. 
+func isCompatibleWithCluster(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool { + vers := getVersions(lg, cl, local, rt) + minV := semver.Must(semver.NewVersion(version.MinClusterVersion)) + maxV := semver.Must(semver.NewVersion(version.Version)) + maxV = &semver.Version{ + Major: maxV.Major, + Minor: maxV.Minor, + } + return isCompatibleWithVers(lg, vers, local, minV, maxV) +} + +func isCompatibleWithVers(lg *zap.Logger, vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool { + var ok bool + for id, v := range vers { + // ignore comparison with local version + if id == local.String() { + continue + } + if v == nil { + continue + } + clusterv, err := semver.NewVersion(v.Cluster) + if err != nil { + if lg != nil { + lg.Warn( + "failed to parse cluster version of remote member", + zap.String("remote-member-id", id), + zap.String("remote-member-cluster-version", v.Cluster), + zap.Error(err), + ) + } else { + plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err) + } + continue + } + if clusterv.LessThan(*minV) { + if lg != nil { + lg.Warn( + "cluster version of remote member is not compatible; too low", + zap.String("remote-member-id", id), + zap.String("remote-member-cluster-version", clusterv.String()), + zap.String("minimum-cluster-version-supported", minV.String()), + ) + } else { + plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String()) + } + return false + } + if maxV.LessThan(*clusterv) { + if lg != nil { + lg.Warn( + "cluster version of remote member is not compatible; too high", + zap.String("remote-member-id", id), + zap.String("remote-member-cluster-version", clusterv.String()), + zap.String("minimum-cluster-version-supported", minV.String()), + ) + } else { + plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String()) + } + return false + } + ok = true + } + return ok +} + +// getVersion returns the Versions of the given member via its +// peerURLs. Returns the last error if it fails to get the version. 
+func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*version.Versions, error) { + cc := &http.Client{ + Transport: rt, + } + var ( + err error + resp *http.Response + ) + + for _, u := range m.PeerURLs { + addr := u + "/version" + resp, err = cc.Get(addr) + if err != nil { + if lg != nil { + lg.Warn( + "failed to reach the peer URL", + zap.String("address", addr), + zap.String("remote-member-id", m.ID.String()), + zap.Error(err), + ) + } else { + plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err) + } + continue + } + var b []byte + b, err = ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + if lg != nil { + lg.Warn( + "failed to read body of response", + zap.String("address", addr), + zap.String("remote-member-id", m.ID.String()), + zap.Error(err), + ) + } else { + plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err) + } + continue + } + var vers version.Versions + if err = json.Unmarshal(b, &vers); err != nil { + if lg != nil { + lg.Warn( + "failed to unmarshal response", + zap.String("address", addr), + zap.String("remote-member-id", m.ID.String()), + zap.Error(err), + ) + } else { + plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err) + } + continue + } + return &vers, nil + } + return nil, err +} + +func promoteMemberHTTP(ctx context.Context, url string, id uint64, peerRt http.RoundTripper) ([]*membership.Member, error) { + cc := &http.Client{Transport: peerRt} + // TODO: refactor member http handler code + // cannot import etcdhttp, so manually construct url + requestUrl := url + "/members/promote/" + fmt.Sprintf("%d", id) + req, err := http.NewRequest("POST", requestUrl, nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + resp, err := cc.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode == http.StatusRequestTimeout { + return nil, ErrTimeout + } + if resp.StatusCode == http.StatusPreconditionFailed { + // both ErrMemberNotLearner and ErrLearnerNotReady have same http status code + if strings.Contains(string(b), ErrLearnerNotReady.Error()) { + return nil, ErrLearnerNotReady + } + if strings.Contains(string(b), membership.ErrMemberNotLearner.Error()) { + return nil, membership.ErrMemberNotLearner + } + return nil, fmt.Errorf("member promote: unknown error(%s)", string(b)) + } + if resp.StatusCode == http.StatusNotFound { + return nil, membership.ErrIDNotFound + } + + if resp.StatusCode != http.StatusOK { // all other types of errors + return nil, fmt.Errorf("member promote: unknown error(%s)", string(b)) + } + + var membs []*membership.Member + if err := json.Unmarshal(b, &membs); err != nil { + return nil, err + } + return membs, nil +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/config.go b/vendor/go.etcd.io/etcd/etcdserver/config.go similarity index 76% rename from vendor/github.com/coreos/etcd/etcdserver/config.go rename to vendor/go.etcd.io/etcd/etcdserver/config.go index 295d95299..88cd721c3 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/config.go +++ b/vendor/go.etcd.io/etcd/etcdserver/config.go @@ -22,9 +22,13 @@ import ( "strings" "time" - "github.com/coreos/etcd/pkg/netutil" - "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" + "go.etcd.io/etcd/pkg/netutil" + "go.etcd.io/etcd/pkg/transport" + 
"go.etcd.io/etcd/pkg/types" + + bolt "go.etcd.io/bbolt" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" ) // ServerConfig holds the configuration of etcd as taken from the command line or discovery. @@ -37,16 +41,41 @@ type ServerConfig struct { DataDir string // DedicatedWALDir config will make the etcd to write the WAL to the WALDir // rather than the dataDir/member/wal. - DedicatedWALDir string - SnapCount uint64 - MaxSnapFiles uint - MaxWALFiles uint + DedicatedWALDir string + + SnapshotCount uint64 + + // SnapshotCatchUpEntries is the number of entries for a slow follower + // to catch-up after compacting the raft storage entries. + // We expect the follower has a millisecond level latency with the leader. + // The max throughput is around 10K. Keep a 5K entries is enough for helping + // follower to catch up. + // WARNING: only change this for tests. Always use "DefaultSnapshotCatchUpEntries" + SnapshotCatchUpEntries uint64 + + MaxSnapFiles uint + MaxWALFiles uint + + // BackendBatchInterval is the maximum time before commit the backend transaction. + BackendBatchInterval time.Duration + // BackendBatchLimit is the maximum operations before commit the backend transaction. + BackendBatchLimit int + + // BackendFreelistType is the type of the backend boltdb freelist. + BackendFreelistType bolt.FreelistType + InitialPeerURLsMap types.URLsMap InitialClusterToken string NewCluster bool - ForceNewCluster bool PeerTLSInfo transport.TLSInfo + CORS map[string]struct{} + + // HostWhitelist lists acceptable hostnames from client requests. + // If server is insecure (no TLS), server only accepts requests + // whose Host header value exists in this white list. + HostWhitelist map[string]struct{} + TickMs uint ElectionTicks int @@ -76,13 +105,14 @@ type ServerConfig struct { // // If single-node, it advances ticks regardless. // - // See https://github.com/coreos/etcd/issues/9333 for more detail. + // See https://github.com/etcd-io/etcd/issues/9333 for more detail. InitialElectionTickAdvance bool BootstrapTimeout time.Duration AutoCompactionRetention time.Duration AutoCompactionMode string + CompactionBatchLimit int QuotaBackendBytes int64 MaxTxnOps uint @@ -94,14 +124,39 @@ type ServerConfig struct { // ClientCertAuthEnabled is true when cert has been signed by the client CA. ClientCertAuthEnabled bool - AuthToken string + AuthToken string + BcryptCost uint // InitialCorruptCheck is true to check data corruption on boot // before serving any peer/client traffic. InitialCorruptCheck bool CorruptCheckTime time.Duration + // PreVote is true to enable Raft Pre-Vote. + PreVote bool + + // Logger logs server-side operations. + // If not nil, it disables "capnslog" and uses the given logger. + Logger *zap.Logger + + // LoggerConfig is server logger configuration for Raft logger. + // Must be either: "LoggerConfig != nil" or "LoggerCore != nil && LoggerWriteSyncer != nil". + LoggerConfig *zap.Config + // LoggerCore is "zapcore.Core" for raft logger. + // Must be either: "LoggerConfig != nil" or "LoggerCore != nil && LoggerWriteSyncer != nil". + LoggerCore zapcore.Core + LoggerWriteSyncer zapcore.WriteSyncer + Debug bool + + ForceNewCluster bool + + // EnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases. + EnableLeaseCheckpoint bool + // LeaseCheckpointInterval time.Duration is the wait duration between lease checkpoints. 
+ LeaseCheckpointInterval time.Duration + + EnableGRPCGateway bool } // VerifyBootstrap sanity-checks the initial config for bootstrap case @@ -154,7 +209,7 @@ func (c *ServerConfig) advertiseMatchesCluster() error { sort.Strings(apurls) ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) defer cancel() - ok, err := netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) + ok, err := netutil.URLStringsEqual(ctx, c.Logger, apurls, urls.StringSlice()) if ok { return nil } @@ -228,36 +283,6 @@ func (c *ServerConfig) peerDialTimeout() time.Duration { return time.Second + time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond } -func (c *ServerConfig) PrintWithInitial() { c.print(true) } - -func (c *ServerConfig) Print() { c.print(false) } - -func (c *ServerConfig) print(initial bool) { - plog.Infof("name = %s", c.Name) - if c.ForceNewCluster { - plog.Infof("force new cluster") - } - plog.Infof("data dir = %s", c.DataDir) - plog.Infof("member dir = %s", c.MemberDir()) - if c.DedicatedWALDir != "" { - plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir) - } - plog.Infof("heartbeat = %dms", c.TickMs) - plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs)) - plog.Infof("snapshot count = %d", c.SnapCount) - if len(c.DiscoveryURL) != 0 { - plog.Infof("discovery URL= %s", c.DiscoveryURL) - if len(c.DiscoveryProxy) != 0 { - plog.Infof("discovery proxy = %s", c.DiscoveryProxy) - } - } - plog.Infof("advertise client URLs = %s", c.ClientURLs) - if initial { - plog.Infof("initial advertise peer URLs = %s", c.PeerURLs) - plog.Infof("initial cluster = %s", c.InitialPeerURLsMap) - } -} - func checkDuplicateURL(urlsmap types.URLsMap) bool { um := make(map[string]bool) for _, urls := range urlsmap { diff --git a/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go b/vendor/go.etcd.io/etcd/etcdserver/consistent_index.go similarity index 100% rename from vendor/github.com/coreos/etcd/etcdserver/consistent_index.go rename to vendor/go.etcd.io/etcd/etcdserver/consistent_index.go diff --git a/vendor/go.etcd.io/etcd/etcdserver/corrupt.go b/vendor/go.etcd.io/etcd/etcdserver/corrupt.go new file mode 100644 index 000000000..2351eef44 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/corrupt.go @@ -0,0 +1,412 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "context" + "fmt" + "time" + + "go.etcd.io/etcd/clientv3" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/mvcc" + "go.etcd.io/etcd/pkg/traceutil" + "go.etcd.io/etcd/pkg/types" + + "go.uber.org/zap" +) + +// CheckInitialHashKV compares initial hash values with its peers +// before serving any peer/client traffic. Only mismatch when hashes +// are different at requested revision, with same compact revision. 
+func (s *EtcdServer) CheckInitialHashKV() error { + if !s.Cfg.InitialCorruptCheck { + return nil + } + + lg := s.getLogger() + + if lg != nil { + lg.Info( + "starting initial corruption check", + zap.String("local-member-id", s.ID().String()), + zap.Duration("timeout", s.Cfg.ReqTimeout()), + ) + } else { + plog.Infof("%s starting initial corruption check with timeout %v...", s.ID(), s.Cfg.ReqTimeout()) + } + + h, rev, crev, err := s.kv.HashByRev(0) + if err != nil { + return fmt.Errorf("%s failed to fetch hash (%v)", s.ID(), err) + } + peers := s.getPeerHashKVs(rev) + mismatch := 0 + for _, p := range peers { + if p.resp != nil { + peerID := types.ID(p.resp.Header.MemberId) + fields := []zap.Field{ + zap.String("local-member-id", s.ID().String()), + zap.Int64("local-member-revision", rev), + zap.Int64("local-member-compact-revision", crev), + zap.Uint32("local-member-hash", h), + zap.String("remote-peer-id", peerID.String()), + zap.Strings("remote-peer-endpoints", p.eps), + zap.Int64("remote-peer-revision", p.resp.Header.Revision), + zap.Int64("remote-peer-compact-revision", p.resp.CompactRevision), + zap.Uint32("remote-peer-hash", p.resp.Hash), + } + + if h != p.resp.Hash { + if crev == p.resp.CompactRevision { + if lg != nil { + lg.Warn("found different hash values from remote peer", fields...) + } else { + plog.Errorf("%s's hash %d != %s's hash %d (revision %d, peer revision %d, compact revision %d)", s.ID(), h, peerID, p.resp.Hash, rev, p.resp.Header.Revision, crev) + } + mismatch++ + } else { + if lg != nil { + lg.Warn("found different compact revision values from remote peer", fields...) + } else { + plog.Warningf("%s cannot check hash of peer(%s): peer has a different compact revision %d (revision:%d)", s.ID(), peerID, p.resp.CompactRevision, rev) + } + } + } + + continue + } + + if p.err != nil { + switch p.err { + case rpctypes.ErrFutureRev: + if lg != nil { + lg.Warn( + "cannot fetch hash from slow remote peer", + zap.String("local-member-id", s.ID().String()), + zap.Int64("local-member-revision", rev), + zap.Int64("local-member-compact-revision", crev), + zap.Uint32("local-member-hash", h), + zap.String("remote-peer-id", p.id.String()), + zap.Strings("remote-peer-endpoints", p.eps), + zap.Error(err), + ) + } else { + plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: peer is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error()) + } + case rpctypes.ErrCompacted: + if lg != nil { + lg.Warn( + "cannot fetch hash from remote peer; local member is behind", + zap.String("local-member-id", s.ID().String()), + zap.Int64("local-member-revision", rev), + zap.Int64("local-member-compact-revision", crev), + zap.Uint32("local-member-hash", h), + zap.String("remote-peer-id", p.id.String()), + zap.Strings("remote-peer-endpoints", p.eps), + zap.Error(err), + ) + } else { + plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: local node is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error()) + } + } + } + } + if mismatch > 0 { + return fmt.Errorf("%s found data inconsistency with peers", s.ID()) + } + + if lg != nil { + lg.Info( + "initial corruption checking passed; no corruption", + zap.String("local-member-id", s.ID().String()), + ) + } else { + plog.Infof("%s succeeded on initial corruption checking: no corruption", s.ID()) + } + return nil +} + +func (s *EtcdServer) monitorKVHash() { + t := s.Cfg.CorruptCheckTime + if t == 0 { + return + } + + lg := s.getLogger() + if lg != nil { + lg.Info( + "enabled corruption checking", + 
zap.String("local-member-id", s.ID().String()), + zap.Duration("interval", t), + ) + } else { + plog.Infof("enabled corruption checking with %s interval", t) + } + + for { + select { + case <-s.stopping: + return + case <-time.After(t): + } + if !s.isLeader() { + continue + } + if err := s.checkHashKV(); err != nil { + if lg != nil { + lg.Warn("failed to check hash KV", zap.Error(err)) + } else { + plog.Debugf("check hash kv failed %v", err) + } + } + } +} + +func (s *EtcdServer) checkHashKV() error { + lg := s.getLogger() + + h, rev, crev, err := s.kv.HashByRev(0) + if err != nil { + return err + } + peers := s.getPeerHashKVs(rev) + + ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) + err = s.linearizableReadNotify(ctx) + cancel() + if err != nil { + return err + } + + h2, rev2, crev2, err := s.kv.HashByRev(0) + if err != nil { + return err + } + + alarmed := false + mismatch := func(id uint64) { + if alarmed { + return + } + alarmed = true + a := &pb.AlarmRequest{ + MemberID: id, + Action: pb.AlarmRequest_ACTIVATE, + Alarm: pb.AlarmType_CORRUPT, + } + s.goAttach(func() { + s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a}) + }) + } + + if h2 != h && rev2 == rev && crev == crev2 { + if lg != nil { + lg.Warn( + "found hash mismatch", + zap.Int64("revision-1", rev), + zap.Int64("compact-revision-1", crev), + zap.Uint32("hash-1", h), + zap.Int64("revision-2", rev2), + zap.Int64("compact-revision-2", crev2), + zap.Uint32("hash-2", h2), + ) + } else { + plog.Warningf("mismatched hashes %d and %d for revision %d", h, h2, rev) + } + mismatch(uint64(s.ID())) + } + + for _, p := range peers { + if p.resp == nil { + continue + } + id := p.resp.Header.MemberId + + // leader expects follower's latest revision less than or equal to leader's + if p.resp.Header.Revision > rev2 { + if lg != nil { + lg.Warn( + "revision from follower must be less than or equal to leader's", + zap.Int64("leader-revision", rev2), + zap.Int64("follower-revision", p.resp.Header.Revision), + zap.String("follower-peer-id", types.ID(id).String()), + ) + } else { + plog.Warningf( + "revision %d from member %v, expected at most %d", + p.resp.Header.Revision, + types.ID(id), + rev2) + } + mismatch(id) + } + + // leader expects follower's latest compact revision less than or equal to leader's + if p.resp.CompactRevision > crev2 { + if lg != nil { + lg.Warn( + "compact revision from follower must be less than or equal to leader's", + zap.Int64("leader-compact-revision", crev2), + zap.Int64("follower-compact-revision", p.resp.CompactRevision), + zap.String("follower-peer-id", types.ID(id).String()), + ) + } else { + plog.Warningf( + "compact revision %d from member %v, expected at most %d", + p.resp.CompactRevision, + types.ID(id), + crev2, + ) + } + mismatch(id) + } + + // follower's compact revision is leader's old one, then hashes must match + if p.resp.CompactRevision == crev && p.resp.Hash != h { + if lg != nil { + lg.Warn( + "same compact revision then hashes must match", + zap.Int64("leader-compact-revision", crev2), + zap.Uint32("leader-hash", h), + zap.Int64("follower-compact-revision", p.resp.CompactRevision), + zap.Uint32("follower-hash", p.resp.Hash), + zap.String("follower-peer-id", types.ID(id).String()), + ) + } else { + plog.Warningf( + "hash %d at revision %d from member %v, expected hash %d", + p.resp.Hash, + rev, + types.ID(id), + h, + ) + } + mismatch(id) + } + } + return nil +} + +type peerHashKVResp struct { + id types.ID + eps []string + + resp *clientv3.HashKVResponse + err error 
+} + +func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) { + // TODO: handle the case when "s.cluster.Members" have not + // been populated (e.g. no snapshot to load from disk) + mbs := s.cluster.Members() + pss := make([]peerHashKVResp, len(mbs)) + for _, m := range mbs { + if m.ID == s.ID() { + continue + } + pss = append(pss, peerHashKVResp{id: m.ID, eps: m.PeerURLs}) + } + + lg := s.getLogger() + + for _, p := range pss { + if len(p.eps) == 0 { + continue + } + cli, cerr := clientv3.New(clientv3.Config{ + DialTimeout: s.Cfg.ReqTimeout(), + Endpoints: p.eps, + }) + if cerr != nil { + if lg != nil { + lg.Warn( + "failed to create client to peer URL", + zap.String("local-member-id", s.ID().String()), + zap.String("remote-peer-id", p.id.String()), + zap.Strings("remote-peer-endpoints", p.eps), + zap.Error(cerr), + ) + } else { + plog.Warningf("%s failed to create client to peer %q for hash checking (%q)", s.ID(), p.eps, cerr.Error()) + } + continue + } + + respsLen := len(resps) + for _, c := range cli.Endpoints() { + ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) + var resp *clientv3.HashKVResponse + resp, cerr = cli.HashKV(ctx, c, rev) + cancel() + if cerr == nil { + resps = append(resps, &peerHashKVResp{id: p.id, eps: p.eps, resp: resp, err: nil}) + break + } + if lg != nil { + lg.Warn( + "failed hash kv request", + zap.String("local-member-id", s.ID().String()), + zap.Int64("requested-revision", rev), + zap.String("remote-peer-endpoint", c), + zap.Error(cerr), + ) + } else { + plog.Warningf("%s hash-kv error %q on peer %q with revision %d", s.ID(), cerr.Error(), c, rev) + } + } + cli.Close() + + if respsLen == len(resps) { + resps = append(resps, &peerHashKVResp{id: p.id, eps: p.eps, resp: nil, err: cerr}) + } + } + return resps +} + +type applierV3Corrupt struct { + applierV3 +} + +func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} } + +func (a *applierV3Corrupt) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { + return nil, nil, ErrCorrupt +} + +func (a *applierV3Corrupt) Range(ctx context.Context, txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) { + return nil, ErrCorrupt +} + +func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + return nil, ErrCorrupt +} + +func (a *applierV3Corrupt) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { + return nil, ErrCorrupt +} + +func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { + return nil, nil, nil, ErrCorrupt +} + +func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + return nil, ErrCorrupt +} + +func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + return nil, ErrCorrupt +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/doc.go b/vendor/go.etcd.io/etcd/etcdserver/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/etcdserver/doc.go rename to vendor/go.etcd.io/etcd/etcdserver/doc.go diff --git a/vendor/github.com/coreos/etcd/etcdserver/errors.go b/vendor/go.etcd.io/etcd/etcdserver/errors.go similarity index 91% rename from vendor/github.com/coreos/etcd/etcdserver/errors.go rename to vendor/go.etcd.io/etcd/etcdserver/errors.go index 8cec52a17..d0fe28970 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/errors.go +++ 
b/vendor/go.etcd.io/etcd/etcdserver/errors.go @@ -29,6 +29,7 @@ var ( ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long") ErrLeaderChanged = errors.New("etcdserver: leader changed") ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members") + ErrLearnerNotReady = errors.New("etcdserver: can only promote a learner member which is in sync with leader") ErrNoLeader = errors.New("etcdserver: no leader") ErrNotLeader = errors.New("etcdserver: not leader") ErrRequestTooLarge = errors.New("etcdserver: request is too large") @@ -37,6 +38,7 @@ var ( ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") ErrKeyNotFound = errors.New("etcdserver: key not found") ErrCorrupt = errors.New("etcdserver: corrupt cluster") + ErrBadLeaderTransferee = errors.New("etcdserver: bad leader transferee") ) type DiscoveryError struct { diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go similarity index 75% rename from vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go rename to vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go index 12b676397..9e9b42cea 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go @@ -1,16 +1,125 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: etcdserver.proto +/* + Package etcdserverpb is a generated protocol buffer package. + + It is generated from these files: + etcdserver.proto + raft_internal.proto + rpc.proto + + It has these top-level messages: + Request + Metadata + RequestHeader + InternalRaftRequest + EmptyResponse + InternalAuthenticateRequest + ResponseHeader + RangeRequest + RangeResponse + PutRequest + PutResponse + DeleteRangeRequest + DeleteRangeResponse + RequestOp + ResponseOp + Compare + TxnRequest + TxnResponse + CompactionRequest + CompactionResponse + HashRequest + HashKVRequest + HashKVResponse + HashResponse + SnapshotRequest + SnapshotResponse + WatchRequest + WatchCreateRequest + WatchCancelRequest + WatchProgressRequest + WatchResponse + LeaseGrantRequest + LeaseGrantResponse + LeaseRevokeRequest + LeaseRevokeResponse + LeaseCheckpoint + LeaseCheckpointRequest + LeaseCheckpointResponse + LeaseKeepAliveRequest + LeaseKeepAliveResponse + LeaseTimeToLiveRequest + LeaseTimeToLiveResponse + LeaseLeasesRequest + LeaseStatus + LeaseLeasesResponse + Member + MemberAddRequest + MemberAddResponse + MemberRemoveRequest + MemberRemoveResponse + MemberUpdateRequest + MemberUpdateResponse + MemberListRequest + MemberListResponse + MemberPromoteRequest + MemberPromoteResponse + DefragmentRequest + DefragmentResponse + MoveLeaderRequest + MoveLeaderResponse + AlarmRequest + AlarmMember + AlarmResponse + StatusRequest + StatusResponse + AuthEnableRequest + AuthDisableRequest + AuthenticateRequest + AuthUserAddRequest + AuthUserGetRequest + AuthUserDeleteRequest + AuthUserChangePasswordRequest + AuthUserGrantRoleRequest + AuthUserRevokeRoleRequest + AuthRoleAddRequest + AuthRoleGetRequest + AuthUserListRequest + AuthRoleListRequest + AuthRoleDeleteRequest + AuthRoleGrantPermissionRequest + AuthRoleRevokePermissionRequest + AuthEnableResponse + AuthDisableResponse + AuthenticateResponse + AuthUserAddResponse + AuthUserGetResponse + AuthUserDeleteResponse + AuthUserChangePasswordResponse + AuthUserGrantRoleResponse + 
AuthUserRevokeRoleResponse + AuthRoleAddResponse + AuthRoleGetResponse + AuthRoleListResponse + AuthUserListResponse + AuthRoleDeleteResponse + AuthRoleGrantPermissionResponse + AuthRoleRevokePermissionResponse +*/ package etcdserverpb import ( - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -25,141 +134,50 @@ var _ = math.Inf const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type Request struct { - ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` - Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"` - Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"` - Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"` - Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"` - PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"` - PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"` - PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"` - Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"` - Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"` - Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"` - Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"` - Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"` - Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"` - Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"` - Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"` - Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} -func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_09ffbeb3bebbce7e, []int{0} -} -func (m *Request) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Request.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Request.Merge(m, src) -} -func (m *Request) XXX_Size() int { - return m.Size() -} -func (m *Request) XXX_DiscardUnknown() { - xxx_messageInfo_Request.DiscardUnknown(m) + ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` + Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"` + Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"` + Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"` + Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"` + PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"` + PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"` + PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"` + Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"` + Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"` + Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"` + 
Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"` + Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"` + Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"` + Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"` + Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"` + Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"` + XXX_unrecognized []byte `json:"-"` } -var xxx_messageInfo_Request proto.InternalMessageInfo +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} } type Metadata struct { - NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"` - ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (m *Metadata) String() string { return proto.CompactTextString(m) } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_09ffbeb3bebbce7e, []int{1} -} -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(m, src) -} -func (m *Metadata) XXX_Size() int { - return m.Size() -} -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) + NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"` + ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"` + XXX_unrecognized []byte `json:"-"` } -var xxx_messageInfo_Metadata proto.InternalMessageInfo +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{1} } func init() { proto.RegisterType((*Request)(nil), "etcdserverpb.Request") proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata") } - -func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) } - -var fileDescriptor_09ffbeb3bebbce7e = []byte{ - // 380 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, - 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, - 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, - 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, - 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79, - 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d, - 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a, - 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89, - 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 
0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93, - 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe, - 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c, - 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70, - 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab, - 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11, - 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7, - 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89, - 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82, - 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6, - 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63, - 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6, - 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff, - 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea, - 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f, - 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00, -} - func (m *Request) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -167,133 +185,123 @@ func (m *Request) Marshal() (dAtA []byte, err error) { } func (m *Request) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + dAtA[i] = 0x8 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x12 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method))) + i += copy(dAtA[i:], m.Method) + dAtA[i] = 0x1a + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x22 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val))) + i += copy(dAtA[i:], m.Val) + dAtA[i] = 0x28 + i++ + if m.Dir { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.Refresh != nil { - i-- - if *m.Refresh { + i++ + dAtA[i] = 0x32 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue))) + i += copy(dAtA[i:], m.PrevValue) + dAtA[i] = 0x38 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex)) + if m.PrevExist != nil { + dAtA[i] = 0x40 + i++ + if *m.PrevExist { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x88 + i++ } - i-- - if m.Stream { + dAtA[i] = 0x48 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration)) + dAtA[i] = 0x50 + i++ + if m.Wait { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time)) - i-- - dAtA[i] = 0x78 - i-- - if m.Quorum { + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since)) + dAtA[i] = 0x60 + i++ + if m.Recursive { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] 
= 0x70 - i-- + i++ + dAtA[i] = 0x68 + i++ if m.Sorted { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x68 - i-- - if m.Recursive { + i++ + dAtA[i] = 0x70 + i++ + if m.Quorum { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x60 - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since)) - i-- - dAtA[i] = 0x58 - i-- - if m.Wait { + i++ + dAtA[i] = 0x78 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time)) + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + if m.Stream { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x50 - i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration)) - i-- - dAtA[i] = 0x48 - if m.PrevExist != nil { - i-- - if *m.PrevExist { + i++ + if m.Refresh != nil { + dAtA[i] = 0x88 + i++ + dAtA[i] = 0x1 + i++ + if *m.Refresh { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x40 + i++ } - i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex)) - i-- - dAtA[i] = 0x38 - i -= len(m.PrevValue) - copy(dAtA[i:], m.PrevValue) - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue))) - i-- - dAtA[i] = 0x32 - i-- - if m.Dir { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - i-- - dAtA[i] = 0x28 - i -= len(m.Val) - copy(dAtA[i:], m.Val) - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val))) - i-- - dAtA[i] = 0x22 - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x1a - i -= len(m.Method) - copy(dAtA[i:], m.Method) - i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method))) - i-- - dAtA[i] = 0x12 - i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } func (m *Metadata) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -301,43 +309,32 @@ func (m *Metadata) Marshal() (dAtA []byte, err error) { } func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID)) if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID)) - i-- - dAtA[i] = 0x10 - i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int { - offset -= sovEtcdserver(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *Request) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovEtcdserver(uint64(m.ID)) @@ -372,9 +369,6 @@ func (m *Request) Size() (n int) { } func (m *Metadata) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovEtcdserver(uint64(m.NodeID)) @@ -386,7 +380,14 @@ func (m *Metadata) Size() (n int) { } func sovEtcdserver(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozEtcdserver(x uint64) (n int) { 
return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -406,7 +407,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -434,7 +435,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= uint64(b&0x7F) << shift + m.ID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -453,7 +454,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -463,9 +464,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthEtcdserver } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEtcdserver - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -485,7 +483,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -495,9 +493,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthEtcdserver } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEtcdserver - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -517,7 +512,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -527,9 +522,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthEtcdserver } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEtcdserver - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -549,7 +541,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -569,7 +561,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -579,9 +571,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { return ErrInvalidLengthEtcdserver } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEtcdserver - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -601,7 +590,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PrevIndex |= uint64(b&0x7F) << shift + m.PrevIndex |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -620,7 +609,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -641,7 +630,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Expiration |= int64(b&0x7F) << shift + m.Expiration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -660,7 +649,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -680,7 +669,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Since |= uint64(b&0x7F) << shift + m.Since |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -699,7 +688,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } 
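Editor's note (illustrative sketch, not part of the vendored diff): the etcdserver.pb.go hunks above replace the generated varint-size helper's math/bits closed form with the loop form apparently emitted by the older gogo/protobuf generator used for etcd v3.4's committed code. Both compute the number of bytes a uint64 occupies as a protobuf varint; a minimal standalone comparison, with hypothetical names sovLoop and sovBits:

// illustrative only: not part of the vendored etcd code
package main

import (
	"fmt"
	"math/bits"
)

// sovLoop mirrors the loop-based sovEtcdserver in the new vendored file:
// count 7-bit groups, always at least one byte.
func sovLoop(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

// sovBits is the closed form the diff removes; x|1 keeps the result >= 1 for x == 0.
func sovBits(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 1 << 35, ^uint64(0)} {
		fmt.Println(x, sovLoop(x), sovBits(x)) // both columns always match
	}
}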
@@ -719,7 +708,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -739,7 +728,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -759,7 +748,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Time |= int64(b&0x7F) << shift + m.Time |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -778,7 +767,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -798,7 +787,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -814,9 +803,6 @@ func (m *Request) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthEtcdserver } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthEtcdserver - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -845,7 +831,7 @@ func (m *Metadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -873,7 +859,7 @@ func (m *Metadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NodeID |= uint64(b&0x7F) << shift + m.NodeID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -892,7 +878,7 @@ func (m *Metadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ClusterID |= uint64(b&0x7F) << shift + m.ClusterID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -906,9 +892,6 @@ func (m *Metadata) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthEtcdserver } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthEtcdserver - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -976,11 +959,8 @@ func skipEtcdserver(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthEtcdserver - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthEtcdserver } return iNdEx, nil @@ -1011,9 +991,6 @@ func skipEtcdserver(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthEtcdserver - } } return iNdEx, nil case 4: @@ -1032,3 +1009,33 @@ var ( ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) } + +var fileDescriptorEtcdserver = []byte{ + // 380 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, + 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, + 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, + 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, + 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79, + 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d, + 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a, + 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 
0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89, + 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93, + 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe, + 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c, + 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70, + 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab, + 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11, + 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7, + 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89, + 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82, + 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6, + 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63, + 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6, + 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff, + 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea, + 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f, + 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto similarity index 100% rename from vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto rename to vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go similarity index 86% rename from vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go rename to vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go index c50525ba3..904c32187 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go @@ -9,7 +9,7 @@ It translates gRPC into RESTful JSON APIs. 
package gw import ( - "github.com/coreos/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/etcdserver/etcdserverpb" "io" "net/http" @@ -33,7 +33,7 @@ func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client var protoReq etcdserverpb.RangeRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -46,7 +46,7 @@ func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client e var protoReq etcdserverpb.PutRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -59,7 +59,7 @@ func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, var protoReq etcdserverpb.DeleteRangeRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -72,7 +72,7 @@ func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client e var protoReq etcdserverpb.TxnRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -85,7 +85,7 @@ func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, clie var protoReq etcdserverpb.CompactionRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -104,7 +104,7 @@ func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, cli dec := marshaler.NewDecoder(req.Body) handleSend := func() error { var protoReq etcdserverpb.WatchRequest - err = dec.Decode(&protoReq) + err := dec.Decode(&protoReq) if err == io.EOF { return err } @@ -112,7 +112,7 @@ func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, cli grpclog.Printf("Failed to decode request: %v", err) return err } - if err = stream.Send(&protoReq); err != nil { + if err := stream.Send(&protoReq); err != nil { grpclog.Printf("Failed to send request: %v", err) return err } @@ -150,7 +150,7 @@ func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler var protoReq etcdserverpb.LeaseGrantRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -163,7 +163,20 @@ func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshale var protoReq etcdserverpb.LeaseRevokeRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := 
marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Lease_LeaseRevoke_1(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.LeaseRevokeRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -182,7 +195,7 @@ func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marsh dec := marshaler.NewDecoder(req.Body) handleSend := func() error { var protoReq etcdserverpb.LeaseKeepAliveRequest - err = dec.Decode(&protoReq) + err := dec.Decode(&protoReq) if err == io.EOF { return err } @@ -190,7 +203,7 @@ func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marsh grpclog.Printf("Failed to decode request: %v", err) return err } - if err = stream.Send(&protoReq); err != nil { + if err := stream.Send(&protoReq); err != nil { grpclog.Printf("Failed to send request: %v", err) return err } @@ -228,7 +241,20 @@ func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Mars var protoReq etcdserverpb.LeaseTimeToLiveRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Lease_LeaseTimeToLive_1(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.LeaseTimeToLiveRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -241,7 +267,20 @@ func request_Lease_LeaseLeases_0(ctx context.Context, marshaler runtime.Marshale var protoReq etcdserverpb.LeaseLeasesRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.LeaseLeases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Lease_LeaseLeases_1(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.LeaseLeasesRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -254,7 +293,7 @@ func 
request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshale var protoReq etcdserverpb.MemberAddRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -267,7 +306,7 @@ func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marsh var protoReq etcdserverpb.MemberRemoveRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -280,7 +319,7 @@ func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marsh var protoReq etcdserverpb.MemberUpdateRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -293,7 +332,7 @@ func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshal var protoReq etcdserverpb.MemberListRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -302,11 +341,24 @@ func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshal } +func request_Cluster_MemberPromote_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberPromoteRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.MemberPromote(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq etcdserverpb.AlarmRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -319,7 +371,7 @@ func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshal var protoReq etcdserverpb.StatusRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -332,7 +384,7 @@ func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Mar var protoReq etcdserverpb.DefragmentRequest var metadata runtime.ServerMetadata - if err := 
marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -345,7 +397,7 @@ func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler var protoReq etcdserverpb.HashRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -358,7 +410,7 @@ func request_Maintenance_HashKV_0(ctx context.Context, marshaler runtime.Marshal var protoReq etcdserverpb.HashKVRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -371,7 +423,7 @@ func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marsh var protoReq etcdserverpb.SnapshotRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -392,7 +444,7 @@ func request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runtime.Mar var protoReq etcdserverpb.MoveLeaderRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -405,7 +457,7 @@ func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, var protoReq etcdserverpb.AuthEnableRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -418,7 +470,7 @@ func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler var protoReq etcdserverpb.AuthDisableRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -431,7 +483,7 @@ func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshale var protoReq etcdserverpb.AuthenticateRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -444,7 +496,7 @@ func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl var protoReq etcdserverpb.AuthUserAddRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) } @@ -457,7 +509,7 @@ func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, cl var protoReq etcdserverpb.AuthUserGetRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -470,7 +522,7 @@ func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, c var protoReq etcdserverpb.AuthUserListRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -483,7 +535,7 @@ func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, var protoReq etcdserverpb.AuthUserDeleteRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -496,7 +548,7 @@ func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Ma var protoReq etcdserverpb.AuthUserChangePasswordRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -509,7 +561,7 @@ func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshal var protoReq etcdserverpb.AuthUserGrantRoleRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -522,7 +574,7 @@ func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marsha var protoReq etcdserverpb.AuthUserRevokeRoleRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -535,7 +587,7 @@ func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl var protoReq etcdserverpb.AuthRoleAddRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -548,7 +600,7 @@ func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, cl var protoReq etcdserverpb.AuthRoleGetRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -561,7 +613,7 @@ func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, c var 
protoReq etcdserverpb.AuthRoleListRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -574,7 +626,7 @@ func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, var protoReq etcdserverpb.AuthRoleDeleteRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -587,7 +639,7 @@ func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.M var protoReq etcdserverpb.AuthRoleGrantPermissionRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -600,7 +652,7 @@ func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime. var protoReq etcdserverpb.AuthRoleRevokePermissionRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -796,15 +848,15 @@ func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client } var ( - pattern_KV_Range_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "range"}, "")) + pattern_KV_Range_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "range"}, "")) - pattern_KV_Put_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "put"}, "")) + pattern_KV_Put_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "put"}, "")) - pattern_KV_DeleteRange_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "deleterange"}, "")) + pattern_KV_DeleteRange_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "deleterange"}, "")) - pattern_KV_Txn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "txn"}, "")) + pattern_KV_Txn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "txn"}, "")) - pattern_KV_Compact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "compaction"}, "")) + pattern_KV_Compact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "compaction"}, "")) ) var ( @@ -890,7 +942,7 @@ func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie } var ( - pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3beta", "watch"}, "")) + pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3", "watch"}, "")) ) var ( @@ -993,6 +1045,35 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("POST", pattern_Lease_LeaseRevoke_1, func(w 
http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lease_LeaseRevoke_1(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lease_LeaseRevoke_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("POST", pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -1051,6 +1132,35 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("POST", pattern_Lease_LeaseTimeToLive_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lease_LeaseTimeToLive_1(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lease_LeaseTimeToLive_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("POST", pattern_Lease_LeaseLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -1080,19 +1190,54 @@ func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie }) + mux.Handle("POST", pattern_Lease_LeaseLeases_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lease_LeaseLeases_1(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lease_LeaseLeases_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + return nil } var ( - pattern_Lease_LeaseGrant_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "lease", "grant"}, "")) + pattern_Lease_LeaseGrant_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "grant"}, "")) + + pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "revoke"}, "")) - pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "kv", "lease", "revoke"}, "")) + pattern_Lease_LeaseRevoke_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "revoke"}, "")) - pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "lease", "keepalive"}, "")) + pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "keepalive"}, "")) - pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "kv", "lease", "timetolive"}, "")) + pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "timetolive"}, "")) - pattern_Lease_LeaseLeases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "kv", "lease", "leases"}, "")) + pattern_Lease_LeaseTimeToLive_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "timetolive"}, "")) + + pattern_Lease_LeaseLeases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "leases"}, "")) + + pattern_Lease_LeaseLeases_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "leases"}, "")) ) var ( @@ -1100,11 +1245,17 @@ var ( forward_Lease_LeaseRevoke_0 = runtime.ForwardResponseMessage + forward_Lease_LeaseRevoke_1 = runtime.ForwardResponseMessage + forward_Lease_LeaseKeepAlive_0 = runtime.ForwardResponseStream forward_Lease_LeaseTimeToLive_0 = runtime.ForwardResponseMessage + forward_Lease_LeaseTimeToLive_1 = runtime.ForwardResponseMessage + forward_Lease_LeaseLeases_0 = runtime.ForwardResponseMessage + + forward_Lease_LeaseLeases_1 = runtime.ForwardResponseMessage ) // RegisterClusterHandlerFromEndpoint is same as RegisterClusterHandler but @@ -1261,17 +1412,48 @@ func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl }) + mux.Handle("POST", pattern_Cluster_MemberPromote_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Cluster_MemberPromote_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Cluster_MemberPromote_0(ctx, mux, 
outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + return nil } var ( - pattern_Cluster_MemberAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "add"}, "")) + pattern_Cluster_MemberAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "add"}, "")) - pattern_Cluster_MemberRemove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "remove"}, "")) + pattern_Cluster_MemberRemove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "remove"}, "")) - pattern_Cluster_MemberUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "update"}, "")) + pattern_Cluster_MemberUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "update"}, "")) - pattern_Cluster_MemberList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "list"}, "")) + pattern_Cluster_MemberList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "list"}, "")) + + pattern_Cluster_MemberPromote_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "promote"}, "")) ) var ( @@ -1282,6 +1464,8 @@ var ( forward_Cluster_MemberUpdate_0 = runtime.ForwardResponseMessage forward_Cluster_MemberList_0 = runtime.ForwardResponseMessage + + forward_Cluster_MemberPromote_0 = runtime.ForwardResponseMessage ) // RegisterMaintenanceHandlerFromEndpoint is same as RegisterMaintenanceHandler but @@ -1529,19 +1713,19 @@ func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux } var ( - pattern_Maintenance_Alarm_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "alarm"}, "")) + pattern_Maintenance_Alarm_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "alarm"}, "")) - pattern_Maintenance_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "status"}, "")) + pattern_Maintenance_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "status"}, "")) - pattern_Maintenance_Defragment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "defragment"}, "")) + pattern_Maintenance_Defragment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "defragment"}, "")) - pattern_Maintenance_Hash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "hash"}, "")) + pattern_Maintenance_Hash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "hash"}, "")) - pattern_Maintenance_HashKV_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "hash"}, "")) + pattern_Maintenance_HashKV_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "hash"}, "")) - pattern_Maintenance_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, 
[]string{"v3beta", "maintenance", "snapshot"}, "")) + pattern_Maintenance_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "snapshot"}, "")) - pattern_Maintenance_MoveLeader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "transfer-leadership"}, "")) + pattern_Maintenance_MoveLeader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "transfer-leadership"}, "")) ) var ( @@ -2066,37 +2250,37 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien } var ( - pattern_Auth_AuthEnable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "auth", "enable"}, "")) + pattern_Auth_AuthEnable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "enable"}, "")) - pattern_Auth_AuthDisable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "auth", "disable"}, "")) + pattern_Auth_AuthDisable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "disable"}, "")) - pattern_Auth_Authenticate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "auth", "authenticate"}, "")) + pattern_Auth_Authenticate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "authenticate"}, "")) - pattern_Auth_UserAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "add"}, "")) + pattern_Auth_UserAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "add"}, "")) - pattern_Auth_UserGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "get"}, "")) + pattern_Auth_UserGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "get"}, "")) - pattern_Auth_UserList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "list"}, "")) + pattern_Auth_UserList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "list"}, "")) - pattern_Auth_UserDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "delete"}, "")) + pattern_Auth_UserDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "delete"}, "")) - pattern_Auth_UserChangePassword_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "changepw"}, "")) + pattern_Auth_UserChangePassword_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "changepw"}, "")) - pattern_Auth_UserGrantRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "grant"}, "")) + pattern_Auth_UserGrantRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "grant"}, "")) - pattern_Auth_UserRevokeRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "revoke"}, "")) + pattern_Auth_UserRevokeRole_0 = runtime.MustPattern(runtime.NewPattern(1, 
[]int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "revoke"}, "")) - pattern_Auth_RoleAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "add"}, "")) + pattern_Auth_RoleAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "add"}, "")) - pattern_Auth_RoleGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "get"}, "")) + pattern_Auth_RoleGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "get"}, "")) - pattern_Auth_RoleList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "list"}, "")) + pattern_Auth_RoleList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "list"}, "")) - pattern_Auth_RoleDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "delete"}, "")) + pattern_Auth_RoleDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "delete"}, "")) - pattern_Auth_RoleGrantPermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "grant"}, "")) + pattern_Auth_RoleGrantPermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "grant"}, "")) - pattern_Auth_RoleRevokePermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "revoke"}, "")) + pattern_Auth_RoleRevokePermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "revoke"}, "")) ) var ( diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go similarity index 63% rename from vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go rename to vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go index b3a199e9c..b170499e4 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go @@ -4,13 +4,15 @@ package etcdserverpb import ( - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -18,162 +20,64 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - type RequestHeader struct { ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` // username is a username that is associated with an auth token of gRPC connection Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` // auth_revision is a revision number of auth.authStore. 
It is not related to mvcc - AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RequestHeader) Reset() { *m = RequestHeader{} } -func (m *RequestHeader) String() string { return proto.CompactTextString(m) } -func (*RequestHeader) ProtoMessage() {} -func (*RequestHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_b4c9a9be0cfca103, []int{0} -} -func (m *RequestHeader) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestHeader.Merge(m, src) -} -func (m *RequestHeader) XXX_Size() int { - return m.Size() -} -func (m *RequestHeader) XXX_DiscardUnknown() { - xxx_messageInfo_RequestHeader.DiscardUnknown(m) + AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"` } -var xxx_messageInfo_RequestHeader proto.InternalMessageInfo +func (m *RequestHeader) Reset() { *m = RequestHeader{} } +func (m *RequestHeader) String() string { return proto.CompactTextString(m) } +func (*RequestHeader) ProtoMessage() {} +func (*RequestHeader) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{0} } // An InternalRaftRequest is the union of all requests which can be // sent via raft. type InternalRaftRequest struct { - Header *RequestHeader `protobuf:"bytes,100,opt,name=header,proto3" json:"header,omitempty"` + Header *RequestHeader `protobuf:"bytes,100,opt,name=header" json:"header,omitempty"` ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - V2 *Request `protobuf:"bytes,2,opt,name=v2,proto3" json:"v2,omitempty"` - Range *RangeRequest `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"` - Put *PutRequest `protobuf:"bytes,4,opt,name=put,proto3" json:"put,omitempty"` - DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange,proto3" json:"delete_range,omitempty"` - Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn,proto3" json:"txn,omitempty"` - Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction,proto3" json:"compaction,omitempty"` - LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant,proto3" json:"lease_grant,omitempty"` - LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke,proto3" json:"lease_revoke,omitempty"` - Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm,proto3" json:"alarm,omitempty"` - AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable,proto3" json:"auth_enable,omitempty"` - AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable,proto3" json:"auth_disable,omitempty"` - Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate,proto3" json:"authenticate,omitempty"` - AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd,proto3" json:"auth_user_add,omitempty"` - AuthUserDelete *AuthUserDeleteRequest 
`protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete,proto3" json:"auth_user_delete,omitempty"` - AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet,proto3" json:"auth_user_get,omitempty"` - AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword,proto3" json:"auth_user_change_password,omitempty"` - AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole,proto3" json:"auth_user_grant_role,omitempty"` - AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole,proto3" json:"auth_user_revoke_role,omitempty"` - AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList,proto3" json:"auth_user_list,omitempty"` - AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList,proto3" json:"auth_role_list,omitempty"` - AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd,proto3" json:"auth_role_add,omitempty"` - AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete,proto3" json:"auth_role_delete,omitempty"` - AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet,proto3" json:"auth_role_get,omitempty"` - AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission,proto3" json:"auth_role_grant_permission,omitempty"` - AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission,proto3" json:"auth_role_revoke_permission,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } -func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } -func (*InternalRaftRequest) ProtoMessage() {} -func (*InternalRaftRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b4c9a9be0cfca103, []int{1} -} -func (m *InternalRaftRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InternalRaftRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InternalRaftRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InternalRaftRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_InternalRaftRequest.Merge(m, src) -} -func (m *InternalRaftRequest) XXX_Size() int { - return m.Size() -} -func (m *InternalRaftRequest) XXX_DiscardUnknown() { - xxx_messageInfo_InternalRaftRequest.DiscardUnknown(m) + V2 *Request `protobuf:"bytes,2,opt,name=v2" json:"v2,omitempty"` + Range *RangeRequest `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"` + Put *PutRequest `protobuf:"bytes,4,opt,name=put" json:"put,omitempty"` + DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange" json:"delete_range,omitempty"` + Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn" json:"txn,omitempty"` + Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction" json:"compaction,omitempty"` + 
LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant" json:"lease_grant,omitempty"` + LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke" json:"lease_revoke,omitempty"` + Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm" json:"alarm,omitempty"` + LeaseCheckpoint *LeaseCheckpointRequest `protobuf:"bytes,11,opt,name=lease_checkpoint,json=leaseCheckpoint" json:"lease_checkpoint,omitempty"` + AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable" json:"auth_enable,omitempty"` + AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable" json:"auth_disable,omitempty"` + Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate" json:"authenticate,omitempty"` + AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd" json:"auth_user_add,omitempty"` + AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete" json:"auth_user_delete,omitempty"` + AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet" json:"auth_user_get,omitempty"` + AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword" json:"auth_user_change_password,omitempty"` + AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole" json:"auth_user_grant_role,omitempty"` + AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole" json:"auth_user_revoke_role,omitempty"` + AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList" json:"auth_user_list,omitempty"` + AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList" json:"auth_role_list,omitempty"` + AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd" json:"auth_role_add,omitempty"` + AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete" json:"auth_role_delete,omitempty"` + AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet" json:"auth_role_get,omitempty"` + AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission" json:"auth_role_grant_permission,omitempty"` + AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission" json:"auth_role_revoke_permission,omitempty"` } -var xxx_messageInfo_InternalRaftRequest proto.InternalMessageInfo +func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } +func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) } +func (*InternalRaftRequest) ProtoMessage() {} +func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{1} } type EmptyResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` } -func (m *EmptyResponse) Reset() { *m = EmptyResponse{} } -func (m *EmptyResponse) String() string { return proto.CompactTextString(m) } -func (*EmptyResponse) ProtoMessage() {} -func (*EmptyResponse) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_b4c9a9be0cfca103, []int{2} -} -func (m *EmptyResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EmptyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_EmptyResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *EmptyResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_EmptyResponse.Merge(m, src) -} -func (m *EmptyResponse) XXX_Size() int { - return m.Size() -} -func (m *EmptyResponse) XXX_DiscardUnknown() { - xxx_messageInfo_EmptyResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_EmptyResponse proto.InternalMessageInfo +func (m *EmptyResponse) Reset() { *m = EmptyResponse{} } +func (m *EmptyResponse) String() string { return proto.CompactTextString(m) } +func (*EmptyResponse) ProtoMessage() {} +func (*EmptyResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{2} } // What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest? // InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing. @@ -182,115 +86,26 @@ type InternalAuthenticateRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` // simple_token is generated in API layer (etcdserver/v3_server.go) - SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"` } func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} } func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) } func (*InternalAuthenticateRequest) ProtoMessage() {} func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b4c9a9be0cfca103, []int{3} -} -func (m *InternalAuthenticateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InternalAuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InternalAuthenticateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InternalAuthenticateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_InternalAuthenticateRequest.Merge(m, src) -} -func (m *InternalAuthenticateRequest) XXX_Size() int { - return m.Size() -} -func (m *InternalAuthenticateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_InternalAuthenticateRequest.DiscardUnknown(m) + return fileDescriptorRaftInternal, []int{3} } -var xxx_messageInfo_InternalAuthenticateRequest proto.InternalMessageInfo - func init() { proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader") proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest") proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse") proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest") } - -func init() { 
proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) } - -var fileDescriptor_b4c9a9be0cfca103 = []byte{ - // 840 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdb, 0x4e, 0xdb, 0x48, - 0x18, 0xc7, 0x71, 0x38, 0x66, 0x12, 0xb2, 0xec, 0x00, 0xbb, 0xb3, 0x41, 0xca, 0x86, 0xa0, 0xdd, - 0x65, 0x77, 0x5b, 0x5a, 0x85, 0x07, 0x68, 0x53, 0x82, 0x00, 0x09, 0x21, 0x64, 0x51, 0xa9, 0x52, - 0x2f, 0xdc, 0x21, 0xfe, 0x48, 0x5c, 0x1c, 0xdb, 0x1d, 0x4f, 0x52, 0xfa, 0x26, 0x7d, 0x8c, 0x9e, - 0x1e, 0x82, 0x8b, 0x1e, 0x68, 0xfb, 0x02, 0x2d, 0xbd, 0xe9, 0x55, 0x6f, 0xda, 0x07, 0xa8, 0xe6, - 0x60, 0x3b, 0x4e, 0x1c, 0xee, 0xec, 0x6f, 0xfe, 0xdf, 0xef, 0xfb, 0x0f, 0xf3, 0x37, 0x13, 0xb4, - 0xc8, 0xe8, 0x09, 0xb7, 0x1c, 0x8f, 0x03, 0xf3, 0xa8, 0xbb, 0x11, 0x30, 0x9f, 0xfb, 0xb8, 0x08, - 0xbc, 0x65, 0x87, 0xc0, 0xfa, 0xc0, 0x82, 0xe3, 0xf2, 0x52, 0xdb, 0x6f, 0xfb, 0x72, 0xe1, 0x86, - 0x78, 0x52, 0x9a, 0xf2, 0x42, 0xa2, 0xd1, 0x95, 0x3c, 0x0b, 0x5a, 0xea, 0xb1, 0xf6, 0x00, 0xcd, - 0x9b, 0xf0, 0xa8, 0x07, 0x21, 0xdf, 0x05, 0x6a, 0x03, 0xc3, 0x25, 0x94, 0xdb, 0x6b, 0x12, 0xa3, - 0x6a, 0xac, 0x4f, 0x99, 0xb9, 0xbd, 0x26, 0x2e, 0xa3, 0xb9, 0x5e, 0x28, 0x46, 0x76, 0x81, 0xe4, - 0xaa, 0xc6, 0x7a, 0xde, 0x8c, 0xdf, 0xf1, 0x1a, 0x9a, 0xa7, 0x3d, 0xde, 0xb1, 0x18, 0xf4, 0x9d, - 0xd0, 0xf1, 0x3d, 0x32, 0x29, 0xdb, 0x8a, 0xa2, 0x68, 0xea, 0x5a, 0xed, 0x5b, 0x09, 0x2d, 0xee, - 0x69, 0xd7, 0x26, 0x3d, 0xe1, 0x7a, 0x1c, 0xde, 0x44, 0x33, 0x1d, 0x39, 0x92, 0xd8, 0x55, 0x63, - 0xbd, 0x50, 0x5f, 0xd9, 0x18, 0xdc, 0xcb, 0x46, 0xca, 0x95, 0xa9, 0xa5, 0x23, 0xee, 0xfe, 0x42, - 0xb9, 0x7e, 0x5d, 0xfa, 0x2a, 0xd4, 0x97, 0x33, 0x01, 0x66, 0xae, 0x5f, 0xc7, 0x37, 0xd1, 0x34, - 0xa3, 0x5e, 0x1b, 0xa4, 0xc1, 0x42, 0xbd, 0x3c, 0xa4, 0x14, 0x4b, 0x91, 0x5c, 0x09, 0xf1, 0x7f, - 0x68, 0x32, 0xe8, 0x71, 0x32, 0x25, 0xf5, 0x24, 0xad, 0x3f, 0xec, 0x45, 0x9b, 0x30, 0x85, 0x08, - 0x6f, 0xa1, 0xa2, 0x0d, 0x2e, 0x70, 0xb0, 0xd4, 0x90, 0x69, 0xd9, 0x54, 0x4d, 0x37, 0x35, 0xa5, - 0x22, 0x35, 0xaa, 0x60, 0x27, 0x35, 0x31, 0x90, 0x9f, 0x79, 0x64, 0x26, 0x6b, 0xe0, 0xd1, 0x99, - 0x17, 0x0f, 0xe4, 0x67, 0x1e, 0xbe, 0x85, 0x50, 0xcb, 0xef, 0x06, 0xb4, 0xc5, 0xc5, 0x1f, 0x7d, - 0x56, 0xb6, 0xfc, 0x99, 0x6e, 0xd9, 0x8a, 0xd7, 0xa3, 0xce, 0x81, 0x16, 0x7c, 0x1b, 0x15, 0x5c, - 0xa0, 0x21, 0x58, 0x6d, 0x46, 0x3d, 0x4e, 0xe6, 0xb2, 0x08, 0xfb, 0x42, 0xb0, 0x23, 0xd6, 0x63, - 0x82, 0x1b, 0x97, 0xc4, 0x9e, 0x15, 0x81, 0x41, 0xdf, 0x3f, 0x05, 0x92, 0xcf, 0xda, 0xb3, 0x44, - 0x98, 0x52, 0x10, 0xef, 0xd9, 0x4d, 0x6a, 0xe2, 0x58, 0xa8, 0x4b, 0x59, 0x97, 0xa0, 0xac, 0x63, - 0x69, 0x88, 0xa5, 0xf8, 0x58, 0xa4, 0x10, 0x37, 0x50, 0x41, 0x26, 0x0e, 0x3c, 0x7a, 0xec, 0x02, - 0xf9, 0x9a, 0xb9, 0xf7, 0x46, 0x8f, 0x77, 0xb6, 0xa5, 0x20, 0x76, 0x4e, 0xe3, 0x12, 0x6e, 0x22, - 0x99, 0x4f, 0xcb, 0x76, 0x42, 0xc9, 0xf8, 0x3e, 0x9b, 0x65, 0x5d, 0x30, 0x9a, 0x4a, 0x11, 0x5b, - 0xa7, 0x49, 0x0d, 0x1f, 0x28, 0x0a, 0x78, 0xdc, 0x69, 0x51, 0x0e, 0xe4, 0x87, 0xa2, 0xfc, 0x9b, - 0xa6, 0x44, 0xb9, 0x6f, 0x0c, 0x48, 0x23, 0x5c, 0xaa, 0x1f, 0x6f, 0xeb, 0x4f, 0x49, 0x7c, 0x5b, - 0x16, 0xb5, 0x6d, 0xf2, 0x7a, 0x6e, 0x9c, 0xad, 0xbb, 0x21, 0xb0, 0x86, 0x6d, 0xa7, 0x6c, 0xe9, - 0x1a, 0x3e, 0x40, 0x0b, 0x09, 0x46, 0xc5, 0x8b, 0xbc, 0x51, 0xa4, 0xb5, 0x6c, 0x92, 0xce, 0xa5, - 0x86, 0x95, 0x68, 0xaa, 0x9c, 0xb6, 0xd5, 0x06, 0x4e, 0xde, 0x5e, 0x69, 0x6b, 0x07, 0xf8, 0x88, - 0xad, 0x1d, 0xe0, 0xb8, 0x8d, 0xfe, 0x48, 0x30, 0xad, 0x8e, 0x08, 0xbc, 0x15, 0xd0, 0x30, 0x7c, - 0xec, 0x33, 0x9b, 0xbc, 0x53, 0xc8, 0xff, 
0xb3, 0x91, 0x5b, 0x52, 0x7d, 0xa8, 0xc5, 0x11, 0xfd, - 0x37, 0x9a, 0xb9, 0x8c, 0xef, 0xa1, 0xa5, 0x01, 0xbf, 0x22, 0xa9, 0x16, 0xf3, 0x5d, 0x20, 0x17, - 0x6a, 0xc6, 0xdf, 0x63, 0x6c, 0xcb, 0x94, 0xfb, 0xc9, 0x51, 0xff, 0x4a, 0x87, 0x57, 0xf0, 0x7d, - 0xb4, 0x9c, 0x90, 0x55, 0xe8, 0x15, 0xfa, 0xbd, 0x42, 0xff, 0x93, 0x8d, 0xd6, 0xe9, 0x1f, 0x60, - 0x63, 0x3a, 0xb2, 0x84, 0x77, 0x51, 0x29, 0x81, 0xbb, 0x4e, 0xc8, 0xc9, 0x07, 0x45, 0x5d, 0xcd, - 0xa6, 0xee, 0x3b, 0x21, 0x4f, 0xe5, 0x28, 0x2a, 0xc6, 0x24, 0x61, 0x4d, 0x91, 0x3e, 0x8e, 0x25, - 0x89, 0xd1, 0x23, 0xa4, 0xa8, 0x18, 0x1f, 0xbd, 0x24, 0x89, 0x44, 0x3e, 0xcb, 0x8f, 0x3b, 0x7a, - 0xd1, 0x33, 0x9c, 0x48, 0x5d, 0x8b, 0x13, 0x29, 0x31, 0x3a, 0x91, 0xcf, 0xf3, 0xe3, 0x12, 0x29, - 0xba, 0x32, 0x12, 0x99, 0x94, 0xd3, 0xb6, 0x44, 0x22, 0x5f, 0x5c, 0x69, 0x6b, 0x38, 0x91, 0xba, - 0x86, 0x1f, 0xa2, 0xf2, 0x00, 0x46, 0x06, 0x25, 0x00, 0xd6, 0x75, 0x42, 0x79, 0x8f, 0xbd, 0x54, - 0xcc, 0x6b, 0x63, 0x98, 0x42, 0x7e, 0x18, 0xab, 0x23, 0xfe, 0xef, 0x34, 0x7b, 0x1d, 0x77, 0xd1, - 0x4a, 0x32, 0x4b, 0x47, 0x67, 0x60, 0xd8, 0x2b, 0x35, 0xec, 0x7a, 0xf6, 0x30, 0x95, 0x92, 0xd1, - 0x69, 0x84, 0x8e, 0x11, 0xd4, 0x7e, 0x41, 0xf3, 0xdb, 0xdd, 0x80, 0x3f, 0x31, 0x21, 0x0c, 0x7c, - 0x2f, 0x84, 0x5a, 0x80, 0x56, 0xae, 0xf8, 0x47, 0x84, 0x31, 0x9a, 0x92, 0xb7, 0xbb, 0x21, 0x6f, - 0x77, 0xf9, 0x2c, 0x6e, 0xfd, 0xf8, 0xfb, 0xd4, 0xb7, 0x7e, 0xf4, 0x8e, 0x57, 0x51, 0x31, 0x74, - 0xba, 0x81, 0x0b, 0x16, 0xf7, 0x4f, 0x41, 0x5d, 0xfa, 0x79, 0xb3, 0xa0, 0x6a, 0x47, 0xa2, 0x74, - 0x67, 0xe9, 0xfc, 0x73, 0x65, 0xe2, 0xfc, 0xb2, 0x62, 0x5c, 0x5c, 0x56, 0x8c, 0x4f, 0x97, 0x15, - 0xe3, 0xe9, 0x97, 0xca, 0xc4, 0xf1, 0x8c, 0xfc, 0xc9, 0xb1, 0xf9, 0x33, 0x00, 0x00, 0xff, 0xff, - 0xa0, 0xbb, 0x20, 0x2c, 0xca, 0x08, 0x00, 0x00, -} - func (m *RequestHeader) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -298,43 +113,33 @@ func (m *RequestHeader) Marshal() (dAtA []byte, err error) { } func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.AuthRevision != 0 { - i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision)) - i-- - dAtA[i] = 0x18 + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) } if len(m.Username) > 0 { - i -= len(m.Username) - copy(dAtA[i:], m.Username) - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username))) + i += copy(dAtA[i:], m.Username) } - if m.ID != 0 { - i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 + if m.AuthRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision)) } - return len(dAtA) - i, nil + return i, nil } func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -342,377 +147,326 @@ func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) { } func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) { 
- size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InternalRaftRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) } - if m.AuthRoleRevokePermission != nil { - { - size, err := m.AuthRoleRevokePermission.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.V2 != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.V2.Size())) + n1, err := m.V2.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x4b - i-- - dAtA[i] = 0xa2 + i += n1 } - if m.AuthRoleGrantPermission != nil { - { - size, err := m.AuthRoleGrantPermission.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.Range != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Range.Size())) + n2, err := m.Range.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x4b - i-- - dAtA[i] = 0x9a + i += n2 } - if m.AuthRoleGet != nil { - { - size, err := m.AuthRoleGet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.Put != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Put.Size())) + n3, err := m.Put.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x4b - i-- - dAtA[i] = 0x92 + i += n3 } - if m.AuthRoleDelete != nil { - { - size, err := m.AuthRoleDelete.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.DeleteRange != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.DeleteRange.Size())) + n4, err := m.DeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x4b - i-- - dAtA[i] = 0x8a + i += n4 } - if m.AuthRoleAdd != nil { - { - size, err := m.AuthRoleAdd.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.Txn != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Txn.Size())) + n5, err := m.Txn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x4b - i-- - dAtA[i] = 0x82 + i += n5 } - if m.AuthRoleList != nil { - { - size, err := m.AuthRoleList.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.Compaction != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Compaction.Size())) + n6, err := m.Compaction.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x45 - i-- - dAtA[i] = 0x9a + i += n6 } - if m.AuthUserList != nil { - { - size, err := m.AuthUserList.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.LeaseGrant != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseGrant.Size())) + n7, err := m.LeaseGrant.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x45 - i-- - dAtA[i] = 0x92 + i 
+= n7 } - if m.AuthUserRevokeRole != nil { - { - size, err := m.AuthUserRevokeRole.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.LeaseRevoke != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseRevoke.Size())) + n8, err := m.LeaseRevoke.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x45 - i-- - dAtA[i] = 0x8a + i += n8 } - if m.AuthUserGrantRole != nil { - { - size, err := m.AuthUserGrantRole.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.Alarm != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Alarm.Size())) + n9, err := m.Alarm.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x45 - i-- - dAtA[i] = 0x82 + i += n9 } - if m.AuthUserChangePassword != nil { - { - size, err := m.AuthUserChangePassword.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.LeaseCheckpoint != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseCheckpoint.Size())) + n10, err := m.LeaseCheckpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x44 - i-- - dAtA[i] = 0xfa + i += n10 } - if m.AuthUserGet != nil { - { - size, err := m.AuthUserGet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.Header != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x6 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Header.Size())) + n11, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x44 - i-- - dAtA[i] = 0xf2 + i += n11 } - if m.AuthUserDelete != nil { - { - size, err := m.AuthUserDelete.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthEnable != nil { + dAtA[i] = 0xc2 + i++ + dAtA[i] = 0x3e + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthEnable.Size())) + n12, err := m.AuthEnable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x44 - i-- - dAtA[i] = 0xea + i += n12 } - if m.AuthUserAdd != nil { - { - size, err := m.AuthUserAdd.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthDisable != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x3f + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthDisable.Size())) + n13, err := m.AuthDisable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x44 - i-- - dAtA[i] = 0xe2 + i += n13 } if m.Authenticate != nil { - { - size, err := m.Authenticate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3f - i-- dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x3f + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.Authenticate.Size())) + n14, err := m.Authenticate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 } - if m.AuthDisable != nil { - { - size, err := m.AuthDisable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthUserAdd != nil { + dAtA[i] = 0xe2 + i++ + 
dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserAdd.Size())) + n15, err := m.AuthUserAdd.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x3f - i-- - dAtA[i] = 0x9a + i += n15 } - if m.AuthEnable != nil { - { - size, err := m.AuthEnable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthUserDelete != nil { + dAtA[i] = 0xea + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserDelete.Size())) + n16, err := m.AuthUserDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x3e - i-- - dAtA[i] = 0xc2 + i += n16 } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthUserGet != nil { + dAtA[i] = 0xf2 + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGet.Size())) + n17, err := m.AuthUserGet.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0xa2 + i += n17 } - if m.Alarm != nil { - { - size, err := m.Alarm.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthUserChangePassword != nil { + dAtA[i] = 0xfa + i++ + dAtA[i] = 0x44 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserChangePassword.Size())) + n18, err := m.AuthUserChangePassword.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x52 + i += n18 } - if m.LeaseRevoke != nil { - { - size, err := m.LeaseRevoke.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthUserGrantRole != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGrantRole.Size())) + n19, err := m.AuthUserGrantRole.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x4a + i += n19 } - if m.LeaseGrant != nil { - { - size, err := m.LeaseGrant.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthUserRevokeRole != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserRevokeRole.Size())) + n20, err := m.AuthUserRevokeRole.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x42 + i += n20 } - if m.Compaction != nil { - { - size, err := m.Compaction.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthUserList != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserList.Size())) + n21, err := m.AuthUserList.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x3a + i += n21 } - if m.Txn != nil { - { - size, err := m.Txn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthRoleList != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x45 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleList.Size())) + n22, err := m.AuthRoleList.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x32 + i += n22 } - if 
m.DeleteRange != nil { - { - size, err := m.DeleteRange.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthRoleAdd != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleAdd.Size())) + n23, err := m.AuthRoleAdd.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x2a + i += n23 } - if m.Put != nil { - { - size, err := m.Put.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthRoleDelete != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleDelete.Size())) + n24, err := m.AuthRoleDelete.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 + i += n24 } - if m.Range != nil { - { - size, err := m.Range.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthRoleGet != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGet.Size())) + n25, err := m.AuthRoleGet.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n25 } - if m.V2 != nil { - { - size, err := m.V2.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaftInternal(dAtA, i, uint64(size)) + if m.AuthRoleGrantPermission != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGrantPermission.Size())) + n26, err := m.AuthRoleGrantPermission.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n26 } - if m.ID != 0 { - i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 + if m.AuthRoleRevokePermission != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x4b + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleRevokePermission.Size())) + n27, err := m.AuthRoleRevokePermission.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 } - return len(dAtA) - i, nil + return i, nil } func (m *EmptyResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -720,26 +474,17 @@ func (m *EmptyResponse) Marshal() (dAtA []byte, err error) { } func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EmptyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -747,58 +492,41 @@ func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) { } func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InternalAuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int 
_ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.SimpleToken) > 0 { - i -= len(m.SimpleToken) - copy(dAtA[i:], m.SimpleToken) - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken))) - i-- - dAtA[i] = 0x1a + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if len(m.Password) > 0 { - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.SimpleToken) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken))) + i += copy(dAtA[i:], m.SimpleToken) } - return len(dAtA) - i, nil + return i, nil } func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int { - offset -= sovRaftInternal(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *RequestHeader) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { @@ -811,16 +539,10 @@ func (m *RequestHeader) Size() (n int) { if m.AuthRevision != 0 { n += 1 + sovRaftInternal(uint64(m.AuthRevision)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *InternalRaftRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { @@ -862,6 +584,10 @@ func (m *InternalRaftRequest) Size() (n int) { l = m.Alarm.Size() n += 1 + l + sovRaftInternal(uint64(l)) } + if m.LeaseCheckpoint != nil { + l = m.LeaseCheckpoint.Size() + n += 1 + l + sovRaftInternal(uint64(l)) + } if m.Header != nil { l = m.Header.Size() n += 2 + l + sovRaftInternal(uint64(l)) @@ -930,28 +656,16 @@ func (m *InternalRaftRequest) Size() (n int) { l = m.AuthRoleRevokePermission.Size() n += 2 + l + sovRaftInternal(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *EmptyResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *InternalAuthenticateRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -966,14 +680,18 @@ func (m *InternalAuthenticateRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRaftInternal(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func sovRaftInternal(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozRaftInternal(x uint64) (n int) { return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -993,7 +711,7 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1021,7 +739,7 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= uint64(b&0x7F) << shift + m.ID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1040,7 +758,7 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1050,9 +768,6 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1072,7 +787,7 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AuthRevision |= uint64(b&0x7F) << shift + m.AuthRevision |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1086,13 +801,9 @@ func (m *RequestHeader) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaftInternal } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaftInternal - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1117,7 +828,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1145,7 +856,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= uint64(b&0x7F) << shift + m.ID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1164,7 +875,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1173,9 +884,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1200,7 +908,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1209,9 +917,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1236,7 +941,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1245,9 +950,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1272,7 +974,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1281,9 +983,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1308,7 +1007,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1317,9 +1016,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - 
return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1344,7 +1040,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1353,9 +1049,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1380,7 +1073,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1389,9 +1082,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1416,7 +1106,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1425,9 +1115,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1452,7 +1139,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1461,9 +1148,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1474,9 +1158,9 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 100: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LeaseCheckpoint", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1488,7 +1172,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1497,9 +1181,39 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LeaseCheckpoint == nil { + m.LeaseCheckpoint = &LeaseCheckpointRequest{} + } + if err := m.LeaseCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 100: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaftInternal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthRaftInternal } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } @@ -1524,7 +1238,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1533,9 +1247,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1560,7 +1271,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1569,9 +1280,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1596,7 +1304,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1605,9 +1313,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1632,7 +1337,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1641,9 +1346,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1668,7 +1370,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1677,9 +1379,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1704,7 +1403,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1713,9 +1412,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1740,7 +1436,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1749,9 +1445,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1776,7 +1469,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1785,9 +1478,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -1812,7 +1502,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1821,9 +1511,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1848,7 +1535,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1857,9 +1544,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1884,7 +1568,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1893,9 +1577,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1920,7 +1601,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1929,9 +1610,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1956,7 +1634,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1965,9 +1643,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1992,7 +1667,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2001,9 +1676,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2028,7 +1700,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2037,9 +1709,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2064,7 +1733,7 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2073,9 +1742,6 @@ func (m *InternalRaftRequest) Unmarshal(dAtA 
[]byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2095,13 +1761,9 @@ func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaftInternal } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaftInternal - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -2126,7 +1788,7 @@ func (m *EmptyResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2149,13 +1811,9 @@ func (m *EmptyResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaftInternal } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaftInternal - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -2180,7 +1838,7 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2208,7 +1866,7 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2218,9 +1876,6 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2240,7 +1895,7 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2250,9 +1905,6 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2272,7 +1924,7 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2282,9 +1934,6 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaftInternal } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRaftInternal - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2299,13 +1948,9 @@ func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaftInternal } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaftInternal - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -2369,11 +2014,8 @@ func skipRaftInternal(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthRaftInternal - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthRaftInternal } return iNdEx, nil @@ -2404,9 +2046,6 @@ func skipRaftInternal(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthRaftInternal - } } return iNdEx, nil case 4: @@ -2425,3 +2064,64 @@ var ( ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftInternal) } + +var fileDescriptorRaftInternal = []byte{ + // 867 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xcb, 0x52, 0x33, 0x45, + 0x14, 0xc7, 0x49, 0xb8, 0xa6, 0x13, 0x2e, 0x36, 0xa0, 0x6d, 0xa8, 0x8a, 0x10, 0xbc, 0xe0, 0x0d, + 0xad, 0xf0, 0x00, 0x1a, 0x13, 0x0a, 0xa8, 0xa2, 0x90, 0x9a, 0xc2, 0x2a, 0xab, 0x5c, 0x8c, 0xcd, + 0xcc, 0x21, 0x19, 0x99, 0xcc, 0x8c, 0x3d, 0x9d, 0x88, 0x6f, 0xe2, 0x63, 0x78, 0xdb, 0xbb, 0x65, + 0xe1, 0x05, 0xf5, 0x05, 0x14, 0x37, 0xee, 0xbf, 0xef, 0x01, 0xbe, 0xea, 0xcb, 0xf4, 0x64, 0x92, + 0x0e, 0xbb, 0xc9, 0x39, 0xff, 0xf3, 0xfb, 0x9f, 0x99, 0x3e, 0x07, 0x1a, 0x6d, 0x32, 0x7a, 0xc3, + 0xdd, 0x20, 0xe2, 0xc0, 0x22, 0x1a, 0x1e, 0x26, 0x2c, 0xe6, 0x31, 0xae, 0x01, 0xf7, 0xfc, 0x14, + 0xd8, 0x08, 0x58, 0x72, 0x5d, 0xdf, 0xea, 0xc5, 0xbd, 0x58, 0x26, 0x3e, 0x10, 0x4f, 0x4a, 0x53, + 0xdf, 0xc8, 0x35, 0x3a, 0x52, 0x61, 0x89, 0xa7, 0x1e, 0x9b, 0x5f, 0xa2, 0x55, 0x07, 0xbe, 0x1e, + 0x42, 0xca, 0x4f, 0x81, 0xfa, 0xc0, 0xf0, 0x1a, 0x2a, 0x9f, 0x75, 0x49, 0x69, 0xb7, 0x74, 0xb0, + 0xe0, 0x94, 0xcf, 0xba, 0xb8, 0x8e, 0x56, 0x86, 0xa9, 0xb0, 0x1c, 0x00, 0x29, 0xef, 0x96, 0x0e, + 0x2a, 0x8e, 0xf9, 0x8d, 0xf7, 0xd1, 0x2a, 0x1d, 0xf2, 0xbe, 0xcb, 0x60, 0x14, 0xa4, 0x41, 0x1c, + 0x91, 0x79, 0x59, 0x56, 0x13, 0x41, 0x47, 0xc7, 0x9a, 0xbf, 0xac, 0xa3, 0xcd, 0x33, 0xdd, 0xb5, + 0x43, 0x6f, 0xb8, 0xb6, 0x9b, 0x32, 0x7a, 0x03, 0x95, 0x47, 0x2d, 0x69, 0x51, 0x6d, 0x6d, 0x1f, + 0x8e, 0xbf, 0xd7, 0xa1, 0x2e, 0x71, 0xca, 0xa3, 0x16, 0xfe, 0x10, 0x2d, 0x32, 0x1a, 0xf5, 0x40, + 0x7a, 0x55, 0x5b, 0xf5, 0x09, 0xa5, 0x48, 0x65, 0x72, 0x25, 0xc4, 0xef, 0xa0, 0xf9, 0x64, 0xc8, + 0xc9, 0x82, 0xd4, 0x93, 0xa2, 0xfe, 0x72, 0x98, 0xf5, 0xe3, 0x08, 0x11, 0xee, 0xa0, 0x9a, 0x0f, + 0x21, 0x70, 0x70, 0x95, 0xc9, 0xa2, 0x2c, 0xda, 0x2d, 0x16, 0x75, 0xa5, 0xa2, 0x60, 0x55, 0xf5, + 0xf3, 0x98, 0x30, 0xe4, 0x77, 0x11, 0x59, 0xb2, 0x19, 0x5e, 0xdd, 0x45, 0xc6, 0x90, 0xdf, 0x45, + 0xf8, 0x23, 0x84, 0xbc, 0x78, 0x90, 0x50, 0x8f, 0x8b, 0xef, 0xb7, 0x2c, 0x4b, 0x5e, 0x2b, 0x96, + 0x74, 0x4c, 0x3e, 0xab, 0x1c, 0x2b, 0xc1, 0x1f, 0xa3, 0x6a, 0x08, 0x34, 0x05, 0xb7, 0xc7, 0x68, + 0xc4, 0xc9, 0x8a, 0x8d, 0x70, 0x2e, 0x04, 0x27, 0x22, 0x6f, 0x08, 0xa1, 0x09, 0x89, 0x77, 0x56, + 0x04, 0x06, 0xa3, 0xf8, 0x16, 0x48, 0xc5, 0xf6, 0xce, 0x12, 0xe1, 0x48, 0x81, 0x79, 0xe7, 0x30, + 0x8f, 0x89, 0x63, 0xa1, 0x21, 0x65, 0x03, 0x82, 0x6c, 0xc7, 0xd2, 0x16, 0x29, 0x73, 0x2c, 0x52, + 0x88, 0x3f, 0x45, 0x1b, 0xca, 0xd6, 0xeb, 0x83, 0x77, 0x9b, 0xc4, 0x41, 0xc4, 0x49, 0x55, 0x16, + 0xbf, 0x6e, 0xb1, 0xee, 0x18, 0x51, 0x86, 0x59, 0x0f, 0x8b, 0x71, 0x7c, 0x84, 0x96, 0xfa, 0x72, + 0x86, 0x89, 0x2f, 0x31, 0x3b, 0xd6, 0x21, 0x52, 0x63, 0xee, 0x68, 0x29, 0x6e, 0xa3, 0xaa, 0x1c, + 0x61, 0x88, 0xe8, 0x75, 0x08, 0xe4, 0x7f, 
0xeb, 0x09, 0xb4, 0x87, 0xbc, 0x7f, 0x2c, 0x05, 0xe6, + 0xfb, 0x51, 0x13, 0xc2, 0x5d, 0x24, 0x07, 0xde, 0xf5, 0x83, 0x54, 0x32, 0x9e, 0x2d, 0xdb, 0x3e, + 0xa0, 0x60, 0x74, 0x95, 0xc2, 0x7c, 0x40, 0x9a, 0xc7, 0xf0, 0x85, 0xa2, 0x40, 0xc4, 0x03, 0x8f, + 0x72, 0x20, 0xcf, 0x15, 0xe5, 0xed, 0x22, 0x25, 0x5b, 0xa4, 0xf6, 0x98, 0x34, 0xc3, 0x15, 0xea, + 0xf1, 0xb1, 0xde, 0x4d, 0xb1, 0xac, 0x2e, 0xf5, 0x7d, 0xf2, 0xeb, 0xca, 0xac, 0xb6, 0x3e, 0x4b, + 0x81, 0xb5, 0x7d, 0xbf, 0xd0, 0x96, 0x8e, 0xe1, 0x0b, 0xb4, 0x91, 0x63, 0xd4, 0x90, 0x93, 0xdf, + 0x14, 0x69, 0xdf, 0x4e, 0xd2, 0xdb, 0xa1, 0x61, 0x6b, 0xb4, 0x10, 0x2e, 0xb6, 0xd5, 0x03, 0x4e, + 0x7e, 0x7f, 0xb2, 0xad, 0x13, 0xe0, 0x53, 0x6d, 0x9d, 0x00, 0xc7, 0x3d, 0xf4, 0x6a, 0x8e, 0xf1, + 0xfa, 0x62, 0xed, 0xdc, 0x84, 0xa6, 0xe9, 0x37, 0x31, 0xf3, 0xc9, 0x1f, 0x0a, 0xf9, 0xae, 0x1d, + 0xd9, 0x91, 0xea, 0x4b, 0x2d, 0xce, 0xe8, 0x2f, 0x53, 0x6b, 0x1a, 0x7f, 0x8e, 0xb6, 0xc6, 0xfa, + 0x15, 0xfb, 0xe2, 0xb2, 0x38, 0x04, 0xf2, 0xa0, 0x3c, 0xde, 0x9c, 0xd1, 0xb6, 0xdc, 0xb5, 0x38, + 0x3f, 0xea, 0x97, 0xe8, 0x64, 0x06, 0x7f, 0x81, 0xb6, 0x73, 0xb2, 0x5a, 0x3d, 0x85, 0xfe, 0x53, + 0xa1, 0xdf, 0xb2, 0xa3, 0xf5, 0x0e, 0x8e, 0xb1, 0x31, 0x9d, 0x4a, 0xe1, 0x53, 0xb4, 0x96, 0xc3, + 0xc3, 0x20, 0xe5, 0xe4, 0x2f, 0x45, 0xdd, 0xb3, 0x53, 0xcf, 0x83, 0x94, 0x17, 0xe6, 0x28, 0x0b, + 0x1a, 0x92, 0x68, 0x4d, 0x91, 0xfe, 0x9e, 0x49, 0x12, 0xd6, 0x53, 0xa4, 0x2c, 0x68, 0x8e, 0x5e, + 0x92, 0xc4, 0x44, 0x7e, 0x5f, 0x99, 0x75, 0xf4, 0xa2, 0x66, 0x72, 0x22, 0x75, 0xcc, 0x4c, 0xa4, + 0xc4, 0xe8, 0x89, 0xfc, 0xa1, 0x32, 0x6b, 0x22, 0x45, 0x95, 0x65, 0x22, 0xf3, 0x70, 0xb1, 0x2d, + 0x31, 0x91, 0x3f, 0x3e, 0xd9, 0xd6, 0xe4, 0x44, 0xea, 0x18, 0xfe, 0x0a, 0xd5, 0xc7, 0x30, 0x72, + 0x50, 0x12, 0x60, 0x83, 0x20, 0x95, 0xff, 0x18, 0x7f, 0x52, 0xcc, 0xf7, 0x66, 0x30, 0x85, 0xfc, + 0xd2, 0xa8, 0x33, 0xfe, 0x2b, 0xd4, 0x9e, 0xc7, 0x03, 0xb4, 0x93, 0x7b, 0xe9, 0xd1, 0x19, 0x33, + 0xfb, 0x59, 0x99, 0xbd, 0x6f, 0x37, 0x53, 0x53, 0x32, 0xed, 0x46, 0xe8, 0x0c, 0x41, 0x73, 0x1d, + 0xad, 0x1e, 0x0f, 0x12, 0xfe, 0xad, 0x03, 0x69, 0x12, 0x47, 0x29, 0x34, 0x13, 0xb4, 0xf3, 0xc4, + 0x1f, 0x22, 0x8c, 0xd1, 0x82, 0xbc, 0x2e, 0x94, 0xe4, 0x75, 0x41, 0x3e, 0x8b, 0x6b, 0x84, 0xd9, + 0x4f, 0x7d, 0x8d, 0xc8, 0x7e, 0xe3, 0x3d, 0x54, 0x4b, 0x83, 0x41, 0x12, 0x82, 0xcb, 0xe3, 0x5b, + 0x50, 0xb7, 0x88, 0x8a, 0x53, 0x55, 0xb1, 0x2b, 0x11, 0xfa, 0x64, 0xeb, 0xfe, 0xdf, 0xc6, 0xdc, + 0xfd, 0x63, 0xa3, 0xf4, 0xf0, 0xd8, 0x28, 0xfd, 0xf3, 0xd8, 0x28, 0x7d, 0xf7, 0x5f, 0x63, 0xee, + 0x7a, 0x49, 0xde, 0x61, 0x8e, 0x5e, 0x04, 0x00, 0x00, 0xff, 0xff, 0xed, 0x36, 0xf0, 0x6f, 0x1b, + 0x09, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto similarity index 97% rename from vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto rename to vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto index 25d45d3c4..7111f4572 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto @@ -37,6 +37,8 @@ message InternalRaftRequest { AlarmRequest alarm = 10; + LeaseCheckpointRequest lease_checkpoint = 11; + AuthEnableRequest auth_enable = 1000; AuthDisableRequest auth_disable = 1011; @@ -71,4 +73,3 @@ message InternalAuthenticateRequest { // simple_token is generated in API layer (etcdserver/v3_server.go) string simple_token = 3; } - diff --git 
a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go similarity index 100% rename from vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go rename to vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go similarity index 62% rename from vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go rename to vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go index a0cff8ffd..199ee6244 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -4,20 +4,23 @@ package etcdserverpb import ( - context "context" - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" - authpb "github.com/coreos/etcd/auth/authpb" - mvccpb "github.com/coreos/etcd/mvcc/mvccpb" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" + + mvccpb "go.etcd.io/etcd/mvcc/mvccpb" + + authpb "go.etcd.io/etcd/auth/authpb" + + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -25,12 +28,6 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - type AlarmType int32 const ( @@ -44,7 +41,6 @@ var AlarmType_name = map[int32]string{ 1: "NOSPACE", 2: "CORRUPT", } - var AlarmType_value = map[string]int32{ "NONE": 0, "NOSPACE": 1, @@ -54,10 +50,7 @@ var AlarmType_value = map[string]int32{ func (x AlarmType) String() string { return proto.EnumName(AlarmType_name, int32(x)) } - -func (AlarmType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{0} -} +func (AlarmType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } type RangeRequest_SortOrder int32 @@ -72,7 +65,6 @@ var RangeRequest_SortOrder_name = map[int32]string{ 1: "ASCEND", 2: "DESCEND", } - var RangeRequest_SortOrder_value = map[string]int32{ "NONE": 0, "ASCEND": 1, @@ -82,10 +74,7 @@ var RangeRequest_SortOrder_value = map[string]int32{ func (x RangeRequest_SortOrder) String() string { return proto.EnumName(RangeRequest_SortOrder_name, int32(x)) } - -func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{1, 0} -} +func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 0} } type RangeRequest_SortTarget int32 @@ -104,7 +93,6 @@ var RangeRequest_SortTarget_name = map[int32]string{ 3: "MOD", 4: "VALUE", } - var RangeRequest_SortTarget_value = map[string]int32{ "KEY": 0, "VERSION": 1, @@ -116,10 +104,7 @@ var RangeRequest_SortTarget_value = map[string]int32{ func (x RangeRequest_SortTarget) String() string { return proto.EnumName(RangeRequest_SortTarget_name, int32(x)) } - -func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{1, 1} -} +func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 1} } type Compare_CompareResult int32 @@ -136,7 +121,6 @@ var Compare_CompareResult_name = map[int32]string{ 2: "LESS", 3: "NOT_EQUAL", } - var Compare_CompareResult_value = map[string]int32{ "EQUAL": 0, "GREATER": 1, @@ -147,10 +131,7 @@ var Compare_CompareResult_value = map[string]int32{ func (x Compare_CompareResult) String() string { return proto.EnumName(Compare_CompareResult_name, int32(x)) } - -func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{9, 0} -} +func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 0} } type Compare_CompareTarget int32 @@ -169,7 +150,6 @@ var Compare_CompareTarget_name = map[int32]string{ 3: "VALUE", 4: "LEASE", } - var Compare_CompareTarget_value = map[string]int32{ "VERSION": 0, "CREATE": 1, @@ -181,10 +161,7 @@ var Compare_CompareTarget_value = map[string]int32{ func (x Compare_CompareTarget) String() string { return proto.EnumName(Compare_CompareTarget_name, int32(x)) } - -func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{9, 1} -} +func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 1} } type WatchCreateRequest_FilterType int32 @@ -199,7 +176,6 @@ var WatchCreateRequest_FilterType_name = map[int32]string{ 0: "NOPUT", 1: "NODELETE", } - var WatchCreateRequest_FilterType_value = map[string]int32{ "NOPUT": 0, "NODELETE": 1, @@ -208,9 +184,8 @@ var WatchCreateRequest_FilterType_value = map[string]int32{ func (x WatchCreateRequest_FilterType) String() string { return proto.EnumName(WatchCreateRequest_FilterType_name, 
int32(x)) } - func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{21, 0} + return fileDescriptorRpc, []int{21, 0} } type AlarmRequest_AlarmAction int32 @@ -226,7 +201,6 @@ var AlarmRequest_AlarmAction_name = map[int32]string{ 1: "ACTIVATE", 2: "DEACTIVATE", } - var AlarmRequest_AlarmAction_value = map[string]int32{ "GET": 0, "ACTIVATE": 1, @@ -236,9 +210,8 @@ var AlarmRequest_AlarmAction_value = map[string]int32{ func (x AlarmRequest_AlarmAction) String() string { return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x)) } - func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{49, 0} + return fileDescriptorRpc, []int{54, 0} } type ResponseHeader struct { @@ -252,44 +225,13 @@ type ResponseHeader struct { // header.revision number. Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"` // raft_term is the raft term when the request was applied. - RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } -func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } -func (*ResponseHeader) ProtoMessage() {} -func (*ResponseHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{0} -} -func (m *ResponseHeader) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseHeader.Merge(m, src) -} -func (m *ResponseHeader) XXX_Size() int { - return m.Size() -} -func (m *ResponseHeader) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseHeader.DiscardUnknown(m) + RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"` } -var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo +func (m *ResponseHeader) Reset() { *m = ResponseHeader{} } +func (m *ResponseHeader) String() string { return proto.CompactTextString(m) } +func (*ResponseHeader) ProtoMessage() {} +func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } func (m *ResponseHeader) GetClusterId() uint64 { if m != nil { @@ -357,48 +299,17 @@ type RangeRequest struct { // greater mod revisions will be filtered away. MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"` // min_create_revision is the lower bound for returned key create revisions; all keys with - // lesser create trevisions will be filtered away. + // lesser create revisions will be filtered away. MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"` // max_create_revision is the upper bound for returned key create revisions; all keys with // greater create revisions will be filtered away. 
- MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RangeRequest) Reset() { *m = RangeRequest{} } -func (m *RangeRequest) String() string { return proto.CompactTextString(m) } -func (*RangeRequest) ProtoMessage() {} -func (*RangeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{1} -} -func (m *RangeRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RangeRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RangeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RangeRequest.Merge(m, src) -} -func (m *RangeRequest) XXX_Size() int { - return m.Size() -} -func (m *RangeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RangeRequest.DiscardUnknown(m) + MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"` } -var xxx_messageInfo_RangeRequest proto.InternalMessageInfo +func (m *RangeRequest) Reset() { *m = RangeRequest{} } +func (m *RangeRequest) String() string { return proto.CompactTextString(m) } +func (*RangeRequest) ProtoMessage() {} +func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } func (m *RangeRequest) GetKey() []byte { if m != nil { @@ -492,51 +403,20 @@ func (m *RangeRequest) GetMaxCreateRevision() int64 { } type RangeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // kvs is the list of key-value pairs matched by the range request. // kvs is empty when count is requested. - Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs,proto3" json:"kvs,omitempty"` + Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs" json:"kvs,omitempty"` // more indicates if there are more keys to return in the requested range. More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"` // count is set to the number of keys within the range when requested. 
- Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RangeResponse) Reset() { *m = RangeResponse{} } -func (m *RangeResponse) String() string { return proto.CompactTextString(m) } -func (*RangeResponse) ProtoMessage() {} -func (*RangeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{2} -} -func (m *RangeResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RangeResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RangeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RangeResponse.Merge(m, src) -} -func (m *RangeResponse) XXX_Size() int { - return m.Size() -} -func (m *RangeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RangeResponse.DiscardUnknown(m) + Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` } -var xxx_messageInfo_RangeResponse proto.InternalMessageInfo +func (m *RangeResponse) Reset() { *m = RangeResponse{} } +func (m *RangeResponse) String() string { return proto.CompactTextString(m) } +func (*RangeResponse) ProtoMessage() {} +func (*RangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} } func (m *RangeResponse) GetHeader() *ResponseHeader { if m != nil { @@ -582,44 +462,13 @@ type PutRequest struct { IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"` // If ignore_lease is set, etcd updates the key using its current lease. // Returns an error if the key does not exist. 
- IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PutRequest) Reset() { *m = PutRequest{} } -func (m *PutRequest) String() string { return proto.CompactTextString(m) } -func (*PutRequest) ProtoMessage() {} -func (*PutRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{3} -} -func (m *PutRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PutRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PutRequest.Merge(m, src) -} -func (m *PutRequest) XXX_Size() int { - return m.Size() -} -func (m *PutRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PutRequest.DiscardUnknown(m) + IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` } -var xxx_messageInfo_PutRequest proto.InternalMessageInfo +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} +func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } func (m *PutRequest) GetKey() []byte { if m != nil { @@ -664,46 +513,15 @@ func (m *PutRequest) GetIgnoreLease() bool { } type PutResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // if prev_kv is set in the request, the previous key-value pair will be returned. 
- PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PutResponse) Reset() { *m = PutResponse{} } -func (m *PutResponse) String() string { return proto.CompactTextString(m) } -func (*PutResponse) ProtoMessage() {} -func (*PutResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{4} -} -func (m *PutResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PutResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PutResponse.Merge(m, src) -} -func (m *PutResponse) XXX_Size() int { - return m.Size() -} -func (m *PutResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PutResponse.DiscardUnknown(m) + PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` } -var xxx_messageInfo_PutResponse proto.InternalMessageInfo +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} +func (*PutResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{4} } func (m *PutResponse) GetHeader() *ResponseHeader { if m != nil { @@ -730,44 +548,13 @@ type DeleteRangeRequest struct { RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. // The previous key-value pairs will be returned in the delete response. 
- PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } -func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRangeRequest) ProtoMessage() {} -func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{5} -} -func (m *DeleteRangeRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteRangeRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteRangeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRangeRequest.Merge(m, src) -} -func (m *DeleteRangeRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteRangeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRangeRequest.DiscardUnknown(m) + PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` } -var xxx_messageInfo_DeleteRangeRequest proto.InternalMessageInfo +func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} } +func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeRequest) ProtoMessage() {} +func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } func (m *DeleteRangeRequest) GetKey() []byte { if m != nil { @@ -791,48 +578,17 @@ func (m *DeleteRangeRequest) GetPrevKv() bool { } type DeleteRangeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // deleted is the number of keys deleted by the delete range request. Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"` // if prev_kv is set in the request, the previous key-value pairs will be returned. 
- PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs,proto3" json:"prev_kvs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } -func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteRangeResponse) ProtoMessage() {} -func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{6} -} -func (m *DeleteRangeResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteRangeResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteRangeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRangeResponse.Merge(m, src) -} -func (m *DeleteRangeResponse) XXX_Size() int { - return m.Size() -} -func (m *DeleteRangeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRangeResponse.DiscardUnknown(m) + PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs" json:"prev_kvs,omitempty"` } -var xxx_messageInfo_DeleteRangeResponse proto.InternalMessageInfo +func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} } +func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteRangeResponse) ProtoMessage() {} +func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{6} } func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { if m != nil { @@ -863,44 +619,13 @@ type RequestOp struct { // *RequestOp_RequestPut // *RequestOp_RequestDeleteRange // *RequestOp_RequestTxn - Request isRequestOp_Request `protobuf_oneof:"request"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RequestOp) Reset() { *m = RequestOp{} } -func (m *RequestOp) String() string { return proto.CompactTextString(m) } -func (*RequestOp) ProtoMessage() {} -func (*RequestOp) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{7} -} -func (m *RequestOp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestOp.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestOp) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestOp.Merge(m, src) -} -func (m *RequestOp) XXX_Size() int { - return m.Size() -} -func (m *RequestOp) XXX_DiscardUnknown() { - xxx_messageInfo_RequestOp.DiscardUnknown(m) + Request isRequestOp_Request `protobuf_oneof:"request"` } -var xxx_messageInfo_RequestOp proto.InternalMessageInfo +func (m *RequestOp) Reset() { *m = RequestOp{} } +func (m *RequestOp) String() string { return proto.CompactTextString(m) } +func (*RequestOp) ProtoMessage() {} +func (*RequestOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{7} } type isRequestOp_Request interface { isRequestOp_Request() @@ -909,16 +634,16 @@ type isRequestOp_Request interface { } type RequestOp_RequestRange struct { - RequestRange *RangeRequest 
`protobuf:"bytes,1,opt,name=request_range,json=requestRange,proto3,oneof"` + RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,oneof"` } type RequestOp_RequestPut struct { - RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,proto3,oneof"` + RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,oneof"` } type RequestOp_RequestDeleteRange struct { - RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,proto3,oneof"` + RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,oneof"` } type RequestOp_RequestTxn struct { - RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,proto3,oneof"` + RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,oneof"` } func (*RequestOp_RequestRange) isRequestOp_Request() {} @@ -1048,22 +773,22 @@ func _RequestOp_OneofSizer(msg proto.Message) (n int) { switch x := m.Request.(type) { case *RequestOp_RequestRange: s := proto.Size(x.RequestRange) - n += 1 // tag and wire + n += proto.SizeVarint(1<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *RequestOp_RequestPut: s := proto.Size(x.RequestPut) - n += 1 // tag and wire + n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *RequestOp_RequestDeleteRange: s := proto.Size(x.RequestDeleteRange) - n += 1 // tag and wire + n += proto.SizeVarint(3<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *RequestOp_RequestTxn: s := proto.Size(x.RequestTxn) - n += 1 // tag and wire + n += proto.SizeVarint(4<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case nil: @@ -1081,44 +806,13 @@ type ResponseOp struct { // *ResponseOp_ResponsePut // *ResponseOp_ResponseDeleteRange // *ResponseOp_ResponseTxn - Response isResponseOp_Response `protobuf_oneof:"response"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResponseOp) Reset() { *m = ResponseOp{} } -func (m *ResponseOp) String() string { return proto.CompactTextString(m) } -func (*ResponseOp) ProtoMessage() {} -func (*ResponseOp) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{8} -} -func (m *ResponseOp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseOp.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseOp) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseOp.Merge(m, src) -} -func (m *ResponseOp) XXX_Size() int { - return m.Size() -} -func (m *ResponseOp) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseOp.DiscardUnknown(m) + Response isResponseOp_Response `protobuf_oneof:"response"` } -var xxx_messageInfo_ResponseOp proto.InternalMessageInfo +func (m *ResponseOp) Reset() { *m = ResponseOp{} } +func (m *ResponseOp) String() string { return proto.CompactTextString(m) } +func (*ResponseOp) ProtoMessage() {} +func (*ResponseOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{8} } type isResponseOp_Response interface { isResponseOp_Response() @@ -1127,16 +821,16 @@ type isResponseOp_Response interface { } type ResponseOp_ResponseRange struct { 
- ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,proto3,oneof"` + ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,oneof"` } type ResponseOp_ResponsePut struct { - ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,proto3,oneof"` + ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,oneof"` } type ResponseOp_ResponseDeleteRange struct { - ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,proto3,oneof"` + ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,oneof"` } type ResponseOp_ResponseTxn struct { - ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,proto3,oneof"` + ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,oneof"` } func (*ResponseOp_ResponseRange) isResponseOp_Response() {} @@ -1266,22 +960,22 @@ func _ResponseOp_OneofSizer(msg proto.Message) (n int) { switch x := m.Response.(type) { case *ResponseOp_ResponseRange: s := proto.Size(x.ResponseRange) - n += 1 // tag and wire + n += proto.SizeVarint(1<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *ResponseOp_ResponsePut: s := proto.Size(x.ResponsePut) - n += 1 // tag and wire + n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *ResponseOp_ResponseDeleteRange: s := proto.Size(x.ResponseDeleteRange) - n += 1 // tag and wire + n += proto.SizeVarint(3<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *ResponseOp_ResponseTxn: s := proto.Size(x.ResponseTxn) - n += 1 // tag and wire + n += proto.SizeVarint(4<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case nil: @@ -1307,44 +1001,13 @@ type Compare struct { TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"` // range_end compares the given target to all keys in the range [key, range_end). // See RangeRequest for more details on key ranges. 
- RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Compare) Reset() { *m = Compare{} } -func (m *Compare) String() string { return proto.CompactTextString(m) } -func (*Compare) ProtoMessage() {} -func (*Compare) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{9} -} -func (m *Compare) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Compare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Compare.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Compare) XXX_Merge(src proto.Message) { - xxx_messageInfo_Compare.Merge(m, src) -} -func (m *Compare) XXX_Size() int { - return m.Size() -} -func (m *Compare) XXX_DiscardUnknown() { - xxx_messageInfo_Compare.DiscardUnknown(m) + RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` } -var xxx_messageInfo_Compare proto.InternalMessageInfo +func (m *Compare) Reset() { *m = Compare{} } +func (m *Compare) String() string { return proto.CompactTextString(m) } +func (*Compare) ProtoMessage() {} +func (*Compare) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9} } type isCompare_TargetUnion interface { isCompare_TargetUnion() @@ -1529,20 +1192,20 @@ func _Compare_OneofSizer(msg proto.Message) (n int) { // target_union switch x := m.TargetUnion.(type) { case *Compare_Version: - n += 1 // tag and wire + n += proto.SizeVarint(4<<3 | proto.WireVarint) n += proto.SizeVarint(uint64(x.Version)) case *Compare_CreateRevision: - n += 1 // tag and wire + n += proto.SizeVarint(5<<3 | proto.WireVarint) n += proto.SizeVarint(uint64(x.CreateRevision)) case *Compare_ModRevision: - n += 1 // tag and wire + n += proto.SizeVarint(6<<3 | proto.WireVarint) n += proto.SizeVarint(uint64(x.ModRevision)) case *Compare_Value: - n += 1 // tag and wire + n += proto.SizeVarint(7<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(len(x.Value))) n += len(x.Value) case *Compare_Lease: - n += 1 // tag and wire + n += proto.SizeVarint(8<<3 | proto.WireVarint) n += proto.SizeVarint(uint64(x.Lease)) case nil: default: @@ -1572,48 +1235,17 @@ type TxnRequest struct { // and the response will contain their respective responses in order. // If the comparisons fail, then the failure requests will be processed in order, // and the response will contain their respective responses in order. - Compare []*Compare `protobuf:"bytes,1,rep,name=compare,proto3" json:"compare,omitempty"` + Compare []*Compare `protobuf:"bytes,1,rep,name=compare" json:"compare,omitempty"` // success is a list of requests which will be applied when compare evaluates to true. - Success []*RequestOp `protobuf:"bytes,2,rep,name=success,proto3" json:"success,omitempty"` + Success []*RequestOp `protobuf:"bytes,2,rep,name=success" json:"success,omitempty"` // failure is a list of requests which will be applied when compare evaluates to false. 
- Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure,proto3" json:"failure,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TxnRequest) Reset() { *m = TxnRequest{} } -func (m *TxnRequest) String() string { return proto.CompactTextString(m) } -func (*TxnRequest) ProtoMessage() {} -func (*TxnRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{10} -} -func (m *TxnRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TxnRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TxnRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TxnRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_TxnRequest.Merge(m, src) -} -func (m *TxnRequest) XXX_Size() int { - return m.Size() -} -func (m *TxnRequest) XXX_DiscardUnknown() { - xxx_messageInfo_TxnRequest.DiscardUnknown(m) + Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure" json:"failure,omitempty"` } -var xxx_messageInfo_TxnRequest proto.InternalMessageInfo +func (m *TxnRequest) Reset() { *m = TxnRequest{} } +func (m *TxnRequest) String() string { return proto.CompactTextString(m) } +func (*TxnRequest) ProtoMessage() {} +func (*TxnRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{10} } func (m *TxnRequest) GetCompare() []*Compare { if m != nil { @@ -1637,49 +1269,18 @@ func (m *TxnRequest) GetFailure() []*RequestOp { } type TxnResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // succeeded is set to true if the compare evaluated to true or false otherwise. Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"` // responses is a list of responses corresponding to the results from applying // success if succeeded is true or failure if succeeded is false. 
- Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses,proto3" json:"responses,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TxnResponse) Reset() { *m = TxnResponse{} } -func (m *TxnResponse) String() string { return proto.CompactTextString(m) } -func (*TxnResponse) ProtoMessage() {} -func (*TxnResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{11} -} -func (m *TxnResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TxnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TxnResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TxnResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_TxnResponse.Merge(m, src) -} -func (m *TxnResponse) XXX_Size() int { - return m.Size() -} -func (m *TxnResponse) XXX_DiscardUnknown() { - xxx_messageInfo_TxnResponse.DiscardUnknown(m) + Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses" json:"responses,omitempty"` } -var xxx_messageInfo_TxnResponse proto.InternalMessageInfo +func (m *TxnResponse) Reset() { *m = TxnResponse{} } +func (m *TxnResponse) String() string { return proto.CompactTextString(m) } +func (*TxnResponse) ProtoMessage() {} +func (*TxnResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{11} } func (m *TxnResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1710,44 +1311,13 @@ type CompactionRequest struct { // physical is set so the RPC will wait until the compaction is physically // applied to the local database such that compacted entries are totally // removed from the backend database. 
- Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompactionRequest) Reset() { *m = CompactionRequest{} } -func (m *CompactionRequest) String() string { return proto.CompactTextString(m) } -func (*CompactionRequest) ProtoMessage() {} -func (*CompactionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{12} -} -func (m *CompactionRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CompactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CompactionRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CompactionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompactionRequest.Merge(m, src) -} -func (m *CompactionRequest) XXX_Size() int { - return m.Size() -} -func (m *CompactionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CompactionRequest.DiscardUnknown(m) + Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"` } -var xxx_messageInfo_CompactionRequest proto.InternalMessageInfo +func (m *CompactionRequest) Reset() { *m = CompactionRequest{} } +func (m *CompactionRequest) String() string { return proto.CompactTextString(m) } +func (*CompactionRequest) ProtoMessage() {} +func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} } func (m *CompactionRequest) GetRevision() int64 { if m != nil { @@ -1764,44 +1334,13 @@ func (m *CompactionRequest) GetPhysical() bool { } type CompactionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompactionResponse) Reset() { *m = CompactionResponse{} } -func (m *CompactionResponse) String() string { return proto.CompactTextString(m) } -func (*CompactionResponse) ProtoMessage() {} -func (*CompactionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{13} -} -func (m *CompactionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CompactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CompactionResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CompactionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompactionResponse.Merge(m, src) -} -func (m *CompactionResponse) XXX_Size() int { - return m.Size() -} -func (m *CompactionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CompactionResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_CompactionResponse proto.InternalMessageInfo +func (m *CompactionResponse) Reset() { *m = CompactionResponse{} } +func (m *CompactionResponse) String() string { return proto.CompactTextString(m) } +func (*CompactionResponse) ProtoMessage() {} +func (*CompactionResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{13} } func (m *CompactionResponse) GetHeader() *ResponseHeader { if m 
!= nil { @@ -1811,84 +1350,22 @@ func (m *CompactionResponse) GetHeader() *ResponseHeader { } type HashRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HashRequest) Reset() { *m = HashRequest{} } -func (m *HashRequest) String() string { return proto.CompactTextString(m) } -func (*HashRequest) ProtoMessage() {} -func (*HashRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{14} -} -func (m *HashRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HashRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HashRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_HashRequest.Merge(m, src) -} -func (m *HashRequest) XXX_Size() int { - return m.Size() -} -func (m *HashRequest) XXX_DiscardUnknown() { - xxx_messageInfo_HashRequest.DiscardUnknown(m) } -var xxx_messageInfo_HashRequest proto.InternalMessageInfo +func (m *HashRequest) Reset() { *m = HashRequest{} } +func (m *HashRequest) String() string { return proto.CompactTextString(m) } +func (*HashRequest) ProtoMessage() {} +func (*HashRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{14} } type HashKVRequest struct { // revision is the key-value store revision for the hash operation. - Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HashKVRequest) Reset() { *m = HashKVRequest{} } -func (m *HashKVRequest) String() string { return proto.CompactTextString(m) } -func (*HashKVRequest) ProtoMessage() {} -func (*HashKVRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{15} -} -func (m *HashKVRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HashKVRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HashKVRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HashKVRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_HashKVRequest.Merge(m, src) -} -func (m *HashKVRequest) XXX_Size() int { - return m.Size() -} -func (m *HashKVRequest) XXX_DiscardUnknown() { - xxx_messageInfo_HashKVRequest.DiscardUnknown(m) + Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"` } -var xxx_messageInfo_HashKVRequest proto.InternalMessageInfo +func (m *HashKVRequest) Reset() { *m = HashKVRequest{} } +func (m *HashKVRequest) String() string { return proto.CompactTextString(m) } +func (*HashKVRequest) ProtoMessage() {} +func (*HashKVRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{15} } func (m *HashKVRequest) GetRevision() int64 { if m != nil { @@ -1898,48 +1375,17 @@ func (m *HashKVRequest) GetRevision() int64 { } type HashKVResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // hash is the hash value computed from the responding member's 
MVCC keys up to a given revision. Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` // compact_revision is the compacted revision of key-value store when hash begins. - CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HashKVResponse) Reset() { *m = HashKVResponse{} } -func (m *HashKVResponse) String() string { return proto.CompactTextString(m) } -func (*HashKVResponse) ProtoMessage() {} -func (*HashKVResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{16} -} -func (m *HashKVResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HashKVResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HashKVResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HashKVResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_HashKVResponse.Merge(m, src) -} -func (m *HashKVResponse) XXX_Size() int { - return m.Size() -} -func (m *HashKVResponse) XXX_DiscardUnknown() { - xxx_messageInfo_HashKVResponse.DiscardUnknown(m) + CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` } -var xxx_messageInfo_HashKVResponse proto.InternalMessageInfo +func (m *HashKVResponse) Reset() { *m = HashKVResponse{} } +func (m *HashKVResponse) String() string { return proto.CompactTextString(m) } +func (*HashKVResponse) ProtoMessage() {} +func (*HashKVResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{16} } func (m *HashKVResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1963,46 +1409,15 @@ func (m *HashKVResponse) GetCompactRevision() int64 { } type HashResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // hash is the hash value computed from the responding member's KV's backend. 
- Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HashResponse) Reset() { *m = HashResponse{} } -func (m *HashResponse) String() string { return proto.CompactTextString(m) } -func (*HashResponse) ProtoMessage() {} -func (*HashResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{17} -} -func (m *HashResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HashResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HashResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_HashResponse.Merge(m, src) -} -func (m *HashResponse) XXX_Size() int { - return m.Size() -} -func (m *HashResponse) XXX_DiscardUnknown() { - xxx_messageInfo_HashResponse.DiscardUnknown(m) + Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"` } -var xxx_messageInfo_HashResponse proto.InternalMessageInfo +func (m *HashResponse) Reset() { *m = HashResponse{} } +func (m *HashResponse) String() string { return proto.CompactTextString(m) } +func (*HashResponse) ProtoMessage() {} +func (*HashResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{17} } func (m *HashResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2019,89 +1434,27 @@ func (m *HashResponse) GetHash() uint32 { } type SnapshotRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} } -func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*SnapshotRequest) ProtoMessage() {} -func (*SnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{18} -} -func (m *SnapshotRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotRequest.Merge(m, src) -} -func (m *SnapshotRequest) XXX_Size() int { - return m.Size() -} -func (m *SnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotRequest.DiscardUnknown(m) } -var xxx_messageInfo_SnapshotRequest proto.InternalMessageInfo +func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} } +func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*SnapshotRequest) ProtoMessage() {} +func (*SnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{18} } type SnapshotResponse struct { // header has the current key-value store information. The first header in the snapshot // stream indicates the point in time of the snapshot. 
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // remaining_bytes is the number of blob bytes to be sent after this message RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"` // blob contains the next chunk of the snapshot in the snapshot stream. - Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} } -func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*SnapshotResponse) ProtoMessage() {} -func (*SnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{19} -} -func (m *SnapshotResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotResponse.Merge(m, src) -} -func (m *SnapshotResponse) XXX_Size() int { - return m.Size() -} -func (m *SnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotResponse.DiscardUnknown(m) + Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"` } -var xxx_messageInfo_SnapshotResponse proto.InternalMessageInfo +func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} } +func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*SnapshotResponse) ProtoMessage() {} +func (*SnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} } func (m *SnapshotResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2131,44 +1484,13 @@ type WatchRequest struct { // *WatchRequest_CreateRequest // *WatchRequest_CancelRequest // *WatchRequest_ProgressRequest - RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchRequest) Reset() { *m = WatchRequest{} } -func (m *WatchRequest) String() string { return proto.CompactTextString(m) } -func (*WatchRequest) ProtoMessage() {} -func (*WatchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{20} -} -func (m *WatchRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WatchRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WatchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchRequest.Merge(m, src) -} -func (m *WatchRequest) XXX_Size() int { - return m.Size() -} -func (m *WatchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchRequest.DiscardUnknown(m) + RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"` } -var xxx_messageInfo_WatchRequest proto.InternalMessageInfo +func (m 
*WatchRequest) Reset() { *m = WatchRequest{} } +func (m *WatchRequest) String() string { return proto.CompactTextString(m) } +func (*WatchRequest) ProtoMessage() {} +func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} } type isWatchRequest_RequestUnion interface { isWatchRequest_RequestUnion() @@ -2177,13 +1499,13 @@ type isWatchRequest_RequestUnion interface { } type WatchRequest_CreateRequest struct { - CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,proto3,oneof"` + CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,oneof"` } type WatchRequest_CancelRequest struct { - CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3,oneof"` + CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,oneof"` } type WatchRequest_ProgressRequest struct { - ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,proto3,oneof"` + ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,oneof"` } func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {} @@ -2291,17 +1613,17 @@ func _WatchRequest_OneofSizer(msg proto.Message) (n int) { switch x := m.RequestUnion.(type) { case *WatchRequest_CreateRequest: s := proto.Size(x.CreateRequest) - n += 1 // tag and wire + n += proto.SizeVarint(1<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *WatchRequest_CancelRequest: s := proto.Size(x.CancelRequest) - n += 1 // tag and wire + n += proto.SizeVarint(2<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case *WatchRequest_ProgressRequest: s := proto.Size(x.ProgressRequest) - n += 1 // tag and wire + n += proto.SizeVarint(3<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s case nil: @@ -2328,7 +1650,7 @@ type WatchCreateRequest struct { // The etcd server may decide how often it will send notifications based on current load. ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"` // filters filter the events at server side before it sends back to the watcher. - Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,proto3,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"` + Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"` // If prev_kv is set, created watcher gets the previous KV before the event happens. // If the previous KV is already compacted, nothing will be returned. PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` @@ -2339,44 +1661,13 @@ type WatchCreateRequest struct { // use on the stream will cause an error to be returned. WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` // fragment enables splitting large revisions into multiple watch responses. 
- Fragment bool `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} } -func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) } -func (*WatchCreateRequest) ProtoMessage() {} -func (*WatchCreateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{21} -} -func (m *WatchCreateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WatchCreateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WatchCreateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchCreateRequest.Merge(m, src) -} -func (m *WatchCreateRequest) XXX_Size() int { - return m.Size() -} -func (m *WatchCreateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchCreateRequest.DiscardUnknown(m) + Fragment bool `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"` } -var xxx_messageInfo_WatchCreateRequest proto.InternalMessageInfo +func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} } +func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) } +func (*WatchCreateRequest) ProtoMessage() {} +func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{21} } func (m *WatchCreateRequest) GetKey() []byte { if m != nil { @@ -2436,44 +1727,13 @@ func (m *WatchCreateRequest) GetFragment() bool { type WatchCancelRequest struct { // watch_id is the watcher id to cancel so that no more events are transmitted. 
- WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} } -func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) } -func (*WatchCancelRequest) ProtoMessage() {} -func (*WatchCancelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{22} -} -func (m *WatchCancelRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchCancelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WatchCancelRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WatchCancelRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchCancelRequest.Merge(m, src) -} -func (m *WatchCancelRequest) XXX_Size() int { - return m.Size() -} -func (m *WatchCancelRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchCancelRequest.DiscardUnknown(m) + WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` } -var xxx_messageInfo_WatchCancelRequest proto.InternalMessageInfo +func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} } +func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) } +func (*WatchCancelRequest) ProtoMessage() {} +func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} } func (m *WatchCancelRequest) GetWatchId() int64 { if m != nil { @@ -2485,46 +1745,15 @@ func (m *WatchCancelRequest) GetWatchId() int64 { // Requests the a watch stream progress status be sent in the watch response stream as soon as // possible. 
type WatchProgressRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchProgressRequest) Reset() { *m = WatchProgressRequest{} } -func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) } -func (*WatchProgressRequest) ProtoMessage() {} -func (*WatchProgressRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{23} -} -func (m *WatchProgressRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchProgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WatchProgressRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WatchProgressRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchProgressRequest.Merge(m, src) -} -func (m *WatchProgressRequest) XXX_Size() int { - return m.Size() -} -func (m *WatchProgressRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WatchProgressRequest.DiscardUnknown(m) } -var xxx_messageInfo_WatchProgressRequest proto.InternalMessageInfo +func (m *WatchProgressRequest) Reset() { *m = WatchProgressRequest{} } +func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) } +func (*WatchProgressRequest) ProtoMessage() {} +func (*WatchProgressRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{23} } type WatchResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // watch_id is the ID of the watcher that corresponds to the response. WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` // created is set to true if the response is for a create watch request. @@ -2547,45 +1776,14 @@ type WatchResponse struct { // cancel_reason indicates the reason for canceling the watcher. CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"` // framgment is true if large watch response was split over multiple responses. 
- Fragment bool `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"` - Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WatchResponse) Reset() { *m = WatchResponse{} } -func (m *WatchResponse) String() string { return proto.CompactTextString(m) } -func (*WatchResponse) ProtoMessage() {} -func (*WatchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{24} -} -func (m *WatchResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WatchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchResponse.Merge(m, src) -} -func (m *WatchResponse) XXX_Size() int { - return m.Size() -} -func (m *WatchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WatchResponse.DiscardUnknown(m) + Fragment bool `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"` + Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` } -var xxx_messageInfo_WatchResponse proto.InternalMessageInfo +func (m *WatchResponse) Reset() { *m = WatchResponse{} } +func (m *WatchResponse) String() string { return proto.CompactTextString(m) } +func (*WatchResponse) ProtoMessage() {} +func (*WatchResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} } func (m *WatchResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2647,44 +1845,13 @@ type LeaseGrantRequest struct { // TTL is the advisory time-to-live in seconds. Expired lease will return -1. TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID. 
- ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} } -func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseGrantRequest) ProtoMessage() {} -func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{25} -} -func (m *LeaseGrantRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseGrantRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseGrantRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseGrantRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseGrantRequest.Merge(m, src) -} -func (m *LeaseGrantRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseGrantRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseGrantRequest.DiscardUnknown(m) + ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` } -var xxx_messageInfo_LeaseGrantRequest proto.InternalMessageInfo +func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} } +func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantRequest) ProtoMessage() {} +func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{25} } func (m *LeaseGrantRequest) GetTTL() int64 { if m != nil { @@ -2701,49 +1868,18 @@ func (m *LeaseGrantRequest) GetID() int64 { } type LeaseGrantResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID for the granted lease. ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` // TTL is the server chosen lease time-to-live in seconds. 
- TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` - Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} } -func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseGrantResponse) ProtoMessage() {} -func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{26} -} -func (m *LeaseGrantResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseGrantResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseGrantResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseGrantResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseGrantResponse.Merge(m, src) -} -func (m *LeaseGrantResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseGrantResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseGrantResponse.DiscardUnknown(m) + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` } -var xxx_messageInfo_LeaseGrantResponse proto.InternalMessageInfo +func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} } +func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseGrantResponse) ProtoMessage() {} +func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} } func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2775,93 +1911,89 @@ func (m *LeaseGrantResponse) GetError() string { type LeaseRevokeRequest struct { // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. 
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} } -func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseRevokeRequest) ProtoMessage() {} -func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{27} -} -func (m *LeaseRevokeRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseRevokeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseRevokeRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} } +func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeRequest) ProtoMessage() {} +func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{27} } + +func (m *LeaseRevokeRequest) GetID() int64 { + if m != nil { + return m.ID } + return 0 } -func (m *LeaseRevokeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseRevokeRequest.Merge(m, src) + +type LeaseRevokeResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -func (m *LeaseRevokeRequest) XXX_Size() int { - return m.Size() + +func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} } +func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseRevokeResponse) ProtoMessage() {} +func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } + +func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil } -func (m *LeaseRevokeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseRevokeRequest.DiscardUnknown(m) + +type LeaseCheckpoint struct { + // ID is the lease ID to checkpoint. + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + // Remaining_TTL is the remaining time until expiry of the lease. 
+ Remaining_TTL int64 `protobuf:"varint,2,opt,name=remaining_TTL,json=remainingTTL,proto3" json:"remaining_TTL,omitempty"` } -var xxx_messageInfo_LeaseRevokeRequest proto.InternalMessageInfo +func (m *LeaseCheckpoint) Reset() { *m = LeaseCheckpoint{} } +func (m *LeaseCheckpoint) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpoint) ProtoMessage() {} +func (*LeaseCheckpoint) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{29} } -func (m *LeaseRevokeRequest) GetID() int64 { +func (m *LeaseCheckpoint) GetID() int64 { if m != nil { return m.ID } return 0 } -type LeaseRevokeResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} } -func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseRevokeResponse) ProtoMessage() {} -func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{28} -} -func (m *LeaseRevokeResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseRevokeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseRevokeResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *LeaseCheckpoint) GetRemaining_TTL() int64 { + if m != nil { + return m.Remaining_TTL } + return 0 } -func (m *LeaseRevokeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseRevokeResponse.Merge(m, src) + +type LeaseCheckpointRequest struct { + Checkpoints []*LeaseCheckpoint `protobuf:"bytes,1,rep,name=checkpoints" json:"checkpoints,omitempty"` } -func (m *LeaseRevokeResponse) XXX_Size() int { - return m.Size() + +func (m *LeaseCheckpointRequest) Reset() { *m = LeaseCheckpointRequest{} } +func (m *LeaseCheckpointRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpointRequest) ProtoMessage() {} +func (*LeaseCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } + +func (m *LeaseCheckpointRequest) GetCheckpoints() []*LeaseCheckpoint { + if m != nil { + return m.Checkpoints + } + return nil } -func (m *LeaseRevokeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseRevokeResponse.DiscardUnknown(m) + +type LeaseCheckpointResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_LeaseRevokeResponse proto.InternalMessageInfo +func (m *LeaseCheckpointResponse) Reset() { *m = LeaseCheckpointResponse{} } +func (m *LeaseCheckpointResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseCheckpointResponse) ProtoMessage() {} +func (*LeaseCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } -func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { +func (m *LeaseCheckpointResponse) GetHeader() *ResponseHeader { if m != nil { return m.Header } @@ -2870,44 +2002,13 @@ func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader { type LeaseKeepAliveRequest struct { // ID is the lease ID for the lease to keep alive. 
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} } -func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseKeepAliveRequest) ProtoMessage() {} -func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{29} -} -func (m *LeaseKeepAliveRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseKeepAliveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseKeepAliveRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseKeepAliveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseKeepAliveRequest.Merge(m, src) -} -func (m *LeaseKeepAliveRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseKeepAliveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseKeepAliveRequest.DiscardUnknown(m) + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` } -var xxx_messageInfo_LeaseKeepAliveRequest proto.InternalMessageInfo +func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} } +func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveRequest) ProtoMessage() {} +func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{32} } func (m *LeaseKeepAliveRequest) GetID() int64 { if m != nil { @@ -2917,48 +2018,17 @@ func (m *LeaseKeepAliveRequest) GetID() int64 { } type LeaseKeepAliveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` // TTL is the new time-to-live for the lease. 
- TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} } -func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseKeepAliveResponse) ProtoMessage() {} -func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{30} -} -func (m *LeaseKeepAliveResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseKeepAliveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseKeepAliveResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseKeepAliveResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseKeepAliveResponse.Merge(m, src) -} -func (m *LeaseKeepAliveResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseKeepAliveResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseKeepAliveResponse.DiscardUnknown(m) + TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"` } -var xxx_messageInfo_LeaseKeepAliveResponse proto.InternalMessageInfo +func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} } +func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseKeepAliveResponse) ProtoMessage() {} +func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2985,44 +2055,13 @@ type LeaseTimeToLiveRequest struct { // ID is the lease ID for the lease. ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` // keys is true to query all the keys attached to this lease. 
- Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} } -func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseTimeToLiveRequest) ProtoMessage() {} -func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{31} -} -func (m *LeaseTimeToLiveRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseTimeToLiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseTimeToLiveRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseTimeToLiveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseTimeToLiveRequest.Merge(m, src) -} -func (m *LeaseTimeToLiveRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseTimeToLiveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseTimeToLiveRequest.DiscardUnknown(m) + Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"` } -var xxx_messageInfo_LeaseTimeToLiveRequest proto.InternalMessageInfo +func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} } +func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveRequest) ProtoMessage() {} +func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{34} } func (m *LeaseTimeToLiveRequest) GetID() int64 { if m != nil { @@ -3039,7 +2078,7 @@ func (m *LeaseTimeToLiveRequest) GetKeys() bool { } type LeaseTimeToLiveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"` // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. @@ -3047,44 +2086,13 @@ type LeaseTimeToLiveResponse struct { // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"` // Keys is the list of keys attached to this lease. 
- Keys [][]byte `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } -func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseTimeToLiveResponse) ProtoMessage() {} -func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{32} -} -func (m *LeaseTimeToLiveResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseTimeToLiveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseTimeToLiveResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseTimeToLiveResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseTimeToLiveResponse.Merge(m, src) -} -func (m *LeaseTimeToLiveResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseTimeToLiveResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseTimeToLiveResponse.DiscardUnknown(m) + Keys [][]byte `protobuf:"bytes,5,rep,name=keys" json:"keys,omitempty"` } -var xxx_messageInfo_LeaseTimeToLiveResponse proto.InternalMessageInfo +func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } +func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveResponse) ProtoMessage() {} +func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3122,83 +2130,21 @@ func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { } type LeaseLeasesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} } -func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseLeasesRequest) ProtoMessage() {} -func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{33} -} -func (m *LeaseLeasesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseLeasesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseLeasesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseLeasesRequest.Merge(m, src) -} -func (m *LeaseLeasesRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseLeasesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseLeasesRequest.DiscardUnknown(m) } -var xxx_messageInfo_LeaseLeasesRequest proto.InternalMessageInfo +func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} } +func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseLeasesRequest) ProtoMessage() {} +func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{36} } type LeaseStatus struct { - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} 
`json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseStatus) Reset() { *m = LeaseStatus{} } -func (m *LeaseStatus) String() string { return proto.CompactTextString(m) } -func (*LeaseStatus) ProtoMessage() {} -func (*LeaseStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{34} -} -func (m *LeaseStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseStatus.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseStatus.Merge(m, src) -} -func (m *LeaseStatus) XXX_Size() int { - return m.Size() -} -func (m *LeaseStatus) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseStatus.DiscardUnknown(m) + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` } -var xxx_messageInfo_LeaseStatus proto.InternalMessageInfo +func (m *LeaseStatus) Reset() { *m = LeaseStatus{} } +func (m *LeaseStatus) String() string { return proto.CompactTextString(m) } +func (*LeaseStatus) ProtoMessage() {} +func (*LeaseStatus) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{37} } func (m *LeaseStatus) GetID() int64 { if m != nil { @@ -3208,45 +2154,14 @@ func (m *LeaseStatus) GetID() int64 { } type LeaseLeasesResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases,proto3" json:"leases,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} } -func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseLeasesResponse) ProtoMessage() {} -func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{35} -} -func (m *LeaseLeasesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseLeasesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseLeasesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseLeasesResponse.Merge(m, src) -} -func (m *LeaseLeasesResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseLeasesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseLeasesResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases" json:"leases,omitempty"` } -var xxx_messageInfo_LeaseLeasesResponse proto.InternalMessageInfo +func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} } +func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseLeasesResponse) ProtoMessage() {} +func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{38} } func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3268,46 +2183,17 @@ type Member struct { // name is the 
human-readable name of the member. If the member is not started, the name will be an empty string. Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // peerURLs is the list of URLs the member exposes to the cluster for communication. - PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` + PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs" json:"peerURLs,omitempty"` // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. - ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs,proto3" json:"clientURLs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Member) Reset() { *m = Member{} } -func (m *Member) String() string { return proto.CompactTextString(m) } -func (*Member) ProtoMessage() {} -func (*Member) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{36} -} -func (m *Member) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Member.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Member) XXX_Merge(src proto.Message) { - xxx_messageInfo_Member.Merge(m, src) -} -func (m *Member) XXX_Size() int { - return m.Size() -} -func (m *Member) XXX_DiscardUnknown() { - xxx_messageInfo_Member.DiscardUnknown(m) + ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs" json:"clientURLs,omitempty"` + // isLearner indicates if the member is raft learner. + IsLearner bool `protobuf:"varint,5,opt,name=isLearner,proto3" json:"isLearner,omitempty"` } -var xxx_messageInfo_Member proto.InternalMessageInfo +func (m *Member) Reset() { *m = Member{} } +func (m *Member) String() string { return proto.CompactTextString(m) } +func (*Member) ProtoMessage() {} +func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{39} } func (m *Member) GetID() uint64 { if m != nil { @@ -3337,46 +2223,24 @@ func (m *Member) GetClientURLs() []string { return nil } -type MemberAddRequest struct { - // peerURLs is the list of URLs the added member will use to communicate with the cluster. 
- PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} } -func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) } -func (*MemberAddRequest) ProtoMessage() {} -func (*MemberAddRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{37} -} -func (m *MemberAddRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberAddRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *Member) GetIsLearner() bool { + if m != nil { + return m.IsLearner } + return false } -func (m *MemberAddRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberAddRequest.Merge(m, src) -} -func (m *MemberAddRequest) XXX_Size() int { - return m.Size() -} -func (m *MemberAddRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MemberAddRequest.DiscardUnknown(m) + +type MemberAddRequest struct { + // peerURLs is the list of URLs the added member will use to communicate with the cluster. + PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"` + // isLearner indicates if the added member is raft learner. + IsLearner bool `protobuf:"varint,2,opt,name=isLearner,proto3" json:"isLearner,omitempty"` } -var xxx_messageInfo_MemberAddRequest proto.InternalMessageInfo +func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} } +func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) } +func (*MemberAddRequest) ProtoMessage() {} +func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{40} } func (m *MemberAddRequest) GetPeerURLs() []string { if m != nil { @@ -3385,49 +2249,25 @@ func (m *MemberAddRequest) GetPeerURLs() []string { return nil } +func (m *MemberAddRequest) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false +} + type MemberAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // member is the member information for the added member. - Member *Member `protobuf:"bytes,2,opt,name=member,proto3" json:"member,omitempty"` + Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"` // members is a list of all members after adding the new member. 
- Members []*Member `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } -func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) } -func (*MemberAddResponse) ProtoMessage() {} -func (*MemberAddResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{38} -} -func (m *MemberAddResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberAddResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberAddResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberAddResponse.Merge(m, src) -} -func (m *MemberAddResponse) XXX_Size() int { - return m.Size() -} -func (m *MemberAddResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MemberAddResponse.DiscardUnknown(m) + Members []*Member `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"` } -var xxx_messageInfo_MemberAddResponse proto.InternalMessageInfo +func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } +func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) } +func (*MemberAddResponse) ProtoMessage() {} +func (*MemberAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } func (m *MemberAddResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3452,44 +2292,13 @@ func (m *MemberAddResponse) GetMembers() []*Member { type MemberRemoveRequest struct { // ID is the member ID of the member to remove. 
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} } -func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) } -func (*MemberRemoveRequest) ProtoMessage() {} -func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{39} -} -func (m *MemberRemoveRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberRemoveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberRemoveRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberRemoveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberRemoveRequest.Merge(m, src) -} -func (m *MemberRemoveRequest) XXX_Size() int { - return m.Size() -} -func (m *MemberRemoveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MemberRemoveRequest.DiscardUnknown(m) + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` } -var xxx_messageInfo_MemberRemoveRequest proto.InternalMessageInfo +func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} } +func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveRequest) ProtoMessage() {} +func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } func (m *MemberRemoveRequest) GetID() uint64 { if m != nil { @@ -3499,46 +2308,15 @@ func (m *MemberRemoveRequest) GetID() uint64 { } type MemberRemoveResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // members is a list of all members after removing the member. 
- Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } -func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) } -func (*MemberRemoveResponse) ProtoMessage() {} -func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{40} -} -func (m *MemberRemoveResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberRemoveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberRemoveResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberRemoveResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberRemoveResponse.Merge(m, src) -} -func (m *MemberRemoveResponse) XXX_Size() int { - return m.Size() -} -func (m *MemberRemoveResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MemberRemoveResponse.DiscardUnknown(m) + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } -var xxx_messageInfo_MemberRemoveResponse proto.InternalMessageInfo +func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } +func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) } +func (*MemberRemoveResponse) ProtoMessage() {} +func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} } func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3558,44 +2336,13 @@ type MemberUpdateRequest struct { // ID is the member ID of the member to update. ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` // peerURLs is the new list of URLs the member will use to communicate with the cluster. 
- PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} } -func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) } -func (*MemberUpdateRequest) ProtoMessage() {} -func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{41} -} -func (m *MemberUpdateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberUpdateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberUpdateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberUpdateRequest.Merge(m, src) -} -func (m *MemberUpdateRequest) XXX_Size() int { - return m.Size() -} -func (m *MemberUpdateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MemberUpdateRequest.DiscardUnknown(m) + PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs" json:"peerURLs,omitempty"` } -var xxx_messageInfo_MemberUpdateRequest proto.InternalMessageInfo +func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} } +func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateRequest) ProtoMessage() {} +func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} } func (m *MemberUpdateRequest) GetID() uint64 { if m != nil { @@ -3612,46 +2359,15 @@ func (m *MemberUpdateRequest) GetPeerURLs() []string { } type MemberUpdateResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // members is a list of all members after updating the member. 
- Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } -func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) } -func (*MemberUpdateResponse) ProtoMessage() {} -func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{42} -} -func (m *MemberUpdateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberUpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberUpdateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberUpdateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberUpdateResponse.Merge(m, src) -} -func (m *MemberUpdateResponse) XXX_Size() int { - return m.Size() -} -func (m *MemberUpdateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MemberUpdateResponse.DiscardUnknown(m) + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } -var xxx_messageInfo_MemberUpdateResponse proto.InternalMessageInfo +func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } +func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) } +func (*MemberUpdateResponse) ProtoMessage() {} +func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} } func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3668,85 +2384,23 @@ func (m *MemberUpdateResponse) GetMembers() []*Member { } type MemberListRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberListRequest) Reset() { *m = MemberListRequest{} } -func (m *MemberListRequest) String() string { return proto.CompactTextString(m) } -func (*MemberListRequest) ProtoMessage() {} -func (*MemberListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{43} -} -func (m *MemberListRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberListRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberListRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberListRequest.Merge(m, src) -} -func (m *MemberListRequest) XXX_Size() int { - return m.Size() -} -func (m *MemberListRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MemberListRequest.DiscardUnknown(m) } -var xxx_messageInfo_MemberListRequest proto.InternalMessageInfo +func (m *MemberListRequest) Reset() { *m = MemberListRequest{} } +func (m *MemberListRequest) String() string { return proto.CompactTextString(m) } +func (*MemberListRequest) ProtoMessage() {} +func (*MemberListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} } type MemberListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" 
json:"header,omitempty"` // members is a list of all members associated with the cluster. - Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemberListResponse) Reset() { *m = MemberListResponse{} } -func (m *MemberListResponse) String() string { return proto.CompactTextString(m) } -func (*MemberListResponse) ProtoMessage() {} -func (*MemberListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{44} -} -func (m *MemberListResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemberListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemberListResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemberListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemberListResponse.Merge(m, src) -} -func (m *MemberListResponse) XXX_Size() int { - return m.Size() -} -func (m *MemberListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MemberListResponse.DiscardUnknown(m) + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } -var xxx_messageInfo_MemberListResponse proto.InternalMessageInfo +func (m *MemberListResponse) Reset() { *m = MemberListResponse{} } +func (m *MemberListResponse) String() string { return proto.CompactTextString(m) } +func (*MemberListResponse) ProtoMessage() {} +func (*MemberListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} } func (m *MemberListResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3762,84 +2416,64 @@ func (m *MemberListResponse) GetMembers() []*Member { return nil } -type DefragmentRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} } -func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) } -func (*DefragmentRequest) ProtoMessage() {} -func (*DefragmentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{45} -} -func (m *DefragmentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DefragmentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DefragmentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DefragmentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DefragmentRequest.Merge(m, src) +type MemberPromoteRequest struct { + // ID is the member ID of the member to promote. 
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` } -func (m *DefragmentRequest) XXX_Size() int { - return m.Size() + +func (m *MemberPromoteRequest) Reset() { *m = MemberPromoteRequest{} } +func (m *MemberPromoteRequest) String() string { return proto.CompactTextString(m) } +func (*MemberPromoteRequest) ProtoMessage() {} +func (*MemberPromoteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } + +func (m *MemberPromoteRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 } -func (m *DefragmentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DefragmentRequest.DiscardUnknown(m) + +type MemberPromoteResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after promoting the member. + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } -var xxx_messageInfo_DefragmentRequest proto.InternalMessageInfo +func (m *MemberPromoteResponse) Reset() { *m = MemberPromoteResponse{} } +func (m *MemberPromoteResponse) String() string { return proto.CompactTextString(m) } +func (*MemberPromoteResponse) ProtoMessage() {} +func (*MemberPromoteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } -type DefragmentResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} } -func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) } -func (*DefragmentResponse) ProtoMessage() {} -func (*DefragmentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{46} -} -func (m *DefragmentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DefragmentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DefragmentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *MemberPromoteResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header } + return nil } -func (m *DefragmentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DefragmentResponse.Merge(m, src) + +func (m *MemberPromoteResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil } -func (m *DefragmentResponse) XXX_Size() int { - return m.Size() + +type DefragmentRequest struct { } -func (m *DefragmentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DefragmentResponse.DiscardUnknown(m) + +func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} } +func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) } +func (*DefragmentRequest) ProtoMessage() {} +func (*DefragmentRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } + +type DefragmentResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_DefragmentResponse proto.InternalMessageInfo +func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} } +func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) } +func (*DefragmentResponse) ProtoMessage() {} +func (*DefragmentResponse) Descriptor() ([]byte, []int) { return 
fileDescriptorRpc, []int{51} } func (m *DefragmentResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3850,44 +2484,13 @@ func (m *DefragmentResponse) GetHeader() *ResponseHeader { type MoveLeaderRequest struct { // targetID is the node ID for the new leader. - TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} } -func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) } -func (*MoveLeaderRequest) ProtoMessage() {} -func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{47} -} -func (m *MoveLeaderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MoveLeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MoveLeaderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MoveLeaderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MoveLeaderRequest.Merge(m, src) -} -func (m *MoveLeaderRequest) XXX_Size() int { - return m.Size() -} -func (m *MoveLeaderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MoveLeaderRequest.DiscardUnknown(m) + TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"` } -var xxx_messageInfo_MoveLeaderRequest proto.InternalMessageInfo +func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} } +func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderRequest) ProtoMessage() {} +func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} } func (m *MoveLeaderRequest) GetTargetID() uint64 { if m != nil { @@ -3897,44 +2500,13 @@ func (m *MoveLeaderRequest) GetTargetID() uint64 { } type MoveLeaderResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} } -func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) } -func (*MoveLeaderResponse) ProtoMessage() {} -func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{48} -} -func (m *MoveLeaderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MoveLeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MoveLeaderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MoveLeaderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MoveLeaderResponse.Merge(m, src) -} -func (m *MoveLeaderResponse) XXX_Size() int { - return m.Size() -} -func (m *MoveLeaderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MoveLeaderResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_MoveLeaderResponse proto.InternalMessageInfo +func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} } +func (m 
*MoveLeaderResponse) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderResponse) ProtoMessage() {} +func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } func (m *MoveLeaderResponse) GetHeader() *ResponseHeader { if m != nil { @@ -3952,44 +2524,13 @@ type AlarmRequest struct { // alarm request covers all members. MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"` // alarm is the type of alarm to consider for this request. - Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } -func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } -func (*AlarmRequest) ProtoMessage() {} -func (*AlarmRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{49} -} -func (m *AlarmRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AlarmRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AlarmRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AlarmRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AlarmRequest.Merge(m, src) -} -func (m *AlarmRequest) XXX_Size() int { - return m.Size() -} -func (m *AlarmRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AlarmRequest.DiscardUnknown(m) + Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` } -var xxx_messageInfo_AlarmRequest proto.InternalMessageInfo +func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } +func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } +func (*AlarmRequest) ProtoMessage() {} +func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { if m != nil { @@ -4016,44 +2557,13 @@ type AlarmMember struct { // memberID is the ID of the member associated with the raised alarm. MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` // alarm is the type of alarm which has been raised. 
- Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AlarmMember) Reset() { *m = AlarmMember{} } -func (m *AlarmMember) String() string { return proto.CompactTextString(m) } -func (*AlarmMember) ProtoMessage() {} -func (*AlarmMember) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{50} -} -func (m *AlarmMember) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AlarmMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AlarmMember.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AlarmMember) XXX_Merge(src proto.Message) { - xxx_messageInfo_AlarmMember.Merge(m, src) -} -func (m *AlarmMember) XXX_Size() int { - return m.Size() -} -func (m *AlarmMember) XXX_DiscardUnknown() { - xxx_messageInfo_AlarmMember.DiscardUnknown(m) + Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"` } -var xxx_messageInfo_AlarmMember proto.InternalMessageInfo +func (m *AlarmMember) Reset() { *m = AlarmMember{} } +func (m *AlarmMember) String() string { return proto.CompactTextString(m) } +func (*AlarmMember) ProtoMessage() {} +func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } func (m *AlarmMember) GetMemberID() uint64 { if m != nil { @@ -4070,46 +2580,15 @@ func (m *AlarmMember) GetAlarm() AlarmType { } type AlarmResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // alarms is a list of alarms associated with the alarm request. 
- Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms,proto3" json:"alarms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } -func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } -func (*AlarmResponse) ProtoMessage() {} -func (*AlarmResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{51} -} -func (m *AlarmResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AlarmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AlarmResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AlarmResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AlarmResponse.Merge(m, src) -} -func (m *AlarmResponse) XXX_Size() int { - return m.Size() -} -func (m *AlarmResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AlarmResponse.DiscardUnknown(m) + Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms" json:"alarms,omitempty"` } -var xxx_messageInfo_AlarmResponse proto.InternalMessageInfo +func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } +func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } +func (*AlarmResponse) ProtoMessage() {} +func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } func (m *AlarmResponse) GetHeader() *ResponseHeader { if m != nil { @@ -4126,93 +2605,39 @@ func (m *AlarmResponse) GetAlarms() []*AlarmMember { } type StatusRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatusRequest) Reset() { *m = StatusRequest{} } -func (m *StatusRequest) String() string { return proto.CompactTextString(m) } -func (*StatusRequest) ProtoMessage() {} -func (*StatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{52} -} -func (m *StatusRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusRequest.Merge(m, src) -} -func (m *StatusRequest) XXX_Size() int { - return m.Size() -} -func (m *StatusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StatusRequest.DiscardUnknown(m) } -var xxx_messageInfo_StatusRequest proto.InternalMessageInfo +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} } type StatusResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // version is the cluster protocol version used by the responding member. 
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - // dbSize is the size of the backend database, in bytes, of the responding member. + // dbSize is the size of the backend database physically allocated, in bytes, of the responding member. DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"` // leader is the member ID which the responding member believes is the current leader. Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"` - // raftIndex is the current raft index of the responding member. + // raftIndex is the current raft committed index of the responding member. RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"` // raftTerm is the current raft term of the responding member. - RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatusResponse) Reset() { *m = StatusResponse{} } -func (m *StatusResponse) String() string { return proto.CompactTextString(m) } -func (*StatusResponse) ProtoMessage() {} -func (*StatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{53} -} -func (m *StatusResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatusResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusResponse.Merge(m, src) -} -func (m *StatusResponse) XXX_Size() int { - return m.Size() -} -func (m *StatusResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StatusResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StatusResponse proto.InternalMessageInfo + RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"` + // raftAppliedIndex is the current raft applied index of the responding member. + RaftAppliedIndex uint64 `protobuf:"varint,7,opt,name=raftAppliedIndex,proto3" json:"raftAppliedIndex,omitempty"` + // errors contains alarm/health information and status. + Errors []string `protobuf:"bytes,8,rep,name=errors" json:"errors,omitempty"` + // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. + DbSizeInUse int64 `protobuf:"varint,9,opt,name=dbSizeInUse,proto3" json:"dbSizeInUse,omitempty"` + // isLearner indicates if the member is raft learner. 
+ IsLearner bool `protobuf:"varint,10,opt,name=isLearner,proto3" json:"isLearner,omitempty"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (m *StatusResponse) String() string { return proto.CompactTextString(m) } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} } func (m *StatusResponse) GetHeader() *ResponseHeader { if m != nil { @@ -4256,124 +2681,59 @@ func (m *StatusResponse) GetRaftTerm() uint64 { return 0 } -type AuthEnableRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } -func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } -func (*AuthEnableRequest) ProtoMessage() {} -func (*AuthEnableRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{54} -} -func (m *AuthEnableRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthEnableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthEnableRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *StatusResponse) GetRaftAppliedIndex() uint64 { + if m != nil { + return m.RaftAppliedIndex } -} -func (m *AuthEnableRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthEnableRequest.Merge(m, src) -} -func (m *AuthEnableRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthEnableRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthEnableRequest.DiscardUnknown(m) + return 0 } -var xxx_messageInfo_AuthEnableRequest proto.InternalMessageInfo +func (m *StatusResponse) GetErrors() []string { + if m != nil { + return m.Errors + } + return nil +} -type AuthDisableRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } -func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } -func (*AuthDisableRequest) ProtoMessage() {} -func (*AuthDisableRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{55} -} -func (m *AuthDisableRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthDisableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthDisableRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *StatusResponse) GetDbSizeInUse() int64 { + if m != nil { + return m.DbSizeInUse } + return 0 } -func (m *AuthDisableRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthDisableRequest.Merge(m, src) + +func (m *StatusResponse) GetIsLearner() bool { + if m != nil { + return m.IsLearner + } + return false } -func (m *AuthDisableRequest) XXX_Size() int { - return m.Size() + +type AuthEnableRequest struct { } -func (m *AuthDisableRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthDisableRequest.DiscardUnknown(m) + +func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } +func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } +func (*AuthEnableRequest) ProtoMessage() {} +func 
(*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} } + +type AuthDisableRequest struct { } -var xxx_messageInfo_AuthDisableRequest proto.InternalMessageInfo +func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } +func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } +func (*AuthDisableRequest) ProtoMessage() {} +func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} } type AuthenticateRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} } -func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) } -func (*AuthenticateRequest) ProtoMessage() {} -func (*AuthenticateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{56} -} -func (m *AuthenticateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthenticateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthenticateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthenticateRequest.Merge(m, src) -} -func (m *AuthenticateRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthenticateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthenticateRequest.DiscardUnknown(m) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` } -var xxx_messageInfo_AuthenticateRequest proto.InternalMessageInfo +func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} } +func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) } +func (*AuthenticateRequest) ProtoMessage() {} +func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} } func (m *AuthenticateRequest) GetName() string { if m != nil { @@ -4390,45 +2750,15 @@ func (m *AuthenticateRequest) GetPassword() string { } type AuthUserAddRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } -func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserAddRequest) ProtoMessage() {} -func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{57} -} -func (m *AuthUserAddRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserAddRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func 
(m *AuthUserAddRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserAddRequest.Merge(m, src) -} -func (m *AuthUserAddRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserAddRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserAddRequest.DiscardUnknown(m) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Options *authpb.UserAddOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` } -var xxx_messageInfo_AuthUserAddRequest proto.InternalMessageInfo +func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } +func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserAddRequest) ProtoMessage() {} +func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{62} } func (m *AuthUserAddRequest) GetName() string { if m != nil { @@ -4444,45 +2774,21 @@ func (m *AuthUserAddRequest) GetPassword() string { return "" } -type AuthUserGetRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} } -func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserGetRequest) ProtoMessage() {} -func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{58} -} -func (m *AuthUserGetRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserGetRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *AuthUserAddRequest) GetOptions() *authpb.UserAddOptions { + if m != nil { + return m.Options } + return nil } -func (m *AuthUserGetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserGetRequest.Merge(m, src) -} -func (m *AuthUserGetRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserGetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserGetRequest.DiscardUnknown(m) + +type AuthUserGetRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -var xxx_messageInfo_AuthUserGetRequest proto.InternalMessageInfo +func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} } +func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserGetRequest) ProtoMessage() {} +func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{63} } func (m *AuthUserGetRequest) GetName() string { if m != nil { @@ -4493,44 +2799,13 @@ func (m *AuthUserGetRequest) GetName() string { type AuthUserDeleteRequest struct { // name is the name of the user to delete. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } -func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserDeleteRequest) ProtoMessage() {} -func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{59} -} -func (m *AuthUserDeleteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserDeleteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserDeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserDeleteRequest.Merge(m, src) -} -func (m *AuthUserDeleteRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserDeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserDeleteRequest.DiscardUnknown(m) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -var xxx_messageInfo_AuthUserDeleteRequest proto.InternalMessageInfo +func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } +func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserDeleteRequest) ProtoMessage() {} +func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} } func (m *AuthUserDeleteRequest) GetName() string { if m != nil { @@ -4543,45 +2818,16 @@ type AuthUserChangePasswordRequest struct { // name is the name of the user whose password is being changed. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // password is the new password for the user. 
- Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` } func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} } func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserChangePasswordRequest) ProtoMessage() {} func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{60} -} -func (m *AuthUserChangePasswordRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserChangePasswordRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserChangePasswordRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserChangePasswordRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserChangePasswordRequest.Merge(m, src) -} -func (m *AuthUserChangePasswordRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserChangePasswordRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserChangePasswordRequest.DiscardUnknown(m) + return fileDescriptorRpc, []int{65} } -var xxx_messageInfo_AuthUserChangePasswordRequest proto.InternalMessageInfo - func (m *AuthUserChangePasswordRequest) GetName() string { if m != nil { return m.Name @@ -4600,44 +2846,13 @@ type AuthUserGrantRoleRequest struct { // user is the name of the user which should be granted a given role. User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` // role is the name of the role to grant to the user. 
- Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } -func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserGrantRoleRequest) ProtoMessage() {} -func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{61} -} -func (m *AuthUserGrantRoleRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserGrantRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserGrantRoleRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserGrantRoleRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserGrantRoleRequest.Merge(m, src) -} -func (m *AuthUserGrantRoleRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserGrantRoleRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserGrantRoleRequest.DiscardUnknown(m) + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` } -var xxx_messageInfo_AuthUserGrantRoleRequest proto.InternalMessageInfo +func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } +func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserGrantRoleRequest) ProtoMessage() {} +func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{66} } func (m *AuthUserGrantRoleRequest) GetUser() string { if m != nil { @@ -4654,45 +2869,14 @@ func (m *AuthUserGrantRoleRequest) GetRole() string { } type AuthUserRevokeRoleRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } -func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserRevokeRoleRequest) ProtoMessage() {} -func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{62} -} -func (m *AuthUserRevokeRoleRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserRevokeRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserRevokeRoleRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserRevokeRoleRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserRevokeRoleRequest.Merge(m, src) -} -func (m *AuthUserRevokeRoleRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserRevokeRoleRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserRevokeRoleRequest.DiscardUnknown(m) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` } -var xxx_messageInfo_AuthUserRevokeRoleRequest proto.InternalMessageInfo 
+func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } +func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserRevokeRoleRequest) ProtoMessage() {} +func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{67} } func (m *AuthUserRevokeRoleRequest) GetName() string { if m != nil { @@ -4710,44 +2894,13 @@ func (m *AuthUserRevokeRoleRequest) GetRole() string { type AuthRoleAddRequest struct { // name is the name of the role to add to the authentication system. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } -func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleAddRequest) ProtoMessage() {} -func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{63} -} -func (m *AuthRoleAddRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleAddRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleAddRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleAddRequest.Merge(m, src) -} -func (m *AuthRoleAddRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleAddRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleAddRequest.DiscardUnknown(m) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -var xxx_messageInfo_AuthRoleAddRequest proto.InternalMessageInfo +func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } +func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleAddRequest) ProtoMessage() {} +func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{68} } func (m *AuthRoleAddRequest) GetName() string { if m != nil { @@ -4757,44 +2910,13 @@ func (m *AuthRoleAddRequest) GetName() string { } type AuthRoleGetRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } -func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGetRequest) ProtoMessage() {} -func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{64} -} -func (m *AuthRoleGetRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleGetRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleGetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleGetRequest.Merge(m, src) -} -func (m *AuthRoleGetRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleGetRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_AuthRoleGetRequest.DiscardUnknown(m) + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` } -var xxx_messageInfo_AuthRoleGetRequest proto.InternalMessageInfo +func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } +func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGetRequest) ProtoMessage() {} +func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} } func (m *AuthRoleGetRequest) GetRole() string { if m != nil { @@ -4804,122 +2926,29 @@ func (m *AuthRoleGetRequest) GetRole() string { } type AuthUserListRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } -func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } -func (*AuthUserListRequest) ProtoMessage() {} -func (*AuthUserListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{65} -} -func (m *AuthUserListRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserListRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserListRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserListRequest.Merge(m, src) -} -func (m *AuthUserListRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthUserListRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserListRequest.DiscardUnknown(m) } -var xxx_messageInfo_AuthUserListRequest proto.InternalMessageInfo +func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } +func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } +func (*AuthUserListRequest) ProtoMessage() {} +func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{70} } type AuthRoleListRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } -func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleListRequest) ProtoMessage() {} -func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{66} -} -func (m *AuthRoleListRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleListRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleListRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleListRequest.Merge(m, src) -} -func (m *AuthRoleListRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleListRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleListRequest.DiscardUnknown(m) } -var xxx_messageInfo_AuthRoleListRequest proto.InternalMessageInfo +func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } +func (m *AuthRoleListRequest) String() string { return 
proto.CompactTextString(m) } +func (*AuthRoleListRequest) ProtoMessage() {} +func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} } type AuthRoleDeleteRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } -func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*AuthRoleDeleteRequest) ProtoMessage() {} -func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{67} -} -func (m *AuthRoleDeleteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleDeleteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleDeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleDeleteRequest.Merge(m, src) -} -func (m *AuthRoleDeleteRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleDeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleDeleteRequest.DiscardUnknown(m) + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` } -var xxx_messageInfo_AuthRoleDeleteRequest proto.InternalMessageInfo +func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } +func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*AuthRoleDeleteRequest) ProtoMessage() {} +func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} } func (m *AuthRoleDeleteRequest) GetRole() string { if m != nil { @@ -4932,44 +2961,15 @@ type AuthRoleGrantPermissionRequest struct { // name is the name of the role which will be granted the permission. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // perm is the permission to grant to the role. 
- Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm,proto3" json:"perm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm" json:"perm,omitempty"` } func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} } func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleGrantPermissionRequest) ProtoMessage() {} func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{68} -} -func (m *AuthRoleGrantPermissionRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleGrantPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleGrantPermissionRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } + return fileDescriptorRpc, []int{73} } -func (m *AuthRoleGrantPermissionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleGrantPermissionRequest.Merge(m, src) -} -func (m *AuthRoleGrantPermissionRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleGrantPermissionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleGrantPermissionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleGrantPermissionRequest proto.InternalMessageInfo func (m *AuthRoleGrantPermissionRequest) GetName() string { if m != nil { @@ -4986,47 +2986,18 @@ func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { } type AuthRoleRevokePermissionRequest struct { - Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - RangeEnd string `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` } func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} } func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleRevokePermissionRequest) ProtoMessage() {} func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{69} -} -func (m *AuthRoleRevokePermissionRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleRevokePermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleRevokePermissionRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleRevokePermissionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleRevokePermissionRequest.Merge(m, src) -} -func (m *AuthRoleRevokePermissionRequest) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleRevokePermissionRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_AuthRoleRevokePermissionRequest.DiscardUnknown(m) + return fileDescriptorRpc, []int{74} } -var xxx_messageInfo_AuthRoleRevokePermissionRequest proto.InternalMessageInfo - func (m *AuthRoleRevokePermissionRequest) GetRole() string { if m != nil { return m.Role @@ -5034,59 +3005,28 @@ func (m *AuthRoleRevokePermissionRequest) GetRole() string { return "" } -func (m *AuthRoleRevokePermissionRequest) GetKey() string { +func (m *AuthRoleRevokePermissionRequest) GetKey() []byte { if m != nil { return m.Key } - return "" + return nil } -func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string { +func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() []byte { if m != nil { return m.RangeEnd } - return "" + return nil } type AuthEnableResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } -func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } -func (*AuthEnableResponse) ProtoMessage() {} -func (*AuthEnableResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{70} -} -func (m *AuthEnableResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthEnableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthEnableResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthEnableResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthEnableResponse.Merge(m, src) -} -func (m *AuthEnableResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthEnableResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthEnableResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_AuthEnableResponse proto.InternalMessageInfo +func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } +func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } +func (*AuthEnableResponse) ProtoMessage() {} +func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{75} } func (m *AuthEnableResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5096,44 +3036,13 @@ func (m *AuthEnableResponse) GetHeader() *ResponseHeader { } type AuthDisableResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } -func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } -func (*AuthDisableResponse) ProtoMessage() {} -func (*AuthDisableResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{71} -} -func (m *AuthDisableResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthDisableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthDisableResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - 
return b[:n], nil - } -} -func (m *AuthDisableResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthDisableResponse.Merge(m, src) -} -func (m *AuthDisableResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthDisableResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthDisableResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_AuthDisableResponse proto.InternalMessageInfo +func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } +func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } +func (*AuthDisableResponse) ProtoMessage() {} +func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{76} } func (m *AuthDisableResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5143,46 +3052,15 @@ func (m *AuthDisableResponse) GetHeader() *ResponseHeader { } type AuthenticateResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // token is an authorized token that can be used in succeeding RPCs - Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } -func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } -func (*AuthenticateResponse) ProtoMessage() {} -func (*AuthenticateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{72} -} -func (m *AuthenticateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthenticateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthenticateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthenticateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthenticateResponse.Merge(m, src) -} -func (m *AuthenticateResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthenticateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthenticateResponse.DiscardUnknown(m) + Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` } -var xxx_messageInfo_AuthenticateResponse proto.InternalMessageInfo +func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } +func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } +func (*AuthenticateResponse) ProtoMessage() {} +func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{77} } func (m *AuthenticateResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5199,44 +3077,13 @@ func (m *AuthenticateResponse) GetToken() string { } type AuthUserAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} } -func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserAddResponse) ProtoMessage() {} -func (*AuthUserAddResponse) Descriptor() 
([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{73} -} -func (m *AuthUserAddResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserAddResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserAddResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserAddResponse.Merge(m, src) -} -func (m *AuthUserAddResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserAddResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserAddResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_AuthUserAddResponse proto.InternalMessageInfo +func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} } +func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserAddResponse) ProtoMessage() {} +func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{78} } func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5246,45 +3093,14 @@ func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { } type AuthUserGetResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } -func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserGetResponse) ProtoMessage() {} -func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{74} -} -func (m *AuthUserGetResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserGetResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserGetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserGetResponse.Merge(m, src) -} -func (m *AuthUserGetResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserGetResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserGetResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` } -var xxx_messageInfo_AuthUserGetResponse proto.InternalMessageInfo +func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } +func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserGetResponse) ProtoMessage() {} +func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{79} } func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5301,44 +3117,13 @@ func (m *AuthUserGetResponse) GetRoles() []string { } type AuthUserDeleteResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" 
json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } -func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserDeleteResponse) ProtoMessage() {} -func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{75} -} -func (m *AuthUserDeleteResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserDeleteResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserDeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserDeleteResponse.Merge(m, src) -} -func (m *AuthUserDeleteResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserDeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserDeleteResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_AuthUserDeleteResponse proto.InternalMessageInfo +func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } +func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserDeleteResponse) ProtoMessage() {} +func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{80} } func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5348,44 +3133,15 @@ func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { } type AuthUserChangePasswordResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} } func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserChangePasswordResponse) ProtoMessage() {} func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{76} -} -func (m *AuthUserChangePasswordResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserChangePasswordResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserChangePasswordResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserChangePasswordResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserChangePasswordResponse.Merge(m, src) -} -func (m *AuthUserChangePasswordResponse) XXX_Size() int { - return m.Size() + return fileDescriptorRpc, []int{81} } -func (m *AuthUserChangePasswordResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserChangePasswordResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthUserChangePasswordResponse proto.InternalMessageInfo func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { if m != nil { @@ 
-5395,44 +3151,13 @@ func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { } type AuthUserGrantRoleResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } -func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserGrantRoleResponse) ProtoMessage() {} -func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{77} -} -func (m *AuthUserGrantRoleResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserGrantRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserGrantRoleResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserGrantRoleResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserGrantRoleResponse.Merge(m, src) -} -func (m *AuthUserGrantRoleResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserGrantRoleResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserGrantRoleResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_AuthUserGrantRoleResponse proto.InternalMessageInfo +func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } +func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserGrantRoleResponse) ProtoMessage() {} +func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{82} } func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5442,44 +3167,13 @@ func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { } type AuthUserRevokeRoleResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } -func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) } -func (*AuthUserRevokeRoleResponse) ProtoMessage() {} -func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{78} -} -func (m *AuthUserRevokeRoleResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserRevokeRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserRevokeRoleResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserRevokeRoleResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserRevokeRoleResponse.Merge(m, src) -} -func (m *AuthUserRevokeRoleResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserRevokeRoleResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserRevokeRoleResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" 
json:"header,omitempty"` } -var xxx_messageInfo_AuthUserRevokeRoleResponse proto.InternalMessageInfo +func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } +func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserRevokeRoleResponse) ProtoMessage() {} +func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{83} } func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5489,44 +3183,13 @@ func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader { } type AuthRoleAddResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} } -func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleAddResponse) ProtoMessage() {} -func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{79} -} -func (m *AuthRoleAddResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleAddResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleAddResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleAddResponse.Merge(m, src) -} -func (m *AuthRoleAddResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleAddResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleAddResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_AuthRoleAddResponse proto.InternalMessageInfo +func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} } +func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleAddResponse) ProtoMessage() {} +func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{84} } func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5536,45 +3199,14 @@ func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { } type AuthRoleGetResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm,proto3" json:"perm,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } -func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleGetResponse) ProtoMessage() {} -func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{80} -} -func (m *AuthRoleGetResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleGetResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} 
-func (m *AuthRoleGetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleGetResponse.Merge(m, src) -} -func (m *AuthRoleGetResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleGetResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleGetResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm" json:"perm,omitempty"` } -var xxx_messageInfo_AuthRoleGetResponse proto.InternalMessageInfo +func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } +func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleGetResponse) ProtoMessage() {} +func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{85} } func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5591,45 +3223,14 @@ func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission { } type AuthRoleListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } -func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleListResponse) ProtoMessage() {} -func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{81} -} -func (m *AuthRoleListResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleListResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleListResponse.Merge(m, src) -} -func (m *AuthRoleListResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleListResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"` } -var xxx_messageInfo_AuthRoleListResponse proto.InternalMessageInfo +func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } +func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleListResponse) ProtoMessage() {} +func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{86} } func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5646,45 +3247,14 @@ func (m *AuthRoleListResponse) GetRoles() []string { } type AuthUserListResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Users []string `protobuf:"bytes,2,rep,name=users,proto3" json:"users,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} } -func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } -func 
(*AuthUserListResponse) ProtoMessage() {} -func (*AuthUserListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{82} -} -func (m *AuthUserListResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthUserListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthUserListResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthUserListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthUserListResponse.Merge(m, src) -} -func (m *AuthUserListResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthUserListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthUserListResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"` } -var xxx_messageInfo_AuthUserListResponse proto.InternalMessageInfo +func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} } +func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } +func (*AuthUserListResponse) ProtoMessage() {} +func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{87} } func (m *AuthUserListResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5701,44 +3271,13 @@ func (m *AuthUserListResponse) GetUsers() []string { } type AuthRoleDeleteResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } -func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*AuthRoleDeleteResponse) ProtoMessage() {} -func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{83} -} -func (m *AuthRoleDeleteResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleDeleteResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleDeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleDeleteResponse.Merge(m, src) -} -func (m *AuthRoleDeleteResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleDeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleDeleteResponse.DiscardUnknown(m) + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } -var xxx_messageInfo_AuthRoleDeleteResponse proto.InternalMessageInfo +func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } +func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*AuthRoleDeleteResponse) ProtoMessage() {} +func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{88} } func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5748,45 +3287,16 @@ func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { } type AuthRoleGrantPermissionResponse struct 
{ - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} } func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleGrantPermissionResponse) ProtoMessage() {} func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{84} -} -func (m *AuthRoleGrantPermissionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleGrantPermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleGrantPermissionResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleGrantPermissionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleGrantPermissionResponse.Merge(m, src) -} -func (m *AuthRoleGrantPermissionResponse) XXX_Size() int { - return m.Size() -} -func (m *AuthRoleGrantPermissionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleGrantPermissionResponse.DiscardUnknown(m) + return fileDescriptorRpc, []int{89} } -var xxx_messageInfo_AuthRoleGrantPermissionResponse proto.InternalMessageInfo - func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader { if m != nil { return m.Header @@ -5795,44 +3305,15 @@ func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader { } type AuthRoleRevokePermissionResponse struct { - Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} } func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleRevokePermissionResponse) ProtoMessage() {} func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{85} -} -func (m *AuthRoleRevokePermissionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuthRoleRevokePermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuthRoleRevokePermissionResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuthRoleRevokePermissionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuthRoleRevokePermissionResponse.Merge(m, src) -} -func (m *AuthRoleRevokePermissionResponse) XXX_Size() int { - return m.Size() + return fileDescriptorRpc, []int{90} } -func (m *AuthRoleRevokePermissionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AuthRoleRevokePermissionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AuthRoleRevokePermissionResponse proto.InternalMessageInfo func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { if m != nil { @@ -5842,13 +3323,6 @@ 
func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { } func init() { - proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value) - proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value) - proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value) - proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value) - proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value) - proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value) - proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value) proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader") proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest") proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse") @@ -5878,6 +3352,9 @@ func init() { proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse") proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest") proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse") + proto.RegisterType((*LeaseCheckpoint)(nil), "etcdserverpb.LeaseCheckpoint") + proto.RegisterType((*LeaseCheckpointRequest)(nil), "etcdserverpb.LeaseCheckpointRequest") + proto.RegisterType((*LeaseCheckpointResponse)(nil), "etcdserverpb.LeaseCheckpointResponse") proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest") proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse") proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest") @@ -5894,6 +3371,8 @@ func init() { proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse") proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest") proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse") + proto.RegisterType((*MemberPromoteRequest)(nil), "etcdserverpb.MemberPromoteRequest") + proto.RegisterType((*MemberPromoteResponse)(nil), "etcdserverpb.MemberPromoteResponse") proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest") proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse") proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest") @@ -5935,244 +3414,13 @@ func init() { proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse") proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse") proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse") -} - -func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } - -var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 3711 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x73, 0x1b, 0x47, - 0x76, 0xe6, 0x00, 0x04, 0x40, 0x1c, 0x5c, 0x08, 0x35, 0x29, 0x09, 0x84, 0x24, 0x8a, 0x6a, 0xdd, - 0xa8, 0x8b, 0x09, 0x9b, 0x76, 0xf2, 0xa0, 0xa4, 0x5c, 0xa6, 0x48, 0x58, 0xa4, 0x49, 0x91, 0xf4, - 0x10, 0x94, 0x9d, 0x2a, 0x27, 0xac, 0x21, 0xd0, 0x22, 0x11, 0x02, 0x33, 0xc8, 0xcc, 
0x00, 0x22, - 0x15, 0x57, 0x52, 0xe5, 0x38, 0xae, 0x3c, 0xc7, 0x55, 0xa9, 0x24, 0xaf, 0x5b, 0x5b, 0x2e, 0xff, - 0x82, 0xfd, 0x0b, 0x5b, 0xfb, 0xb2, 0xbb, 0xb5, 0x7f, 0x60, 0xcb, 0xbb, 0x2f, 0xfb, 0x0b, 0xf6, - 0xf2, 0xb4, 0xd5, 0xb7, 0x99, 0x9e, 0x1b, 0x48, 0x1b, 0xb6, 0x5f, 0xa8, 0xe9, 0xd3, 0xa7, 0xcf, - 0x39, 0x7d, 0xba, 0xcf, 0x39, 0xdd, 0x5f, 0x43, 0x90, 0xb7, 0xfb, 0xad, 0xa5, 0xbe, 0x6d, 0xb9, - 0x16, 0x2a, 0x12, 0xb7, 0xd5, 0x76, 0x88, 0x3d, 0x24, 0x76, 0xff, 0xb0, 0x36, 0x7b, 0x64, 0x1d, - 0x59, 0xac, 0xa3, 0x4e, 0xbf, 0x38, 0x4f, 0x6d, 0x8e, 0xf2, 0xd4, 0x7b, 0xc3, 0x56, 0x8b, 0xfd, - 0xe9, 0x1f, 0xd6, 0x4f, 0x86, 0xa2, 0xeb, 0x1a, 0xeb, 0x32, 0x06, 0xee, 0x31, 0xfb, 0xd3, 0x3f, - 0x64, 0xff, 0x88, 0xce, 0xeb, 0x47, 0x96, 0x75, 0xd4, 0x25, 0x75, 0xa3, 0xdf, 0xa9, 0x1b, 0xa6, - 0x69, 0xb9, 0x86, 0xdb, 0xb1, 0x4c, 0x87, 0xf7, 0xe2, 0xff, 0xd4, 0xa0, 0xac, 0x13, 0xa7, 0x6f, - 0x99, 0x0e, 0x59, 0x27, 0x46, 0x9b, 0xd8, 0xe8, 0x06, 0x40, 0xab, 0x3b, 0x70, 0x5c, 0x62, 0x1f, - 0x74, 0xda, 0x55, 0x6d, 0x41, 0x5b, 0x9c, 0xd4, 0xf3, 0x82, 0xb2, 0xd1, 0x46, 0xd7, 0x20, 0xdf, - 0x23, 0xbd, 0x43, 0xde, 0x9b, 0x62, 0xbd, 0x53, 0x9c, 0xb0, 0xd1, 0x46, 0x35, 0x98, 0xb2, 0xc9, - 0xb0, 0xe3, 0x74, 0x2c, 0xb3, 0x9a, 0x5e, 0xd0, 0x16, 0xd3, 0xba, 0xd7, 0xa6, 0x03, 0x6d, 0xe3, - 0xa5, 0x7b, 0xe0, 0x12, 0xbb, 0x57, 0x9d, 0xe4, 0x03, 0x29, 0xa1, 0x49, 0xec, 0x1e, 0xfe, 0x3c, - 0x03, 0x45, 0xdd, 0x30, 0x8f, 0x88, 0x4e, 0xfe, 0x65, 0x40, 0x1c, 0x17, 0x55, 0x20, 0x7d, 0x42, - 0xce, 0x98, 0xfa, 0xa2, 0x4e, 0x3f, 0xf9, 0x78, 0xf3, 0x88, 0x1c, 0x10, 0x93, 0x2b, 0x2e, 0xd2, - 0xf1, 0xe6, 0x11, 0x69, 0x98, 0x6d, 0x34, 0x0b, 0x99, 0x6e, 0xa7, 0xd7, 0x71, 0x85, 0x56, 0xde, - 0x08, 0x98, 0x33, 0x19, 0x32, 0x67, 0x15, 0xc0, 0xb1, 0x6c, 0xf7, 0xc0, 0xb2, 0xdb, 0xc4, 0xae, - 0x66, 0x16, 0xb4, 0xc5, 0xf2, 0xf2, 0x9d, 0x25, 0x75, 0x21, 0x96, 0x54, 0x83, 0x96, 0xf6, 0x2c, - 0xdb, 0xdd, 0xa1, 0xbc, 0x7a, 0xde, 0x91, 0x9f, 0xe8, 0x7d, 0x28, 0x30, 0x21, 0xae, 0x61, 0x1f, - 0x11, 0xb7, 0x9a, 0x65, 0x52, 0xee, 0x9e, 0x23, 0xa5, 0xc9, 0x98, 0x75, 0xa6, 0x9e, 0x7f, 0x23, - 0x0c, 0x45, 0x87, 0xd8, 0x1d, 0xa3, 0xdb, 0x79, 0x6d, 0x1c, 0x76, 0x49, 0x35, 0xb7, 0xa0, 0x2d, - 0x4e, 0xe9, 0x01, 0x1a, 0x9d, 0xff, 0x09, 0x39, 0x73, 0x0e, 0x2c, 0xb3, 0x7b, 0x56, 0x9d, 0x62, - 0x0c, 0x53, 0x94, 0xb0, 0x63, 0x76, 0xcf, 0xd8, 0xa2, 0x59, 0x03, 0xd3, 0xe5, 0xbd, 0x79, 0xd6, - 0x9b, 0x67, 0x14, 0xd6, 0xbd, 0x08, 0x95, 0x5e, 0xc7, 0x3c, 0xe8, 0x59, 0xed, 0x03, 0xcf, 0x21, - 0xc0, 0x1c, 0x52, 0xee, 0x75, 0xcc, 0xe7, 0x56, 0x5b, 0x97, 0x6e, 0xa1, 0x9c, 0xc6, 0x69, 0x90, - 0xb3, 0x20, 0x38, 0x8d, 0x53, 0x95, 0x73, 0x09, 0x66, 0xa8, 0xcc, 0x96, 0x4d, 0x0c, 0x97, 0xf8, - 0xcc, 0x45, 0xc6, 0x7c, 0xa9, 0xd7, 0x31, 0x57, 0x59, 0x4f, 0x80, 0xdf, 0x38, 0x8d, 0xf0, 0x97, - 0x04, 0xbf, 0x71, 0x1a, 0xe4, 0xc7, 0x4b, 0x90, 0xf7, 0x7c, 0x8e, 0xa6, 0x60, 0x72, 0x7b, 0x67, - 0xbb, 0x51, 0x99, 0x40, 0x00, 0xd9, 0x95, 0xbd, 0xd5, 0xc6, 0xf6, 0x5a, 0x45, 0x43, 0x05, 0xc8, - 0xad, 0x35, 0x78, 0x23, 0x85, 0x9f, 0x02, 0xf8, 0xde, 0x45, 0x39, 0x48, 0x6f, 0x36, 0xfe, 0xa1, - 0x32, 0x41, 0x79, 0x5e, 0x34, 0xf4, 0xbd, 0x8d, 0x9d, 0xed, 0x8a, 0x46, 0x07, 0xaf, 0xea, 0x8d, - 0x95, 0x66, 0xa3, 0x92, 0xa2, 0x1c, 0xcf, 0x77, 0xd6, 0x2a, 0x69, 0x94, 0x87, 0xcc, 0x8b, 0x95, - 0xad, 0xfd, 0x46, 0x65, 0x12, 0x7f, 0xa9, 0x41, 0x49, 0xac, 0x17, 0x8f, 0x09, 0xf4, 0x0e, 0x64, - 0x8f, 0x59, 0x5c, 0xb0, 0xad, 0x58, 0x58, 0xbe, 0x1e, 0x5a, 0xdc, 0x40, 0xec, 0xe8, 0x82, 0x17, - 0x61, 0x48, 0x9f, 0x0c, 0x9d, 0x6a, 0x6a, 0x21, 0xbd, 0x58, 0x58, 0xae, 0x2c, 0xf1, 0x80, 0x5d, - 0xda, 0x24, 
0x67, 0x2f, 0x8c, 0xee, 0x80, 0xe8, 0xb4, 0x13, 0x21, 0x98, 0xec, 0x59, 0x36, 0x61, - 0x3b, 0x76, 0x4a, 0x67, 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0xf0, 0xd7, 0x1a, - 0xc0, 0xee, 0xc0, 0x4d, 0x0e, 0x8d, 0x59, 0xc8, 0x0c, 0xa9, 0x60, 0x11, 0x16, 0xbc, 0xc1, 0x62, - 0x82, 0x18, 0x0e, 0xf1, 0x62, 0x82, 0x36, 0xd0, 0x55, 0xc8, 0xf5, 0x6d, 0x32, 0x3c, 0x38, 0x19, - 0x32, 0x25, 0x53, 0x7a, 0x96, 0x36, 0x37, 0x87, 0xe8, 0x16, 0x14, 0x3b, 0x47, 0xa6, 0x65, 0x93, - 0x03, 0x2e, 0x2b, 0xc3, 0x7a, 0x0b, 0x9c, 0xc6, 0xec, 0x56, 0x58, 0xb8, 0xe0, 0xac, 0xca, 0xb2, - 0x45, 0x49, 0xd8, 0x84, 0x02, 0x33, 0x75, 0x2c, 0xf7, 0x3d, 0xf0, 0x6d, 0x4c, 0xb1, 0x61, 0x51, - 0x17, 0x0a, 0xab, 0xf1, 0x27, 0x80, 0xd6, 0x48, 0x97, 0xb8, 0x64, 0x9c, 0xec, 0xa1, 0xf8, 0x24, - 0xad, 0xfa, 0x04, 0xff, 0xb7, 0x06, 0x33, 0x01, 0xf1, 0x63, 0x4d, 0xab, 0x0a, 0xb9, 0x36, 0x13, - 0xc6, 0x2d, 0x48, 0xeb, 0xb2, 0x89, 0x1e, 0xc1, 0x94, 0x30, 0xc0, 0xa9, 0xa6, 0x13, 0x36, 0x4d, - 0x8e, 0xdb, 0xe4, 0xe0, 0xaf, 0x53, 0x90, 0x17, 0x13, 0xdd, 0xe9, 0xa3, 0x15, 0x28, 0xd9, 0xbc, - 0x71, 0xc0, 0xe6, 0x23, 0x2c, 0xaa, 0x25, 0x27, 0xa1, 0xf5, 0x09, 0xbd, 0x28, 0x86, 0x30, 0x32, - 0xfa, 0x3b, 0x28, 0x48, 0x11, 0xfd, 0x81, 0x2b, 0x5c, 0x5e, 0x0d, 0x0a, 0xf0, 0xf7, 0xdf, 0xfa, - 0x84, 0x0e, 0x82, 0x7d, 0x77, 0xe0, 0xa2, 0x26, 0xcc, 0xca, 0xc1, 0x7c, 0x36, 0xc2, 0x8c, 0x34, - 0x93, 0xb2, 0x10, 0x94, 0x12, 0x5d, 0xaa, 0xf5, 0x09, 0x1d, 0x89, 0xf1, 0x4a, 0xa7, 0x6a, 0x92, - 0x7b, 0xca, 0x93, 0x77, 0xc4, 0xa4, 0xe6, 0xa9, 0x19, 0x35, 0xa9, 0x79, 0x6a, 0x3e, 0xcd, 0x43, - 0x4e, 0xb4, 0xf0, 0xcf, 0x52, 0x00, 0x72, 0x35, 0x76, 0xfa, 0x68, 0x0d, 0xca, 0xb6, 0x68, 0x05, - 0xbc, 0x75, 0x2d, 0xd6, 0x5b, 0x62, 0x11, 0x27, 0xf4, 0x92, 0x1c, 0xc4, 0x8d, 0x7b, 0x17, 0x8a, - 0x9e, 0x14, 0xdf, 0x61, 0x73, 0x31, 0x0e, 0xf3, 0x24, 0x14, 0xe4, 0x00, 0xea, 0xb2, 0x8f, 0xe0, - 0xb2, 0x37, 0x3e, 0xc6, 0x67, 0xb7, 0x46, 0xf8, 0xcc, 0x13, 0x38, 0x23, 0x25, 0xa8, 0x5e, 0x53, - 0x0d, 0xf3, 0xdd, 0x36, 0x17, 0xe3, 0xb6, 0xa8, 0x61, 0xd4, 0x71, 0x40, 0xeb, 0x25, 0x6f, 0xe2, - 0x3f, 0xa4, 0x21, 0xb7, 0x6a, 0xf5, 0xfa, 0x86, 0x4d, 0x57, 0x23, 0x6b, 0x13, 0x67, 0xd0, 0x75, - 0x99, 0xbb, 0xca, 0xcb, 0xb7, 0x83, 0x12, 0x05, 0x9b, 0xfc, 0x57, 0x67, 0xac, 0xba, 0x18, 0x42, - 0x07, 0x8b, 0xf2, 0x98, 0xba, 0xc0, 0x60, 0x51, 0x1c, 0xc5, 0x10, 0x19, 0xc8, 0x69, 0x3f, 0x90, - 0x6b, 0x90, 0x1b, 0x12, 0xdb, 0x2f, 0xe9, 0xeb, 0x13, 0xba, 0x24, 0xa0, 0x07, 0x30, 0x1d, 0x2e, - 0x2f, 0x19, 0xc1, 0x53, 0x6e, 0x05, 0xab, 0xd1, 0x6d, 0x28, 0x06, 0x6a, 0x5c, 0x56, 0xf0, 0x15, - 0x7a, 0x4a, 0x89, 0xbb, 0x22, 0xf3, 0x2a, 0xad, 0xc7, 0xc5, 0xf5, 0x09, 0x99, 0x59, 0xaf, 0xc8, - 0xcc, 0x3a, 0x25, 0x46, 0x89, 0xdc, 0x1a, 0x48, 0x32, 0xef, 0x05, 0x93, 0x0c, 0x7e, 0x0f, 0x4a, - 0x01, 0x07, 0xd1, 0xba, 0xd3, 0xf8, 0x70, 0x7f, 0x65, 0x8b, 0x17, 0xa9, 0x67, 0xac, 0x2e, 0xe9, - 0x15, 0x8d, 0xd6, 0xba, 0xad, 0xc6, 0xde, 0x5e, 0x25, 0x85, 0x4a, 0x90, 0xdf, 0xde, 0x69, 0x1e, - 0x70, 0xae, 0x34, 0x7e, 0xe6, 0x49, 0x10, 0x45, 0x4e, 0xa9, 0x6d, 0x13, 0x4a, 0x6d, 0xd3, 0x64, - 0x6d, 0x4b, 0xf9, 0xb5, 0x8d, 0x95, 0xb9, 0xad, 0xc6, 0xca, 0x5e, 0xa3, 0x32, 0xf9, 0xb4, 0x0c, - 0x45, 0xee, 0xdf, 0x83, 0x81, 0x49, 0x4b, 0xed, 0x4f, 0x34, 0x00, 0x3f, 0x9a, 0x50, 0x1d, 0x72, - 0x2d, 0xae, 0xa7, 0xaa, 0xb1, 0x64, 0x74, 0x39, 0x76, 0xc9, 0x74, 0xc9, 0x85, 0xde, 0x82, 0x9c, - 0x33, 0x68, 0xb5, 0x88, 0x23, 0x4b, 0xde, 0xd5, 0x70, 0x3e, 0x14, 0xd9, 0x4a, 0x97, 0x7c, 0x74, - 0xc8, 0x4b, 0xa3, 0xd3, 0x1d, 0xb0, 0x02, 0x38, 0x7a, 0x88, 0xe0, 0xc3, 0xff, 0xa7, 0x41, 0x41, - 0xd9, 0xbc, 0xdf, 0x31, 0x09, 0x5f, 
0x87, 0x3c, 0xb3, 0x81, 0xb4, 0x45, 0x1a, 0x9e, 0xd2, 0x7d, - 0x02, 0xfa, 0x5b, 0xc8, 0xcb, 0x08, 0x90, 0x99, 0xb8, 0x1a, 0x2f, 0x76, 0xa7, 0xaf, 0xfb, 0xac, - 0x78, 0x13, 0x2e, 0x31, 0xaf, 0xb4, 0xe8, 0xe1, 0x5a, 0xfa, 0x51, 0x3d, 0x7e, 0x6a, 0xa1, 0xe3, - 0x67, 0x0d, 0xa6, 0xfa, 0xc7, 0x67, 0x4e, 0xa7, 0x65, 0x74, 0x85, 0x15, 0x5e, 0x1b, 0x7f, 0x00, - 0x48, 0x15, 0x36, 0xce, 0x74, 0x71, 0x09, 0x0a, 0xeb, 0x86, 0x73, 0x2c, 0x4c, 0xc2, 0x8f, 0xa0, - 0x44, 0x9b, 0x9b, 0x2f, 0x2e, 0x60, 0x23, 0xbb, 0x1c, 0x48, 0xee, 0xb1, 0x7c, 0x8e, 0x60, 0xf2, - 0xd8, 0x70, 0x8e, 0xd9, 0x44, 0x4b, 0x3a, 0xfb, 0x46, 0x0f, 0xa0, 0xd2, 0xe2, 0x93, 0x3c, 0x08, - 0x5d, 0x19, 0xa6, 0x05, 0xdd, 0x3b, 0x09, 0x7e, 0x0c, 0x45, 0x3e, 0x87, 0xef, 0xdb, 0x08, 0x7c, - 0x09, 0xa6, 0xf7, 0x4c, 0xa3, 0xef, 0x1c, 0x5b, 0xb2, 0xba, 0xd1, 0x49, 0x57, 0x7c, 0xda, 0x58, - 0x1a, 0xef, 0xc3, 0xb4, 0x4d, 0x7a, 0x46, 0xc7, 0xec, 0x98, 0x47, 0x07, 0x87, 0x67, 0x2e, 0x71, - 0xc4, 0x85, 0xa9, 0xec, 0x91, 0x9f, 0x52, 0x2a, 0x35, 0xed, 0xb0, 0x6b, 0x1d, 0x8a, 0x34, 0xc7, - 0xbe, 0xf1, 0x17, 0x29, 0x28, 0x7e, 0x64, 0xb8, 0x2d, 0xb9, 0x74, 0x68, 0x03, 0xca, 0x5e, 0x72, - 0x63, 0x14, 0x61, 0x4b, 0xa8, 0xc4, 0xb2, 0x31, 0xf2, 0x28, 0x2d, 0xab, 0x63, 0xa9, 0xa5, 0x12, - 0x98, 0x28, 0xc3, 0x6c, 0x91, 0xae, 0x27, 0x2a, 0x95, 0x2c, 0x8a, 0x31, 0xaa, 0xa2, 0x54, 0x02, - 0xda, 0x81, 0x4a, 0xdf, 0xb6, 0x8e, 0x6c, 0xe2, 0x38, 0x9e, 0x30, 0x5e, 0xc6, 0x70, 0x8c, 0xb0, - 0x5d, 0xc1, 0xea, 0x8b, 0x9b, 0xee, 0x07, 0x49, 0x4f, 0xa7, 0xfd, 0xf3, 0x0c, 0x4f, 0x4e, 0xbf, - 0x4e, 0x01, 0x8a, 0x4e, 0xea, 0xdb, 0x1e, 0xf1, 0xee, 0x42, 0xd9, 0x71, 0x0d, 0x3b, 0xb2, 0xd9, - 0x4a, 0x8c, 0xea, 0x65, 0xfc, 0xfb, 0xe0, 0x19, 0x74, 0x60, 0x5a, 0x6e, 0xe7, 0xe5, 0x99, 0x38, - 0x25, 0x97, 0x25, 0x79, 0x9b, 0x51, 0x51, 0x03, 0x72, 0x2f, 0x3b, 0x5d, 0x97, 0xd8, 0x4e, 0x35, - 0xb3, 0x90, 0x5e, 0x2c, 0x2f, 0x3f, 0x3a, 0x6f, 0x19, 0x96, 0xde, 0x67, 0xfc, 0xcd, 0xb3, 0x3e, - 0xd1, 0xe5, 0x58, 0xf5, 0xe4, 0x99, 0x0d, 0x9c, 0xc6, 0xe7, 0x60, 0xea, 0x15, 0x15, 0x41, 0x6f, - 0xd9, 0x39, 0x7e, 0x58, 0x64, 0x6d, 0x7e, 0xc9, 0x7e, 0x69, 0x1b, 0x47, 0x3d, 0x62, 0xba, 0xf2, - 0x1e, 0x28, 0xdb, 0xf8, 0x2e, 0x80, 0xaf, 0x86, 0xa6, 0xfc, 0xed, 0x9d, 0xdd, 0xfd, 0x66, 0x65, - 0x02, 0x15, 0x61, 0x6a, 0x7b, 0x67, 0xad, 0xb1, 0xd5, 0xa0, 0xf5, 0x01, 0xd7, 0xa5, 0x4b, 0x03, - 0x6b, 0xa9, 0xea, 0xd4, 0x02, 0x3a, 0xf1, 0x15, 0x98, 0x8d, 0x5b, 0x40, 0x7a, 0x16, 0x2d, 0x89, - 0x5d, 0x3a, 0x56, 0xa8, 0xa8, 0xaa, 0x53, 0xc1, 0xe9, 0x56, 0x21, 0xc7, 0x77, 0x6f, 0x5b, 0x1c, - 0xce, 0x65, 0x93, 0x3a, 0x82, 0x6f, 0x46, 0xd2, 0x16, 0xab, 0xe4, 0xb5, 0x63, 0xd3, 0x4b, 0x26, - 0x36, 0xbd, 0xa0, 0xdb, 0x50, 0xf2, 0xa2, 0xc1, 0x70, 0xc4, 0x59, 0x20, 0xaf, 0x17, 0xe5, 0x46, - 0xa7, 0xb4, 0x80, 0xd3, 0x73, 0x41, 0xa7, 0xa3, 0xbb, 0x90, 0x25, 0x43, 0x62, 0xba, 0x4e, 0xb5, - 0xc0, 0x2a, 0x46, 0x49, 0x9e, 0xdd, 0x1b, 0x94, 0xaa, 0x8b, 0x4e, 0xfc, 0x37, 0x70, 0x89, 0xdd, - 0x91, 0x9e, 0xd9, 0x86, 0xa9, 0x5e, 0xe6, 0x9a, 0xcd, 0x2d, 0xe1, 0x6e, 0xfa, 0x89, 0xca, 0x90, - 0xda, 0x58, 0x13, 0x4e, 0x48, 0x6d, 0xac, 0xe1, 0xcf, 0x34, 0x40, 0xea, 0xb8, 0xb1, 0xfc, 0x1c, - 0x12, 0x2e, 0xd5, 0xa7, 0x7d, 0xf5, 0xb3, 0x90, 0x21, 0xb6, 0x6d, 0xd9, 0xcc, 0xa3, 0x79, 0x9d, - 0x37, 0xf0, 0x1d, 0x61, 0x83, 0x4e, 0x86, 0xd6, 0x89, 0x17, 0x83, 0x5c, 0x9a, 0xe6, 0x99, 0xba, - 0x09, 0x33, 0x01, 0xae, 0xb1, 0x2a, 0xd7, 0x7d, 0xb8, 0xcc, 0x84, 0x6d, 0x12, 0xd2, 0x5f, 0xe9, - 0x76, 0x86, 0x89, 0x5a, 0xfb, 0x70, 0x25, 0xcc, 0xf8, 0xc3, 0xfa, 0x08, 0xff, 0xbd, 0xd0, 0xd8, - 0xec, 0xf4, 0x48, 0xd3, 0xda, 0x4a, 0xb6, 0x8d, 0x66, 0xf6, 
0x13, 0x72, 0xe6, 0x88, 0x12, 0xcf, - 0xbe, 0xf1, 0x4f, 0x35, 0xb8, 0x1a, 0x19, 0xfe, 0x03, 0xaf, 0xea, 0x3c, 0xc0, 0x11, 0xdd, 0x3e, - 0xa4, 0x4d, 0x3b, 0x38, 0xba, 0xa0, 0x50, 0x3c, 0x3b, 0x69, 0x2e, 0x2b, 0x0a, 0x3b, 0x67, 0xc5, - 0x9a, 0xb3, 0x3f, 0x5e, 0xc4, 0xdf, 0x80, 0x02, 0x23, 0xec, 0xb9, 0x86, 0x3b, 0x70, 0x22, 0x8b, - 0xf1, 0x6f, 0x62, 0x0b, 0xc8, 0x41, 0x63, 0xcd, 0xeb, 0x2d, 0xc8, 0xb2, 0x83, 0xb5, 0x3c, 0x56, - 0x86, 0x6e, 0x32, 0x8a, 0x1d, 0xba, 0x60, 0xc4, 0xc7, 0x90, 0x7d, 0xce, 0xd0, 0x48, 0xc5, 0xb2, - 0x49, 0xb9, 0x14, 0xa6, 0xd1, 0xe3, 0x18, 0x49, 0x5e, 0x67, 0xdf, 0xec, 0x14, 0x46, 0x88, 0xbd, - 0xaf, 0x6f, 0xf1, 0xd3, 0x5e, 0x5e, 0xf7, 0xda, 0xd4, 0x65, 0xad, 0x6e, 0x87, 0x98, 0x2e, 0xeb, - 0x9d, 0x64, 0xbd, 0x0a, 0x05, 0x2f, 0x41, 0x85, 0x6b, 0x5a, 0x69, 0xb7, 0x95, 0xd3, 0x94, 0x27, - 0x4f, 0x0b, 0xca, 0xc3, 0x5f, 0x69, 0x70, 0x49, 0x19, 0x30, 0x96, 0x63, 0x1e, 0x43, 0x96, 0x63, - 0xae, 0xa2, 0x70, 0xcf, 0x06, 0x47, 0x71, 0x35, 0xba, 0xe0, 0x41, 0x4b, 0x90, 0xe3, 0x5f, 0xf2, - 0x48, 0x1b, 0xcf, 0x2e, 0x99, 0xf0, 0x5d, 0x98, 0x11, 0x24, 0xd2, 0xb3, 0xe2, 0xf6, 0x36, 0x73, - 0x28, 0xfe, 0x14, 0x66, 0x83, 0x6c, 0x63, 0x4d, 0x49, 0x31, 0x32, 0x75, 0x11, 0x23, 0x57, 0xa4, - 0x91, 0xfb, 0xfd, 0xb6, 0x72, 0x2c, 0x08, 0xaf, 0xba, 0xba, 0x22, 0xa9, 0xd0, 0x8a, 0x78, 0x13, - 0x90, 0x22, 0x7e, 0xd4, 0x09, 0xcc, 0xc8, 0xed, 0xb0, 0xd5, 0x71, 0xbc, 0xd3, 0xe7, 0x6b, 0x40, - 0x2a, 0xf1, 0xc7, 0x36, 0x68, 0x8d, 0xc8, 0xa2, 0x26, 0x0d, 0xfa, 0x00, 0x90, 0x4a, 0x1c, 0x2b, - 0xa3, 0xd7, 0xe1, 0xd2, 0x73, 0x6b, 0x48, 0x53, 0x03, 0xa5, 0xfa, 0x21, 0xc3, 0xef, 0xa2, 0xde, - 0xb2, 0x79, 0x6d, 0xaa, 0x5c, 0x1d, 0x30, 0x96, 0xf2, 0x5f, 0x6a, 0x50, 0x5c, 0xe9, 0x1a, 0x76, - 0x4f, 0x2a, 0x7e, 0x17, 0xb2, 0xfc, 0x86, 0x25, 0x40, 0x8d, 0x7b, 0x41, 0x31, 0x2a, 0x2f, 0x6f, - 0xac, 0xf0, 0xfb, 0x98, 0x18, 0x45, 0x0d, 0x17, 0xef, 0x1e, 0x6b, 0xa1, 0x77, 0x90, 0x35, 0xf4, - 0x06, 0x64, 0x0c, 0x3a, 0x84, 0xa5, 0xe0, 0x72, 0xf8, 0x6e, 0xcb, 0xa4, 0xb1, 0x73, 0x20, 0xe7, - 0xc2, 0xef, 0x40, 0x41, 0xd1, 0x40, 0x6f, 0xef, 0xcf, 0x1a, 0xe2, 0xd0, 0xb6, 0xb2, 0xda, 0xdc, - 0x78, 0xc1, 0x2f, 0xf5, 0x65, 0x80, 0xb5, 0x86, 0xd7, 0x4e, 0xe1, 0x8f, 0xc5, 0x28, 0x91, 0xef, - 0x54, 0x7b, 0xb4, 0x24, 0x7b, 0x52, 0x17, 0xb2, 0xe7, 0x14, 0x4a, 0x62, 0xfa, 0xe3, 0xa6, 0x6f, - 0x26, 0x2f, 0x21, 0x7d, 0x2b, 0xc6, 0xeb, 0x82, 0x11, 0x4f, 0x43, 0x49, 0x24, 0x74, 0xb1, 0xff, - 0x7e, 0xa1, 0x41, 0x59, 0x52, 0xc6, 0x05, 0x5f, 0x25, 0x6e, 0xc4, 0x2b, 0x80, 0x87, 0x1a, 0x5d, - 0x81, 0x6c, 0xfb, 0x70, 0xaf, 0xf3, 0x5a, 0x02, 0xe5, 0xa2, 0x45, 0xe9, 0x5d, 0xae, 0x87, 0xbf, - 0x56, 0x89, 0x16, 0xba, 0xce, 0x1f, 0xb2, 0x36, 0xcc, 0x36, 0x39, 0x65, 0x67, 0xca, 0x49, 0xdd, - 0x27, 0xb0, 0x0b, 0xb5, 0x78, 0xd5, 0x62, 0x07, 0x49, 0xf5, 0x95, 0x6b, 0x06, 0x2e, 0xad, 0x0c, - 0xdc, 0xe3, 0x86, 0x69, 0x1c, 0x76, 0x65, 0xc6, 0xa2, 0x65, 0x96, 0x12, 0xd7, 0x3a, 0x8e, 0x4a, - 0x6d, 0xc0, 0x0c, 0xa5, 0x12, 0xd3, 0xed, 0xb4, 0x94, 0xf4, 0x26, 0x8b, 0x98, 0x16, 0x2a, 0x62, - 0x86, 0xe3, 0xbc, 0xb2, 0xec, 0xb6, 0x98, 0x9a, 0xd7, 0xc6, 0x6b, 0x5c, 0xf8, 0xbe, 0x13, 0x28, - 0x53, 0xdf, 0x56, 0xca, 0xa2, 0x2f, 0xe5, 0x19, 0x71, 0x47, 0x48, 0xc1, 0x8f, 0xe0, 0xb2, 0xe4, - 0x14, 0xc0, 0xe4, 0x08, 0xe6, 0x1d, 0xb8, 0x21, 0x99, 0x57, 0x8f, 0xe9, 0x45, 0x6d, 0x57, 0x28, - 0xfc, 0xae, 0x76, 0x3e, 0x85, 0xaa, 0x67, 0x27, 0x3b, 0x2c, 0x5b, 0x5d, 0xd5, 0x80, 0x81, 0x23, - 0xf6, 0x4c, 0x5e, 0x67, 0xdf, 0x94, 0x66, 0x5b, 0x5d, 0xef, 0x48, 0x40, 0xbf, 0xf1, 0x2a, 0xcc, - 0x49, 0x19, 0xe2, 0x18, 0x1b, 0x14, 0x12, 0x31, 0x28, 0x4e, 0x88, 0x70, 0x18, 0x1d, 
0x3a, 0xda, - 0xed, 0x2a, 0x67, 0xd0, 0xb5, 0x4c, 0xa6, 0xa6, 0xc8, 0xbc, 0xcc, 0x77, 0x04, 0x35, 0x4c, 0xad, - 0x18, 0x82, 0x4c, 0x05, 0xa8, 0x64, 0xb1, 0x10, 0x94, 0x1c, 0x59, 0x88, 0x88, 0xe8, 0x4f, 0x60, - 0xde, 0x33, 0x82, 0xfa, 0x6d, 0x97, 0xd8, 0xbd, 0x8e, 0xe3, 0x28, 0x50, 0x56, 0xdc, 0xc4, 0xef, - 0xc1, 0x64, 0x9f, 0x88, 0x9c, 0x52, 0x58, 0x46, 0x4b, 0xfc, 0xed, 0x79, 0x49, 0x19, 0xcc, 0xfa, - 0x71, 0x1b, 0x6e, 0x4a, 0xe9, 0xdc, 0xa3, 0xb1, 0xe2, 0xc3, 0x46, 0xc9, 0x0b, 0x3e, 0x77, 0x6b, - 0xf4, 0x82, 0x9f, 0xe6, 0x6b, 0xef, 0xc1, 0xab, 0x1f, 0x70, 0x47, 0xca, 0xd8, 0x1a, 0xab, 0x56, - 0x6c, 0x72, 0x9f, 0x7a, 0x21, 0x39, 0x96, 0xb0, 0x43, 0x98, 0x0d, 0x46, 0xf2, 0x58, 0x69, 0x6c, - 0x16, 0x32, 0xae, 0x75, 0x42, 0x64, 0x12, 0xe3, 0x0d, 0x69, 0xb0, 0x17, 0xe6, 0x63, 0x19, 0x6c, - 0xf8, 0xc2, 0xd8, 0x96, 0x1c, 0xd7, 0x5e, 0xba, 0x9a, 0xf2, 0xf0, 0xc5, 0x1b, 0x78, 0x1b, 0xae, - 0x84, 0xd3, 0xc4, 0x58, 0x26, 0xbf, 0xe0, 0x1b, 0x38, 0x2e, 0x93, 0x8c, 0x25, 0xf7, 0x43, 0x3f, - 0x19, 0x28, 0x09, 0x65, 0x2c, 0x91, 0x3a, 0xd4, 0xe2, 0xf2, 0xcb, 0xf7, 0xb1, 0x5f, 0xbd, 0x74, - 0x33, 0x96, 0x30, 0xc7, 0x17, 0x36, 0xfe, 0xf2, 0xfb, 0x39, 0x22, 0x3d, 0x32, 0x47, 0x88, 0x20, - 0xf1, 0xb3, 0xd8, 0x0f, 0xb0, 0xe9, 0x84, 0x0e, 0x3f, 0x81, 0x8e, 0xab, 0x83, 0xd6, 0x10, 0x4f, - 0x07, 0x6b, 0xc8, 0x8d, 0xad, 0xa6, 0xdd, 0xb1, 0x16, 0xe3, 0x23, 0x3f, 0x77, 0x46, 0x32, 0xf3, - 0x58, 0x82, 0x3f, 0x86, 0x85, 0xe4, 0xa4, 0x3c, 0x8e, 0xe4, 0x87, 0x75, 0xc8, 0x7b, 0x07, 0x4a, - 0xe5, 0x77, 0x1b, 0x05, 0xc8, 0x6d, 0xef, 0xec, 0xed, 0xae, 0xac, 0x36, 0xf8, 0x0f, 0x37, 0x56, - 0x77, 0x74, 0x7d, 0x7f, 0xb7, 0x59, 0x49, 0x2d, 0xff, 0x29, 0x0d, 0xa9, 0xcd, 0x17, 0xe8, 0x1f, - 0x21, 0xc3, 0x5f, 0x31, 0x47, 0x3c, 0x5d, 0xd7, 0x46, 0x3d, 0xd4, 0xe2, 0x6b, 0x9f, 0xfd, 0xe6, - 0xf7, 0x5f, 0xa6, 0x2e, 0xe3, 0x4a, 0x7d, 0xf8, 0xf6, 0x21, 0x71, 0x8d, 0xfa, 0xc9, 0xb0, 0xce, - 0xea, 0xc3, 0x13, 0xed, 0x21, 0xda, 0x87, 0xf4, 0xee, 0xc0, 0x45, 0x89, 0xcf, 0xda, 0xb5, 0xe4, - 0xf7, 0x5b, 0x3c, 0xc7, 0x04, 0xcf, 0xe0, 0xb2, 0x22, 0xb8, 0x3f, 0x70, 0xa9, 0xd8, 0x01, 0x14, - 0xd4, 0x17, 0xd8, 0x73, 0xdf, 0xbb, 0x6b, 0xe7, 0xbf, 0xee, 0xe2, 0x5b, 0x4c, 0xdd, 0x35, 0x7c, - 0x45, 0x51, 0xc7, 0xdf, 0x89, 0xd5, 0xd9, 0x34, 0x4f, 0x4d, 0x94, 0xf8, 0x22, 0x5e, 0x4b, 0x7e, - 0xf4, 0x8d, 0x9d, 0x8d, 0x7b, 0x6a, 0x52, 0xb1, 0xa6, 0x78, 0xf3, 0x6d, 0xb9, 0xe8, 0x66, 0xcc, - 0x9b, 0x9f, 0xfa, 0xba, 0x55, 0x5b, 0x48, 0x66, 0x10, 0x8a, 0x16, 0x98, 0xa2, 0x1a, 0xbe, 0xac, - 0x28, 0x6a, 0x79, 0x6c, 0x4f, 0xb4, 0x87, 0xcb, 0x47, 0x90, 0x61, 0xe8, 0x31, 0xfa, 0x27, 0xf9, - 0x51, 0x8b, 0x81, 0xd1, 0x13, 0x16, 0x3f, 0x80, 0x3b, 0xe3, 0x2a, 0x53, 0x86, 0x70, 0x49, 0x2a, - 0x63, 0xf8, 0xf1, 0x13, 0xed, 0xe1, 0xa2, 0xf6, 0xa6, 0xb6, 0xfc, 0xc7, 0x49, 0xc8, 0x30, 0xb8, - 0x08, 0x59, 0x00, 0x3e, 0x9a, 0x1a, 0x9e, 0x65, 0x04, 0x9f, 0x0d, 0xcf, 0x32, 0x0a, 0xc4, 0xe2, - 0x79, 0xa6, 0xb8, 0x8a, 0x67, 0xa4, 0x62, 0x86, 0x44, 0xd5, 0x19, 0xb8, 0x46, 0x7d, 0x3a, 0x14, - 0x80, 0x19, 0x0f, 0x33, 0x14, 0x27, 0x30, 0x80, 0xaa, 0x86, 0x77, 0x48, 0x0c, 0xa2, 0x8a, 0x31, - 0xd3, 0x79, 0x1d, 0x5f, 0x55, 0x3c, 0xcb, 0xd5, 0xda, 0x8c, 0x91, 0xea, 0xfd, 0x0f, 0x0d, 0xca, - 0x41, 0x5c, 0x14, 0xdd, 0x8e, 0x91, 0x1c, 0x86, 0x57, 0x6b, 0x77, 0x46, 0x33, 0x25, 0x59, 0xc0, - 0xd5, 0x9f, 0x10, 0xd2, 0x37, 0x28, 0xa3, 0x70, 0x3c, 0xfa, 0x42, 0x83, 0xe9, 0x10, 0xd8, 0x89, - 0xe2, 0x34, 0x44, 0xa0, 0xd4, 0xda, 0xdd, 0x73, 0xb8, 0x84, 0x21, 0xf7, 0x98, 0x21, 0x0b, 0xf8, - 0x5a, 0xc4, 0x15, 0x6e, 0xa7, 0x47, 0x5c, 0x4b, 0x18, 0xe3, 0x2d, 0x03, 0x07, 0x26, 0x63, 0x97, - 0x21, 0x00, 
0x74, 0xc6, 0x2e, 0x43, 0x10, 0xd5, 0x1c, 0xb1, 0x0c, 0x1c, 0x8d, 0xa4, 0x5b, 0xfc, - 0xcf, 0x69, 0xc8, 0xad, 0xf2, 0x5f, 0x4f, 0x22, 0x07, 0xf2, 0x1e, 0x02, 0x88, 0xe6, 0xe3, 0xd0, - 0x18, 0xff, 0xb6, 0x50, 0xbb, 0x99, 0xd8, 0x2f, 0xb4, 0xdf, 0x65, 0xda, 0x6f, 0xe2, 0x9a, 0xd4, - 0x2e, 0x7e, 0xa4, 0x59, 0xe7, 0xd7, 0xfe, 0xba, 0xd1, 0x6e, 0xd3, 0x89, 0xff, 0x3b, 0x14, 0x55, - 0x98, 0x0e, 0xdd, 0x8a, 0x45, 0x81, 0x54, 0xa4, 0xaf, 0x86, 0x47, 0xb1, 0x08, 0xed, 0x8b, 0x4c, - 0x3b, 0xc6, 0x37, 0x12, 0xb4, 0xdb, 0x8c, 0x3d, 0x60, 0x00, 0x87, 0xd9, 0xe2, 0x0d, 0x08, 0xa0, - 0x78, 0xf1, 0x06, 0x04, 0x51, 0xba, 0x73, 0x0d, 0x18, 0x30, 0x76, 0x6a, 0xc0, 0x2b, 0x00, 0x1f, - 0x54, 0x43, 0xb1, 0x7e, 0x55, 0xae, 0x4e, 0xe1, 0x90, 0x8f, 0xe2, 0x71, 0xd1, 0x3d, 0x17, 0x52, - 0xdd, 0xed, 0x38, 0x34, 0xf4, 0x97, 0xbf, 0xca, 0x42, 0xe1, 0xb9, 0xd1, 0x31, 0x5d, 0x62, 0x1a, - 0x66, 0x8b, 0xa0, 0x97, 0x90, 0x61, 0xa5, 0x31, 0x9c, 0xe5, 0x54, 0xac, 0x29, 0x9c, 0xe5, 0x02, - 0x40, 0x0c, 0xbe, 0xc3, 0x34, 0xcf, 0xe3, 0x39, 0xa9, 0xb9, 0xe7, 0x8b, 0xaf, 0x33, 0x0c, 0x85, - 0x4e, 0xf8, 0x9f, 0x21, 0x2b, 0xe0, 0xf9, 0x90, 0xb0, 0x00, 0xb6, 0x52, 0xbb, 0x1e, 0xdf, 0x99, - 0xb4, 0xbd, 0x54, 0x55, 0x0e, 0xe3, 0xa5, 0xba, 0x5e, 0x03, 0xf8, 0x00, 0x61, 0xd8, 0xb9, 0x11, - 0x3c, 0xb1, 0xb6, 0x90, 0xcc, 0x20, 0xf4, 0x3e, 0x60, 0x7a, 0x6f, 0xe3, 0xf9, 0x38, 0xbd, 0x6d, - 0x8f, 0x9f, 0xea, 0x3e, 0x84, 0xc9, 0x75, 0xc3, 0x39, 0x46, 0xa1, 0x62, 0xa7, 0xfc, 0xe0, 0xa1, - 0x56, 0x8b, 0xeb, 0x12, 0x9a, 0x6e, 0x33, 0x4d, 0x37, 0x70, 0x35, 0x4e, 0xd3, 0xb1, 0xe1, 0xd0, - 0xea, 0x81, 0x8e, 0x21, 0xcb, 0x7f, 0x03, 0x11, 0xf6, 0x65, 0xe0, 0x77, 0x14, 0x61, 0x5f, 0x06, - 0x7f, 0x36, 0x71, 0x31, 0x4d, 0x2e, 0x4c, 0xc9, 0x1f, 0x1e, 0xa0, 0x1b, 0xa1, 0xa5, 0x09, 0xfe, - 0x48, 0xa1, 0x36, 0x9f, 0xd4, 0x2d, 0xf4, 0xdd, 0x67, 0xfa, 0x6e, 0xe1, 0xeb, 0xb1, 0x6b, 0x27, - 0xb8, 0x9f, 0x68, 0x0f, 0xdf, 0xd4, 0x68, 0x99, 0x00, 0x1f, 0x64, 0x8d, 0x44, 0x47, 0x18, 0xaf, - 0x8d, 0x44, 0x47, 0x04, 0x9f, 0xc5, 0xcb, 0x4c, 0xf9, 0x63, 0x7c, 0x3f, 0x4e, 0xb9, 0x6b, 0x1b, - 0xa6, 0xf3, 0x92, 0xd8, 0x6f, 0x70, 0x30, 0xcd, 0x39, 0xee, 0xf4, 0x69, 0xa4, 0xfc, 0x65, 0x1a, - 0x26, 0xe9, 0x79, 0x94, 0x96, 0x67, 0xff, 0x1a, 0x1f, 0xb6, 0x26, 0x02, 0x9e, 0x85, 0xad, 0x89, - 0x22, 0x00, 0xd1, 0xf2, 0xcc, 0x7e, 0x27, 0x4f, 0x18, 0x13, 0xf5, 0xba, 0x03, 0x05, 0xe5, 0xae, - 0x8f, 0x62, 0x04, 0x06, 0x91, 0xb9, 0x70, 0x5d, 0x88, 0x01, 0x0a, 0xf0, 0x4d, 0xa6, 0x73, 0x0e, - 0xcf, 0x06, 0x74, 0xb6, 0x39, 0x17, 0x55, 0xfa, 0xaf, 0x50, 0x54, 0x31, 0x01, 0x14, 0x23, 0x33, - 0x84, 0xfc, 0x85, 0x53, 0x62, 0x1c, 0xa4, 0x10, 0xcd, 0x0e, 0xde, 0xff, 0x09, 0x90, 0xac, 0x54, - 0x79, 0x1f, 0x72, 0x02, 0x28, 0x88, 0x9b, 0x6d, 0x10, 0x2a, 0x8c, 0x9b, 0x6d, 0x08, 0x65, 0x88, - 0x1e, 0xf3, 0x98, 0x56, 0x7a, 0x1f, 0x92, 0x25, 0x48, 0x68, 0x7c, 0x46, 0xdc, 0x24, 0x8d, 0x3e, - 0xf6, 0x95, 0xa4, 0x51, 0xb9, 0x8b, 0x8e, 0xd2, 0x78, 0x44, 0x5c, 0x11, 0x4b, 0xf2, 0x9e, 0x87, - 0x12, 0x04, 0xaa, 0x29, 0x1f, 0x8f, 0x62, 0x49, 0x3a, 0x95, 0xfb, 0x4a, 0x45, 0xbe, 0x47, 0x9f, - 0x02, 0xf8, 0x90, 0x46, 0xf8, 0xb4, 0x15, 0x8b, 0x8b, 0x86, 0x4f, 0x5b, 0xf1, 0xa8, 0x48, 0x34, - 0x7f, 0xf8, 0xba, 0xf9, 0xc5, 0x80, 0x6a, 0xff, 0x1f, 0x0d, 0x50, 0x14, 0x01, 0x41, 0x8f, 0xe2, - 0x35, 0xc4, 0x22, 0xae, 0xb5, 0xc7, 0x17, 0x63, 0x4e, 0x2a, 0x11, 0xbe, 0x59, 0x2d, 0x36, 0xa2, - 0xff, 0x8a, 0x1a, 0xf6, 0xb9, 0x06, 0xa5, 0x00, 0x84, 0x82, 0xee, 0x25, 0xac, 0x71, 0x08, 0xb4, - 0xad, 0xdd, 0x3f, 0x97, 0x2f, 0xe9, 0x24, 0xa6, 0xec, 0x08, 0x79, 0x10, 0xff, 0x2f, 0x0d, 0xca, - 0x41, 0xd8, 0x05, 0x25, 0xc8, 0x8f, 
0x00, 0xbf, 0xb5, 0xc5, 0xf3, 0x19, 0xcf, 0x5f, 0x2a, 0xff, - 0x6c, 0xde, 0x87, 0x9c, 0x00, 0x6b, 0xe2, 0x02, 0x22, 0x08, 0x1b, 0xc7, 0x05, 0x44, 0x08, 0xe9, - 0x49, 0x08, 0x08, 0xdb, 0xea, 0x12, 0x25, 0x04, 0x05, 0xa2, 0x93, 0xa4, 0x71, 0x74, 0x08, 0x86, - 0xe0, 0xa0, 0x51, 0x1a, 0xfd, 0x10, 0x94, 0x70, 0x0e, 0x4a, 0x10, 0x78, 0x4e, 0x08, 0x86, 0xd1, - 0xa0, 0x84, 0x10, 0x64, 0x4a, 0x95, 0x10, 0xf4, 0xc1, 0x97, 0xb8, 0x10, 0x8c, 0x20, 0xe2, 0x71, - 0x21, 0x18, 0xc5, 0x6f, 0x12, 0xd6, 0x95, 0xe9, 0x0e, 0x84, 0xe0, 0x4c, 0x0c, 0x56, 0x83, 0x1e, - 0x27, 0x38, 0x34, 0x16, 0x6c, 0xaf, 0xbd, 0x71, 0x41, 0xee, 0x91, 0x7b, 0x9f, 0x2f, 0x85, 0xdc, - 0xfb, 0xff, 0xaf, 0xc1, 0x6c, 0x1c, 0xd6, 0x83, 0x12, 0x74, 0x25, 0x00, 0xf5, 0xb5, 0xa5, 0x8b, - 0xb2, 0x9f, 0xef, 0x35, 0x2f, 0x1a, 0x9e, 0x56, 0x7e, 0xfe, 0xcd, 0xbc, 0xf6, 0xab, 0x6f, 0xe6, - 0xb5, 0xdf, 0x7e, 0x33, 0xaf, 0xfd, 0xef, 0xef, 0xe6, 0x27, 0x0e, 0xb3, 0xec, 0x7f, 0xa7, 0xbd, - 0xfd, 0xd7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x77, 0x6b, 0x63, 0x24, 0x37, 0x00, 0x00, + proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value) + proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value) + proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value) + proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value) + proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value) + proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value) + proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value) } // Reference imports to suppress errors if they are not otherwise used. @@ -6183,9 +3431,8 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// KVClient is the client API for KV service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for KV service + type KVClient interface { // Range gets the keys in the range from the key-value store. Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) @@ -6218,7 +3465,7 @@ func NewKVClient(cc *grpc.ClientConn) KVClient { func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) { out := new(RangeResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6227,7 +3474,7 @@ func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.Cal func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) { out := new(PutResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, c.cc, opts...) 
if err != nil { return nil, err } @@ -6236,7 +3483,7 @@ func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOpt func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) { out := new(DeleteRangeResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6245,7 +3492,7 @@ func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) { out := new(TxnResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6254,14 +3501,15 @@ func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOpt func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) { out := new(CompactionResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } -// KVServer is the server API for KV service. +// Server API for KV service + type KVServer interface { // Range gets the keys in the range from the key-value store. Range(context.Context, *RangeRequest) (*RangeResponse, error) @@ -6284,26 +3532,6 @@ type KVServer interface { Compact(context.Context, *CompactionRequest) (*CompactionResponse, error) } -// UnimplementedKVServer can be embedded to have forward compatible implementations. -type UnimplementedKVServer struct { -} - -func (*UnimplementedKVServer) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Range not implemented") -} -func (*UnimplementedKVServer) Put(ctx context.Context, req *PutRequest) (*PutResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Put not implemented") -} -func (*UnimplementedKVServer) DeleteRange(ctx context.Context, req *DeleteRangeRequest) (*DeleteRangeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteRange not implemented") -} -func (*UnimplementedKVServer) Txn(ctx context.Context, req *TxnRequest) (*TxnResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Txn not implemented") -} -func (*UnimplementedKVServer) Compact(ctx context.Context, req *CompactionRequest) (*CompactionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Compact not implemented") -} - func RegisterKVServer(s *grpc.Server, srv KVServer) { s.RegisterService(&_KV_serviceDesc, srv) } @@ -6427,9 +3655,8 @@ var _KV_serviceDesc = grpc.ServiceDesc{ Metadata: "rpc.proto", } -// WatchClient is the client API for Watch service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for Watch service + type WatchClient interface { // Watch watches for events happening or that have happened. 
Both input and output // are streams; the input stream is for creating and canceling watchers and the output @@ -6448,7 +3675,7 @@ func NewWatchClient(cc *grpc.ClientConn) WatchClient { } func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &_Watch_serviceDesc.Streams[0], "/etcdserverpb.Watch/Watch", opts...) + stream, err := grpc.NewClientStream(ctx, &_Watch_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Watch/Watch", opts...) if err != nil { return nil, err } @@ -6478,7 +3705,8 @@ func (x *watchWatchClient) Recv() (*WatchResponse, error) { return m, nil } -// WatchServer is the server API for Watch service. +// Server API for Watch service + type WatchServer interface { // Watch watches for events happening or that have happened. Both input and output // are streams; the input stream is for creating and canceling watchers and the output @@ -6488,14 +3716,6 @@ type WatchServer interface { Watch(Watch_WatchServer) error } -// UnimplementedWatchServer can be embedded to have forward compatible implementations. -type UnimplementedWatchServer struct { -} - -func (*UnimplementedWatchServer) Watch(srv Watch_WatchServer) error { - return status.Errorf(codes.Unimplemented, "method Watch not implemented") -} - func RegisterWatchServer(s *grpc.Server, srv WatchServer) { s.RegisterService(&_Watch_serviceDesc, srv) } @@ -6541,9 +3761,8 @@ var _Watch_serviceDesc = grpc.ServiceDesc{ Metadata: "rpc.proto", } -// LeaseClient is the client API for Lease service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for Lease service + type LeaseClient interface { // LeaseGrant creates a lease which expires if the server does not receive a keepAlive // within a given time to live period. All keys attached to the lease will be expired and @@ -6570,7 +3789,7 @@ func NewLeaseClient(cc *grpc.ClientConn) LeaseClient { func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) { out := new(LeaseGrantResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6579,7 +3798,7 @@ func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opt func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) { out := new(LeaseRevokeResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6587,7 +3806,7 @@ func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, o } func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lease_serviceDesc.Streams[0], "/etcdserverpb.Lease/LeaseKeepAlive", opts...) + stream, err := grpc.NewClientStream(ctx, &_Lease_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Lease/LeaseKeepAlive", opts...) 
if err != nil { return nil, err } @@ -6619,7 +3838,7 @@ func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) { func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) { out := new(LeaseTimeToLiveResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6628,14 +3847,15 @@ func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRe func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) { out := new(LeaseLeasesResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } -// LeaseServer is the server API for Lease service. +// Server API for Lease service + type LeaseServer interface { // LeaseGrant creates a lease which expires if the server does not receive a keepAlive // within a given time to live period. All keys attached to the lease will be expired and @@ -6652,26 +3872,6 @@ type LeaseServer interface { LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error) } -// UnimplementedLeaseServer can be embedded to have forward compatible implementations. -type UnimplementedLeaseServer struct { -} - -func (*UnimplementedLeaseServer) LeaseGrant(ctx context.Context, req *LeaseGrantRequest) (*LeaseGrantResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LeaseGrant not implemented") -} -func (*UnimplementedLeaseServer) LeaseRevoke(ctx context.Context, req *LeaseRevokeRequest) (*LeaseRevokeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LeaseRevoke not implemented") -} -func (*UnimplementedLeaseServer) LeaseKeepAlive(srv Lease_LeaseKeepAliveServer) error { - return status.Errorf(codes.Unimplemented, "method LeaseKeepAlive not implemented") -} -func (*UnimplementedLeaseServer) LeaseTimeToLive(ctx context.Context, req *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LeaseTimeToLive not implemented") -} -func (*UnimplementedLeaseServer) LeaseLeases(ctx context.Context, req *LeaseLeasesRequest) (*LeaseLeasesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method LeaseLeases not implemented") -} - func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) { s.RegisterService(&_Lease_serviceDesc, srv) } @@ -6806,9 +4006,8 @@ var _Lease_serviceDesc = grpc.ServiceDesc{ Metadata: "rpc.proto", } -// ClusterClient is the client API for Cluster service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for Cluster service + type ClusterClient interface { // MemberAdd adds a member into the cluster. MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) @@ -6818,6 +4017,8 @@ type ClusterClient interface { MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) // MemberList lists all the members in the cluster. 
MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) } type clusterClient struct { @@ -6830,7 +4031,7 @@ func NewClusterClient(cc *grpc.ClientConn) ClusterClient { func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) { out := new(MemberAddResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6839,7 +4040,7 @@ func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opt func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) { out := new(MemberRemoveResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6848,7 +4049,7 @@ func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveReques func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) { out := new(MemberUpdateResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -6857,14 +4058,24 @@ func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateReques func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) { out := new(MemberListResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterClient) MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) { + out := new(MemberPromoteResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberPromote", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } -// ClusterServer is the server API for Cluster service. +// Server API for Cluster service + type ClusterServer interface { // MemberAdd adds a member into the cluster. MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error) @@ -6874,23 +4085,8 @@ type ClusterServer interface { MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error) // MemberList lists all the members in the cluster. MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error) -} - -// UnimplementedClusterServer can be embedded to have forward compatible implementations. 
-type UnimplementedClusterServer struct { -} - -func (*UnimplementedClusterServer) MemberAdd(ctx context.Context, req *MemberAddRequest) (*MemberAddResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MemberAdd not implemented") -} -func (*UnimplementedClusterServer) MemberRemove(ctx context.Context, req *MemberRemoveRequest) (*MemberRemoveResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MemberRemove not implemented") -} -func (*UnimplementedClusterServer) MemberUpdate(ctx context.Context, req *MemberUpdateRequest) (*MemberUpdateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MemberUpdate not implemented") -} -func (*UnimplementedClusterServer) MemberList(ctx context.Context, req *MemberListRequest) (*MemberListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MemberList not implemented") + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(context.Context, *MemberPromoteRequest) (*MemberPromoteResponse, error) } func RegisterClusterServer(s *grpc.Server, srv ClusterServer) { @@ -6969,6 +4165,24 @@ func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } +func _Cluster_MemberPromote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MemberPromoteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterServer).MemberPromote(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Cluster/MemberPromote", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterServer).MemberPromote(ctx, req.(*MemberPromoteRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Cluster_serviceDesc = grpc.ServiceDesc{ ServiceName: "etcdserverpb.Cluster", HandlerType: (*ClusterServer)(nil), @@ -6989,14 +4203,17 @@ var _Cluster_serviceDesc = grpc.ServiceDesc{ MethodName: "MemberList", Handler: _Cluster_MemberList_Handler, }, + { + MethodName: "MemberPromote", + Handler: _Cluster_MemberPromote_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "rpc.proto", } -// MaintenanceClient is the client API for Maintenance service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for Maintenance service + type MaintenanceClient interface { // Alarm activates, deactivates, and queries alarms regarding cluster health. Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) @@ -7004,11 +4221,15 @@ type MaintenanceClient interface { Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) // Defragment defragments a member's backend database to recover storage space. Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) - // Hash computes the hash of the KV's backend. - // This is designed for testing; do not use this in production when there - // are ongoing transactions. + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! 
+ // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) @@ -7026,7 +4247,7 @@ func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient { func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) { out := new(AlarmResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7035,7 +4256,7 @@ func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts .. func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { out := new(StatusResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7044,7 +4265,7 @@ func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) { out := new(DefragmentResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7053,7 +4274,7 @@ func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentReques func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) { out := new(HashResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7062,7 +4283,7 @@ func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...g func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) { out := new(HashKVResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7070,7 +4291,7 @@ func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts } func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) { - stream, err := c.cc.NewStream(ctx, &_Maintenance_serviceDesc.Streams[0], "/etcdserverpb.Maintenance/Snapshot", opts...) + stream, err := grpc.NewClientStream(ctx, &_Maintenance_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Maintenance/Snapshot", opts...) 
if err != nil { return nil, err } @@ -7103,14 +4324,15 @@ func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) { func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) { out := new(MoveLeaderResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } -// MaintenanceServer is the server API for Maintenance service. +// Server API for Maintenance service + type MaintenanceServer interface { // Alarm activates, deactivates, and queries alarms regarding cluster health. Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error) @@ -7118,11 +4340,15 @@ type MaintenanceServer interface { Status(context.Context, *StatusRequest) (*StatusResponse, error) // Defragment defragments a member's backend database to recover storage space. Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error) - // Hash computes the hash of the KV's backend. - // This is designed for testing; do not use this in production when there - // are ongoing transactions. + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! + // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. Hash(context.Context, *HashRequest) (*HashResponse, error) // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error) // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error @@ -7130,32 +4356,6 @@ type MaintenanceServer interface { MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error) } -// UnimplementedMaintenanceServer can be embedded to have forward compatible implementations. 
-type UnimplementedMaintenanceServer struct { -} - -func (*UnimplementedMaintenanceServer) Alarm(ctx context.Context, req *AlarmRequest) (*AlarmResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Alarm not implemented") -} -func (*UnimplementedMaintenanceServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") -} -func (*UnimplementedMaintenanceServer) Defragment(ctx context.Context, req *DefragmentRequest) (*DefragmentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Defragment not implemented") -} -func (*UnimplementedMaintenanceServer) Hash(ctx context.Context, req *HashRequest) (*HashResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Hash not implemented") -} -func (*UnimplementedMaintenanceServer) HashKV(ctx context.Context, req *HashKVRequest) (*HashKVResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method HashKV not implemented") -} -func (*UnimplementedMaintenanceServer) Snapshot(req *SnapshotRequest, srv Maintenance_SnapshotServer) error { - return status.Errorf(codes.Unimplemented, "method Snapshot not implemented") -} -func (*UnimplementedMaintenanceServer) MoveLeader(ctx context.Context, req *MoveLeaderRequest) (*MoveLeaderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method MoveLeader not implemented") -} - func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) { s.RegisterService(&_Maintenance_serviceDesc, srv) } @@ -7328,9 +4528,8 @@ var _Maintenance_serviceDesc = grpc.ServiceDesc{ Metadata: "rpc.proto", } -// AuthClient is the client API for Auth service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// Client API for Auth service + type AuthClient interface { // AuthEnable enables authentication. AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) @@ -7338,7 +4537,7 @@ type AuthClient interface { AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) // Authenticate processes an authenticate request. Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) - // UserAdd adds a new user. + // UserAdd adds a new user. User name cannot be empty. UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) // UserGet gets detailed user information. UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) @@ -7352,7 +4551,7 @@ type AuthClient interface { UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) // UserRevokeRole revokes a role of specified user. UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) - // RoleAdd adds a new role. + // RoleAdd adds a new role. Role name cannot be empty. RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) // RoleGet gets detailed role information. 
RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) @@ -7376,7 +4575,7 @@ func NewAuthClient(cc *grpc.ClientConn) AuthClient { func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) { out := new(AuthEnableResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7385,7 +4584,7 @@ func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) { out := new(AuthDisableResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7394,7 +4593,7 @@ func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, op func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) { out := new(AuthenticateResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7403,7 +4602,7 @@ func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) { out := new(AuthUserAddResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7412,7 +4611,7 @@ func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts . func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) { out := new(AuthUserGetResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7421,7 +4620,7 @@ func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts . func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) { out := new(AuthUserListResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7430,7 +4629,7 @@ func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) { out := new(AuthUserDeleteResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, c.cc, opts...) 
if err != nil { return nil, err } @@ -7439,7 +4638,7 @@ func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) { out := new(AuthUserChangePasswordResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7448,7 +4647,7 @@ func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangeP func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) { out := new(AuthUserGrantRoleResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7457,7 +4656,7 @@ func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleReq func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) { out := new(AuthUserRevokeRoleResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7466,7 +4665,7 @@ func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleR func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) { out := new(AuthRoleAddResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7475,7 +4674,7 @@ func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts . func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) { out := new(AuthRoleGetResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7484,7 +4683,7 @@ func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts . func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) { out := new(AuthRoleListResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7493,7 +4692,7 @@ func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) { out := new(AuthRoleDeleteResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, c.cc, opts...) 
if err != nil { return nil, err } @@ -7502,7 +4701,7 @@ func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) { out := new(AuthRoleGrantPermissionResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, c.cc, opts...) if err != nil { return nil, err } @@ -7511,14 +4710,15 @@ func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantP func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) { out := new(AuthRoleRevokePermissionResponse) - err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, opts...) + err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } -// AuthServer is the server API for Auth service. +// Server API for Auth service + type AuthServer interface { // AuthEnable enables authentication. AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error) @@ -7526,7 +4726,7 @@ type AuthServer interface { AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error) // Authenticate processes an authenticate request. Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error) - // UserAdd adds a new user. + // UserAdd adds a new user. User name cannot be empty. UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error) // UserGet gets detailed user information. UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error) @@ -7540,7 +4740,7 @@ type AuthServer interface { UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) // UserRevokeRole revokes a role of specified user. UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) - // RoleAdd adds a new role. + // RoleAdd adds a new role. Role name cannot be empty. RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error) // RoleGet gets detailed role information. RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error) @@ -7554,59 +4754,6 @@ type AuthServer interface { RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) } -// UnimplementedAuthServer can be embedded to have forward compatible implementations. 
-type UnimplementedAuthServer struct { -} - -func (*UnimplementedAuthServer) AuthEnable(ctx context.Context, req *AuthEnableRequest) (*AuthEnableResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AuthEnable not implemented") -} -func (*UnimplementedAuthServer) AuthDisable(ctx context.Context, req *AuthDisableRequest) (*AuthDisableResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AuthDisable not implemented") -} -func (*UnimplementedAuthServer) Authenticate(ctx context.Context, req *AuthenticateRequest) (*AuthenticateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Authenticate not implemented") -} -func (*UnimplementedAuthServer) UserAdd(ctx context.Context, req *AuthUserAddRequest) (*AuthUserAddResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserAdd not implemented") -} -func (*UnimplementedAuthServer) UserGet(ctx context.Context, req *AuthUserGetRequest) (*AuthUserGetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserGet not implemented") -} -func (*UnimplementedAuthServer) UserList(ctx context.Context, req *AuthUserListRequest) (*AuthUserListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserList not implemented") -} -func (*UnimplementedAuthServer) UserDelete(ctx context.Context, req *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserDelete not implemented") -} -func (*UnimplementedAuthServer) UserChangePassword(ctx context.Context, req *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserChangePassword not implemented") -} -func (*UnimplementedAuthServer) UserGrantRole(ctx context.Context, req *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserGrantRole not implemented") -} -func (*UnimplementedAuthServer) UserRevokeRole(ctx context.Context, req *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UserRevokeRole not implemented") -} -func (*UnimplementedAuthServer) RoleAdd(ctx context.Context, req *AuthRoleAddRequest) (*AuthRoleAddResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleAdd not implemented") -} -func (*UnimplementedAuthServer) RoleGet(ctx context.Context, req *AuthRoleGetRequest) (*AuthRoleGetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleGet not implemented") -} -func (*UnimplementedAuthServer) RoleList(ctx context.Context, req *AuthRoleListRequest) (*AuthRoleListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleList not implemented") -} -func (*UnimplementedAuthServer) RoleDelete(ctx context.Context, req *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleDelete not implemented") -} -func (*UnimplementedAuthServer) RoleGrantPermission(ctx context.Context, req *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleGrantPermission not implemented") -} -func (*UnimplementedAuthServer) RoleRevokePermission(ctx context.Context, req *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RoleRevokePermission 
not implemented") -} - func RegisterAuthServer(s *grpc.Server, srv AuthServer) { s.RegisterService(&_Auth_serviceDesc, srv) } @@ -7975,7 +5122,7 @@ var _Auth_serviceDesc = grpc.ServiceDesc{ func (m *ResponseHeader) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -7983,46 +5130,37 @@ func (m *ResponseHeader) Marshal() (dAtA []byte, err error) { } func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.ClusterId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId)) } - if m.RaftTerm != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) - i-- - dAtA[i] = 0x20 + if m.MemberId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberId)) } if m.Revision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - i-- dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) } - if m.MemberId != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MemberId)) - i-- - dAtA[i] = 0x10 - } - if m.ClusterId != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId)) - i-- - dAtA[i] = 0x8 + if m.RaftTerm != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) } - return len(dAtA) - i, nil + return i, nil } func (m *RangeRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8030,110 +5168,99 @@ func (m *RangeRequest) Marshal() (dAtA []byte, err error) { } func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if m.MaxCreateRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision)) - i-- - dAtA[i] = 0x68 + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) } - if m.MinCreateRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision)) - i-- - dAtA[i] = 0x60 + if m.Limit != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) } - if m.MaxModRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision)) - i-- - dAtA[i] = 0x58 + if m.Revision != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) } - if m.MinModRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision)) - i-- - dAtA[i] = 0x50 + if m.SortOrder != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder)) } - if m.CountOnly { - i-- - if m.CountOnly { + if m.SortTarget != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget)) + } + if m.Serializable { + dAtA[i] = 0x38 + i++ + if m.Serializable { dAtA[i] = 1 } else { 
dAtA[i] = 0 } - i-- - dAtA[i] = 0x48 + i++ } if m.KeysOnly { - i-- + dAtA[i] = 0x40 + i++ if m.KeysOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x40 + i++ } - if m.Serializable { - i-- - if m.Serializable { + if m.CountOnly { + dAtA[i] = 0x48 + i++ + if m.CountOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x38 - } - if m.SortTarget != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget)) - i-- - dAtA[i] = 0x30 - } - if m.SortOrder != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder)) - i-- - dAtA[i] = 0x28 + i++ } - if m.Revision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x20 + if m.MinModRevision != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision)) } - if m.Limit != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x18 + if m.MaxModRevision != 0 { + dAtA[i] = 0x58 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision)) } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x12 + if m.MinCreateRevision != 0 { + dAtA[i] = 0x60 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision)) } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa + if m.MaxCreateRevision != 0 { + dAtA[i] = 0x68 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision)) } - return len(dAtA) - i, nil + return i, nil } func (m *RangeResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8141,67 +5268,54 @@ func (m *RangeResponse) Marshal() (dAtA []byte, err error) { } func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n1, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 } - if m.Count != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x20 + if len(m.Kvs) > 0 { + for _, msg := range m.Kvs { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } if m.More { - i-- + dAtA[i] = 0x18 + i++ if m.More { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 - } - if len(m.Kvs) > 0 { - for iNdEx := len(m.Kvs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Kvs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + i++ } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if m.Count != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Count)) } - return len(dAtA) - i, nil + return i, nil } func (m *PutRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8209,75 +5323,64 @@ func (m *PutRequest) Marshal() (dAtA []byte, err error) { } func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PutRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if m.IgnoreLease { - i-- - if m.IgnoreLease { + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.Lease != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) + } + if m.PrevKv { + dAtA[i] = 0x20 + i++ + if m.PrevKv { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x30 + i++ } if m.IgnoreValue { - i-- + dAtA[i] = 0x28 + i++ if m.IgnoreValue { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x28 + i++ } - if m.PrevKv { - i-- - if m.PrevKv { + if m.IgnoreLease { + dAtA[i] = 0x30 + i++ + if m.IgnoreLease { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x20 - } - if m.Lease != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) - i-- - dAtA[i] = 0x18 - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa + i++ } - return len(dAtA) - i, nil + return i, nil } func (m *PutResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8285,50 +5388,37 @@ func (m *PutResponse) Marshal() (dAtA []byte, err error) { } func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PutResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n2, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 } if m.PrevKv != nil { - { - size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.PrevKv.Size())) + n3, err := m.PrevKv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n3 } - return len(dAtA) - i, nil + return i, nil } func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8336,50 +5426,39 @@ func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) { } 
func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) } if m.PrevKv { - i-- + dAtA[i] = 0x18 + i++ if m.PrevKv { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 - } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa + i++ } - return len(dAtA) - i, nil + return i, nil } func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8387,57 +5466,44 @@ func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) { } func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteRangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.PrevKvs) > 0 { - for iNdEx := len(m.PrevKvs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.PrevKvs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n4, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n4 } if m.Deleted != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Deleted)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Deleted)) } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if len(m.PrevKvs) > 0 { + for _, msg := range m.PrevKvs { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *RequestOp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8445,115 +5511,80 @@ func (m *RequestOp) Marshal() (dAtA []byte, err error) { } func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Request != nil { - { - size := m.Request.Size() - i -= size - if _, 
err := m.Request.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } + nn5, err := m.Request.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += nn5 } - return len(dAtA) - i, nil + return i, nil } func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *RequestOp_RequestRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.RequestRange != nil { - { - size, err := m.RequestRange.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestRange.Size())) + n6, err := m.RequestRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 } - return len(dAtA) - i, nil + return i, nil } func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *RequestOp_RequestPut) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.RequestPut != nil { - { - size, err := m.RequestPut.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestPut.Size())) + n7, err := m.RequestPut.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 } - return len(dAtA) - i, nil + return i, nil } func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *RequestOp_RequestDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.RequestDeleteRange != nil { - { - size, err := m.RequestDeleteRange.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestDeleteRange.Size())) + n8, err := m.RequestDeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 } - return len(dAtA) - i, nil + return i, nil } func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *RequestOp_RequestTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.RequestTxn != nil { - { - size, err := m.RequestTxn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestTxn.Size())) + n9, err := m.RequestTxn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseOp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8561,115 +5592,80 @@ func (m *ResponseOp) Marshal() (dAtA []byte, err error) { } func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Response != nil { - { - size := 
m.Response.Size() - i -= size - if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } + nn10, err := m.Response.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += nn10 } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *ResponseOp_ResponseRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.ResponseRange != nil { - { - size, err := m.ResponseRange.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseRange.Size())) + n11, err := m.ResponseRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *ResponseOp_ResponsePut) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.ResponsePut != nil { - { - size, err := m.ResponsePut.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponsePut.Size())) + n12, err := m.ResponsePut.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *ResponseOp_ResponseDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.ResponseDeleteRange != nil { - { - size, err := m.ResponseDeleteRange.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseDeleteRange.Size())) + n13, err := m.ResponseDeleteRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 } - return len(dAtA) - i, nil + return i, nil } func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *ResponseOp_ResponseTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.ResponseTxn != nil { - { - size, err := m.ResponseTxn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseTxn.Size())) + n14, err := m.ResponseTxn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 } - return len(dAtA) - i, nil + return i, nil } func (m *Compare) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8677,120 +5673,86 @@ func (m *Compare) Marshal() (dAtA []byte, err error) { } func (m *Compare) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Compare) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], 
m.XXX_unrecognized) - } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x4 - i-- - dAtA[i] = 0x82 + if m.Result != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Result)) } - if m.TargetUnion != nil { - { - size := m.TargetUnion.Size() - i -= size - if _, err := m.TargetUnion.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } + if m.Target != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Target)) } if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if m.Target != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Target)) - i-- - dAtA[i] = 0x10 + if m.TargetUnion != nil { + nn15, err := m.TargetUnion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn15 } - if m.Result != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Result)) - i-- - dAtA[i] = 0x8 + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x4 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) } - return len(dAtA) - i, nil + return i, nil } func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *Compare_Version) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintRpc(dAtA, i, uint64(m.Version)) - i-- + i := 0 dAtA[i] = 0x20 - return len(dAtA) - i, nil + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Version)) + return i, nil } func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *Compare_CreateRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision)) - i-- + i := 0 dAtA[i] = 0x28 - return len(dAtA) - i, nil + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision)) + return i, nil } func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *Compare_ModRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision)) - i-- + i := 0 dAtA[i] = 0x30 - return len(dAtA) - i, nil + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision)) + return i, nil } func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *Compare_Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.Value != nil { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) - i-- dAtA[i] = 0x3a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) } - return len(dAtA) - i, nil + return i, nil } func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *Compare_Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) - i-- + i := 0 dAtA[i] = 0x40 - return len(dAtA) - i, nil + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) + return i, nil } func (m *TxnRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + 
n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8798,68 +5760,53 @@ func (m *TxnRequest) Marshal() (dAtA []byte, err error) { } func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TxnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Failure) > 0 { - for iNdEx := len(m.Failure) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Failure[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + if len(m.Compare) > 0 { + for _, msg := range m.Compare { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n } } if len(m.Success) > 0 { - for iNdEx := len(m.Success) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Success[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Success { dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if len(m.Compare) > 0 { - for iNdEx := len(m.Compare) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Compare[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + if len(m.Failure) > 0 { + for _, msg := range m.Failure { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *TxnResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8867,62 +5814,49 @@ func (m *TxnResponse) Marshal() (dAtA []byte, err error) { } func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TxnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Responses) > 0 { - for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n16, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n16 } if m.Succeeded { - i-- + dAtA[i] = 0x10 + i++ if m.Succeeded { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x10 + i++ } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Responses) > 0 { + for _, msg := range m.Responses { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, 
uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *CompactionRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8930,41 +5864,32 @@ func (m *CompactionRequest) Marshal() (dAtA []byte, err error) { } func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CompactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Revision != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) } if m.Physical { - i-- + dAtA[i] = 0x10 + i++ if m.Physical { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x10 - } - if m.Revision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x8 + i++ } - return len(dAtA) - i, nil + return i, nil } func (m *CompactionResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -8972,38 +5897,27 @@ func (m *CompactionResponse) Marshal() (dAtA []byte, err error) { } func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CompactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n17, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 } - return len(dAtA) - i, nil + return i, nil } func (m *HashRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9011,26 +5925,17 @@ func (m *HashRequest) Marshal() (dAtA []byte, err error) { } func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *HashKVRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9038,31 +5943,22 @@ func (m *HashKVRequest) Marshal() (dAtA []byte, err error) { } func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HashKVRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - 
copy(dAtA[i:], m.XXX_unrecognized) - } if m.Revision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Revision)) } - return len(dAtA) - i, nil + return i, nil } func (m *HashKVResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9070,48 +5966,37 @@ func (m *HashKVResponse) Marshal() (dAtA []byte, err error) { } func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HashKVResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.CompactRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) - i-- - dAtA[i] = 0x18 + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n18, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 } if m.Hash != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if m.CompactRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) } - return len(dAtA) - i, nil + return i, nil } func (m *HashResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9119,43 +6004,32 @@ func (m *HashResponse) Marshal() (dAtA []byte, err error) { } func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n19, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 } if m.Hash != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Hash)) } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9163,26 +6037,17 @@ func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) { } func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil 
{ - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9190,50 +6055,38 @@ func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) { } func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Blob) > 0 { - i -= len(m.Blob) - copy(dAtA[i:], m.Blob) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob))) - i-- - dAtA[i] = 0x1a + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n20, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 } if m.RemainingBytes != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes)) } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if len(m.Blob) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob))) + i += copy(dAtA[i:], m.Blob) } - return len(dAtA) - i, nil + return i, nil } func (m *WatchRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9241,95 +6094,66 @@ func (m *WatchRequest) Marshal() (dAtA []byte, err error) { } func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.RequestUnion != nil { - { - size := m.RequestUnion.Size() - i -= size - if _, err := m.RequestUnion.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } + nn21, err := m.RequestUnion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += nn21 } - return len(dAtA) - i, nil + return i, nil } func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *WatchRequest_CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.CreateRequest != nil { - { - size, err := m.CreateRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CreateRequest.Size())) + n22, err := m.CreateRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 } - return len(dAtA) - i, nil + return i, nil } func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *WatchRequest_CancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i 
:= 0 if m.CancelRequest != nil { - { - size, err := m.CancelRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CancelRequest.Size())) + n23, err := m.CancelRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 } - return len(dAtA) - i, nil + return i, nil } func (m *WatchRequest_ProgressRequest) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *WatchRequest_ProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + i := 0 if m.ProgressRequest != nil { - { - size, err := m.ProgressRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ProgressRequest.Size())) + n24, err := m.ProgressRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 } - return len(dAtA) - i, nil + return i, nil } func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9337,98 +6161,86 @@ func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) { } func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if m.Fragment { - i-- - if m.Fragment { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) } - if m.WatchId != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) - i-- - dAtA[i] = 0x38 + if m.StartRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision)) } - if m.PrevKv { - i-- - if m.PrevKv { + if m.ProgressNotify { + dAtA[i] = 0x20 + i++ + if m.ProgressNotify { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x30 + i++ } if len(m.Filters) > 0 { - dAtA22 := make([]byte, len(m.Filters)*10) - var j21 int + dAtA26 := make([]byte, len(m.Filters)*10) + var j25 int for _, num := range m.Filters { for num >= 1<<7 { - dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80) + dAtA26[j25] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j21++ + j25++ } - dAtA22[j21] = uint8(num) - j21++ + dAtA26[j25] = uint8(num) + j25++ } - i -= j21 - copy(dAtA[i:], dAtA22[:j21]) - i = encodeVarintRpc(dAtA, i, uint64(j21)) - i-- dAtA[i] = 0x2a + i++ + i = encodeVarintRpc(dAtA, i, uint64(j25)) + i += copy(dAtA[i:], dAtA26[:j25]) } - if m.ProgressNotify { - i-- - if m.ProgressNotify { + if m.PrevKv { + dAtA[i] = 0x30 + i++ + if m.PrevKv { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x20 - } - if m.StartRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision)) - i-- - dAtA[i] = 0x18 + i++ } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) 
- i-- - dAtA[i] = 0x12 + if m.WatchId != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa + if m.Fragment { + dAtA[i] = 0x40 + i++ + if m.Fragment { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ } - return len(dAtA) - i, nil + return i, nil } func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9436,31 +6248,22 @@ func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) { } func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchCancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.WatchId != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) } - return len(dAtA) - i, nil + return i, nil } func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9468,26 +6271,17 @@ func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) { } func (m *WatchProgressRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *WatchResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9495,99 +6289,85 @@ func (m *WatchResponse) Marshal() (dAtA []byte, err error) { } func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n27, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n27 } - if m.Fragment { - i-- - if m.Fragment { + if m.WatchId != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) + } + if m.Created { + dAtA[i] = 0x18 + i++ + if m.Created { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x38 - } - if len(m.CancelReason) > 0 { - i -= len(m.CancelReason) - copy(dAtA[i:], m.CancelReason) - i = encodeVarintRpc(dAtA, i, 
uint64(len(m.CancelReason))) - i-- - dAtA[i] = 0x32 - } - if m.CompactRevision != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) - i-- - dAtA[i] = 0x28 + i++ } if m.Canceled { - i-- + dAtA[i] = 0x20 + i++ if m.Canceled { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x20 + i++ } - if m.Created { - i-- - if m.Created { + if m.CompactRevision != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) + } + if len(m.CancelReason) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason))) + i += copy(dAtA[i:], m.CancelReason) + } + if m.Fragment { + dAtA[i] = 0x38 + i++ + if m.Fragment { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 + i++ } - if m.WatchId != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.WatchId)) - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0x5a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9595,36 +6375,27 @@ func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) { } func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseGrantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.TTL != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) } if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } - if m.TTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9632,55 +6403,43 @@ func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) { } func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseGrantResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n28, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 } - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x22 + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } if m.TTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - i-- dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) } - if m.ID != 0 { - 
i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if len(m.Error) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9688,31 +6447,22 @@ func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) { } func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseRevokeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9720,161 +6470,207 @@ func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) { } func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseRevokeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n29, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 } - return len(dAtA) - i, nil + return i, nil } -func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) { +func (m *LeaseCheckpoint) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseKeepAliveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LeaseCheckpoint) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.Remaining_TTL != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Remaining_TTL)) } - return len(dAtA) - i, nil + return i, nil } -func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) { +func (m 
*LeaseCheckpointRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseKeepAliveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LeaseCheckpointRequest) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.TTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - i-- - dAtA[i] = 0x18 - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x10 - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Checkpoints) > 0 { + for _, msg := range m.Checkpoints { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } -func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) { +func (m *LeaseCheckpointResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseTimeToLiveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LeaseCheckpointResponse) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n30, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + return i, nil +} + +func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n31, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + if m.TTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) + } + return i, nil +} + +func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } if m.Keys { - i-- + dAtA[i] = 0x10 + i++ if m.Keys { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x10 - } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 + i++ } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9882,62 +6678,50 @@ func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) { } func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseTimeToLiveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Keys) > 0 { - for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Keys[iNdEx]) - copy(dAtA[i:], m.Keys[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Keys[iNdEx]))) - i-- - dAtA[i] = 0x2a + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n32, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n32 } - if m.GrantedTTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL)) - i-- - dAtA[i] = 0x20 + if m.ID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } if m.TTL != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) - i-- dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TTL)) } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x10 + if m.GrantedTTL != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL)) } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + if len(m.Keys) > 0 { + for _, b := range m.Keys { + dAtA[i] = 0x2a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(b))) + i += copy(dAtA[i:], b) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9945,26 +6729,17 @@ func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) { } func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -9972,31 
+6747,22 @@ func (m *LeaseStatus) Marshal() (dAtA []byte, err error) { } func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10004,52 +6770,39 @@ func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) { } func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n33, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 } if len(m.Leases) > 0 { - for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Leases { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *Member) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10057,56 +6810,68 @@ func (m *Member) Marshal() (dAtA []byte, err error) { } func (m *Member) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } - if len(m.ClientURLs) > 0 { - for iNdEx := len(m.ClientURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ClientURLs[iNdEx]) - copy(dAtA[i:], m.ClientURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientURLs[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + if len(m.Name) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if len(m.PeerURLs) > 0 { - for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PeerURLs[iNdEx]) - copy(dAtA[i:], m.PeerURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx]))) - i-- + for _, s := range m.PeerURLs { dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = 
uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 + if len(m.ClientURLs) > 0 { + for _, s := range m.ClientURLs { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 + if m.IsLearner { + dAtA[i] = 0x28 + i++ + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ } - return len(dAtA) - i, nil + return i, nil } func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10114,35 +6879,42 @@ func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) { } func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.PeerURLs) > 0 { - for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PeerURLs[iNdEx]) - copy(dAtA[i:], m.PeerURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx]))) - i-- + for _, s := range m.PeerURLs { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.IsLearner { + dAtA[i] = 0x10 + i++ + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i++ } - return len(dAtA) - i, nil + return i, nil } func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10150,64 +6922,49 @@ func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) { } func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Members) > 0 { - for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n34, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n34 } if m.Member != nil { - { - size, err := m.Member.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Member.Size())) + n35, err := m.Member.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 } - if m.Header != nil { - { - size, err := 
m.Header.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10215,31 +6972,22 @@ func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) { } func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberRemoveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) } - return len(dAtA) - i, nil + return i, nil } func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10247,52 +6995,39 @@ func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) { } func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberRemoveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n36, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 } if len(m.Members) > 0 { - for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Members { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10300,40 +7035,37 @@ func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) { } func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, 
uint64(m.ID)) } if len(m.PeerURLs) > 0 { - for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PeerURLs[iNdEx]) - copy(dAtA[i:], m.PeerURLs[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx]))) - i-- + for _, s := range m.PeerURLs { dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if m.ID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + return i, nil } func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10341,52 +7073,39 @@ func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) { } func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberUpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n37, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 } if len(m.Members) > 0 { - for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Members { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *MemberListRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10394,26 +7113,17 @@ func (m *MemberListRequest) Marshal() (dAtA []byte, err error) { } func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *MemberListResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10421,118 +7131,148 @@ func (m *MemberListResponse) Marshal() (dAtA []byte, err error) { } func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemberListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 
0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n38, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 } if len(m.Members) > 0 { - for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Members { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } -func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) { +func (m *MemberPromoteRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *MemberPromoteRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ID)) + } + return i, nil +} + +func (m *MemberPromoteResponse) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *DefragmentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *MemberPromoteResponse) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n39, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - return len(dAtA) - i, nil + return i, nil } -func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) { +func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *DefragmentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := 
m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n40, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 } - return len(dAtA) - i, nil + return i, nil } func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10540,31 +7280,22 @@ func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) { } func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MoveLeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.TargetID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.TargetID)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TargetID)) } - return len(dAtA) - i, nil + return i, nil } func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10572,38 +7303,27 @@ func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) { } func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MoveLeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n41, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 } - return len(dAtA) - i, nil + return i, nil } func (m *AlarmRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10611,41 +7331,32 @@ func (m *AlarmRequest) Marshal() (dAtA []byte, err error) { } func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AlarmRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Alarm != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) - i-- - dAtA[i] = 0x18 + if m.Action != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Action)) } if m.MemberID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) } - if m.Action != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Action)) - i-- - dAtA[i] = 0x8 + if m.Alarm != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) } - return len(dAtA) - i, nil + return i, nil } 
func (m *AlarmMember) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10653,36 +7364,27 @@ func (m *AlarmMember) Marshal() (dAtA []byte, err error) { } func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AlarmMember) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.MemberID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) } if m.Alarm != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Alarm)) } - if m.MemberID != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.MemberID)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + return i, nil } func (m *AlarmResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10690,52 +7392,39 @@ func (m *AlarmResponse) Marshal() (dAtA []byte, err error) { } func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AlarmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n42, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 } if len(m.Alarms) > 0 { - for iNdEx := len(m.Alarms) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Alarms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Alarms { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *StatusRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10743,26 +7432,17 @@ func (m *StatusRequest) Marshal() (dAtA []byte, err error) { } func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *StatusResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10770,65 +7450,88 @@ func (m *StatusResponse) Marshal() (dAtA []byte, err 
error) { } func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n43, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 } - if m.RaftTerm != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) - i-- - dAtA[i] = 0x30 + if len(m.Version) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) } - if m.RaftIndex != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex)) - i-- - dAtA[i] = 0x28 + if m.DbSize != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.DbSize)) } if m.Leader != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.Leader)) - i-- dAtA[i] = 0x20 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Leader)) } - if m.DbSize != 0 { - i = encodeVarintRpc(dAtA, i, uint64(m.DbSize)) - i-- - dAtA[i] = 0x18 + if m.RaftIndex != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex)) } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x12 + if m.RaftTerm != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm)) } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.RaftAppliedIndex != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RaftAppliedIndex)) + } + if len(m.Errors) > 0 { + for _, s := range m.Errors { + dAtA[i] = 0x42 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + if m.DbSizeInUse != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.DbSizeInUse)) + } + if m.IsLearner { + dAtA[i] = 0x50 + i++ + if m.IsLearner { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil } func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10836,26 +7539,17 @@ func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthEnableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10863,26 +7557,17 @@ func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthDisableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10890,40 +7575,29 @@ func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if len(m.Password) > 0 { - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10931,40 +7605,39 @@ func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if len(m.Password) > 0 { - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if m.Options != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Options.Size())) + n44, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -10972,33 +7645,23 @@ func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) 
{ - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11006,33 +7669,23 @@ func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11040,40 +7693,29 @@ func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserChangePasswordRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if len(m.Password) > 0 { - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Password))) + i += copy(dAtA[i:], m.Password) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11081,40 +7723,29 @@ func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserGrantRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.User) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) } if len(m.Role) > 0 { - 
i -= len(m.Role) - copy(dAtA[i:], m.Role) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) } - if len(m.User) > 0 { - i -= len(m.User) - copy(dAtA[i:], m.User) - i = encodeVarintRpc(dAtA, i, uint64(len(m.User))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11122,40 +7753,29 @@ func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserRevokeRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Role) > 0 { - i -= len(m.Role) - copy(dAtA[i:], m.Role) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i-- - dAtA[i] = 0x12 - } if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Role) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11163,33 +7783,23 @@ func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11197,33 +7807,23 @@ func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Role) > 0 { - i -= len(m.Role) - copy(dAtA[i:], m.Role) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) } - return len(dAtA) - i, 
nil + return i, nil } func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11231,26 +7831,17 @@ func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11258,26 +7849,17 @@ func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11285,33 +7867,23 @@ func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if len(m.Role) > 0 { - i -= len(m.Role) - copy(dAtA[i:], m.Role) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11319,45 +7891,33 @@ func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleGrantPermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) } if m.Perm != nil { - { - size, err := m.Perm.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, 
uint64(m.Perm.Size())) + n45, err := m.Perm.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11365,47 +7925,35 @@ func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleRevokePermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.RangeEnd) > 0 { - i -= len(m.RangeEnd) - copy(dAtA[i:], m.RangeEnd) - i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) - i-- - dAtA[i] = 0x1a + if len(m.Role) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) } if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if len(m.Role) > 0 { - i -= len(m.Role) - copy(dAtA[i:], m.Role) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Role))) - i-- - dAtA[i] = 0xa + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) } - return len(dAtA) - i, nil + return i, nil } func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11413,38 +7961,27 @@ func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthEnableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n46, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11452,38 +7989,27 @@ func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthDisableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - 
if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n47, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11491,45 +8017,33 @@ func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthenticateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n48, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 } if len(m.Token) > 0 { - i -= len(m.Token) - copy(dAtA[i:], m.Token) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Token))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11537,38 +8051,27 @@ func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n49, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11576,47 +8079,42 @@ func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if 
m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n50, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 } if len(m.Roles) > 0 { - for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Roles[iNdEx]) - copy(dAtA[i:], m.Roles[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx]))) - i-- + for _, s := range m.Roles { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11624,38 +8122,27 @@ func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n51, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11663,38 +8150,27 @@ func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserChangePasswordResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n52, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11702,38 +8178,27 @@ func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA 
[]byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserGrantRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n53, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11741,38 +8206,27 @@ func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserRevokeRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n54, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n54 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11780,38 +8234,27 @@ func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n55, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n55 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11819,52 +8262,39 @@ func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != 
nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n56, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n56 } if len(m.Perm) > 0 { - for iNdEx := len(m.Perm) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Perm[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Perm { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11872,47 +8302,42 @@ func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n57, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n57 } if len(m.Roles) > 0 { - for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Roles[iNdEx]) - copy(dAtA[i:], m.Roles[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx]))) - i-- + for _, s := range m.Roles { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11920,47 +8345,42 @@ func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthUserListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n58, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 } if len(m.Users) > 0 { - for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Users[iNdEx]) - copy(dAtA[i:], m.Users[iNdEx]) - i = encodeVarintRpc(dAtA, i, uint64(len(m.Users[iNdEx]))) - i-- + for _, s := range 
m.Users { dAtA[i] = 0x12 - } - } - if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -11968,38 +8388,27 @@ func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n59, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n59 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -12007,38 +8416,27 @@ func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleGrantPermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n60, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n60 } - return len(dAtA) - i, nil + return i, nil } func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -12046,49 +8444,33 @@ func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) { } func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuthRoleRevokePermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.Header != nil { - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintRpc(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n61, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n61 } - return len(dAtA) - i, nil + return i, nil } func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { - offset -= sovRpc(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *ResponseHeader) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ClusterId != 0 { @@ -12103,16 +8485,10 @@ func (m *ResponseHeader) Size() (n int) { if m.RaftTerm != 0 { n += 1 + sovRpc(uint64(m.RaftTerm)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *RangeRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Key) @@ -12156,16 +8532,10 @@ func (m *RangeRequest) Size() (n int) { if m.MaxCreateRevision != 0 { n += 1 + sovRpc(uint64(m.MaxCreateRevision)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *RangeResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12184,16 +8554,10 @@ func (m *RangeResponse) Size() (n int) { if m.Count != 0 { n += 1 + sovRpc(uint64(m.Count)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *PutRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Key) @@ -12216,16 +8580,10 @@ func (m *PutRequest) Size() (n int) { if m.IgnoreLease { n += 2 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *PutResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12236,16 +8594,10 @@ func (m *PutResponse) Size() (n int) { l = m.PrevKv.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *DeleteRangeRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Key) @@ -12259,16 +8611,10 @@ func (m *DeleteRangeRequest) Size() (n int) { if m.PrevKv { n += 2 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *DeleteRangeResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12284,31 +8630,19 @@ func (m *DeleteRangeResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *RequestOp) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Request != nil { n += m.Request.Size() } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *RequestOp_RequestRange) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.RequestRange != nil { @@ -12318,9 +8652,6 @@ func (m *RequestOp_RequestRange) Size() (n int) { return n } func (m *RequestOp_RequestPut) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.RequestPut != nil { @@ -12330,9 +8661,6 @@ func (m *RequestOp_RequestPut) Size() (n int) { return n } func (m *RequestOp_RequestDeleteRange) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.RequestDeleteRange != nil { @@ -12342,9 +8670,6 @@ func (m *RequestOp_RequestDeleteRange) Size() (n int) { return n } func (m *RequestOp_RequestTxn) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.RequestTxn 
!= nil { @@ -12354,24 +8679,15 @@ func (m *RequestOp_RequestTxn) Size() (n int) { return n } func (m *ResponseOp) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Response != nil { n += m.Response.Size() } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *ResponseOp_ResponseRange) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ResponseRange != nil { @@ -12381,9 +8697,6 @@ func (m *ResponseOp_ResponseRange) Size() (n int) { return n } func (m *ResponseOp_ResponsePut) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ResponsePut != nil { @@ -12393,9 +8706,6 @@ func (m *ResponseOp_ResponsePut) Size() (n int) { return n } func (m *ResponseOp_ResponseDeleteRange) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ResponseDeleteRange != nil { @@ -12405,9 +8715,6 @@ func (m *ResponseOp_ResponseDeleteRange) Size() (n int) { return n } func (m *ResponseOp_ResponseTxn) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ResponseTxn != nil { @@ -12417,9 +8724,6 @@ func (m *ResponseOp_ResponseTxn) Size() (n int) { return n } func (m *Compare) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Result != 0 { @@ -12439,43 +8743,28 @@ func (m *Compare) Size() (n int) { if l > 0 { n += 2 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *Compare_Version) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRpc(uint64(m.Version)) return n } func (m *Compare_CreateRevision) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRpc(uint64(m.CreateRevision)) return n } func (m *Compare_ModRevision) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRpc(uint64(m.ModRevision)) return n } func (m *Compare_Value) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Value != nil { @@ -12485,18 +8774,12 @@ func (m *Compare_Value) Size() (n int) { return n } func (m *Compare_Lease) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRpc(uint64(m.Lease)) return n } func (m *TxnRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Compare) > 0 { @@ -12517,16 +8800,10 @@ func (m *TxnRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *TxnResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12542,16 +8819,10 @@ func (m *TxnResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *CompactionRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Revision != 0 { @@ -12560,59 +8831,35 @@ func (m *CompactionRequest) Size() (n int) { if m.Physical { n += 2 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *CompactionResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *HashRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *HashKVRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if 
m.Revision != 0 { n += 1 + sovRpc(uint64(m.Revision)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *HashKVResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12625,16 +8872,10 @@ func (m *HashKVResponse) Size() (n int) { if m.CompactRevision != 0 { n += 1 + sovRpc(uint64(m.CompactRevision)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *HashResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12644,28 +8885,16 @@ func (m *HashResponse) Size() (n int) { if m.Hash != 0 { n += 1 + sovRpc(uint64(m.Hash)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *SnapshotRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *SnapshotResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12679,31 +8908,19 @@ func (m *SnapshotResponse) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *WatchRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.RequestUnion != nil { n += m.RequestUnion.Size() } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *WatchRequest_CreateRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.CreateRequest != nil { @@ -12713,9 +8930,6 @@ func (m *WatchRequest_CreateRequest) Size() (n int) { return n } func (m *WatchRequest_CancelRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.CancelRequest != nil { @@ -12725,9 +8939,6 @@ func (m *WatchRequest_CancelRequest) Size() (n int) { return n } func (m *WatchRequest_ProgressRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ProgressRequest != nil { @@ -12737,9 +8948,6 @@ func (m *WatchRequest_ProgressRequest) Size() (n int) { return n } func (m *WatchCreateRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Key) @@ -12772,43 +8980,25 @@ func (m *WatchCreateRequest) Size() (n int) { if m.Fragment { n += 2 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *WatchCancelRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.WatchId != 0 { n += 1 + sovRpc(uint64(m.WatchId)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *WatchProgressRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *WatchResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12840,16 +9030,10 @@ func (m *WatchResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseGrantRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.TTL != 0 { @@ -12858,16 +9042,10 @@ func (m *LeaseGrantRequest) Size() (n int) { if m.ID != 0 { n += 1 + sovRpc(uint64(m.ID)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseGrantResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12884,62 +9062,72 @@ func (m 
*LeaseGrantResponse) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseRevokeRequest) Size() (n int) { - if m == nil { - return 0 + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) + } + return n +} + +func (m *LeaseRevokeResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) } + return n +} + +func (m *LeaseCheckpoint) Size() (n int) { var l int _ = l if m.ID != 0 { n += 1 + sovRpc(uint64(m.ID)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Remaining_TTL != 0 { + n += 1 + sovRpc(uint64(m.Remaining_TTL)) } return n } -func (m *LeaseRevokeResponse) Size() (n int) { - if m == nil { - return 0 +func (m *LeaseCheckpointRequest) Size() (n int) { + var l int + _ = l + if len(m.Checkpoints) > 0 { + for _, e := range m.Checkpoints { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } } + return n +} + +func (m *LeaseCheckpointResponse) Size() (n int) { var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseKeepAliveRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { n += 1 + sovRpc(uint64(m.ID)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseKeepAliveResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -12952,16 +9140,10 @@ func (m *LeaseKeepAliveResponse) Size() (n int) { if m.TTL != 0 { n += 1 + sovRpc(uint64(m.TTL)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseTimeToLiveRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { @@ -12970,16 +9152,10 @@ func (m *LeaseTimeToLiveRequest) Size() (n int) { if m.Keys { n += 2 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseTimeToLiveResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13001,43 +9177,25 @@ func (m *LeaseTimeToLiveResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseLeasesRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { n += 1 + sovRpc(uint64(m.ID)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseLeasesResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13050,16 +9208,10 @@ func (m *LeaseLeasesResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *Member) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { @@ -13081,16 +9233,13 @@ func (m *Member) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.IsLearner { + n += 2 } return n } func (m *MemberAddRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.PeerURLs) > 0 { @@ -13099,16 +9248,13 @@ func (m *MemberAddRequest) Size() (n int) { n += 
1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.IsLearner { + n += 2 } return n } func (m *MemberAddResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13125,31 +9271,19 @@ func (m *MemberAddResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MemberRemoveRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { n += 1 + sovRpc(uint64(m.ID)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MemberRemoveResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13162,16 +9296,10 @@ func (m *MemberRemoveResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MemberUpdateRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { @@ -13183,16 +9311,10 @@ func (m *MemberUpdateRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MemberUpdateResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13205,28 +9327,16 @@ func (m *MemberUpdateResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MemberListRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MemberListResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13239,75 +9349,70 @@ func (m *MemberListResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } -func (m *DefragmentRequest) Size() (n int) { - if m == nil { - return 0 +func (m *MemberPromoteRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRpc(uint64(m.ID)) } + return n +} + +func (m *MemberPromoteResponse) Size() (n int) { var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } } return n } +func (m *DefragmentRequest) Size() (n int) { + var l int + _ = l + return n +} + func (m *DefragmentResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MoveLeaderRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.TargetID != 0 { n += 1 + sovRpc(uint64(m.TargetID)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *MoveLeaderResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AlarmRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Action != 0 { @@ -13319,16 +9424,10 @@ func (m *AlarmRequest) Size() (n int) { if m.Alarm 
!= 0 { n += 1 + sovRpc(uint64(m.Alarm)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AlarmMember) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.MemberID != 0 { @@ -13337,16 +9436,10 @@ func (m *AlarmMember) Size() (n int) { if m.Alarm != 0 { n += 1 + sovRpc(uint64(m.Alarm)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AlarmResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13359,28 +9452,16 @@ func (m *AlarmResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *StatusRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *StatusResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13403,40 +9484,37 @@ func (m *StatusResponse) Size() (n int) { if m.RaftTerm != 0 { n += 1 + sovRpc(uint64(m.RaftTerm)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.RaftAppliedIndex != 0 { + n += 1 + sovRpc(uint64(m.RaftAppliedIndex)) + } + if len(m.Errors) > 0 { + for _, s := range m.Errors { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.DbSizeInUse != 0 { + n += 1 + sovRpc(uint64(m.DbSizeInUse)) + } + if m.IsLearner { + n += 2 } return n } func (m *AuthEnableRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthDisableRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthenticateRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -13447,16 +9525,10 @@ func (m *AuthenticateRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserAddRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -13467,48 +9539,34 @@ func (m *AuthUserAddRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovRpc(uint64(l)) } return n } func (m *AuthUserGetRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserDeleteRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserChangePasswordRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -13519,16 +9577,10 @@ func (m *AuthUserChangePasswordRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserGrantRoleRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.User) @@ -13539,16 +9591,10 @@ func (m *AuthUserGrantRoleRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if 
m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserRevokeRoleRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -13559,88 +9605,52 @@ func (m *AuthUserRevokeRoleRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleAddRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleGetRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Role) if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserListRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleListRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleDeleteRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Role) if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleGrantPermissionRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -13651,16 +9661,10 @@ func (m *AuthRoleGrantPermissionRequest) Size() (n int) { l = m.Perm.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleRevokePermissionRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Role) @@ -13675,48 +9679,30 @@ func (m *AuthRoleRevokePermissionRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthEnableResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthDisableResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthenticateResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13727,32 +9713,20 @@ func (m *AuthenticateResponse) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserAddResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserGetResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13765,96 +9739,60 @@ func (m *AuthUserGetResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserDeleteResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if 
m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserChangePasswordResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserGrantRoleResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserRevokeRoleResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleAddResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleGetResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13867,16 +9805,10 @@ func (m *AuthRoleGetResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleListResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13889,16 +9821,10 @@ func (m *AuthRoleListResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthUserListResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { @@ -13911,63 +9837,49 @@ func (m *AuthUserListResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleDeleteResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleGrantPermissionResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *AuthRoleRevokePermissionResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Header != nil { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func sovRpc(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} func sozRpc(x uint64) (n int) { return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } @@ -13986,7 +9898,7 @@ func (m *ResponseHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14014,7 +9926,7 @@ func (m *ResponseHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ClusterId |= uint64(b&0x7F) << shift + m.ClusterId |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14033,7 +9945,7 @@ func (m *ResponseHeader) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MemberId |= uint64(b&0x7F) << shift + m.MemberId |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14052,7 +9964,7 @@ func (m *ResponseHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14071,7 +9983,7 @@ func (m *ResponseHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RaftTerm |= uint64(b&0x7F) << shift + m.RaftTerm |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14085,13 +9997,9 @@ func (m *ResponseHeader) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -14116,7 +10024,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14144,7 +10052,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14153,9 +10061,6 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14178,7 +10083,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14187,9 +10092,6 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14212,7 +10114,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Limit |= int64(b&0x7F) << shift + m.Limit |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14231,7 +10133,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14250,7 +10152,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SortOrder |= RangeRequest_SortOrder(b&0x7F) << shift + m.SortOrder |= (RangeRequest_SortOrder(b) & 0x7F) << shift if b < 0x80 { break } @@ -14269,7 +10171,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SortTarget |= RangeRequest_SortTarget(b&0x7F) << shift + m.SortTarget |= (RangeRequest_SortTarget(b) & 0x7F) << shift if b < 0x80 { break } @@ -14288,7 +10190,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14308,7 +10210,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14328,7 +10230,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14348,7 +10250,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - m.MinModRevision |= int64(b&0x7F) << shift + m.MinModRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14367,7 +10269,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxModRevision |= int64(b&0x7F) << shift + m.MaxModRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14386,7 +10288,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinCreateRevision |= int64(b&0x7F) << shift + m.MinCreateRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14405,7 +10307,7 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxCreateRevision |= int64(b&0x7F) << shift + m.MaxCreateRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14419,13 +10321,9 @@ func (m *RangeRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -14450,7 +10348,7 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14478,7 +10376,7 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14487,9 +10385,6 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14514,7 +10409,7 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14523,9 +10418,6 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14548,7 +10440,7 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14568,7 +10460,7 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= int64(b&0x7F) << shift + m.Count |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14582,13 +10474,9 @@ func (m *RangeResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -14613,7 +10501,7 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14641,7 +10529,7 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14650,9 +10538,6 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14675,7 +10560,7 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14684,9 +10569,6 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14709,7 +10591,7 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lease |= int64(b&0x7F) << shift + m.Lease |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14728,7 +10610,7 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14748,7 +10630,7 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14768,7 +10650,7 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14783,13 +10665,9 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -14814,7 +10692,7 @@ func (m *PutResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14842,7 +10720,7 @@ func (m *PutResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14851,9 +10729,6 @@ func (m *PutResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14878,7 +10753,7 @@ func (m *PutResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14887,9 +10762,6 @@ func (m *PutResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14909,13 +10781,9 @@ func (m *PutResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -14940,7 +10808,7 @@ func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14968,7 +10836,7 @@ func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14977,9 +10845,6 @@ func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15002,7 +10867,7 @@ func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15011,9 +10876,6 @@ func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15036,7 +10898,7 @@ func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15051,13 +10913,9 @@ func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -15082,7 +10940,7 @@ func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15110,7 +10968,7 @@ func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15119,9 +10977,6 @@ func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15146,7 +11001,7 @@ func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Deleted |= int64(b&0x7F) << shift + m.Deleted |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15165,7 +11020,7 @@ func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15174,9 +11029,6 @@ func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15194,13 +11046,9 @@ func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -15225,7 +11073,7 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15253,7 +11101,7 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15262,9 +11110,6 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15288,7 +11133,7 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15297,9 +11142,6 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15323,7 +11165,7 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15332,9 +11174,6 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15358,7 +11197,7 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15367,9 +11206,6 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex 
> l { return io.ErrUnexpectedEOF } @@ -15388,13 +11224,9 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -15419,7 +11251,7 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15447,7 +11279,7 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15456,9 +11288,6 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15482,7 +11311,7 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15491,9 +11320,6 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15517,7 +11343,7 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15526,9 +11352,6 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15552,7 +11375,7 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15561,9 +11384,6 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15582,13 +11402,9 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -15613,7 +11429,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15641,7 +11457,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Result |= Compare_CompareResult(b&0x7F) << shift + m.Result |= (Compare_CompareResult(b) & 0x7F) << shift if b < 0x80 { break } @@ -15660,7 +11476,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Target |= Compare_CompareTarget(b&0x7F) << shift + m.Target |= (Compare_CompareTarget(b) & 0x7F) << shift if b < 0x80 { break } @@ -15679,7 +11495,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15688,9 +11504,6 @@ func (m *Compare) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15713,7 +11526,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15733,7 +11546,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15753,7 +11566,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15773,7 +11586,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15782,9 +11595,6 @@ func (m *Compare) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15806,7 +11616,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15826,7 +11636,7 @@ func (m *Compare) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15835,9 +11645,6 @@ func (m *Compare) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15855,13 +11662,9 @@ func (m *Compare) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -15886,7 +11689,7 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15914,7 +11717,7 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15923,9 +11726,6 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15948,7 +11748,7 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15957,9 +11757,6 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15982,7 +11779,7 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15991,9 +11788,6 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16011,13 +11805,9 @@ func (m *TxnRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -16042,7 +11832,7 @@ func (m *TxnResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16070,7 +11860,7 @@ func (m *TxnResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16079,9 +11869,6 @@ func (m *TxnResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16106,7 +11893,7 @@ func (m *TxnResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16126,7 +11913,7 @@ func (m *TxnResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16135,9 +11922,6 @@ func (m *TxnResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16155,13 +11939,9 @@ func (m *TxnResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -16186,7 +11966,7 @@ func (m *CompactionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16214,7 +11994,7 @@ func (m *CompactionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16233,7 +12013,7 @@ func (m *CompactionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16248,13 +12028,9 @@ func (m *CompactionRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -16279,7 +12055,7 @@ func (m *CompactionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16307,7 +12083,7 @@ func (m *CompactionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16316,9 +12092,6 @@ func (m *CompactionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16338,13 +12111,9 @@ func (m *CompactionResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -16369,7 +12138,7 @@ func (m *HashRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16392,13 +12161,9 @@ func (m *HashRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -16423,7 +12188,7 @@ func (m *HashKVRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16451,7 +12216,7 @@ func (m *HashKVRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16465,13 +12230,9 @@ func (m *HashKVRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -16496,7 +12257,7 @@ func (m *HashKVResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16524,7 +12285,7 @@ func (m *HashKVResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16533,9 +12294,6 @@ func (m *HashKVResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16560,7 +12318,7 @@ func (m *HashKVResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Hash |= uint32(b&0x7F) << shift + m.Hash |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } @@ -16579,7 +12337,7 @@ func (m *HashKVResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CompactRevision |= int64(b&0x7F) << shift + m.CompactRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16593,13 +12351,9 @@ func (m *HashKVResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -16624,7 +12378,7 @@ func (m *HashResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16652,7 +12406,7 @@ func (m *HashResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16661,9 +12415,6 @@ func (m *HashResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16688,7 +12439,7 @@ func (m *HashResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Hash |= uint32(b&0x7F) << shift + m.Hash |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } @@ -16702,13 +12453,9 @@ func (m *HashResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -16733,7 +12480,7 @@ func (m *SnapshotRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16756,13 +12503,9 @@ func (m *SnapshotRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -16787,7 +12530,7 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16815,7 +12558,7 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16824,9 +12567,6 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16851,7 +12591,7 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RemainingBytes |= uint64(b&0x7F) << shift + m.RemainingBytes |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16870,7 +12610,7 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16879,9 +12619,6 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16899,13 +12636,9 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -16930,7 +12663,7 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -16958,7 +12691,7 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -16967,9 +12700,6 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -16993,7 +12723,7 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17002,9 +12732,6 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -17028,7 +12755,7 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17037,9 +12764,6 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -17058,13 +12782,9 @@ func (m *WatchRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -17089,7 +12809,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17117,7 +12837,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17126,9 +12846,6 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -17151,7 +12868,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17160,9 +12877,6 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -17185,7 +12899,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.StartRevision |= int64(b&0x7F) << shift + m.StartRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17204,7 +12918,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17222,7 +12936,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= WatchCreateRequest_FilterType(b&0x7F) << shift + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift if b < 0x80 { break } @@ -17239,7 +12953,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - packedLen |= int(b&0x7F) << shift + packedLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17248,16 +12962,9 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } - var elementCount int - if elementCount != 0 && len(m.Filters) == 0 { - m.Filters = make([]WatchCreateRequest_FilterType, 0, elementCount) - } for iNdEx < postIndex { var v WatchCreateRequest_FilterType for shift := uint(0); ; shift += 7 { @@ -17269,7 +12976,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= WatchCreateRequest_FilterType(b&0x7F) << shift + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift if b < 0x80 { break } @@ -17293,7 +13000,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17313,7 +13020,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.WatchId |= int64(b&0x7F) << shift + m.WatchId |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17332,7 +13039,7 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17347,13 +13054,9 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -17378,7 +13081,7 @@ func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17406,7 +13109,7 @@ func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.WatchId |= int64(b&0x7F) << shift + m.WatchId |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17420,13 +13123,9 @@ func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -17451,7 +13150,7 @@ func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17474,13 +13173,9 @@ func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -17505,7 +13200,7 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17533,7 +13228,7 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17542,9 +13237,6 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -17569,7 +13261,7 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.WatchId |= int64(b&0x7F) << shift + m.WatchId |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17578,7 +13270,284 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) } - var v int + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Created = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Canceled = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) + } + m.CompactRevision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactRevision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CancelReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Fragment = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &mvccpb.Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) + } + m.TTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -17588,17 +13557,30 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.Created = bool(v != 0) - case 4: + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } - var v int + m.ID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -17608,17 +13590,16 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - m.Canceled = bool(v != 0) - case 5: + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) } - m.CompactRevision = 0 + m.TTL = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -17628,14 +13609,14 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CompactRevision |= int64(b&0x7F) << shift + m.TTL |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - case 6: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17647,7 +13628,7 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17657,67 +13638,10 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CancelReason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType) - } - var v int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Fragment = bool(v != 0) - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, &mvccpb.Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -17728,13 +13652,9 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -17744,7 +13664,7 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { +func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17759,7 +13679,7 @@ func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17767,32 +13687,13 @@ func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group") + return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } @@ -17806,7 +13707,7 @@ func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17820,13 +13721,9 @@ func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -17836,7 +13733,7 @@ func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { +func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17851,7 +13748,7 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -17859,10 +13756,10 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group") + return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -17879,7 +13776,7 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -17888,9 +13785,6 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -17901,7 +13795,57 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseCheckpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseCheckpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseCheckpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } @@ -17915,35 +13859,16 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - case 3: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType) - } - m.TTL = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TTL |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Remaining_TTL", wireType) } - var stringLen uint64 + m.Remaining_TTL = 0 for shift := 
uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -17953,24 +13878,11 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Remaining_TTL |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -17980,13 +13892,9 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -17996,7 +13904,7 @@ func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { +func (m *LeaseCheckpointRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18011,7 +13919,7 @@ func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18019,17 +13927,17 @@ func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group") + return fmt.Errorf("proto: LeaseCheckpointRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LeaseCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Checkpoints", wireType) } - m.ID = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -18039,11 +13947,23 @@ func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Checkpoints = append(m.Checkpoints, &LeaseCheckpoint{}) + if err := m.Checkpoints[len(m.Checkpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -18053,13 +13973,9 @@ func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -18069,7 +13985,7 @@ func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { +func (m *LeaseCheckpointResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18084,7 +14000,7 @@ func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18092,10 +14008,10 @@ func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group") + return fmt.Errorf("proto: LeaseCheckpointResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LeaseCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -18112,7 +14028,7 @@ func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -18121,9 +14037,6 @@ func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -18143,13 +14056,9 @@ func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -18174,7 +14083,7 @@ func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18202,7 +14111,7 @@ func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18216,13 +14125,9 @@ func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -18247,7 +14152,7 @@ func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18275,7 +14180,7 @@ func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -18284,9 +14189,6 @@ func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -18311,7 +14213,7 @@ func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18330,7 +14232,7 @@ func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TTL |= int64(b&0x7F) << shift + m.TTL |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18344,13 +14246,9 @@ func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -18375,7 +14273,7 @@ func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18403,7 +14301,7 @@ func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18422,7 +14320,7 @@ func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -18437,13 +14335,9 @@ func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -18468,7 +14362,7 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18496,7 +14390,7 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -18505,9 +14399,6 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -18532,7 +14423,7 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18551,7 +14442,7 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TTL |= int64(b&0x7F) << shift + m.TTL |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18570,7 +14461,7 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.GrantedTTL |= int64(b&0x7F) << shift + m.GrantedTTL |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18589,7 +14480,7 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -18598,9 +14489,6 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -18616,13 +14504,9 @@ func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -18647,7 +14531,7 @@ func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18670,13 +14554,9 @@ func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -18701,7 +14581,7 @@ func (m *LeaseStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18729,7 +14609,7 @@ func (m *LeaseStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18743,13 +14623,9 @@ func (m *LeaseStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -18774,7 +14650,7 @@ func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18802,7 +14678,7 @@ func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -18811,9 +14687,6 @@ func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -18838,7 +14711,7 @@ func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -18847,9 +14720,6 @@ func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -18867,13 +14737,9 @@ func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -18898,7 +14764,7 @@ func (m *Member) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18926,7 +14792,7 @@ func (m *Member) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= uint64(b&0x7F) << shift + m.ID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18945,7 +14811,7 @@ func (m *Member) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18955,9 +14821,6 @@ func (m *Member) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -18977,7 +14840,7 @@ func (m *Member) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -18987,9 +14850,6 @@ func (m *Member) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19009,7 +14869,7 @@ func (m *Member) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19019,14 +14879,31 @@ func (m *Member) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLearner = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -19036,13 +14913,9 @@ func (m *Member) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -19067,7 +14940,7 @@ func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19095,7 +14968,7 @@ func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19105,14 +14978,31 @@ func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLearner = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -19122,13 +15012,9 @@ func (m *MemberAddRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
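The new `case 5` in `Member.Unmarshal` and `case 2` in `MemberAddRequest.Unmarshal` above decode the IsLearner flag that etcd v3.4 introduces for learner (non-voting) members. Protobuf has no dedicated bool wire type: the flag arrives as a varint, and the generated code ends with `m.IsLearner = bool(v != 0)`. The sketch below shows how the key varint splits into field number and wire type, and how the bool is derived; `splitTag` and `boolFromVarint` are illustrative helper names, not symbols from rpc.pb.go.

package main

import "fmt"

// splitTag mirrors the first step of every case in the generated Unmarshal
// functions: the decoded key varint carries the field number in its upper
// bits and the wire type in its low three bits.
func splitTag(key uint64) (fieldNum int32, wireType int) {
    return int32(key >> 3), int(key & 0x7)
}

// boolFromVarint mirrors the last line of the new IsLearner cases:
// a proto bool is just a varint, and any non-zero value means true.
func boolFromVarint(v uint64) bool {
    return v != 0
}

func main() {
    // Key byte 0x28 = 5<<3 | 0: field 5 (IsLearner in Member), wire type 0 (varint).
    f, w := splitTag(0x28)
    fmt.Println(f, w)              // 5 0
    fmt.Println(boolFromVarint(1)) // true
    fmt.Println(boolFromVarint(0)) // false
}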
iNdEx += skippy } } @@ -19153,7 +15039,7 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19181,7 +15067,7 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19190,9 +15076,6 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19217,7 +15100,7 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19226,9 +15109,6 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19253,7 +15133,7 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19262,9 +15142,6 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19282,13 +15159,192 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthRpc } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
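The regenerated `MemberRemoveResponse.Unmarshal` above shows the wire-type-2 pattern used throughout this file for embedded and repeated messages (`Header`, `Members`): read a varint length prefix, bounds-check `postIndex` against the buffer, then hand the payload sub-slice to the sub-message's own Unmarshal. A compact sketch of that prefix-and-slice step follows; `readLengthDelimited` is an illustrative name and the error strings are assumptions, not the ErrInvalidLengthRpc/io.ErrUnexpectedEOF values used by the generated code.

package main

import (
    "errors"
    "fmt"
)

// readLengthDelimited mirrors the wire-type-2 handling in the generated code:
// a varint length prefix followed by that many payload bytes. It returns the
// payload sub-slice and the index of the next unread byte.
func readLengthDelimited(buf []byte, offset int) ([]byte, int, error) {
    var msglen int
    for shift := uint(0); ; shift += 7 {
        if shift >= 64 {
            return nil, 0, errors.New("length varint overflows")
        }
        if offset >= len(buf) {
            return nil, 0, errors.New("unexpected end of buffer")
        }
        b := buf[offset]
        offset++
        msglen |= (int(b) & 0x7F) << shift
        if b < 0x80 {
            break
        }
    }
    if msglen < 0 {
        return nil, 0, errors.New("invalid negative length")
    }
    post := offset + msglen
    if post > len(buf) {
        return nil, 0, errors.New("unexpected end of buffer")
    }
    return buf[offset:post], post, nil
}

func main() {
    // A 3-byte payload preceded by its length prefix.
    payload, next, err := readLengthDelimited([]byte{0x03, 'a', 'b', 'c'}, 0)
    fmt.Println(string(payload), next, err) // abc 4 <nil>
}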
iNdEx += skippy } } @@ -19298,7 +15354,7 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { +func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19313,7 +15369,7 @@ func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19321,10 +15377,10 @@ func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -19341,11 +15397,40 @@ func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= uint64(b&0x7F) << shift + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -19355,13 +15440,9 @@ func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -19371,7 +15452,7 @@ func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { +func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19386,7 +15467,7 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19394,10 +15475,10 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group") + return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -19414,7 +15495,7 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19423,9 +15504,6 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19450,7 +15528,7 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19459,9 +15537,6 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19479,13 +15554,9 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -19495,7 +15566,7 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { +func (m *MemberListRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19510,7 +15581,7 @@ func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19518,63 +15589,12 @@ func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -19584,13 +15604,9 @@ func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -19600,7 +15616,7 @@ func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { +func (m *MemberListResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19615,7 +15631,7 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19623,10 +15639,10 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group") + return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -19643,7 +15659,7 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19652,9 +15668,6 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19679,7 +15692,7 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19688,9 +15701,6 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19708,13 +15718,9 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -19724,7 +15730,7 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *MemberListRequest) Unmarshal(dAtA []byte) error { +func (m *MemberPromoteRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19739,7 +15745,7 @@ func (m *MemberListRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19747,12 +15753,31 @@ func (m *MemberListRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MemberPromoteRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MemberPromoteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -19762,13 +15787,9 @@ func (m *MemberListRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -19778,7 +15799,7 @@ func (m *MemberListRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *MemberListResponse) Unmarshal(dAtA []byte) error { +func (m *MemberPromoteResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19793,7 +15814,7 @@ func (m *MemberListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19801,10 +15822,10 @@ func (m *MemberListResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group") + return fmt.Errorf("proto: MemberPromoteResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MemberPromoteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -19821,7 +15842,7 @@ func (m *MemberListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19830,9 +15851,6 @@ func (m *MemberListResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19857,7 +15875,7 @@ func (m *MemberListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -19866,9 +15884,6 @@ func (m *MemberListResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -19886,13 +15901,9 @@ func (m *MemberListResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -19917,7 +15928,7 @@ func (m *DefragmentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19940,13 +15951,9 @@ func (m *DefragmentRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -19971,7 +15978,7 @@ func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -19999,7 +16006,7 @@ func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -20008,9 +16015,6 @@ func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20030,13 +16034,9 @@ func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -20061,7 +16061,7 @@ func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20089,7 +16089,7 @@ func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TargetID |= uint64(b&0x7F) << shift + m.TargetID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20103,13 +16103,9 @@ func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -20134,7 +16130,7 @@ func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20162,7 +16158,7 @@ func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -20171,9 +16167,6 @@ func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20193,13 +16186,9 @@ func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -20224,7 +16213,7 @@ func (m *AlarmRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20252,7 +16241,7 @@ func (m *AlarmRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Action |= AlarmRequest_AlarmAction(b&0x7F) << shift + m.Action |= (AlarmRequest_AlarmAction(b) & 0x7F) << shift if b < 0x80 { break } @@ -20271,7 +16260,7 @@ func (m *AlarmRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MemberID |= uint64(b&0x7F) << shift + m.MemberID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20290,7 +16279,7 @@ func (m *AlarmRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Alarm |= AlarmType(b&0x7F) << shift + m.Alarm |= (AlarmType(b) & 0x7F) << shift if b < 0x80 { break } @@ -20304,13 +16293,9 @@ func (m *AlarmRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -20335,7 +16320,7 @@ func (m *AlarmMember) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20363,7 +16348,7 @@ func (m *AlarmMember) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MemberID |= uint64(b&0x7F) << shift + m.MemberID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20382,7 +16367,7 @@ func (m *AlarmMember) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Alarm |= AlarmType(b&0x7F) << shift + m.Alarm |= (AlarmType(b) & 0x7F) << shift if b < 0x80 { break } @@ -20396,13 +16381,9 @@ func (m *AlarmMember) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -20427,7 +16408,7 @@ func (m *AlarmResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20455,7 +16436,7 @@ func (m *AlarmResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -20464,9 +16445,6 @@ func (m *AlarmResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20491,7 +16469,7 @@ func (m *AlarmResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -20500,9 +16478,6 @@ func (m *AlarmResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20520,13 +16495,9 @@ func (m *AlarmResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -20551,7 +16522,7 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20574,13 +16545,9 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -20605,7 +16572,7 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20633,7 +16600,7 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -20642,9 +16609,6 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20669,7 +16633,7 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20679,9 +16643,6 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20701,7 +16662,7 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DbSize |= int64(b&0x7F) << shift + m.DbSize |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20710,7 +16671,83 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) } - m.Leader = 0 + m.Leader = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Leader |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) + } + m.RaftIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) + } + m.RaftTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftAppliedIndex", wireType) + } + m.RaftAppliedIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftAppliedIndex |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -20720,16 +16757,26 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Leader |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - case 5: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Errors = append(m.Errors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DbSizeInUse", wireType) } - m.RaftIndex = 0 + m.DbSizeInUse = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -20739,16 +16786,16 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RaftIndex |= uint64(b&0x7F) << shift + m.DbSizeInUse |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - case 6: + case 10: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType) } - m.RaftTerm = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -20758,11 +16805,12 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RaftTerm |= uint64(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } + m.IsLearner = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -20772,13 +16820,9 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -20803,7 +16847,7 @@ func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20826,13 +16870,9 @@ func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -20857,7 +16897,7 @@ func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20880,13 +16920,9 @@ func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -20911,7 +16947,7 @@ func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20939,7 +16975,7 @@ func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20949,9 +16985,6 @@ func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20971,7 +17004,7 @@ func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -20981,9 +17014,6 @@ func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -20998,13 +17028,9 @@ func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21029,7 +17055,7 @@ func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21057,7 +17083,7 @@ func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21067,9 +17093,6 @@ func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21089,7 +17112,7 @@ func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21099,13 +17122,43 @@ func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthRpc } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Password = string(dAtA[iNdEx:postIndex]) + if m.Options == nil { + m.Options = &authpb.UserAddOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -21116,13 +17169,9 @@ func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx 
+ skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21147,7 +17196,7 @@ func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21175,7 +17224,7 @@ func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21185,9 +17234,6 @@ func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21202,13 +17248,9 @@ func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21233,7 +17275,7 @@ func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21261,7 +17303,7 @@ func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21271,9 +17313,6 @@ func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21288,13 +17327,9 @@ func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -21319,7 +17354,7 @@ func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21347,7 +17382,7 @@ func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21357,9 +17392,6 @@ func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21379,7 +17411,7 @@ func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21389,9 +17421,6 @@ func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21406,13 +17435,9 @@ func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21437,7 +17462,7 @@ func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21465,7 +17490,7 @@ func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21475,9 +17500,6 @@ func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21497,7 +17519,7 @@ func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21507,9 +17529,6 @@ func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21524,13 +17543,9 @@ func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -21555,7 +17570,7 @@ func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21583,7 +17598,7 @@ func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21593,9 +17608,6 @@ func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21615,7 +17627,7 @@ func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21625,9 +17637,6 @@ func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21642,13 +17651,9 @@ func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21673,7 +17678,7 @@ func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21701,7 +17706,7 @@ func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21711,9 +17716,6 @@ func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21728,13 +17730,9 @@ func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21759,7 +17757,7 @@ func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21787,7 +17785,7 @@ func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21797,9 +17795,6 @@ func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -21814,13 +17809,9 @@ func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -21845,7 +17836,7 @@ func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21868,13 +17859,9 @@ func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21899,7 +17886,7 @@ func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21922,13 +17909,9 @@ func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -21953,7 +17936,7 @@ func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21981,7 +17964,7 @@ func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -21991,9 +17974,6 @@ func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22008,13 +17988,9 @@ func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -22039,7 +18015,7 @@ func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22067,7 +18043,7 @@ func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22077,9 +18053,6 @@ func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22099,7 +18072,7 @@ func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22108,9 +18081,6 @@ func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22130,13 +18100,9 @@ func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -22161,7 +18127,7 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22189,7 +18155,7 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22199,9 +18165,6 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22211,7 +18174,7 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -22221,29 +18184,28 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if byteLen < 0 { return ErrInvalidLengthRpc } + postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRpc @@ -22253,23 +18215,22 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRpc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if byteLen < 0 { return ErrInvalidLengthRpc } + postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } - m.RangeEnd = string(dAtA[iNdEx:postIndex]) + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) + if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex @@ -22280,13 +18241,9 @@ func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -22311,7 +18268,7 @@ func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22339,7 +18296,7 @@ func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22348,9 +18305,6 @@ func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22370,13 +18324,9 @@ func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -22401,7 +18351,7 @@ func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22429,7 +18379,7 @@ func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22438,9 +18388,6 @@ func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22460,13 +18407,9 @@ func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
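In `AuthRoleRevokePermissionRequest.Unmarshal`, the hunk above switches `Key` and `RangeEnd` from string decoding to bytes decoding, matching the field type used by the v3.4 proto definition. The generated bytes path reuses the destination slice with `append(m.Key[:0], ...)` and normalizes a zero-length value to an empty, non-nil slice. A minimal sketch of that idiom, under the assumption that a hypothetical helper (`setBytesField`) is acceptable shorthand for the inline code:

package main

import "fmt"

// setBytesField mirrors the generated handling of a bytes field: the payload
// is copied into the (possibly reused) destination slice, and a zero-length
// value is normalized to an empty, non-nil slice so the field is
// distinguishable from "unset".
func setBytesField(dst, payload []byte) []byte {
    dst = append(dst[:0], payload...)
    if dst == nil {
        dst = []byte{}
    }
    return dst
}

func main() {
    var key []byte
    key = setBytesField(key, []byte("foo"))
    fmt.Printf("%q %d\n", key, len(key)) // "foo" 3
    key = setBytesField(key, nil)
    fmt.Printf("%q nil=%v\n", key, key == nil) // "" nil=false
}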
iNdEx += skippy } } @@ -22491,7 +18434,7 @@ func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22519,7 +18462,7 @@ func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22528,9 +18471,6 @@ func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22555,7 +18495,7 @@ func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22565,9 +18505,6 @@ func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22582,13 +18519,9 @@ func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -22613,7 +18546,7 @@ func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22641,7 +18574,7 @@ func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22650,9 +18583,6 @@ func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22672,13 +18602,9 @@ func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -22703,7 +18629,7 @@ func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22731,7 +18657,7 @@ func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22740,9 +18666,6 @@ func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22767,7 +18690,7 @@ func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22777,9 +18700,6 @@ func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22794,13 +18714,9 @@ func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -22825,7 +18741,7 @@ func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22853,7 +18769,7 @@ func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22862,9 +18778,6 @@ func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22884,13 +18797,9 @@ func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -22915,7 +18824,7 @@ func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -22943,7 +18852,7 @@ func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -22952,9 +18861,6 @@ func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -22974,13 +18880,9 @@ func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -23005,7 +18907,7 @@ func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23033,7 +18935,7 @@ func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23042,9 +18944,6 @@ func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23064,13 +18963,9 @@ func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -23095,7 +18990,7 @@ func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23123,7 +19018,7 @@ func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23132,9 +19027,6 @@ func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23154,13 +19046,9 @@ func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -23185,7 +19073,7 @@ func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23213,7 +19101,7 @@ func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23222,9 +19110,6 @@ func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23244,13 +19129,9 @@ func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -23275,7 +19156,7 @@ func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23303,7 +19184,7 @@ func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23312,9 +19193,6 @@ func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23339,7 +19217,7 @@ func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23348,9 +19226,6 @@ func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23368,13 +19243,9 @@ func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -23399,7 +19270,7 @@ func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23427,7 +19298,7 @@ func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23436,9 +19307,6 @@ func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23463,7 +19331,7 @@ func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23473,9 +19341,6 @@ func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23490,13 +19355,9 @@ func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -23521,7 +19382,7 @@ func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23549,7 +19410,7 @@ func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23558,9 +19419,6 @@ func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23585,7 +19443,7 @@ func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23595,9 +19453,6 @@ func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23612,13 +19467,9 @@ func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -23643,7 +19494,7 @@ func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23671,7 +19522,7 @@ func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23680,9 +19531,6 @@ func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23702,13 +19550,9 @@ func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -23733,7 +19577,7 @@ func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23761,7 +19605,7 @@ func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23770,9 +19614,6 @@ func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23792,13 +19633,9 @@ func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -23823,7 +19660,7 @@ func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -23851,7 +19688,7 @@ func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -23860,9 +19697,6 @@ func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRpc } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRpc - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -23882,13 +19716,9 @@ func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRpc } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRpc - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -23952,11 +19782,8 @@ func skipRpc(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthRpc - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthRpc } return iNdEx, nil @@ -23987,9 +19814,6 @@ func skipRpc(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthRpc - } } return iNdEx, nil case 4: @@ -24008,3 +19832,255 @@ var ( ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } + +var fileDescriptorRpc = []byte{ + // 3928 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x6f, 0x23, 0xc9, + 0x75, 0x56, 0x93, 0xe2, 0xed, 0xf0, 0x22, 0xaa, 0x74, 0x19, 0x0e, 0x67, 0x46, 0xa3, 0xad, 0xd9, + 0xd9, 0xd5, 0xce, 0xec, 0x8a, 0x6b, 0xd9, 0x4e, 0x80, 0x49, 0xe2, 0x58, 0x23, 0x71, 0x67, 0xb4, + 0xd2, 0x88, 0xda, 0x16, 0x67, 0xf6, 0x02, 0x23, 0x42, 0x8b, 0x2c, 0x49, 0x1d, 0x91, 0xdd, 0x74, + 0x77, 0x93, 0x23, 0x6d, 0x2e, 0x0e, 0x0c, 0xc7, 0x40, 0xf2, 0x68, 0x03, 0x41, 0xf2, 0x90, 0xa7, + 0x20, 0x08, 0xfc, 0x90, 0xe7, 0x00, 0xf9, 0x05, 0x79, 0xca, 0x05, 0xf9, 0x03, 0xc1, 0xc6, 0x2f, + 0xc9, 0xaf, 0x30, 0xea, 0xd6, 0x5d, 0x7d, 0xa3, 0xc6, 0xa6, 0x77, 0x5f, 0xa4, 0xae, 0x53, 0xa7, + 0xce, 0x39, 0x75, 0xaa, 0xea, 0x9c, 0xd3, 0x5f, 0x17, 0xa1, 0xe4, 0x8c, 0x7a, 0x9b, 0x23, 0xc7, + 0xf6, 0x6c, 0x54, 0x21, 0x5e, 0xaf, 0xef, 0x12, 0x67, 0x42, 0x9c, 0xd1, 0x69, 0x73, 0xf9, 0xdc, + 0x3e, 0xb7, 0x59, 0x47, 0x8b, 0x3e, 0x71, 0x9e, 0xe6, 0x6d, 0xca, 0xd3, 0x1a, 0x4e, 0x7a, 0x3d, + 0xf6, 0x67, 0x74, 0xda, 0xba, 0x9c, 0x88, 0xae, 0x3b, 0xac, 0xcb, 0x18, 0x7b, 0x17, 0xec, 0xcf, + 0xe8, 0x94, 0xfd, 0x13, 0x9d, 0x77, 0xcf, 0x6d, 0xfb, 0x7c, 0x40, 0x5a, 0xc6, 0xc8, 0x6c, 0x19, + 0x96, 0x65, 0x7b, 0x86, 0x67, 0xda, 0x96, 0xcb, 0x7b, 0xf1, 0x5f, 0x6a, 0x50, 0xd3, 0x89, 0x3b, + 0xb2, 0x2d, 0x97, 0x3c, 0x27, 0x46, 0x9f, 0x38, 0xe8, 0x1e, 0x40, 0x6f, 0x30, 0x76, 0x3d, 0xe2, + 0x9c, 0x98, 0xfd, 0x86, 0xb6, 0xae, 0x6d, 0xcc, 0xeb, 0x25, 0x41, 0xd9, 0xeb, 0xa3, 0x3b, 0x50, + 0x1a, 0x92, 0xe1, 0x29, 0xef, 0xcd, 0xb0, 0xde, 0x22, 0x27, 0xec, 0xf5, 0x51, 0x13, 0x8a, 0x0e, + 0x99, 0x98, 0xae, 0x69, 0x5b, 0x8d, 0xec, 0xba, 0xb6, 0x91, 0xd5, 0xfd, 0x36, 0x1d, 0xe8, 0x18, + 0x67, 0xde, 0x89, 0x47, 0x9c, 0x61, 0x63, 0x9e, 0x0f, 
0xa4, 0x84, 0x2e, 0x71, 0x86, 0xf8, 0x27, + 0x39, 0xa8, 0xe8, 0x86, 0x75, 0x4e, 0x74, 0xf2, 0xc3, 0x31, 0x71, 0x3d, 0x54, 0x87, 0xec, 0x25, + 0xb9, 0x66, 0xea, 0x2b, 0x3a, 0x7d, 0xe4, 0xe3, 0xad, 0x73, 0x72, 0x42, 0x2c, 0xae, 0xb8, 0x42, + 0xc7, 0x5b, 0xe7, 0xa4, 0x6d, 0xf5, 0xd1, 0x32, 0xe4, 0x06, 0xe6, 0xd0, 0xf4, 0x84, 0x56, 0xde, + 0x08, 0x99, 0x33, 0x1f, 0x31, 0x67, 0x07, 0xc0, 0xb5, 0x1d, 0xef, 0xc4, 0x76, 0xfa, 0xc4, 0x69, + 0xe4, 0xd6, 0xb5, 0x8d, 0xda, 0xd6, 0xdb, 0x9b, 0xea, 0x42, 0x6c, 0xaa, 0x06, 0x6d, 0x1e, 0xdb, + 0x8e, 0xd7, 0xa1, 0xbc, 0x7a, 0xc9, 0x95, 0x8f, 0xe8, 0x23, 0x28, 0x33, 0x21, 0x9e, 0xe1, 0x9c, + 0x13, 0xaf, 0x91, 0x67, 0x52, 0x1e, 0xde, 0x20, 0xa5, 0xcb, 0x98, 0x75, 0xa6, 0x9e, 0x3f, 0x23, + 0x0c, 0x15, 0x97, 0x38, 0xa6, 0x31, 0x30, 0xbf, 0x34, 0x4e, 0x07, 0xa4, 0x51, 0x58, 0xd7, 0x36, + 0x8a, 0x7a, 0x88, 0x46, 0xe7, 0x7f, 0x49, 0xae, 0xdd, 0x13, 0xdb, 0x1a, 0x5c, 0x37, 0x8a, 0x8c, + 0xa1, 0x48, 0x09, 0x1d, 0x6b, 0x70, 0xcd, 0x16, 0xcd, 0x1e, 0x5b, 0x1e, 0xef, 0x2d, 0xb1, 0xde, + 0x12, 0xa3, 0xb0, 0xee, 0x0d, 0xa8, 0x0f, 0x4d, 0xeb, 0x64, 0x68, 0xf7, 0x4f, 0x7c, 0x87, 0x00, + 0x73, 0x48, 0x6d, 0x68, 0x5a, 0x2f, 0xec, 0xbe, 0x2e, 0xdd, 0x42, 0x39, 0x8d, 0xab, 0x30, 0x67, + 0x59, 0x70, 0x1a, 0x57, 0x2a, 0xe7, 0x26, 0x2c, 0x51, 0x99, 0x3d, 0x87, 0x18, 0x1e, 0x09, 0x98, + 0x2b, 0x8c, 0x79, 0x71, 0x68, 0x5a, 0x3b, 0xac, 0x27, 0xc4, 0x6f, 0x5c, 0xc5, 0xf8, 0xab, 0x82, + 0xdf, 0xb8, 0x0a, 0xf3, 0xe3, 0x4d, 0x28, 0xf9, 0x3e, 0x47, 0x45, 0x98, 0x3f, 0xec, 0x1c, 0xb6, + 0xeb, 0x73, 0x08, 0x20, 0xbf, 0x7d, 0xbc, 0xd3, 0x3e, 0xdc, 0xad, 0x6b, 0xa8, 0x0c, 0x85, 0xdd, + 0x36, 0x6f, 0x64, 0xf0, 0x53, 0x80, 0xc0, 0xbb, 0xa8, 0x00, 0xd9, 0xfd, 0xf6, 0xe7, 0xf5, 0x39, + 0xca, 0xf3, 0xaa, 0xad, 0x1f, 0xef, 0x75, 0x0e, 0xeb, 0x1a, 0x1d, 0xbc, 0xa3, 0xb7, 0xb7, 0xbb, + 0xed, 0x7a, 0x86, 0x72, 0xbc, 0xe8, 0xec, 0xd6, 0xb3, 0xa8, 0x04, 0xb9, 0x57, 0xdb, 0x07, 0x2f, + 0xdb, 0xf5, 0x79, 0xfc, 0x73, 0x0d, 0xaa, 0x62, 0xbd, 0xf8, 0x99, 0x40, 0xdf, 0x81, 0xfc, 0x05, + 0x3b, 0x17, 0x6c, 0x2b, 0x96, 0xb7, 0xee, 0x46, 0x16, 0x37, 0x74, 0x76, 0x74, 0xc1, 0x8b, 0x30, + 0x64, 0x2f, 0x27, 0x6e, 0x23, 0xb3, 0x9e, 0xdd, 0x28, 0x6f, 0xd5, 0x37, 0xf9, 0x81, 0xdd, 0xdc, + 0x27, 0xd7, 0xaf, 0x8c, 0xc1, 0x98, 0xe8, 0xb4, 0x13, 0x21, 0x98, 0x1f, 0xda, 0x0e, 0x61, 0x3b, + 0xb6, 0xa8, 0xb3, 0x67, 0xba, 0x8d, 0xd9, 0xa2, 0x89, 0xdd, 0xca, 0x1b, 0xf8, 0x17, 0x1a, 0xc0, + 0xd1, 0xd8, 0x4b, 0x3f, 0x1a, 0xcb, 0x90, 0x9b, 0x50, 0xc1, 0xe2, 0x58, 0xf0, 0x06, 0x3b, 0x13, + 0xc4, 0x70, 0x89, 0x7f, 0x26, 0x68, 0x03, 0xdd, 0x82, 0xc2, 0xc8, 0x21, 0x93, 0x93, 0xcb, 0x09, + 0x53, 0x52, 0xd4, 0xf3, 0xb4, 0xb9, 0x3f, 0x41, 0x6f, 0x41, 0xc5, 0x3c, 0xb7, 0x6c, 0x87, 0x9c, + 0x70, 0x59, 0x39, 0xd6, 0x5b, 0xe6, 0x34, 0x66, 0xb7, 0xc2, 0xc2, 0x05, 0xe7, 0x55, 0x96, 0x03, + 0x4a, 0xc2, 0x16, 0x94, 0x99, 0xa9, 0x33, 0xb9, 0xef, 0xbd, 0xc0, 0xc6, 0x0c, 0x1b, 0x16, 0x77, + 0xa1, 0xb0, 0x1a, 0xff, 0x00, 0xd0, 0x2e, 0x19, 0x10, 0x8f, 0xcc, 0x12, 0x3d, 0x14, 0x9f, 0x64, + 0x55, 0x9f, 0xe0, 0x9f, 0x69, 0xb0, 0x14, 0x12, 0x3f, 0xd3, 0xb4, 0x1a, 0x50, 0xe8, 0x33, 0x61, + 0xdc, 0x82, 0xac, 0x2e, 0x9b, 0xe8, 0x31, 0x14, 0x85, 0x01, 0x6e, 0x23, 0x9b, 0xb2, 0x69, 0x0a, + 0xdc, 0x26, 0x17, 0xff, 0x22, 0x03, 0x25, 0x31, 0xd1, 0xce, 0x08, 0x6d, 0x43, 0xd5, 0xe1, 0x8d, + 0x13, 0x36, 0x1f, 0x61, 0x51, 0x33, 0x3d, 0x08, 0x3d, 0x9f, 0xd3, 0x2b, 0x62, 0x08, 0x23, 0xa3, + 0xdf, 0x83, 0xb2, 0x14, 0x31, 0x1a, 0x7b, 0xc2, 0xe5, 0x8d, 0xb0, 0x80, 0x60, 0xff, 0x3d, 0x9f, + 0xd3, 0x41, 0xb0, 0x1f, 0x8d, 0x3d, 0xd4, 0x85, 0x65, 0x39, 0x98, 0xcf, 0x46, 
0x98, 0x91, 0x65, + 0x52, 0xd6, 0xc3, 0x52, 0xe2, 0x4b, 0xf5, 0x7c, 0x4e, 0x47, 0x62, 0xbc, 0xd2, 0xa9, 0x9a, 0xe4, + 0x5d, 0xf1, 0xe0, 0x1d, 0x33, 0xa9, 0x7b, 0x65, 0xc5, 0x4d, 0xea, 0x5e, 0x59, 0x4f, 0x4b, 0x50, + 0x10, 0x2d, 0xfc, 0x2f, 0x19, 0x00, 0xb9, 0x1a, 0x9d, 0x11, 0xda, 0x85, 0x9a, 0x23, 0x5a, 0x21, + 0x6f, 0xdd, 0x49, 0xf4, 0x96, 0x58, 0xc4, 0x39, 0xbd, 0x2a, 0x07, 0x71, 0xe3, 0xbe, 0x07, 0x15, + 0x5f, 0x4a, 0xe0, 0xb0, 0xdb, 0x09, 0x0e, 0xf3, 0x25, 0x94, 0xe5, 0x00, 0xea, 0xb2, 0x4f, 0x61, + 0xc5, 0x1f, 0x9f, 0xe0, 0xb3, 0xb7, 0xa6, 0xf8, 0xcc, 0x17, 0xb8, 0x24, 0x25, 0xa8, 0x5e, 0x53, + 0x0d, 0x0b, 0xdc, 0x76, 0x3b, 0xc1, 0x6d, 0x71, 0xc3, 0xa8, 0xe3, 0x80, 0xe6, 0x4b, 0xde, 0xc4, + 0xff, 0x97, 0x85, 0xc2, 0x8e, 0x3d, 0x1c, 0x19, 0x0e, 0x5d, 0x8d, 0xbc, 0x43, 0xdc, 0xf1, 0xc0, + 0x63, 0xee, 0xaa, 0x6d, 0x3d, 0x08, 0x4b, 0x14, 0x6c, 0xf2, 0xbf, 0xce, 0x58, 0x75, 0x31, 0x84, + 0x0e, 0x16, 0xe9, 0x31, 0xf3, 0x06, 0x83, 0x45, 0x72, 0x14, 0x43, 0xe4, 0x41, 0xce, 0x06, 0x07, + 0xb9, 0x09, 0x85, 0x09, 0x71, 0x82, 0x94, 0xfe, 0x7c, 0x4e, 0x97, 0x04, 0xf4, 0x1e, 0x2c, 0x44, + 0xd3, 0x4b, 0x4e, 0xf0, 0xd4, 0x7a, 0xe1, 0x6c, 0xf4, 0x00, 0x2a, 0xa1, 0x1c, 0x97, 0x17, 0x7c, + 0xe5, 0xa1, 0x92, 0xe2, 0x56, 0x65, 0x5c, 0xa5, 0xf9, 0xb8, 0xf2, 0x7c, 0x4e, 0x46, 0xd6, 0x55, + 0x19, 0x59, 0x8b, 0x62, 0x94, 0x88, 0xad, 0xa1, 0x20, 0xf3, 0xfd, 0x70, 0x90, 0xc1, 0xdf, 0x87, + 0x6a, 0xc8, 0x41, 0x34, 0xef, 0xb4, 0x3f, 0x79, 0xb9, 0x7d, 0xc0, 0x93, 0xd4, 0x33, 0x96, 0x97, + 0xf4, 0xba, 0x46, 0x73, 0xdd, 0x41, 0xfb, 0xf8, 0xb8, 0x9e, 0x41, 0x55, 0x28, 0x1d, 0x76, 0xba, + 0x27, 0x9c, 0x2b, 0x8b, 0x9f, 0xf9, 0x12, 0x44, 0x92, 0x53, 0x72, 0xdb, 0x9c, 0x92, 0xdb, 0x34, + 0x99, 0xdb, 0x32, 0x41, 0x6e, 0x63, 0x69, 0xee, 0xa0, 0xbd, 0x7d, 0xdc, 0xae, 0xcf, 0x3f, 0xad, + 0x41, 0x85, 0xfb, 0xf7, 0x64, 0x6c, 0xd1, 0x54, 0xfb, 0x0f, 0x1a, 0x40, 0x70, 0x9a, 0x50, 0x0b, + 0x0a, 0x3d, 0xae, 0xa7, 0xa1, 0xb1, 0x60, 0xb4, 0x92, 0xb8, 0x64, 0xba, 0xe4, 0x42, 0xdf, 0x82, + 0x82, 0x3b, 0xee, 0xf5, 0x88, 0x2b, 0x53, 0xde, 0xad, 0x68, 0x3c, 0x14, 0xd1, 0x4a, 0x97, 0x7c, + 0x74, 0xc8, 0x99, 0x61, 0x0e, 0xc6, 0x2c, 0x01, 0x4e, 0x1f, 0x22, 0xf8, 0xf0, 0xdf, 0x69, 0x50, + 0x56, 0x36, 0xef, 0x6f, 0x18, 0x84, 0xef, 0x42, 0x89, 0xd9, 0x40, 0xfa, 0x22, 0x0c, 0x17, 0xf5, + 0x80, 0x80, 0x7e, 0x07, 0x4a, 0xf2, 0x04, 0xc8, 0x48, 0xdc, 0x48, 0x16, 0xdb, 0x19, 0xe9, 0x01, + 0x2b, 0xde, 0x87, 0x45, 0xe6, 0x95, 0x1e, 0x2d, 0xae, 0xa5, 0x1f, 0xd5, 0xf2, 0x53, 0x8b, 0x94, + 0x9f, 0x4d, 0x28, 0x8e, 0x2e, 0xae, 0x5d, 0xb3, 0x67, 0x0c, 0x84, 0x15, 0x7e, 0x1b, 0x7f, 0x0c, + 0x48, 0x15, 0x36, 0xcb, 0x74, 0x71, 0x15, 0xca, 0xcf, 0x0d, 0xf7, 0x42, 0x98, 0x84, 0x1f, 0x43, + 0x95, 0x36, 0xf7, 0x5f, 0xbd, 0x81, 0x8d, 0xec, 0xe5, 0x40, 0x72, 0xcf, 0xe4, 0x73, 0x04, 0xf3, + 0x17, 0x86, 0x7b, 0xc1, 0x26, 0x5a, 0xd5, 0xd9, 0x33, 0x7a, 0x0f, 0xea, 0x3d, 0x3e, 0xc9, 0x93, + 0xc8, 0x2b, 0xc3, 0x82, 0xa0, 0xfb, 0x95, 0xe0, 0x67, 0x50, 0xe1, 0x73, 0xf8, 0x6d, 0x1b, 0x81, + 0x17, 0x61, 0xe1, 0xd8, 0x32, 0x46, 0xee, 0x85, 0x2d, 0xb3, 0x1b, 0x9d, 0x74, 0x3d, 0xa0, 0xcd, + 0xa4, 0xf1, 0x5d, 0x58, 0x70, 0xc8, 0xd0, 0x30, 0x2d, 0xd3, 0x3a, 0x3f, 0x39, 0xbd, 0xf6, 0x88, + 0x2b, 0x5e, 0x98, 0x6a, 0x3e, 0xf9, 0x29, 0xa5, 0x52, 0xd3, 0x4e, 0x07, 0xf6, 0xa9, 0x08, 0x73, + 0xec, 0x19, 0xff, 0x34, 0x03, 0x95, 0x4f, 0x0d, 0xaf, 0x27, 0x97, 0x0e, 0xed, 0x41, 0xcd, 0x0f, + 0x6e, 0x8c, 0x22, 0x6c, 0x89, 0xa4, 0x58, 0x36, 0x46, 0x96, 0xd2, 0x32, 0x3b, 0x56, 0x7b, 0x2a, + 0x81, 0x89, 0x32, 0xac, 0x1e, 0x19, 0xf8, 0xa2, 0x32, 0xe9, 0xa2, 0x18, 0xa3, 0x2a, 0x4a, 0x25, + 0xa0, 
0x0e, 0xd4, 0x47, 0x8e, 0x7d, 0xee, 0x10, 0xd7, 0xf5, 0x85, 0xf1, 0x34, 0x86, 0x13, 0x84, + 0x1d, 0x09, 0xd6, 0x40, 0xdc, 0xc2, 0x28, 0x4c, 0x7a, 0xba, 0x10, 0xd4, 0x33, 0x3c, 0x38, 0xfd, + 0x57, 0x06, 0x50, 0x7c, 0x52, 0xbf, 0x6e, 0x89, 0xf7, 0x10, 0x6a, 0xae, 0x67, 0x38, 0xb1, 0xcd, + 0x56, 0x65, 0x54, 0x3f, 0xe2, 0xbf, 0x0b, 0xbe, 0x41, 0x27, 0x96, 0xed, 0x99, 0x67, 0xd7, 0xa2, + 0x4a, 0xae, 0x49, 0xf2, 0x21, 0xa3, 0xa2, 0x36, 0x14, 0xce, 0xcc, 0x81, 0x47, 0x1c, 0xb7, 0x91, + 0x5b, 0xcf, 0x6e, 0xd4, 0xb6, 0x1e, 0xdf, 0xb4, 0x0c, 0x9b, 0x1f, 0x31, 0xfe, 0xee, 0xf5, 0x88, + 0xe8, 0x72, 0xac, 0x5a, 0x79, 0xe6, 0x43, 0xd5, 0xf8, 0x6d, 0x28, 0xbe, 0xa6, 0x22, 0xe8, 0x5b, + 0x76, 0x81, 0x17, 0x8b, 0xac, 0xcd, 0x5f, 0xb2, 0xcf, 0x1c, 0xe3, 0x7c, 0x48, 0x2c, 0x4f, 0xbe, + 0x07, 0xca, 0x36, 0x7e, 0x08, 0x10, 0xa8, 0xa1, 0x21, 0xff, 0xb0, 0x73, 0xf4, 0xb2, 0x5b, 0x9f, + 0x43, 0x15, 0x28, 0x1e, 0x76, 0x76, 0xdb, 0x07, 0x6d, 0x9a, 0x1f, 0x70, 0x4b, 0xba, 0x34, 0xb4, + 0x96, 0xaa, 0x4e, 0x2d, 0xa4, 0x13, 0xaf, 0xc2, 0x72, 0xd2, 0x02, 0xd2, 0x5a, 0xb4, 0x2a, 0x76, + 0xe9, 0x4c, 0x47, 0x45, 0x55, 0x9d, 0x09, 0x4f, 0xb7, 0x01, 0x05, 0xbe, 0x7b, 0xfb, 0xa2, 0x38, + 0x97, 0x4d, 0xea, 0x08, 0xbe, 0x19, 0x49, 0x5f, 0xac, 0x92, 0xdf, 0x4e, 0x0c, 0x2f, 0xb9, 0xc4, + 0xf0, 0x82, 0x1e, 0x40, 0xd5, 0x3f, 0x0d, 0x86, 0x2b, 0x6a, 0x81, 0x92, 0x5e, 0x91, 0x1b, 0x9d, + 0xd2, 0x42, 0x4e, 0x2f, 0x84, 0x9d, 0x8e, 0x1e, 0x42, 0x9e, 0x4c, 0x88, 0xe5, 0xb9, 0x8d, 0x32, + 0xcb, 0x18, 0x55, 0x59, 0xbb, 0xb7, 0x29, 0x55, 0x17, 0x9d, 0xf8, 0xbb, 0xb0, 0xc8, 0xde, 0x91, + 0x9e, 0x39, 0x86, 0xa5, 0xbe, 0xcc, 0x75, 0xbb, 0x07, 0xc2, 0xdd, 0xf4, 0x11, 0xd5, 0x20, 0xb3, + 0xb7, 0x2b, 0x9c, 0x90, 0xd9, 0xdb, 0xc5, 0x3f, 0xd6, 0x00, 0xa9, 0xe3, 0x66, 0xf2, 0x73, 0x44, + 0xb8, 0x54, 0x9f, 0x0d, 0xd4, 0x2f, 0x43, 0x8e, 0x38, 0x8e, 0xed, 0x30, 0x8f, 0x96, 0x74, 0xde, + 0xc0, 0x6f, 0x0b, 0x1b, 0x74, 0x32, 0xb1, 0x2f, 0xfd, 0x33, 0xc8, 0xa5, 0x69, 0xbe, 0xa9, 0xfb, + 0xb0, 0x14, 0xe2, 0x9a, 0x29, 0x73, 0x7d, 0x04, 0x0b, 0x4c, 0xd8, 0xce, 0x05, 0xe9, 0x5d, 0x8e, + 0x6c, 0xd3, 0x8a, 0xe9, 0xa3, 0x2b, 0x17, 0x04, 0x58, 0x3a, 0x0f, 0x3e, 0xb1, 0x8a, 0x4f, 0xec, + 0x76, 0x0f, 0xf0, 0xe7, 0xb0, 0x1a, 0x91, 0x23, 0xcd, 0xff, 0x43, 0x28, 0xf7, 0x7c, 0xa2, 0x2b, + 0x6a, 0x9d, 0x7b, 0x61, 0xe3, 0xa2, 0x43, 0xd5, 0x11, 0xb8, 0x03, 0xb7, 0x62, 0xa2, 0x67, 0x9a, + 0xf3, 0xbb, 0xb0, 0xc2, 0x04, 0xee, 0x13, 0x32, 0xda, 0x1e, 0x98, 0x93, 0x54, 0x4f, 0x8f, 0xc4, + 0xa4, 0x14, 0xc6, 0xaf, 0x77, 0x5f, 0xe0, 0xdf, 0x17, 0x1a, 0xbb, 0xe6, 0x90, 0x74, 0xed, 0x83, + 0x74, 0xdb, 0x68, 0x36, 0xbb, 0x24, 0xd7, 0xae, 0x28, 0x6b, 0xd8, 0x33, 0xfe, 0x47, 0x4d, 0xb8, + 0x4a, 0x1d, 0xfe, 0x35, 0xef, 0xe4, 0x35, 0x80, 0x73, 0x7a, 0x64, 0x48, 0x9f, 0x76, 0x70, 0x44, + 0x45, 0xa1, 0xf8, 0x76, 0xd2, 0xf8, 0x5d, 0x11, 0x76, 0x2e, 0x8b, 0x7d, 0xce, 0xfe, 0xf8, 0x51, + 0xee, 0x1e, 0x94, 0x19, 0xe1, 0xd8, 0x33, 0xbc, 0xb1, 0x1b, 0x5b, 0x8c, 0x3f, 0x17, 0xdb, 0x5e, + 0x0e, 0x9a, 0x69, 0x5e, 0xdf, 0x82, 0x3c, 0x7b, 0x99, 0x90, 0xa5, 0xf4, 0xed, 0x84, 0xfd, 0xc8, + 0xed, 0xd0, 0x05, 0x23, 0xfe, 0xa9, 0x06, 0xf9, 0x17, 0x0c, 0x82, 0x55, 0x4c, 0x9b, 0x97, 0x6b, + 0x61, 0x19, 0x43, 0x0e, 0x0c, 0x95, 0x74, 0xf6, 0xcc, 0x4a, 0x4f, 0x42, 0x9c, 0x97, 0xfa, 0x01, + 0x2f, 0x71, 0x4b, 0xba, 0xdf, 0xa6, 0x3e, 0xeb, 0x0d, 0x4c, 0x62, 0x79, 0xac, 0x77, 0x9e, 0xf5, + 0x2a, 0x14, 0x5a, 0x3d, 0x9b, 0xee, 0x01, 0x31, 0x1c, 0x4b, 0x80, 0xa6, 0x45, 0x3d, 0x20, 0xe0, + 0x03, 0xa8, 0x73, 0x3b, 0xb6, 0xfb, 0x7d, 0xa5, 0xc0, 0xf4, 0xb5, 0x69, 0x11, 0x6d, 0x21, 0x69, + 0x99, 0xa8, 0xb4, 0x7f, 0xd2, 
0x60, 0x51, 0x11, 0x37, 0x93, 0x57, 0xdf, 0x87, 0x3c, 0x07, 0xa9, + 0x45, 0xa5, 0xb3, 0x1c, 0x1e, 0xc5, 0xd5, 0xe8, 0x82, 0x07, 0x6d, 0x42, 0x81, 0x3f, 0xc9, 0x77, + 0x80, 0x64, 0x76, 0xc9, 0x84, 0x1f, 0xc2, 0x92, 0x20, 0x91, 0xa1, 0x9d, 0x74, 0x30, 0xd8, 0x62, + 0xe0, 0x3f, 0x85, 0xe5, 0x30, 0xdb, 0x4c, 0x53, 0x52, 0x8c, 0xcc, 0xbc, 0x89, 0x91, 0xdb, 0xd2, + 0xc8, 0x97, 0xa3, 0xbe, 0x52, 0x47, 0x45, 0x77, 0x8c, 0xba, 0x5e, 0x99, 0xf0, 0x7a, 0x05, 0x13, + 0x90, 0x22, 0xbe, 0xd1, 0x09, 0x2c, 0xc9, 0xed, 0x70, 0x60, 0xba, 0x7e, 0xb9, 0xfe, 0x25, 0x20, + 0x95, 0xf8, 0x8d, 0x1a, 0xf4, 0x8e, 0x74, 0xc7, 0x91, 0x63, 0x0f, 0xed, 0x54, 0x97, 0xe2, 0x3f, + 0x83, 0x95, 0x08, 0xdf, 0x37, 0xed, 0xb7, 0x5d, 0x22, 0x8b, 0x15, 0xe9, 0xb7, 0x8f, 0x01, 0xa9, + 0xc4, 0x99, 0xb2, 0x56, 0x0b, 0x16, 0x5f, 0xd8, 0x13, 0x1a, 0xfe, 0x28, 0x35, 0x38, 0xf7, 0x1c, + 0x63, 0xf0, 0x5d, 0xe1, 0xb7, 0xa9, 0x72, 0x75, 0xc0, 0x4c, 0xca, 0xff, 0x43, 0x83, 0xca, 0xf6, + 0xc0, 0x70, 0x86, 0x52, 0xf1, 0xf7, 0x20, 0xcf, 0xdf, 0x9c, 0x05, 0x58, 0xf5, 0x4e, 0x58, 0x8c, + 0xca, 0xcb, 0x1b, 0xdb, 0xfc, 0x3d, 0x5b, 0x8c, 0xa2, 0x86, 0x8b, 0xef, 0x59, 0xbb, 0x91, 0xef, + 0x5b, 0xbb, 0xe8, 0x03, 0xc8, 0x19, 0x74, 0x08, 0x4b, 0x33, 0xb5, 0x28, 0x66, 0xc1, 0xa4, 0xb1, + 0xfa, 0x9e, 0x73, 0xe1, 0xef, 0x40, 0x59, 0xd1, 0x80, 0x0a, 0x90, 0x7d, 0xd6, 0x16, 0xc5, 0xf8, + 0xf6, 0x4e, 0x77, 0xef, 0x15, 0x07, 0x6b, 0x6a, 0x00, 0xbb, 0x6d, 0xbf, 0x9d, 0xc1, 0x9f, 0x89, + 0x51, 0x22, 0xa4, 0xab, 0xf6, 0x68, 0x69, 0xf6, 0x64, 0xde, 0xc8, 0x9e, 0x2b, 0xa8, 0x8a, 0xe9, + 0xcf, 0x9a, 0xa2, 0x98, 0xbc, 0x94, 0x14, 0xa5, 0x18, 0xaf, 0x0b, 0x46, 0xbc, 0x00, 0x55, 0x91, + 0xb4, 0xc4, 0xfe, 0xfb, 0xf7, 0x0c, 0xd4, 0x24, 0x65, 0x56, 0x50, 0x5d, 0xe2, 0x81, 0x3c, 0xc9, + 0xf9, 0x68, 0xe0, 0x2a, 0xe4, 0xfb, 0xa7, 0xc7, 0xe6, 0x97, 0xf2, 0x03, 0x88, 0x68, 0x51, 0xfa, + 0x80, 0xeb, 0xe1, 0x5f, 0x21, 0x45, 0x8b, 0x66, 0x23, 0xc7, 0x38, 0xf3, 0xf6, 0xac, 0x3e, 0xb9, + 0x62, 0xb9, 0x6d, 0x5e, 0x0f, 0x08, 0x0c, 0x28, 0x11, 0x5f, 0x2b, 0xd9, 0x0b, 0x82, 0xf2, 0xf5, + 0x12, 0x3d, 0x82, 0x3a, 0x7d, 0xde, 0x1e, 0x8d, 0x06, 0x26, 0xe9, 0x73, 0x01, 0x05, 0xc6, 0x13, + 0xa3, 0x53, 0xed, 0xac, 0xa4, 0x76, 0x1b, 0x45, 0x16, 0x5d, 0x45, 0x0b, 0xad, 0x43, 0x99, 0xdb, + 0xb7, 0x67, 0xbd, 0x74, 0x09, 0xfb, 0x84, 0x97, 0xd5, 0x55, 0x52, 0x38, 0x5b, 0x42, 0x34, 0x5b, + 0x2e, 0xc1, 0xe2, 0xf6, 0xd8, 0xbb, 0x68, 0x5b, 0xc6, 0xe9, 0x40, 0x46, 0x22, 0x5a, 0xce, 0x50, + 0xe2, 0xae, 0xe9, 0xaa, 0xd4, 0x36, 0x2c, 0x51, 0x2a, 0xb1, 0x3c, 0xb3, 0xa7, 0x64, 0x02, 0x59, + 0x2b, 0x68, 0x91, 0x5a, 0xc1, 0x70, 0xdd, 0xd7, 0xb6, 0xd3, 0x17, 0xee, 0xf5, 0xdb, 0x78, 0xc2, + 0x85, 0xbf, 0x74, 0x43, 0xf9, 0xfe, 0xd7, 0x94, 0x82, 0x3e, 0x84, 0x82, 0x3d, 0x62, 0x9f, 0xa4, + 0x05, 0x6e, 0xb0, 0xba, 0xc9, 0x3f, 0x62, 0x6f, 0x0a, 0xc1, 0x1d, 0xde, 0xab, 0x4b, 0x36, 0xbc, + 0x11, 0xe8, 0x7d, 0x46, 0xbc, 0x29, 0x7a, 0xf1, 0x63, 0x58, 0x91, 0x9c, 0x02, 0x26, 0x9f, 0xc2, + 0xdc, 0x81, 0x7b, 0x92, 0x79, 0xe7, 0xc2, 0xb0, 0xce, 0xc9, 0x91, 0x30, 0xf1, 0x37, 0xf5, 0xcf, + 0x53, 0x68, 0xf8, 0x76, 0xb2, 0x57, 0x37, 0x7b, 0xa0, 0x1a, 0x30, 0x76, 0xc5, 0x4e, 0x2f, 0xe9, + 0xec, 0x99, 0xd2, 0x1c, 0x7b, 0xe0, 0xd7, 0x6a, 0xf4, 0x19, 0xef, 0xc0, 0x6d, 0x29, 0x43, 0xbc, + 0x54, 0x85, 0x85, 0xc4, 0x0c, 0x4a, 0x12, 0x22, 0x1c, 0x46, 0x87, 0x4e, 0x5f, 0x28, 0x95, 0x33, + 0xec, 0x5a, 0x26, 0x53, 0x53, 0x64, 0xae, 0xf0, 0x3d, 0x44, 0x0d, 0x53, 0xd3, 0xb1, 0x20, 0x53, + 0x01, 0x2a, 0x59, 0x2c, 0x04, 0x25, 0xc7, 0x16, 0x22, 0x26, 0xfa, 0x07, 0xb0, 0xe6, 0x1b, 0x41, + 0xfd, 0x76, 0x44, 0x9c, 0xa1, 0xe9, 0xba, 0x0a, 0xb0, 
0x9a, 0x34, 0xf1, 0x77, 0x60, 0x7e, 0x44, + 0x44, 0x24, 0x2c, 0x6f, 0x21, 0xb9, 0x89, 0x94, 0xc1, 0xac, 0x1f, 0xf7, 0xe1, 0xbe, 0x94, 0xce, + 0x3d, 0x9a, 0x28, 0x3e, 0x6a, 0x94, 0x84, 0x9b, 0x32, 0x29, 0x70, 0x53, 0x36, 0x02, 0xf6, 0x7f, + 0xcc, 0x1d, 0x29, 0x4f, 0xe3, 0x4c, 0x19, 0x6e, 0x9f, 0xfb, 0xd4, 0x3f, 0xc4, 0x33, 0x09, 0x3b, + 0x85, 0xe5, 0xf0, 0xd9, 0x9f, 0x29, 0xf8, 0x2e, 0x43, 0xce, 0xb3, 0x2f, 0x89, 0x0c, 0xbd, 0xbc, + 0x21, 0x0d, 0xf6, 0x03, 0xc3, 0x4c, 0x06, 0x1b, 0x81, 0x30, 0xb6, 0x25, 0x67, 0xb5, 0x97, 0xae, + 0xa6, 0xac, 0x6c, 0x79, 0x03, 0x1f, 0xc2, 0x6a, 0x34, 0x4c, 0xcc, 0x64, 0xf2, 0x2b, 0xbe, 0x81, + 0x93, 0x22, 0xc9, 0x4c, 0x72, 0x3f, 0x09, 0x82, 0x81, 0x12, 0x50, 0x66, 0x12, 0xa9, 0x43, 0x33, + 0x29, 0xbe, 0xfc, 0x36, 0xf6, 0xab, 0x1f, 0x6e, 0x66, 0x12, 0xe6, 0x06, 0xc2, 0x66, 0x5f, 0xfe, + 0x20, 0x46, 0x64, 0xa7, 0xc6, 0x08, 0x71, 0x48, 0x82, 0x28, 0xf6, 0x35, 0x6c, 0x3a, 0xa1, 0x23, + 0x08, 0xa0, 0xb3, 0xea, 0xa0, 0x39, 0xc4, 0xd7, 0xc1, 0x1a, 0x72, 0x63, 0xab, 0x61, 0x77, 0xa6, + 0xc5, 0xf8, 0x34, 0x88, 0x9d, 0xb1, 0xc8, 0x3c, 0x93, 0xe0, 0xcf, 0x60, 0x3d, 0x3d, 0x28, 0xcf, + 0x22, 0xf9, 0x51, 0x0b, 0x4a, 0x7e, 0x19, 0xac, 0xdc, 0x22, 0x2a, 0x43, 0xe1, 0xb0, 0x73, 0x7c, + 0xb4, 0xbd, 0xd3, 0xe6, 0xd7, 0x88, 0x76, 0x3a, 0xba, 0xfe, 0xf2, 0xa8, 0x5b, 0xcf, 0x6c, 0xfd, + 0x32, 0x0b, 0x99, 0xfd, 0x57, 0xe8, 0x73, 0xc8, 0xf1, 0x6f, 0xea, 0x53, 0x2e, 0x52, 0x34, 0xa7, + 0x5d, 0x1b, 0xc0, 0xb7, 0x7e, 0xfc, 0xdf, 0xbf, 0xfc, 0x79, 0x66, 0x11, 0x57, 0x5a, 0x93, 0x6f, + 0xb7, 0x2e, 0x27, 0x2d, 0x96, 0x1b, 0x9e, 0x68, 0x8f, 0xd0, 0x27, 0x90, 0x3d, 0x1a, 0x7b, 0x28, + 0xf5, 0x82, 0x45, 0x33, 0xfd, 0x26, 0x01, 0x5e, 0x61, 0x42, 0x17, 0x30, 0x08, 0xa1, 0xa3, 0xb1, + 0x47, 0x45, 0xfe, 0x10, 0xca, 0xea, 0x3d, 0x80, 0x1b, 0x6f, 0x5d, 0x34, 0x6f, 0xbe, 0x63, 0x80, + 0xef, 0x31, 0x55, 0xb7, 0x30, 0x12, 0xaa, 0xf8, 0x4d, 0x05, 0x75, 0x16, 0xdd, 0x2b, 0x0b, 0xa5, + 0xde, 0xc9, 0x68, 0xa6, 0x5f, 0x3b, 0x88, 0xcd, 0xc2, 0xbb, 0xb2, 0xa8, 0xc8, 0x3f, 0x16, 0x37, + 0x0e, 0x7a, 0x1e, 0xba, 0x9f, 0xf0, 0xc5, 0x59, 0xfd, 0xb6, 0xda, 0x5c, 0x4f, 0x67, 0x10, 0x4a, + 0xee, 0x32, 0x25, 0xab, 0x78, 0x51, 0x28, 0xe9, 0xf9, 0x2c, 0x4f, 0xb4, 0x47, 0x5b, 0x3d, 0xc8, + 0xb1, 0xef, 0x16, 0xe8, 0x0b, 0xf9, 0xd0, 0x4c, 0xf8, 0x80, 0x93, 0xb2, 0xd0, 0xa1, 0x2f, 0x1e, + 0x78, 0x99, 0x29, 0xaa, 0xe1, 0x12, 0x55, 0xc4, 0xbe, 0x5a, 0x3c, 0xd1, 0x1e, 0x6d, 0x68, 0x1f, + 0x6a, 0x5b, 0xff, 0x9c, 0x83, 0x1c, 0x03, 0xec, 0xd0, 0x25, 0x40, 0x80, 0xe1, 0x47, 0x67, 0x17, + 0xfb, 0x2a, 0x10, 0x9d, 0x5d, 0x1c, 0xfe, 0xc7, 0x4d, 0xa6, 0x74, 0x19, 0x2f, 0x50, 0xa5, 0x0c, + 0x07, 0x6c, 0x31, 0x68, 0x93, 0xfa, 0xf1, 0xaf, 0x34, 0x81, 0x57, 0xf2, 0xb3, 0x84, 0x92, 0xa4, + 0x85, 0x80, 0xfc, 0xe8, 0x76, 0x48, 0x00, 0xf1, 0xf1, 0x77, 0x99, 0xc2, 0x16, 0xae, 0x07, 0x0a, + 0x1d, 0xc6, 0xf1, 0x44, 0x7b, 0xf4, 0x45, 0x03, 0x2f, 0x09, 0x2f, 0x47, 0x7a, 0xd0, 0x8f, 0xa0, + 0x16, 0x06, 0xaa, 0xd1, 0x83, 0x04, 0x5d, 0x51, 0xbc, 0xbb, 0xf9, 0xf6, 0x74, 0x26, 0x61, 0xd3, + 0x1a, 0xb3, 0x49, 0x28, 0xe7, 0x9a, 0x2f, 0x09, 0x19, 0x19, 0x94, 0x49, 0xac, 0x01, 0xfa, 0x7b, + 0x4d, 0x7c, 0x47, 0x08, 0x90, 0x67, 0x94, 0x24, 0x3d, 0x86, 0x6b, 0x37, 0x1f, 0xde, 0xc0, 0x25, + 0x8c, 0xf8, 0x03, 0x66, 0xc4, 0xef, 0xe2, 0xe5, 0xc0, 0x08, 0xcf, 0x1c, 0x12, 0xcf, 0x16, 0x56, + 0x7c, 0x71, 0x17, 0xdf, 0x0a, 0x39, 0x27, 0xd4, 0x1b, 0x2c, 0x16, 0x47, 0x8f, 0x13, 0x17, 0x2b, + 0x84, 0x46, 0x27, 0x2e, 0x56, 0x18, 0x7a, 0x4e, 0x5a, 0x2c, 0x8e, 0x15, 0x27, 0x2d, 0x96, 0xdf, + 0xb3, 0xf5, 0xff, 0xf3, 0x50, 0xd8, 0xe1, 0x37, 0x7d, 0x91, 0x0d, 0x25, 0x1f, 
0x7c, 0x45, 0x6b, + 0x49, 0x08, 0x53, 0xf0, 0x2e, 0xd1, 0xbc, 0x9f, 0xda, 0x2f, 0x0c, 0x7a, 0x8b, 0x19, 0x74, 0x07, + 0xaf, 0x52, 0xcd, 0xe2, 0x32, 0x71, 0x8b, 0xc3, 0x18, 0x2d, 0xa3, 0xdf, 0xa7, 0x8e, 0xf8, 0x13, + 0xa8, 0xa8, 0xe8, 0x28, 0x7a, 0x2b, 0x11, 0xd5, 0x52, 0x01, 0xd6, 0x26, 0x9e, 0xc6, 0x22, 0x34, + 0xbf, 0xcd, 0x34, 0xaf, 0xe1, 0xdb, 0x09, 0x9a, 0x1d, 0xc6, 0x1a, 0x52, 0xce, 0x91, 0xcd, 0x64, + 0xe5, 0x21, 0xe0, 0x34, 0x59, 0x79, 0x18, 0x18, 0x9d, 0xaa, 0x7c, 0xcc, 0x58, 0xa9, 0x72, 0x17, + 0x20, 0xc0, 0x30, 0x51, 0xa2, 0x2f, 0x95, 0x97, 0xa9, 0x68, 0x70, 0x88, 0xc3, 0x9f, 0x18, 0x33, + 0xb5, 0x62, 0xdf, 0x45, 0xd4, 0x0e, 0x4c, 0xd7, 0xe3, 0x07, 0xb3, 0x1a, 0x02, 0x25, 0x51, 0xe2, + 0x7c, 0xc2, 0xc8, 0x66, 0xf3, 0xc1, 0x54, 0x1e, 0xa1, 0xfd, 0x21, 0xd3, 0x7e, 0x1f, 0x37, 0x13, + 0xb4, 0x8f, 0x38, 0x2f, 0xdd, 0x6c, 0x7f, 0x9d, 0x87, 0xf2, 0x0b, 0xc3, 0xb4, 0x3c, 0x62, 0x19, + 0x56, 0x8f, 0xa0, 0x53, 0xc8, 0xb1, 0x4c, 0x1d, 0x0d, 0xc4, 0x2a, 0x60, 0x17, 0x0d, 0xc4, 0x21, + 0x34, 0x0b, 0xaf, 0x33, 0xc5, 0x4d, 0xbc, 0x42, 0x15, 0x0f, 0x03, 0xd1, 0x2d, 0x06, 0x42, 0xd1, + 0x49, 0x9f, 0x41, 0x5e, 0x7c, 0xc3, 0x89, 0x08, 0x0a, 0x81, 0x53, 0xcd, 0xbb, 0xc9, 0x9d, 0x49, + 0x7b, 0x59, 0x55, 0xe3, 0x32, 0x3e, 0xaa, 0x67, 0x02, 0x10, 0xa0, 0xab, 0xd1, 0x15, 0x8d, 0x81, + 0xb1, 0xcd, 0xf5, 0x74, 0x86, 0x24, 0x9f, 0xaa, 0x3a, 0xfb, 0x3e, 0x2f, 0xd5, 0xfb, 0x47, 0x30, + 0xff, 0xdc, 0x70, 0x2f, 0x50, 0x24, 0xf7, 0x2a, 0x37, 0x80, 0x9a, 0xcd, 0xa4, 0x2e, 0xa1, 0xe5, + 0x3e, 0xd3, 0x72, 0x9b, 0x87, 0x32, 0x55, 0xcb, 0x85, 0xe1, 0xd2, 0xa4, 0x86, 0xfa, 0x90, 0xe7, + 0x17, 0x82, 0xa2, 0xfe, 0x0b, 0x5d, 0x2a, 0x8a, 0xfa, 0x2f, 0x7c, 0x87, 0xe8, 0x66, 0x2d, 0x23, + 0x28, 0xca, 0x1b, 0x38, 0x28, 0xf2, 0x39, 0x36, 0x72, 0x5b, 0xa7, 0xb9, 0x96, 0xd6, 0x2d, 0x74, + 0x3d, 0x60, 0xba, 0xee, 0xe1, 0x46, 0x6c, 0xad, 0x04, 0xe7, 0x13, 0xed, 0xd1, 0x87, 0x1a, 0xfa, + 0x11, 0x40, 0x00, 0x48, 0xc7, 0x4e, 0x60, 0x14, 0xdb, 0x8e, 0x9d, 0xc0, 0x18, 0x96, 0x8d, 0x37, + 0x99, 0xde, 0x0d, 0xfc, 0x20, 0xaa, 0xd7, 0x73, 0x0c, 0xcb, 0x3d, 0x23, 0xce, 0x07, 0x1c, 0x74, + 0x74, 0x2f, 0xcc, 0x11, 0x3d, 0x0c, 0xff, 0xba, 0x00, 0xf3, 0xb4, 0x02, 0xa6, 0x85, 0x42, 0x00, + 0x1c, 0x44, 0x2d, 0x89, 0x01, 0x7c, 0x51, 0x4b, 0xe2, 0x98, 0x43, 0xb8, 0x50, 0x60, 0xbf, 0x11, + 0x21, 0x8c, 0x81, 0x3a, 0xda, 0x86, 0xb2, 0x82, 0x2c, 0xa0, 0x04, 0x61, 0x61, 0xe4, 0x30, 0x9a, + 0x7a, 0x12, 0x60, 0x09, 0x7c, 0x87, 0xe9, 0x5b, 0xe1, 0xa9, 0x87, 0xe9, 0xeb, 0x73, 0x0e, 0xaa, + 0xf0, 0x35, 0x54, 0x54, 0xf4, 0x01, 0x25, 0xc8, 0x8b, 0xa0, 0x92, 0xd1, 0x30, 0x9b, 0x04, 0x5e, + 0x84, 0x0f, 0xbe, 0xff, 0x3b, 0x18, 0xc9, 0x46, 0x15, 0x0f, 0xa0, 0x20, 0xe0, 0x88, 0xa4, 0x59, + 0x86, 0x21, 0xcc, 0xa4, 0x59, 0x46, 0xb0, 0x8c, 0x70, 0x71, 0xc9, 0x34, 0xd2, 0x37, 0x2e, 0x99, + 0xca, 0x84, 0xb6, 0x67, 0xc4, 0x4b, 0xd3, 0x16, 0xa0, 0x6b, 0x69, 0xda, 0x94, 0xb7, 0xdd, 0x34, + 0x6d, 0xe7, 0xc4, 0x13, 0xc7, 0x45, 0xbe, 0x45, 0xa2, 0x14, 0x61, 0x6a, 0xfa, 0xc0, 0xd3, 0x58, + 0x92, 0x6a, 0xff, 0x40, 0xa1, 0xcc, 0x1d, 0x57, 0x00, 0x01, 0x58, 0x12, 0x2d, 0xe8, 0x12, 0x11, + 0xd7, 0x68, 0x41, 0x97, 0x8c, 0xb7, 0x84, 0x43, 0x43, 0xa0, 0x97, 0xbf, 0x7a, 0x50, 0xcd, 0x3f, + 0xd3, 0x00, 0xc5, 0x71, 0x15, 0xf4, 0x38, 0x59, 0x7a, 0x22, 0x8e, 0xdb, 0x7c, 0xff, 0xcd, 0x98, + 0x93, 0xa2, 0x7d, 0x60, 0x52, 0x8f, 0x71, 0x8f, 0x5e, 0x53, 0xa3, 0xfe, 0x42, 0x83, 0x6a, 0x08, + 0x94, 0x41, 0xef, 0xa4, 0xac, 0x69, 0x04, 0x06, 0x6e, 0xbe, 0x7b, 0x23, 0x5f, 0x52, 0xa5, 0xab, + 0xec, 0x00, 0x59, 0xf2, 0xff, 0x44, 0x83, 0x5a, 0x18, 0xc4, 0x41, 0x29, 0xb2, 0x63, 0x30, 0x72, + 0x73, 
0xe3, 0x66, 0xc6, 0xe9, 0xcb, 0x13, 0x54, 0xfb, 0x03, 0x28, 0x08, 0xd8, 0x27, 0x69, 0xe3, + 0x87, 0x01, 0xe8, 0xa4, 0x8d, 0x1f, 0xc1, 0x8c, 0x12, 0x36, 0xbe, 0x63, 0x0f, 0x88, 0x72, 0xcc, + 0x04, 0x2e, 0x94, 0xa6, 0x6d, 0xfa, 0x31, 0x8b, 0x80, 0x4a, 0x69, 0xda, 0x82, 0x63, 0x26, 0x01, + 0x21, 0x94, 0x22, 0xec, 0x86, 0x63, 0x16, 0xc5, 0x93, 0x12, 0x8e, 0x19, 0x53, 0xa8, 0x1c, 0xb3, + 0x00, 0xba, 0x49, 0x3a, 0x66, 0x31, 0x3c, 0x3d, 0xe9, 0x98, 0xc5, 0xd1, 0x9f, 0x84, 0x75, 0x64, + 0x7a, 0x43, 0xc7, 0x6c, 0x29, 0x01, 0xe5, 0x41, 0xef, 0xa7, 0x38, 0x31, 0x11, 0xa6, 0x6f, 0x7e, + 0xf0, 0x86, 0xdc, 0xa9, 0x7b, 0x9c, 0xbb, 0x5f, 0xee, 0xf1, 0xbf, 0xd1, 0x60, 0x39, 0x09, 0x21, + 0x42, 0x29, 0x7a, 0x52, 0xe0, 0xfd, 0xe6, 0xe6, 0x9b, 0xb2, 0x4f, 0xf7, 0x96, 0xbf, 0xeb, 0x9f, + 0xd6, 0xff, 0xed, 0xab, 0x35, 0xed, 0x3f, 0xbf, 0x5a, 0xd3, 0xfe, 0xe7, 0xab, 0x35, 0xed, 0x6f, + 0xff, 0x77, 0x6d, 0xee, 0x34, 0xcf, 0x7e, 0x5d, 0xf9, 0xed, 0x5f, 0x05, 0x00, 0x00, 0xff, 0xff, + 0x52, 0x4e, 0xd7, 0x33, 0xe4, 0x39, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto similarity index 88% rename from vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto rename to vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto index d9da43c09..423eabada 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto +++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto @@ -15,7 +15,7 @@ service KV { // Range gets the keys in the range from the key-value store. rpc Range(RangeRequest) returns (RangeResponse) { option (google.api.http) = { - post: "/v3beta/kv/range" + post: "/v3/kv/range" body: "*" }; } @@ -25,7 +25,7 @@ service KV { // and generates one event in the event history. rpc Put(PutRequest) returns (PutResponse) { option (google.api.http) = { - post: "/v3beta/kv/put" + post: "/v3/kv/put" body: "*" }; } @@ -35,7 +35,7 @@ service KV { // and generates a delete event in the event history for every deleted key. rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) { option (google.api.http) = { - post: "/v3beta/kv/deleterange" + post: "/v3/kv/deleterange" body: "*" }; } @@ -46,7 +46,7 @@ service KV { // It is not allowed to modify the same key several times within one txn. rpc Txn(TxnRequest) returns (TxnResponse) { option (google.api.http) = { - post: "/v3beta/kv/txn" + post: "/v3/kv/txn" body: "*" }; } @@ -56,7 +56,7 @@ service KV { // indefinitely. rpc Compact(CompactionRequest) returns (CompactionResponse) { option (google.api.http) = { - post: "/v3beta/kv/compaction" + post: "/v3/kv/compaction" body: "*" }; } @@ -70,7 +70,7 @@ service Watch { // last compaction revision. rpc Watch(stream WatchRequest) returns (stream WatchResponse) { option (google.api.http) = { - post: "/v3beta/watch" + post: "/v3/watch" body: "*" }; } @@ -82,7 +82,7 @@ service Lease { // deleted if the lease expires. Each expired key generates a delete event in the event history. rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) { option (google.api.http) = { - post: "/v3beta/lease/grant" + post: "/v3/lease/grant" body: "*" }; } @@ -90,8 +90,12 @@ service Lease { // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted. 
rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) { option (google.api.http) = { - post: "/v3beta/kv/lease/revoke" + post: "/v3/lease/revoke" body: "*" + additional_bindings { + post: "/v3/kv/lease/revoke" + body: "*" + } }; } @@ -99,7 +103,7 @@ service Lease { // to the server and streaming keep alive responses from the server to the client. rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) { option (google.api.http) = { - post: "/v3beta/lease/keepalive" + post: "/v3/lease/keepalive" body: "*" }; } @@ -107,16 +111,24 @@ service Lease { // LeaseTimeToLive retrieves lease information. rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) { option (google.api.http) = { - post: "/v3beta/kv/lease/timetolive" + post: "/v3/lease/timetolive" body: "*" + additional_bindings { + post: "/v3/kv/lease/timetolive" + body: "*" + } }; } // LeaseLeases lists all existing leases. rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) { option (google.api.http) = { - post: "/v3beta/kv/lease/leases" + post: "/v3/lease/leases" body: "*" + additional_bindings { + post: "/v3/kv/lease/leases" + body: "*" + } }; } } @@ -125,7 +137,7 @@ service Cluster { // MemberAdd adds a member into the cluster. rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) { option (google.api.http) = { - post: "/v3beta/cluster/member/add" + post: "/v3/cluster/member/add" body: "*" }; } @@ -133,7 +145,7 @@ service Cluster { // MemberRemove removes an existing member from the cluster. rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) { option (google.api.http) = { - post: "/v3beta/cluster/member/remove" + post: "/v3/cluster/member/remove" body: "*" }; } @@ -141,7 +153,7 @@ service Cluster { // MemberUpdate updates the member configuration. rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) { option (google.api.http) = { - post: "/v3beta/cluster/member/update" + post: "/v3/cluster/member/update" body: "*" }; } @@ -149,7 +161,15 @@ service Cluster { // MemberList lists all the members in the cluster. rpc MemberList(MemberListRequest) returns (MemberListResponse) { option (google.api.http) = { - post: "/v3beta/cluster/member/list" + post: "/v3/cluster/member/list" + body: "*" + }; + } + + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + rpc MemberPromote(MemberPromoteRequest) returns (MemberPromoteResponse) { + option (google.api.http) = { + post: "/v3/cluster/member/promote" body: "*" }; } @@ -159,7 +179,7 @@ service Maintenance { // Alarm activates, deactivates, and queries alarms regarding cluster health. rpc Alarm(AlarmRequest) returns (AlarmResponse) { option (google.api.http) = { - post: "/v3beta/maintenance/alarm" + post: "/v3/maintenance/alarm" body: "*" }; } @@ -167,7 +187,7 @@ service Maintenance { // Status gets the status of the member. rpc Status(StatusRequest) returns (StatusResponse) { option (google.api.http) = { - post: "/v3beta/maintenance/status" + post: "/v3/maintenance/status" body: "*" }; } @@ -175,25 +195,29 @@ service Maintenance { // Defragment defragments a member's backend database to recover storage space. rpc Defragment(DefragmentRequest) returns (DefragmentResponse) { option (google.api.http) = { - post: "/v3beta/maintenance/defragment" + post: "/v3/maintenance/defragment" body: "*" }; } - // Hash computes the hash of the KV's backend. 
- // This is designed for testing; do not use this in production when there - // are ongoing transactions. + // Hash computes the hash of whole backend keyspace, + // including key, lease, and other buckets in storage. + // This is designed for testing ONLY! + // Do not rely on this in production with ongoing transactions, + // since Hash operation does not hold MVCC locks. + // Use "HashKV" API instead for "key" bucket consistency checks. rpc Hash(HashRequest) returns (HashResponse) { option (google.api.http) = { - post: "/v3beta/maintenance/hash" + post: "/v3/maintenance/hash" body: "*" }; } // HashKV computes the hash of all MVCC keys up to a given revision. + // It only iterates "key" bucket in backend storage. rpc HashKV(HashKVRequest) returns (HashKVResponse) { option (google.api.http) = { - post: "/v3beta/maintenance/hash" + post: "/v3/maintenance/hash" body: "*" }; } @@ -201,7 +225,7 @@ service Maintenance { // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) { option (google.api.http) = { - post: "/v3beta/maintenance/snapshot" + post: "/v3/maintenance/snapshot" body: "*" }; } @@ -209,7 +233,7 @@ service Maintenance { // MoveLeader requests current leader node to transfer its leadership to transferee. rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) { option (google.api.http) = { - post: "/v3beta/maintenance/transfer-leadership" + post: "/v3/maintenance/transfer-leadership" body: "*" }; } @@ -219,7 +243,7 @@ service Auth { // AuthEnable enables authentication. rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) { option (google.api.http) = { - post: "/v3beta/auth/enable" + post: "/v3/auth/enable" body: "*" }; } @@ -227,7 +251,7 @@ service Auth { // AuthDisable disables authentication. rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) { option (google.api.http) = { - post: "/v3beta/auth/disable" + post: "/v3/auth/disable" body: "*" }; } @@ -235,15 +259,15 @@ service Auth { // Authenticate processes an authenticate request. rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) { option (google.api.http) = { - post: "/v3beta/auth/authenticate" + post: "/v3/auth/authenticate" body: "*" }; } - // UserAdd adds a new user. + // UserAdd adds a new user. User name cannot be empty. rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) { option (google.api.http) = { - post: "/v3beta/auth/user/add" + post: "/v3/auth/user/add" body: "*" }; } @@ -251,7 +275,7 @@ service Auth { // UserGet gets detailed user information. rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) { option (google.api.http) = { - post: "/v3beta/auth/user/get" + post: "/v3/auth/user/get" body: "*" }; } @@ -259,7 +283,7 @@ service Auth { // UserList gets a list of all users. rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) { option (google.api.http) = { - post: "/v3beta/auth/user/list" + post: "/v3/auth/user/list" body: "*" }; } @@ -267,7 +291,7 @@ service Auth { // UserDelete deletes a specified user. rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) { option (google.api.http) = { - post: "/v3beta/auth/user/delete" + post: "/v3/auth/user/delete" body: "*" }; } @@ -275,7 +299,7 @@ service Auth { // UserChangePassword changes the password of a specified user. 
rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) { option (google.api.http) = { - post: "/v3beta/auth/user/changepw" + post: "/v3/auth/user/changepw" body: "*" }; } @@ -283,7 +307,7 @@ service Auth { // UserGrant grants a role to a specified user. rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) { option (google.api.http) = { - post: "/v3beta/auth/user/grant" + post: "/v3/auth/user/grant" body: "*" }; } @@ -291,15 +315,15 @@ service Auth { // UserRevokeRole revokes a role of specified user. rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) { option (google.api.http) = { - post: "/v3beta/auth/user/revoke" + post: "/v3/auth/user/revoke" body: "*" }; } - // RoleAdd adds a new role. + // RoleAdd adds a new role. Role name cannot be empty. rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) { option (google.api.http) = { - post: "/v3beta/auth/role/add" + post: "/v3/auth/role/add" body: "*" }; } @@ -307,7 +331,7 @@ service Auth { // RoleGet gets detailed role information. rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) { option (google.api.http) = { - post: "/v3beta/auth/role/get" + post: "/v3/auth/role/get" body: "*" }; } @@ -315,7 +339,7 @@ service Auth { // RoleList gets lists of all roles. rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) { option (google.api.http) = { - post: "/v3beta/auth/role/list" + post: "/v3/auth/role/list" body: "*" }; } @@ -323,7 +347,7 @@ service Auth { // RoleDelete deletes a specified role. rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) { option (google.api.http) = { - post: "/v3beta/auth/role/delete" + post: "/v3/auth/role/delete" body: "*" }; } @@ -331,7 +355,7 @@ service Auth { // RoleGrantPermission grants a permission of a specified key or range to a specified role. rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) { option (google.api.http) = { - post: "/v3beta/auth/role/grant" + post: "/v3/auth/role/grant" body: "*" }; } @@ -339,7 +363,7 @@ service Auth { // RoleRevokePermission revokes a key or range permission of a specified role. rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) { option (google.api.http) = { - post: "/v3beta/auth/role/revoke" + post: "/v3/auth/role/revoke" body: "*" }; } @@ -418,7 +442,7 @@ message RangeRequest { int64 max_mod_revision = 11; // min_create_revision is the lower bound for returned key create revisions; all keys with - // lesser create trevisions will be filtered away. + // lesser create revisions will be filtered away. int64 min_create_revision = 12; // max_create_revision is the upper bound for returned key create revisions; all keys with @@ -519,7 +543,7 @@ message Compare { VERSION = 0; CREATE = 1; MOD = 2; - VALUE= 3; + VALUE = 3; LEASE = 4; } // result is logical comparison operation for this comparison. @@ -649,14 +673,17 @@ message WatchRequest { message WatchCreateRequest { // key is the key to register for watching. bytes key = 1; + // range_end is the end of the range [key, range_end) to watch. If range_end is not given, // only the key argument is watched. If range_end is equal to '\0', all keys greater than // or equal to the key argument are watched. // If the range_end is one bit larger than the given key, // then all keys with the prefix (the given key) will be watched. 
bytes range_end = 2; + // start_revision is an optional revision to watch from (inclusive). No start_revision is "now". int64 start_revision = 3; + // progress_notify is set so that the etcd server will periodically send a WatchResponse with // no events to the new watcher if there are no recent events. It is useful when clients // wish to recover a disconnected watcher starting from a recent known revision. @@ -664,11 +691,12 @@ message WatchCreateRequest { bool progress_notify = 4; enum FilterType { - // filter out put event. - NOPUT = 0; - // filter out delete event. - NODELETE = 1; + // filter out put event. + NOPUT = 0; + // filter out delete event. + NODELETE = 1; } + // filters filter the events at server side before it sends back to the watcher. repeated FilterType filters = 5; @@ -701,14 +729,17 @@ message WatchResponse { ResponseHeader header = 1; // watch_id is the ID of the watcher that corresponds to the response. int64 watch_id = 2; + // created is set to true if the response is for a create watch request. // The client should record the watch_id and expect to receive events for // the created watcher from the same stream. // All events sent to the created watcher will attach with the same watch_id. bool created = 3; + // canceled is set to true if the response is for a cancel watch request. // No further events will be sent to the canceled watcher. bool canceled = 4; + // compact_revision is set to the minimum index if a watcher tries to watch // at a compacted index. // @@ -717,7 +748,7 @@ message WatchResponse { // // The client should treat the watcher as canceled and should not try to create any // watcher with the same start_revision again. - int64 compact_revision = 5; + int64 compact_revision = 5; // cancel_reason indicates the reason for canceling the watcher. string cancel_reason = 6; @@ -753,6 +784,22 @@ message LeaseRevokeResponse { ResponseHeader header = 1; } +message LeaseCheckpoint { + // ID is the lease ID to checkpoint. + int64 ID = 1; + + // Remaining_TTL is the remaining time until expiry of the lease. + int64 remaining_TTL = 2; +} + +message LeaseCheckpointRequest { + repeated LeaseCheckpoint checkpoints = 1; +} + +message LeaseCheckpointResponse { + ResponseHeader header = 1; +} + message LeaseKeepAliveRequest { // ID is the lease ID for the lease to keep alive. int64 ID = 1; @@ -807,11 +854,15 @@ message Member { repeated string peerURLs = 3; // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty. repeated string clientURLs = 4; + // isLearner indicates if the member is raft learner. + bool isLearner = 5; } message MemberAddRequest { // peerURLs is the list of URLs the added member will use to communicate with the cluster. repeated string peerURLs = 1; + // isLearner indicates if the added member is raft learner. + bool isLearner = 2; } message MemberAddResponse { @@ -855,6 +906,17 @@ message MemberListResponse { repeated Member members = 2; } +message MemberPromoteRequest { + // ID is the member ID of the member to promote. + uint64 ID = 1; +} + +message MemberPromoteResponse { + ResponseHeader header = 1; + // members is a list of all members after promoting the member. + repeated Member members = 2; +} + message DefragmentRequest { } @@ -914,14 +976,22 @@ message StatusResponse { ResponseHeader header = 1; // version is the cluster protocol version used by the responding member. 
string version = 2; - // dbSize is the size of the backend database, in bytes, of the responding member. + // dbSize is the size of the backend database physically allocated, in bytes, of the responding member. int64 dbSize = 3; // leader is the member ID which the responding member believes is the current leader. uint64 leader = 4; - // raftIndex is the current raft index of the responding member. + // raftIndex is the current raft committed index of the responding member. uint64 raftIndex = 5; // raftTerm is the current raft term of the responding member. uint64 raftTerm = 6; + // raftAppliedIndex is the current raft applied index of the responding member. + uint64 raftAppliedIndex = 7; + // errors contains alarm/health information and status. + repeated string errors = 8; + // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member. + int64 dbSizeInUse = 9; + // isLearner indicates if the member is raft learner. + bool isLearner = 10; } message AuthEnableRequest { @@ -938,6 +1008,7 @@ message AuthenticateRequest { message AuthUserAddRequest { string name = 1; string password = 2; + authpb.UserAddOptions options = 3; } message AuthUserGetRequest { @@ -996,8 +1067,8 @@ message AuthRoleGrantPermissionRequest { message AuthRoleRevokePermissionRequest { string role = 1; - string key = 2; - string range_end = 3; + bytes key = 2; + bytes range_end = 3; } message AuthEnableResponse { diff --git a/vendor/github.com/coreos/etcd/etcdserver/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/metrics.go similarity index 78% rename from vendor/github.com/coreos/etcd/etcdserver/metrics.go rename to vendor/go.etcd.io/etcd/etcdserver/metrics.go index 2fb07a554..e0c0cde85 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/metrics.go +++ b/vendor/go.etcd.io/etcd/etcdserver/metrics.go @@ -18,9 +18,11 @@ import ( goruntime "runtime" "time" - "github.com/coreos/etcd/pkg/runtime" - "github.com/coreos/etcd/version" + "go.etcd.io/etcd/pkg/runtime" + "go.etcd.io/etcd/version" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" ) var ( @@ -42,6 +44,26 @@ var ( Name: "leader_changes_seen_total", Help: "The number of leader changes seen.", }) + isLearner = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "is_learner", + Help: "Whether or not this member is a learner. 
1 if is, 0 otherwise.", + }) + learnerPromoteFailed = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "learner_promote_failures", + Help: "The total number of failed learner promotions (likely learner not ready) while this member is leader.", + }, + []string{"Reason"}, + ) + learnerPromoteSucceed = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "learner_promote_successes", + Help: "The total number of successful learner promotions while this member is leader.", + }) heartbeatSendFailures = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "etcd", Subsystem: "server", @@ -84,12 +106,6 @@ var ( Name: "proposals_failed_total", Help: "The total number of failed proposals seen.", }) - leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "server", - Name: "lease_expired_total", - Help: "The total number of expired leases.", - }) slowReadIndex = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "etcd", Subsystem: "server", @@ -102,6 +118,12 @@ var ( Name: "read_indexes_failed_total", Help: "The total number of failed read indexes seen.", }) + leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "server", + Name: "lease_expired_total", + Help: "The total number of expired leases.", + }) quotaBackendBytes = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "etcd", Subsystem: "server", @@ -142,13 +164,16 @@ func init() { prometheus.MustRegister(proposalsApplied) prometheus.MustRegister(proposalsPending) prometheus.MustRegister(proposalsFailed) - prometheus.MustRegister(leaseExpired) prometheus.MustRegister(slowReadIndex) prometheus.MustRegister(readIndexFailed) + prometheus.MustRegister(leaseExpired) prometheus.MustRegister(quotaBackendBytes) prometheus.MustRegister(currentVersion) prometheus.MustRegister(currentGoVersion) prometheus.MustRegister(serverID) + prometheus.MustRegister(isLearner) + prometheus.MustRegister(learnerPromoteSucceed) + prometheus.MustRegister(learnerPromoteFailed) currentVersion.With(prometheus.Labels{ "server_version": version.Version, @@ -158,22 +183,34 @@ func init() { }).Set(1) } -func monitorFileDescriptor(done <-chan struct{}) { +func monitorFileDescriptor(lg *zap.Logger, done <-chan struct{}) { ticker := time.NewTicker(5 * time.Second) defer ticker.Stop() for { used, err := runtime.FDUsage() if err != nil { - plog.Errorf("cannot monitor file descriptor usage (%v)", err) + if lg != nil { + lg.Warn("failed to get file descriptor usage", zap.Error(err)) + } else { + plog.Errorf("cannot monitor file descriptor usage (%v)", err) + } return } limit, err := runtime.FDLimit() if err != nil { - plog.Errorf("cannot monitor file descriptor usage (%v)", err) + if lg != nil { + lg.Warn("failed to get file descriptor limit", zap.Error(err)) + } else { + plog.Errorf("cannot monitor file descriptor usage (%v)", err) + } return } if used >= limit/5*4 { - plog.Warningf("80%% of the file descriptor limit is used [used = %d, limit = %d]", used, limit) + if lg != nil { + lg.Warn("80% of file descriptors are used", zap.Uint64("used", used), zap.Uint64("limit", limit)) + } else { + plog.Warningf("80%% of the file descriptor limit is used [used = %d, limit = %d]", used, limit) + } } select { case <-ticker.C: diff --git a/vendor/github.com/coreos/etcd/etcdserver/quota.go b/vendor/go.etcd.io/etcd/etcdserver/quota.go similarity index 65% rename from 
vendor/github.com/coreos/etcd/etcdserver/quota.go rename to vendor/go.etcd.io/etcd/etcdserver/quota.go index 882eb76f8..6d70430e7 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/quota.go +++ b/vendor/go.etcd.io/etcd/etcdserver/quota.go @@ -14,7 +14,14 @@ package etcdserver -import pb "github.com/coreos/etcd/etcdserver/etcdserverpb" +import ( + "sync" + + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + + humanize "github.com/dustin/go-humanize" + "go.uber.org/zap" +) const ( // DefaultQuotaBytes is the number of bytes the backend Size may @@ -55,24 +62,75 @@ const ( kvOverhead = 256 ) -func NewBackendQuota(s *EtcdServer) Quota { +var ( + // only log once + quotaLogOnce sync.Once + + DefaultQuotaSize = humanize.Bytes(uint64(DefaultQuotaBytes)) + maxQuotaSize = humanize.Bytes(uint64(MaxQuotaBytes)) +) + +// NewBackendQuota creates a quota layer with the given storage limit. +func NewBackendQuota(s *EtcdServer, name string) Quota { + lg := s.getLogger() quotaBackendBytes.Set(float64(s.Cfg.QuotaBackendBytes)) if s.Cfg.QuotaBackendBytes < 0 { // disable quotas if negative - plog.Warningf("disabling backend quota") + quotaLogOnce.Do(func() { + if lg != nil { + lg.Info( + "disabled backend quota", + zap.String("quota-name", name), + zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), + ) + } else { + plog.Warningf("disabling backend quota") + } + }) return &passthroughQuota{} } if s.Cfg.QuotaBackendBytes == 0 { // use default size if no quota size given + quotaLogOnce.Do(func() { + if lg != nil { + lg.Info( + "enabled backend quota with default value", + zap.String("quota-name", name), + zap.Int64("quota-size-bytes", DefaultQuotaBytes), + zap.String("quota-size", DefaultQuotaSize), + ) + } + }) quotaBackendBytes.Set(float64(DefaultQuotaBytes)) return &backendQuota{s, DefaultQuotaBytes} } - if s.Cfg.QuotaBackendBytes > MaxQuotaBytes { - plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes) - } + quotaLogOnce.Do(func() { + if s.Cfg.QuotaBackendBytes > MaxQuotaBytes { + if lg != nil { + lg.Warn( + "quota exceeds the maximum value", + zap.String("quota-name", name), + zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), + zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), + zap.Int64("quota-maximum-size-bytes", MaxQuotaBytes), + zap.String("quota-maximum-size", maxQuotaSize), + ) + } else { + plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes) + } + } + if lg != nil { + lg.Info( + "enabled backend quota", + zap.String("quota-name", name), + zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), + zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), + ) + } + }) return &backendQuota{s, s.Cfg.QuotaBackendBytes} } diff --git a/vendor/github.com/coreos/etcd/etcdserver/raft.go b/vendor/go.etcd.io/etcd/etcdserver/raft.go similarity index 67% rename from vendor/github.com/coreos/etcd/etcdserver/raft.go rename to vendor/go.etcd.io/etcd/etcdserver/raft.go index 212dccbdc..c0fe97905 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/raft.go +++ b/vendor/go.etcd.io/etcd/etcdserver/raft.go @@ -17,32 +17,27 @@ package etcdserver import ( "encoding/json" "expvar" + "fmt" + "log" "sort" "sync" - "sync/atomic" "time" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/contention" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/types" - 
"github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/rafthttp" - "github.com/coreos/etcd/wal" - "github.com/coreos/etcd/wal/walpb" - "github.com/coreos/pkg/capnslog" + "go.etcd.io/etcd/etcdserver/api/membership" + "go.etcd.io/etcd/etcdserver/api/rafthttp" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/pkg/contention" + "go.etcd.io/etcd/pkg/logutil" + "go.etcd.io/etcd/pkg/pbutil" + "go.etcd.io/etcd/pkg/types" + "go.etcd.io/etcd/raft" + "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/wal" + "go.etcd.io/etcd/wal/walpb" + "go.uber.org/zap" ) const ( - // Number of entries for slow follower to catch-up after compacting - // the raft storage entries. - // We expect the follower has a millisecond level latency with the leader. - // The max throughput is around 10K. Keep a 5K entries is enough for helping - // follower to catch up. - numberOfCatchUpEntries = 5000 - // The max throughput of etcd will not exceed 100MB/s (100K * 1KB value). // Assuming the RTT is around 10ms, 1MB max size is large enough. maxSizePerMsg = 1 * 1024 * 1024 @@ -63,7 +58,6 @@ var ( ) func init() { - raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft")) expvar.Publish("raft.status", expvar.Func(func() interface{} { raftStatusMu.Lock() defer raftStatusMu.Unlock() @@ -71,11 +65,6 @@ func init() { })) } -type RaftTimer interface { - Index() uint64 - Term() uint64 -} - // apply contains entries, snapshot to be applied. Once // an apply is consumed, the entries will be persisted to // to raft storage concurrently; the application must read @@ -88,12 +77,7 @@ type apply struct { } type raftNode struct { - // Cache of the latest raft index and raft term the server has seen. - // These three unit64 fields must be the first elements to keep 64-bit - // alignment for atomic access to the fields. - index uint64 - term uint64 - lead uint64 + lg *zap.Logger tickMu *sync.Mutex raftNodeConfig @@ -117,6 +101,8 @@ type raftNode struct { } type raftNodeConfig struct { + lg *zap.Logger + // to check if msg receiver is removed from cluster isIDRemoved func(id uint64) bool raft.Node @@ -131,7 +117,20 @@ type raftNodeConfig struct { } func newRaftNode(cfg raftNodeConfig) *raftNode { + var lg raft.Logger + if cfg.lg != nil { + lg = logutil.NewRaftLoggerZap(cfg.lg) + } else { + lcfg := logutil.DefaultZapLoggerConfig + var err error + lg, err = logutil.NewRaftLogger(&lcfg) + if err != nil { + log.Fatalf("cannot create raft logger %v", err) + } + } + raft.SetLogger(lg) r := &raftNode{ + lg: cfg.lg, tickMu: new(sync.Mutex), raftNodeConfig: cfg, // set up contention detectors for raft heartbeat message. 
@@ -173,7 +172,7 @@ func (r *raftNode) start(rh *raftReadyHandler) { r.tick() case rd := <-r.Ready(): if rd.SoftState != nil { - newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead + newLeader := rd.SoftState.Lead != raft.None && rh.getLead() != rd.SoftState.Lead if newLeader { leaderChanges.Inc() } @@ -184,7 +183,7 @@ func (r *raftNode) start(rh *raftReadyHandler) { hasLeader.Set(1) } - atomic.StoreUint64(&r.lead, rd.SoftState.Lead) + rh.updateLead(rd.SoftState.Lead) islead = rd.RaftState == raft.StateLeader if islead { isLeader.Set(1) @@ -199,7 +198,11 @@ func (r *raftNode) start(rh *raftReadyHandler) { select { case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]: case <-time.After(internalTimeout): - plog.Warningf("timed out sending read state") + if r.lg != nil { + r.lg.Warn("timed out sending read state", zap.Duration("timeout", internalTimeout)) + } else { + plog.Warningf("timed out sending read state") + } case <-r.stopped: return } @@ -228,19 +231,13 @@ func (r *raftNode) start(rh *raftReadyHandler) { r.transport.Send(r.processMessages(rd.Messages)) } - // Must save the snapshot file and WAL snapshot entry before saving any other entries or hardstate to - // ensure that recovery after a snapshot restore is possible. - if !raft.IsEmptySnap(rd.Snapshot) { - // gofail: var raftBeforeSaveSnap struct{} - if err := r.storage.SaveSnap(rd.Snapshot); err != nil { - plog.Fatalf("failed to save Raft snapshot %v", err) - } - // gofail: var raftAfterSaveSnap struct{} - } - // gofail: var raftBeforeSave struct{} if err := r.storage.Save(rd.HardState, rd.Entries); err != nil { - plog.Fatalf("failed to raft save state and entries %v", err) + if r.lg != nil { + r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err)) + } else { + plog.Fatalf("raft save state and entries error: %v", err) + } } if !raft.IsEmptyHardState(rd.HardState) { proposalsCommitted.Set(float64(rd.HardState.Commit)) @@ -248,25 +245,25 @@ func (r *raftNode) start(rh *raftReadyHandler) { // gofail: var raftAfterSave struct{} if !raft.IsEmptySnap(rd.Snapshot) { - // Force WAL to fsync its hard state before Release() releases - // old data from the WAL. Otherwise could get an error like: - // panic: tocommit(107) is out of range [lastIndex(84)]. Was the raft log corrupted, truncated, or lost? - // See https://github.com/etcd-io/etcd/issues/10219 for more details. 
- if err := r.storage.Sync(); err != nil { - plog.Fatalf("failed to sync Raft snapshot %v", err) + // gofail: var raftBeforeSaveSnap struct{} + if err := r.storage.SaveSnap(rd.Snapshot); err != nil { + if r.lg != nil { + r.lg.Fatal("failed to save Raft snapshot", zap.Error(err)) + } else { + plog.Fatalf("raft save snapshot error: %v", err) + } } - // etcdserver now claim the snapshot has been persisted onto the disk notifyc <- struct{}{} // gofail: var raftAfterSaveSnap struct{} r.raftStorage.ApplySnapshot(rd.Snapshot) - plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index) - // gofail: var raftAfterApplySnap struct{} - - if err := r.storage.Release(rd.Snapshot); err != nil { - plog.Fatalf("failed to release Raft wal %v", err) + if r.lg != nil { + r.lg.Info("applied incoming Raft snapshot", zap.Uint64("snapshot-index", rd.Snapshot.Metadata.Index)) + } else { + plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index) } + // gofail: var raftAfterApplySnap struct{} } r.raftStorage.Append(rd.Entries) @@ -362,8 +359,18 @@ func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { ok, exceed := r.td.Observe(ms[i].To) if !ok { // TODO: limit request rate. - plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v, to %x)", r.heartbeat, exceed, ms[i].To) - plog.Warningf("server is likely overloaded") + if r.lg != nil { + r.lg.Warn( + "leader failed to send out heartbeat on time; took too long, leader is overloaded likely from slow disk", + zap.String("to", fmt.Sprintf("%x", ms[i].To)), + zap.Duration("heartbeat-interval", r.heartbeat), + zap.Duration("expected-duration", 2*r.heartbeat), + zap.Duration("exceeded-duration", exceed), + ) + } else { + plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v, to %x)", r.heartbeat, exceed, ms[i].To) + plog.Warningf("server is likely overloaded") + } heartbeatSendFailures.Inc() } } @@ -385,7 +392,11 @@ func (r *raftNode) onStop() { r.ticker.Stop() r.transport.Stop() if err := r.storage.Close(); err != nil { - plog.Panicf("raft close storage error: %v", err) + if r.lg != nil { + r.lg.Panic("failed to close Raft storage", zap.Error(err)) + } else { + plog.Panicf("raft close storage error: %v", err) + } } close(r.done) } @@ -420,19 +431,36 @@ func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id ClusterID: uint64(cl.ID()), }, ) - if w, err = wal.Create(cfg.WALDir(), metadata); err != nil { - plog.Panicf("create wal error: %v", err) + if w, err = wal.Create(cfg.Logger, cfg.WALDir(), metadata); err != nil { + if cfg.Logger != nil { + cfg.Logger.Panic("failed to create WAL", zap.Error(err)) + } else { + plog.Panicf("create wal error: %v", err) + } } peers := make([]raft.Peer, len(ids)) for i, id := range ids { - ctx, err := json.Marshal((*cl).Member(id)) + var ctx []byte + ctx, err = json.Marshal((*cl).Member(id)) if err != nil { - plog.Panicf("marshal member should never fail: %v", err) + if cfg.Logger != nil { + cfg.Logger.Panic("failed to marshal member", zap.Error(err)) + } else { + plog.Panicf("marshal member should never fail: %v", err) + } } peers[i] = raft.Peer{ID: uint64(id), Context: ctx} } id = member.ID - plog.Infof("starting member %s in cluster %s", id, cl.ID()) + if cfg.Logger != nil { + cfg.Logger.Info( + "starting local member", + zap.String("local-member-id", id.String()), + zap.String("cluster-id", cl.ID().String()), + ) + } else { + plog.Infof("starting member %s in cluster %s", id, 
cl.ID()) + } s = raft.NewMemoryStorage() c := &raft.Config{ ID: uint64(id), @@ -442,9 +470,25 @@ func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id MaxSizePerMsg: maxSizePerMsg, MaxInflightMsgs: maxInflightMsgs, CheckQuorum: true, + PreVote: cfg.PreVote, + } + if cfg.Logger != nil { + // called after capnslog setting in "init" function + if cfg.LoggerConfig != nil { + c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig) + if err != nil { + log.Fatalf("cannot create raft logger %v", err) + } + } else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil { + c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer) + } } - n = raft.StartNode(c, peers) + if len(peers) == 0 { + n = raft.RestartNode(c) + } else { + n = raft.StartNode(c, peers) + } raftStatusMu.Lock() raftStatus = n.Status raftStatusMu.Unlock() @@ -456,11 +500,20 @@ func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *member if snapshot != nil { walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term } - w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap) - - plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit) - cl := membership.NewCluster("") - cl.SetID(cid) + w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap) + + if cfg.Logger != nil { + cfg.Logger.Info( + "restarting local member", + zap.String("cluster-id", cid.String()), + zap.String("local-member-id", id.String()), + zap.Uint64("commit-index", st.Commit), + ) + } else { + plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit) + } + cl := membership.NewCluster(cfg.Logger, "") + cl.SetID(id, cid) s := raft.NewMemoryStorage() if snapshot != nil { s.ApplySnapshot(*snapshot) @@ -475,6 +528,19 @@ func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *member MaxSizePerMsg: maxSizePerMsg, MaxInflightMsgs: maxInflightMsgs, CheckQuorum: true, + PreVote: cfg.PreVote, + } + if cfg.Logger != nil { + // called after capnslog setting in "init" function + var err error + if cfg.LoggerConfig != nil { + c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig) + if err != nil { + log.Fatalf("cannot create raft logger %v", err) + } + } else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil { + c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer) + } } n := raft.RestartNode(c) @@ -489,33 +555,62 @@ func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types if snapshot != nil { walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term } - w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap) + w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap) // discard the previously uncommitted entries for i, ent := range ents { if ent.Index > st.Commit { - plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i) + if cfg.Logger != nil { + cfg.Logger.Info( + "discarding uncommitted WAL entries", + zap.Uint64("entry-index", ent.Index), + zap.Uint64("commit-index-from-wal", st.Commit), + zap.Int("number-of-discarded-entries", len(ents)-i), + ) + } else { + plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i) + } ents = ents[:i] break } } // force append the configuration change entries - toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit) + toAppEnts := createConfigChangeEnts( + cfg.Logger, + getIDs(cfg.Logger, snapshot, ents), + uint64(id), + 
st.Term, + st.Commit, + ) ents = append(ents, toAppEnts...) // force commit newly appended entries err := w.Save(raftpb.HardState{}, toAppEnts) if err != nil { - plog.Fatalf("%v", err) + if cfg.Logger != nil { + cfg.Logger.Fatal("failed to save hard state and entries", zap.Error(err)) + } else { + plog.Fatalf("%v", err) + } } if len(ents) != 0 { st.Commit = ents[len(ents)-1].Index } - plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit) - cl := membership.NewCluster("") - cl.SetID(cid) + if cfg.Logger != nil { + cfg.Logger.Info( + "forcing restart member", + zap.String("cluster-id", cid.String()), + zap.String("local-member-id", id.String()), + zap.Uint64("commit-index", st.Commit), + ) + } else { + plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit) + } + + cl := membership.NewCluster(cfg.Logger, "") + cl.SetID(id, cid) s := raft.NewMemoryStorage() if snapshot != nil { s.ApplySnapshot(*snapshot) @@ -530,7 +625,20 @@ func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types MaxSizePerMsg: maxSizePerMsg, MaxInflightMsgs: maxInflightMsgs, CheckQuorum: true, + PreVote: cfg.PreVote, + } + if cfg.Logger != nil { + // called after capnslog setting in "init" function + if cfg.LoggerConfig != nil { + c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig) + if err != nil { + log.Fatalf("cannot create raft logger %v", err) + } + } else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil { + c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer) + } } + n := raft.RestartNode(c) raftStatus = n.Status return id, cl, n, s, w @@ -541,10 +649,10 @@ func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types // ID-related entry: // - ConfChangeAddNode, in which case the contained ID will be added into the set. // - ConfChangeRemoveNode, in which case the contained ID will be removed from the set. -func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { +func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { ids := make(map[uint64]bool) if snap != nil { - for _, id := range snap.Metadata.ConfState.Nodes { + for _, id := range snap.Metadata.ConfState.Voters { ids[id] = true } } @@ -562,7 +670,11 @@ func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { case raftpb.ConfChangeUpdateNode: // do nothing default: - plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!") + if lg != nil { + lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String())) + } else { + plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!") + } } } sids := make(types.Uint64Slice, 0, len(ids)) @@ -578,28 +690,19 @@ func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { // `self` is _not_ removed, even if present in the set. // If `self` is not inside the given ids, it creates a Raft entry to add a // default member with the given `self`. 
-func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry { - ents := make([]raftpb.Entry, 0) - next := index + 1 +func createConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry { found := false for _, id := range ids { if id == self { found = true - continue - } - cc := &raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: id, - } - e := raftpb.Entry{ - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal(cc), - Term: term, - Index: next, } - ents = append(ents, e) - next++ } + + var ents []raftpb.Entry + next := index + 1 + + // NB: always add self first, then remove other nodes. Raft will panic if the + // set of voters ever becomes empty. if !found { m := membership.Member{ ID: types.ID(self), @@ -607,7 +710,11 @@ func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raf } ctx, err := json.Marshal(m) if err != nil { - plog.Panicf("marshal member should never fail: %v", err) + if lg != nil { + lg.Panic("failed to marshal member", zap.Error(err)) + } else { + plog.Panicf("marshal member should never fail: %v", err) + } } cc := &raftpb.ConfChange{ Type: raftpb.ConfChangeAddNode, @@ -621,6 +728,26 @@ func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raf Index: next, } ents = append(ents, e) + next++ + } + + for _, id := range ids { + if id == self { + continue + } + cc := &raftpb.ConfChange{ + Type: raftpb.ConfChangeRemoveNode, + NodeID: id, + } + e := raftpb.Entry{ + Type: raftpb.EntryConfChange, + Data: pbutil.MustMarshal(cc), + Term: term, + Index: next, + } + ents = append(ents, e) + next++ } + return ents } diff --git a/vendor/github.com/coreos/etcd/etcdserver/server.go b/vendor/go.etcd.io/etcd/etcdserver/server.go similarity index 53% rename from vendor/github.com/coreos/etcd/etcdserver/server.go rename to vendor/go.etcd.io/etcd/etcdserver/server.go index 4fedb2007..976acd684 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/server.go +++ b/vendor/go.etcd.io/etcd/etcdserver/server.go @@ -29,41 +29,51 @@ import ( "sync/atomic" "time" - "github.com/coreos/etcd/alarm" - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/compactor" - "github.com/coreos/etcd/discovery" - "github.com/coreos/etcd/etcdserver/api" - "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/lease/leasehttp" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/fileutil" - "github.com/coreos/etcd/pkg/idutil" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/runtime" - "github.com/coreos/etcd/pkg/schedule" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/pkg/wait" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/rafthttp" - "github.com/coreos/etcd/snap" - "github.com/coreos/etcd/store" - "github.com/coreos/etcd/version" - "github.com/coreos/etcd/wal" + "go.etcd.io/etcd/auth" + "go.etcd.io/etcd/etcdserver/api" + "go.etcd.io/etcd/etcdserver/api/membership" + "go.etcd.io/etcd/etcdserver/api/rafthttp" + "go.etcd.io/etcd/etcdserver/api/snap" + "go.etcd.io/etcd/etcdserver/api/v2discovery" + "go.etcd.io/etcd/etcdserver/api/v2http/httptypes" + stats "go.etcd.io/etcd/etcdserver/api/v2stats" + "go.etcd.io/etcd/etcdserver/api/v2store" + 
"go.etcd.io/etcd/etcdserver/api/v3alarm" + "go.etcd.io/etcd/etcdserver/api/v3compactor" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/lease" + "go.etcd.io/etcd/lease/leasehttp" + "go.etcd.io/etcd/mvcc" + "go.etcd.io/etcd/mvcc/backend" + "go.etcd.io/etcd/pkg/fileutil" + "go.etcd.io/etcd/pkg/idutil" + "go.etcd.io/etcd/pkg/pbutil" + "go.etcd.io/etcd/pkg/runtime" + "go.etcd.io/etcd/pkg/schedule" + "go.etcd.io/etcd/pkg/traceutil" + "go.etcd.io/etcd/pkg/types" + "go.etcd.io/etcd/pkg/wait" + "go.etcd.io/etcd/raft" + "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/version" + "go.etcd.io/etcd/wal" "github.com/coreos/go-semver/semver" "github.com/coreos/pkg/capnslog" + humanize "github.com/dustin/go-humanize" "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" ) const ( - DefaultSnapCount = 100000 + DefaultSnapshotCount = 100000 + + // DefaultSnapshotCatchUpEntries is the number of entries for a slow follower + // to catch-up after compacting the raft storage entries. + // We expect the follower has a millisecond level latency with the leader. + // The max throughput is around 10K. Keep a 5K entries is enough for helping + // follower to catch up. + DefaultSnapshotCatchUpEntries uint64 = 5000 StoreClusterPrefix = "/0" StoreKeysPrefix = "/1" @@ -88,10 +98,12 @@ const ( maxPendingRevokes = 16 recommendedMaxRequestBytes = 10 * 1024 * 1024 + + readyPercent = 0.9 ) var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver") storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes")) ) @@ -113,13 +125,15 @@ func init() { type Response struct { Term uint64 Index uint64 - Event *store.Event - Watcher store.Watcher + Event *v2store.Event + Watcher v2store.Watcher Err error } type ServerV2 interface { Server + Leader() types.ID + // Do takes a V2 request and attempts to fulfill it, returning a Response. Do(ctx context.Context, r pb.Request) (Response, error) stats.Stats @@ -128,16 +142,12 @@ type ServerV2 interface { type ServerV3 interface { Server - ID() types.ID - RaftTimer + RaftStatusGetter } func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled } type Server interface { - // Leader returns the ID of the leader Server. - Leader() types.ID - // AddMember attempts to add a member into the cluster. It will return // ErrIDRemoved if member ID is removed from the cluster, or return // ErrIDExists if member ID exists in the cluster. @@ -149,6 +159,11 @@ type Server interface { // UpdateMember attempts to update an existing member in the cluster. It will // return ErrIDNotFound if the member ID does not exist. UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error) + // PromoteMember attempts to promote a non-voting node to a voting node. It will + // return ErrIDNotFound if the member ID does not exist. + // return ErrLearnerNotReady if the member are not ready. + // return ErrMemberNotLearner if the member is not a learner. + PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) // ClusterVersion is the cluster-wide minimum major.minor version. // Cluster version is set to the min version that an etcd member is @@ -175,6 +190,9 @@ type EtcdServer struct { inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned. appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. 
committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. + term uint64 // must use atomic operations to access; keep 64-bit aligned. + lead uint64 // must use atomic operations to access; keep 64-bit aligned. + // consistIndex used to hold the offset of current executing entry // It is initialized to 0 before executing any entry. consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned. @@ -183,6 +201,9 @@ type EtcdServer struct { readych chan struct{} Cfg ServerConfig + lgMu *sync.RWMutex + lg *zap.Logger + w wait.Wait readMu sync.RWMutex @@ -198,7 +219,8 @@ type EtcdServer struct { // stopping is closed by run goroutine on shutdown. stopping chan struct{} // done is closed when all goroutines from start() complete. - done chan struct{} + done chan struct{} + // leaderChanged is used to notify the linearizable read loop to drop the old read requests. leaderChanged chan struct{} leaderChangedMu sync.RWMutex @@ -208,7 +230,7 @@ type EtcdServer struct { cluster *membership.RaftCluster - store store.Store + v2store v2store.Store snapshotter *snap.Snapshotter applyV2 ApplierV2 @@ -224,14 +246,14 @@ type EtcdServer struct { bemu sync.Mutex be backend.Backend authStore auth.AuthStore - alarmStore *alarm.AlarmStore + alarmStore *v3alarm.AlarmStore stats *stats.ServerStats lstats *stats.LeaderStats SyncTicker *time.Ticker // compactor is used to auto-compact the KV. - compactor compactor.Compactor + compactor v3compactor.Compactor // peerRt used to send requests (version, lease) to peers. peerRt http.RoundTripper @@ -254,12 +276,14 @@ type EtcdServer struct { leadTimeMu sync.RWMutex leadElectedTime time.Time + + *AccessController } // NewServer creates a new EtcdServer from the supplied configuration. The // configuration is considered static for the lifetime of the EtcdServer. 
func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { - st := store.New(StoreClusterPrefix, StoreKeysPrefix) + st := v2store.New(StoreClusterPrefix, StoreKeysPrefix) var ( w *wal.WAL @@ -270,7 +294,17 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { ) if cfg.MaxRequestBytes > recommendedMaxRequestBytes { - plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes) + if cfg.Logger != nil { + cfg.Logger.Warn( + "exceeded recommended request limit", + zap.Uint("max-request-bytes", cfg.MaxRequestBytes), + zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))), + zap.Int("recommended-request-bytes", recommendedMaxRequestBytes), + zap.String("recommended-request-size", humanize.Bytes(uint64(recommendedMaxRequestBytes))), + ) + } else { + plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes) + } } if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { @@ -280,9 +314,17 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { haveWAL := wal.Exist(cfg.WALDir()) if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil { - plog.Fatalf("create snapshot directory error: %v", err) + if cfg.Logger != nil { + cfg.Logger.Fatal( + "failed to create snapshot directory", + zap.String("path", cfg.SnapDir()), + zap.Error(err), + ) + } else { + plog.Fatalf("create snapshot directory error: %v", err) + } } - ss := snap.New(cfg.SnapDir()) + ss := snap.New(cfg.Logger, cfg.SnapDir()) bepath := cfg.backendPath() beExist := fileutil.Exist(bepath) @@ -308,42 +350,43 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { if err = cfg.VerifyJoinExisting(); err != nil { return nil, err } - cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap) + cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap) if err != nil { return nil, err } - existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt) + existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt) if gerr != nil { return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr) } - if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil { + if err = membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil { return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err) } - if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) { + if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt) { return nil, fmt.Errorf("incompatible with current running cluster") } remotes = existingCluster.Members() - cl.SetID(existingCluster.ID()) + cl.SetID(types.ID(0), existingCluster.ID()) cl.SetStore(st) cl.SetBackend(be) - cfg.Print() id, n, s, w = startNode(cfg, cl, nil) + cl.SetID(id, existingCluster.ID()) + case !haveWAL && cfg.NewCluster: if err = cfg.VerifyBootstrap(); err != nil { return nil, err } - cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap) + cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap) if err != nil { return nil, err } m := cl.MemberByName(cfg.Name) - if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) { + if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.bootstrapTimeout()) { 
return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID) } if cfg.ShouldDiscover() { var str string - str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String()) + str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String()) if err != nil { return nil, &DiscoveryError{Op: "join", Err: err} } @@ -355,14 +398,15 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { if checkDuplicateURL(urlsmap) { return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap) } - if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil { + if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil { return nil, err } } cl.SetStore(st) cl.SetBackend(be) - cfg.PrintWithInitial() id, n, s, w = startNode(cfg, cl, cl.MemberIDs()) + cl.SetID(id, cl.ID()) + case haveWAL: if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { return nil, fmt.Errorf("cannot write to member directory: %v", err) @@ -373,35 +417,63 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { } if cfg.ShouldDiscover() { - plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir()) - } - - // Find a snapshot to start/restart a raft node - walSnaps, serr := wal.ValidSnapshotEntries(cfg.WALDir()) - if serr != nil { - return nil, serr + if cfg.Logger != nil { + cfg.Logger.Warn( + "discovery token is ignored since cluster already initialized; valid logs are found", + zap.String("wal-dir", cfg.WALDir()), + ) + } else { + plog.Warningf("discovery token ignored since a cluster has already been initialized. 
Valid log found at %q", cfg.WALDir()) + } } - // snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding - // wal log entries - snapshot, err = ss.LoadNewestAvailable(walSnaps) + snapshot, err = ss.Load() if err != nil && err != snap.ErrNoSnapshot { return nil, err } if snapshot != nil { if err = st.Recovery(snapshot.Data); err != nil { - plog.Panicf("recovered store from snapshot error: %v", err) + if cfg.Logger != nil { + cfg.Logger.Panic("failed to recover from snapshot") + } else { + plog.Panicf("recovered store from snapshot error: %v", err) + } } - plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index) + + if cfg.Logger != nil { + cfg.Logger.Info( + "recovered v2 store from snapshot", + zap.Uint64("snapshot-index", snapshot.Metadata.Index), + zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))), + ) + } else { + plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index) + } + if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil { - plog.Panicf("recovering backend from snapshot error: %v", err) + if cfg.Logger != nil { + cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err)) + } else { + plog.Panicf("recovering backend from snapshot error: %v", err) + } + } + if cfg.Logger != nil { + s1, s2 := be.Size(), be.SizeInUse() + cfg.Logger.Info( + "recovered v3 backend from snapshot", + zap.Int64("backend-size-bytes", s1), + zap.String("backend-size", humanize.Bytes(uint64(s1))), + zap.Int64("backend-size-in-use-bytes", s2), + zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))), + ) } } - cfg.Print() + if !cfg.ForceNewCluster { id, cl, n, s, w = restartNode(cfg, snapshot) } else { id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot) } + cl.SetStore(st) cl.SetBackend(be) cl.Recover(api.UpdateCapability) @@ -409,6 +481,7 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { os.RemoveAll(bepath) return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath) } + default: return nil, fmt.Errorf("unsupported bootstrap config") } @@ -424,11 +497,14 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { srv = &EtcdServer{ readych: make(chan struct{}), Cfg: cfg, + lgMu: new(sync.RWMutex), + lg: cfg.Logger, errorc: make(chan error, 1), - store: st, + v2store: st, snapshotter: ss, r: *newRaftNode( raftNodeConfig{ + lg: cfg.Logger, isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, Node: n, heartbeat: heartbeat, @@ -436,48 +512,51 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { storage: NewStorage(w, ss), }, ), - id: id, - attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, - cluster: cl, - stats: sstats, - lstats: lstats, - SyncTicker: time.NewTicker(500 * time.Millisecond), - peerRt: prt, - reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), - forceVersionC: make(chan struct{}), + id: id, + attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, + cluster: cl, + stats: sstats, + lstats: lstats, + SyncTicker: time.NewTicker(500 * time.Millisecond), + peerRt: prt, + reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), + forceVersionC: make(chan struct{}), + AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist}, } serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1) - srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster} 
+ srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster} srv.be = be minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. - srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds()))) - - tp, err := auth.NewTokenProvider(cfg.AuthToken, - func(index uint64) <-chan struct{} { - return srv.applyWait.Wait(index) - }, - ) - if err != nil { - plog.Warningf("failed to create token provider,err is %v", err) - return nil, err - } - srv.authStore = auth.NewAuthStore(srv.be, tp) - - srv.kv = mvcc.New(srv.be, srv.lessor, srv.authStore, &srv.consistIndex) + srv.lessor = lease.NewLessor( + srv.getLogger(), + srv.be, + lease.LessorConfig{ + MinLeaseTTL: int64(math.Ceil(minTTL.Seconds())), + CheckpointInterval: cfg.LeaseCheckpointInterval, + ExpiredLeasesRetryInterval: srv.Cfg.ReqTimeout(), + }) + srv.kv = mvcc.New(srv.getLogger(), srv.be, srv.lessor, &srv.consistIndex, mvcc.StoreConfig{CompactionBatchLimit: cfg.CompactionBatchLimit}) if beExist { kvindex := srv.kv.ConsistentIndex() // TODO: remove kvindex != 0 checking when we do not expect users to upgrade // etcd from pre-3.0 release. if snapshot != nil && kvindex < snapshot.Metadata.Index { if kvindex != 0 { - return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d).", bepath, kvindex, snapshot.Metadata.Index) + return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", bepath, kvindex, snapshot.Metadata.Index) + } + if cfg.Logger != nil { + cfg.Logger.Warn( + "consistent index was never saved", + zap.Uint64("snapshot-index", snapshot.Metadata.Index), + ) + } else { + plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index) } - plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index) } } newSrv := srv // since srv == nil in defer if srv is returned as nil @@ -490,8 +569,22 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { }() srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) + tp, err := auth.NewTokenProvider(cfg.Logger, cfg.AuthToken, + func(index uint64) <-chan struct{} { + return srv.applyWait.Wait(index) + }, + ) + if err != nil { + if cfg.Logger != nil { + cfg.Logger.Warn("failed to create token provider", zap.Error(err)) + } else { + plog.Errorf("failed to create token provider: %s", err) + } + return nil, err + } + srv.authStore = auth.NewAuthStore(srv.getLogger(), srv.be, tp, int(cfg.BcryptCost)) if num := cfg.AutoCompactionRetention; num != 0 { - srv.compactor, err = compactor.New(cfg.AutoCompactionMode, num, srv.kv, srv) + srv.compactor, err = v3compactor.New(cfg.Logger, cfg.AutoCompactionMode, num, srv.kv, srv) if err != nil { return nil, err } @@ -503,8 +596,16 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { return nil, err } + if srv.Cfg.EnableLeaseCheckpoint { + // setting checkpointer enables lease checkpoint feature. 
+ srv.lessor.SetCheckpointer(func(ctx context.Context, cp *pb.LeaseCheckpointRequest) { + srv.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseCheckpoint: cp}) + }) + } + // TODO: move transport initialization near the definition of remote tr := &rafthttp.Transport{ + Logger: cfg.Logger, TLSInfo: cfg.PeerTLSInfo, DialTimeout: cfg.peerDialTimeout(), ID: id, @@ -535,21 +636,49 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { return srv, nil } +func (s *EtcdServer) getLogger() *zap.Logger { + s.lgMu.RLock() + l := s.lg + s.lgMu.RUnlock() + return l +} + +func tickToDur(ticks int, tickMs uint) string { + return fmt.Sprintf("%v", time.Duration(ticks)*time.Duration(tickMs)*time.Millisecond) +} + func (s *EtcdServer) adjustTicks() { + lg := s.getLogger() clusterN := len(s.cluster.Members()) // single-node fresh start, or single-node recovers from snapshot if clusterN == 1 { ticks := s.Cfg.ElectionTicks - 1 - plog.Infof("%s as single-node; fast-forwarding %d ticks (election ticks %d)", s.ID(), ticks, s.Cfg.ElectionTicks) + if lg != nil { + lg.Info( + "started as single-node; fast-forwarding election ticks", + zap.String("local-member-id", s.ID().String()), + zap.Int("forward-ticks", ticks), + zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)), + zap.Int("election-ticks", s.Cfg.ElectionTicks), + zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)), + ) + } else { + plog.Infof("%s as single-node; fast-forwarding %d ticks (election ticks %d)", s.ID(), ticks, s.Cfg.ElectionTicks) + } s.r.advanceTicks(ticks) return } if !s.Cfg.InitialElectionTickAdvance { - plog.Infof("skipping initial election tick advance (election tick %d)", s.Cfg.ElectionTicks) + if lg != nil { + lg.Info("skipping initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks)) + } return } + if lg != nil { + lg.Info("starting initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks)) + } // retry up to "rafthttp.ConnReadTimeout", which is 5-sec // until peer connection reports; otherwise: @@ -571,7 +700,21 @@ func (s *EtcdServer) adjustTicks() { // multi-node received peer connection reports // adjust ticks, in case slow leader message receive ticks := s.Cfg.ElectionTicks - 2 - plog.Infof("%s initialzed peer connection; fast-forwarding %d ticks (election ticks %d) with %d active peer(s)", s.ID(), ticks, s.Cfg.ElectionTicks, peerN) + + if lg != nil { + lg.Info( + "initialized peer connections; fast-forwarding election ticks", + zap.String("local-member-id", s.ID().String()), + zap.Int("forward-ticks", ticks), + zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)), + zap.Int("election-ticks", s.Cfg.ElectionTicks), + zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)), + zap.Int("active-remote-members", peerN), + ) + } else { + plog.Infof("%s initialized peer connection; fast-forwarding %d ticks (election ticks %d) with %d active peer(s)", s.ID(), ticks, s.Cfg.ElectionTicks, peerN) + } + s.r.advanceTicks(ticks) return } @@ -587,7 +730,7 @@ func (s *EtcdServer) Start() { s.goAttach(func() { s.adjustTicks() }) s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) }) s.goAttach(s.purgeFile) - s.goAttach(func() { monitorFileDescriptor(s.stopping) }) + s.goAttach(func() { monitorFileDescriptor(s.getLogger(), s.stopping) }) s.goAttach(s.monitorVersions) s.goAttach(s.linearizableReadLoop) s.goAttach(s.monitorKVHash) @@ -597,10 +740,31 @@ func (s *EtcdServer) Start() { // modify a server's fields after it has been sent to 
Start. // This function is just used for testing. func (s *EtcdServer) start() { - if s.Cfg.SnapCount == 0 { - plog.Infof("set snapshot count to default %d", DefaultSnapCount) - s.Cfg.SnapCount = DefaultSnapCount + lg := s.getLogger() + + if s.Cfg.SnapshotCount == 0 { + if lg != nil { + lg.Info( + "updating snapshot-count to default", + zap.Uint64("given-snapshot-count", s.Cfg.SnapshotCount), + zap.Uint64("updated-snapshot-count", DefaultSnapshotCount), + ) + } else { + plog.Infof("set snapshot count to default %d", DefaultSnapshotCount) + } + s.Cfg.SnapshotCount = DefaultSnapshotCount } + if s.Cfg.SnapshotCatchUpEntries == 0 { + if lg != nil { + lg.Info( + "updating snapshot catch-up entries to default", + zap.Uint64("given-snapshot-catchup-entries", s.Cfg.SnapshotCatchUpEntries), + zap.Uint64("updated-snapshot-catchup-entries", DefaultSnapshotCatchUpEntries), + ) + } + s.Cfg.SnapshotCatchUpEntries = DefaultSnapshotCatchUpEntries + } + s.w = wait.New() s.applyWait = wait.NewTimeList() s.done = make(chan struct{}) @@ -611,11 +775,31 @@ func (s *EtcdServer) start() { s.readNotifier = newNotifier() s.leaderChanged = make(chan struct{}) if s.ClusterVersion() != nil { - plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String())) + if lg != nil { + lg.Info( + "starting etcd server", + zap.String("local-member-id", s.ID().String()), + zap.String("local-server-version", version.Version), + zap.String("cluster-id", s.Cluster().ID().String()), + zap.String("cluster-version", version.Cluster(s.ClusterVersion().String())), + ) + } else { + plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String())) + } membership.ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(s.ClusterVersion().String())}).Set(1) } else { - plog.Infof("starting server... [version: %v, cluster version: to_be_decided]", version.Version) + if lg != nil { + lg.Info( + "starting etcd server", + zap.String("local-member-id", s.ID().String()), + zap.String("local-server-version", version.Version), + zap.String("cluster-version", "to_be_decided"), + ) + } else { + plog.Infof("starting server... 
[version: %v, cluster version: to_be_decided]", version.Version) + } } + // TODO: if this is an empty log, writes all peer infos // into the first entry go s.run() @@ -623,37 +807,39 @@ func (s *EtcdServer) start() { func (s *EtcdServer) purgeFile() { var dberrc, serrc, werrc <-chan error - var dbdonec, sdonec, wdonec <-chan struct{} if s.Cfg.MaxSnapFiles > 0 { - dbdonec, dberrc = fileutil.PurgeFileWithDoneNotify(s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping) - sdonec, serrc = fileutil.PurgeFileWithDoneNotify(s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping) + dberrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done) + serrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done) } if s.Cfg.MaxWALFiles > 0 { - wdonec, werrc = fileutil.PurgeFileWithDoneNotify(s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.stopping) + werrc = fileutil.PurgeFile(s.getLogger(), s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done) } + + lg := s.getLogger() select { case e := <-dberrc: - plog.Fatalf("failed to purge snap db file %v", e) - case e := <-serrc: - plog.Fatalf("failed to purge snap file %v", e) - case e := <-werrc: - plog.Fatalf("failed to purge wal file %v", e) - case <-s.stopping: - if dbdonec != nil { - <-dbdonec + if lg != nil { + lg.Fatal("failed to purge snap db file", zap.Error(e)) + } else { + plog.Fatalf("failed to purge snap db file %v", e) } - if sdonec != nil { - <-sdonec + case e := <-serrc: + if lg != nil { + lg.Fatal("failed to purge snap file", zap.Error(e)) + } else { + plog.Fatalf("failed to purge snap file %v", e) } - if wdonec != nil { - <-wdonec + case e := <-werrc: + if lg != nil { + lg.Fatal("failed to purge wal file", zap.Error(e)) + } else { + plog.Fatalf("failed to purge wal file %v", e) } + case <-s.stopping: return } } -func (s *EtcdServer) ID() types.ID { return s.id } - func (s *EtcdServer) Cluster() api.Cluster { return s.cluster } func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) } @@ -677,7 +863,15 @@ func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() // machine, respecting any timeout of the given context. func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error { if s.cluster.IsIDRemoved(types.ID(m.From)) { - plog.Warningf("reject message from removed member %s", types.ID(m.From).String()) + if lg := s.getLogger(); lg != nil { + lg.Warn( + "rejected Raft message from removed member", + zap.String("local-member-id", s.ID().String()), + zap.String("removed-member-id", types.ID(m.From).String()), + ) + } else { + plog.Warningf("reject message from removed member %s", types.ID(m.From).String()) + } return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member") } if m.Type == raftpb.MsgApp { @@ -707,14 +901,22 @@ type etcdProgress struct { // and helps decouple state machine logic from Raft algorithms. 
// TODO: add a state machine interface to apply the commit entries and do snapshot/recover type raftReadyHandler struct { + getLead func() (lead uint64) + updateLead func(lead uint64) updateLeadership func(newLeader bool) updateCommittedIndex func(uint64) } func (s *EtcdServer) run() { + lg := s.getLogger() + sn, err := s.r.raftStorage.Snapshot() if err != nil { - plog.Panicf("get snapshot from raft storage error: %v", err) + if lg != nil { + lg.Panic("failed to get snapshot from Raft storage", zap.Error(err)) + } else { + plog.Panicf("get snapshot from raft storage error: %v", err) + } } // asynchronously accept apply packets, dispatch progress in-order @@ -736,6 +938,8 @@ func (s *EtcdServer) run() { return } rh := &raftReadyHandler{ + getLead: func() (lead uint64) { return s.getLead() }, + updateLead: func(lead uint64) { s.setLead(lead) }, updateLeadership: func(newLeader bool) { if !s.isLeader() { if s.lessor != nil { @@ -758,17 +962,12 @@ func (s *EtcdServer) run() { } } if newLeader { - select { - case s.leaderChanged <- struct{}{}: - default: - } s.leaderChangedMu.Lock() lc := s.leaderChanged s.leaderChanged = make(chan struct{}) - s.leaderChangedMu.Unlock() close(lc) + s.leaderChangedMu.Unlock() } - // TODO: remove the nil checking // current test utility does not provide the stats if s.stats != nil { @@ -855,7 +1054,15 @@ func (s *EtcdServer) run() { if lerr == nil { leaseExpired.Inc() } else { - plog.Warningf("failed to revoke %016x (%q)", lid, lerr.Error()) + if lg != nil { + lg.Warn( + "failed to revoke lease", + zap.String("lease-id", fmt.Sprintf("%016x", lid)), + zap.Error(lerr), + ) + } else { + plog.Warningf("failed to revoke %016x (%q)", lid, lerr.Error()) + } } <-c @@ -863,11 +1070,16 @@ func (s *EtcdServer) run() { } }) case err := <-s.errorc: - plog.Errorf("%s", err) - plog.Infof("the data-dir used by this member must be removed.") + if lg != nil { + lg.Warn("server error", zap.Error(err)) + lg.Warn("data-dir used by this member must be removed") + } else { + plog.Errorf("%s", err) + plog.Infof("the data-dir used by this member must be removed.") + } return case <-getSyncC(): - if s.store.HasTTLKeys() { + if s.v2store.HasTTLKeys() { s.sync(s.Cfg.ReqTimeout()) } case <-s.stop: @@ -876,18 +1088,13 @@ func (s *EtcdServer) run() { } } -func (s *EtcdServer) leaderChangedNotify() <-chan struct{} { - s.leaderChangedMu.RLock() - defer s.leaderChangedMu.RUnlock() - return s.leaderChanged -} - func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { s.applySnapshot(ep, apply) s.applyEntries(ep, apply) proposalsApplied.Set(float64(ep.appliedi)) s.applyWait.Trigger(ep.appliedi) + // wait for the raft routine to finish the disk writes before triggering a // snapshot. or applied index might be greater than the last index in raft // storage, since the raft routine might be slower than apply routine. 
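The run() hunk above changes leader-change notification so the old leaderChanged channel is closed while the mutex is still held. A rough, self-contained sketch of that notify-by-closing-a-channel pattern; the notifier type below is illustrative, not the vendored implementation:

package main

import (
	"fmt"
	"sync"
)

// notifier mimics the leaderChanged handling: waiters read the current channel
// under a read lock; on a change the writer installs a fresh channel and closes
// the old one, waking every goroutine that was waiting on it.
type notifier struct {
	mu sync.RWMutex
	ch chan struct{}
}

func newNotifier() *notifier { return &notifier{ch: make(chan struct{})} }

// wait returns the channel that will be closed on the next notification.
func (n *notifier) wait() <-chan struct{} {
	n.mu.RLock()
	defer n.mu.RUnlock()
	return n.ch
}

// notify wakes all current waiters and resets the channel for future ones.
func (n *notifier) notify() {
	n.mu.Lock()
	old := n.ch
	n.ch = make(chan struct{})
	close(old)
	n.mu.Unlock()
}

func main() {
	n := newNotifier()
	done := n.wait()
	go n.notify()
	<-done
	fmt.Println("observed leader change")
}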
@@ -908,15 +1115,47 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { return } applySnapshotInProgress.Inc() - plog.Infof("applying snapshot at index %d...", ep.snapi) + + lg := s.getLogger() + if lg != nil { + lg.Info( + "applying snapshot", + zap.Uint64("current-snapshot-index", ep.snapi), + zap.Uint64("current-applied-index", ep.appliedi), + zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index), + zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term), + ) + } else { + plog.Infof("applying snapshot at index %d...", ep.snapi) + } defer func() { - plog.Infof("finished applying incoming snapshot at index %d", ep.snapi) + if lg != nil { + lg.Info( + "applied snapshot", + zap.Uint64("current-snapshot-index", ep.snapi), + zap.Uint64("current-applied-index", ep.appliedi), + zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index), + zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term), + ) + } else { + plog.Infof("finished applying incoming snapshot at index %d", ep.snapi) + } applySnapshotInProgress.Dec() }() if apply.snapshot.Metadata.Index <= ep.appliedi { - plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1", - apply.snapshot.Metadata.Index, ep.appliedi) + if lg != nil { + lg.Panic( + "unexpected leader snapshot from outdated index", + zap.Uint64("current-snapshot-index", ep.snapi), + zap.Uint64("current-applied-index", ep.appliedi), + zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index), + zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term), + ) + } else { + plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1", + apply.snapshot.Metadata.Index, ep.appliedi) + } } // wait for raftNode to persist snapshot onto the disk @@ -924,25 +1163,51 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot) if err != nil { - plog.Panic(err) + if lg != nil { + lg.Panic("failed to open snapshot backend", zap.Error(err)) + } else { + plog.Panic(err) + } } // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. if s.lessor != nil { - plog.Info("recovering lessor...") - s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() }) - plog.Info("finished recovering lessor") + if lg != nil { + lg.Info("restoring lease store") + } else { + plog.Info("recovering lessor...") + } + + s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write(traceutil.TODO()) }) + + if lg != nil { + lg.Info("restored lease store") + } else { + plog.Info("finished recovering lessor") + } } - plog.Info("restoring mvcc store...") + if lg != nil { + lg.Info("restoring mvcc store") + } else { + plog.Info("restoring mvcc store...") + } if err := s.kv.Restore(newbe); err != nil { - plog.Panicf("restore KV error: %v", err) + if lg != nil { + lg.Panic("failed to restore mvcc store", zap.Error(err)) + } else { + plog.Panicf("restore KV error: %v", err) + } } - s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex()) - plog.Info("finished restoring mvcc store") + s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex()) + if lg != nil { + lg.Info("restored mvcc store") + } else { + plog.Info("finished restoring mvcc store") + } // Closing old backend might block until all the txns // on the backend are finished. 
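Aside, not part of the vendored patch: nearly every hunk in this file follows the same dual-logger convention, preferring the structured zap logger when one is configured and falling back to the legacy printf-style logger otherwise. A minimal sketch of that convention; the helper name below is an assumption, the standard library log package stands in for the old plog logger, and the message and field names mirror the applySnapshot hunk above.

package main

import (
	"log"

	"go.uber.org/zap"
)

// logSnapshotApplied shows the nil-check shape used throughout the patched
// etcdserver code: structured fields when a zap logger exists, printf otherwise.
func logSnapshotApplied(lg *zap.Logger, snapIndex, appliedIndex uint64) {
	if lg != nil {
		lg.Info(
			"applied snapshot",
			zap.Uint64("current-snapshot-index", snapIndex),
			zap.Uint64("current-applied-index", appliedIndex),
		)
	} else {
		log.Printf("finished applying incoming snapshot at index %d", snapIndex)
	}
}

func main() {
	logSnapshotApplied(zap.NewExample(), 100, 99) // structured zap output
	logSnapshotApplied(nil, 100, 99)              // legacy fallback path
}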
@@ -950,53 +1215,126 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { s.bemu.Lock() oldbe := s.be go func() { - plog.Info("closing old backend...") - defer plog.Info("finished closing old backend") - + if lg != nil { + lg.Info("closing old backend file") + } else { + plog.Info("closing old backend...") + } + defer func() { + if lg != nil { + lg.Info("closed old backend file") + } else { + plog.Info("finished closing old backend") + } + }() if err := oldbe.Close(); err != nil { - plog.Panicf("close backend error: %v", err) + if lg != nil { + lg.Panic("failed to close old backend", zap.Error(err)) + } else { + plog.Panicf("close backend error: %v", err) + } } }() s.be = newbe s.bemu.Unlock() - plog.Info("recovering alarms...") + if lg != nil { + lg.Info("restoring alarm store") + } else { + plog.Info("recovering alarms...") + } + if err := s.restoreAlarms(); err != nil { - plog.Panicf("restore alarms error: %v", err) + if lg != nil { + lg.Panic("failed to restore alarm store", zap.Error(err)) + } else { + plog.Panicf("restore alarms error: %v", err) + } + } + + if lg != nil { + lg.Info("restored alarm store") + } else { + plog.Info("finished recovering alarms") } - plog.Info("finished recovering alarms") if s.authStore != nil { - plog.Info("recovering auth store...") + if lg != nil { + lg.Info("restoring auth store") + } else { + plog.Info("recovering auth store...") + } + s.authStore.Recover(newbe) - plog.Info("finished recovering auth store") + + if lg != nil { + lg.Info("restored auth store") + } else { + plog.Info("finished recovering auth store") + } + } + + if lg != nil { + lg.Info("restoring v2 store") + } else { + plog.Info("recovering store v2...") + } + if err := s.v2store.Recovery(apply.snapshot.Data); err != nil { + if lg != nil { + lg.Panic("failed to restore v2 store", zap.Error(err)) + } else { + plog.Panicf("recovery store error: %v", err) + } } - plog.Info("recovering store v2...") - if err := s.store.Recovery(apply.snapshot.Data); err != nil { - plog.Panicf("recovery store error: %v", err) + if lg != nil { + lg.Info("restored v2 store") + } else { + plog.Info("finished recovering store v2") + } + + s.cluster.SetBackend(newbe) + + if lg != nil { + lg.Info("restoring cluster configuration") + } else { + plog.Info("recovering cluster configuration...") } - plog.Info("finished recovering store v2") - s.cluster.SetBackend(s.be) - plog.Info("recovering cluster configuration...") s.cluster.Recover(api.UpdateCapability) - plog.Info("finished recovering cluster configuration") - plog.Info("removing old peers from network...") + if lg != nil { + lg.Info("restored cluster configuration") + lg.Info("removing old peers from network") + } else { + plog.Info("finished recovering cluster configuration") + plog.Info("removing old peers from network...") + } + // recover raft transport s.r.transport.RemoveAllPeers() - plog.Info("finished removing old peers from network") - plog.Info("adding peers from new cluster configuration into network...") + if lg != nil { + lg.Info("removed old peers from network") + lg.Info("adding peers from new cluster configuration") + } else { + plog.Info("finished removing old peers from network") + plog.Info("adding peers from new cluster configuration into network...") + } + for _, m := range s.cluster.Members() { if m.ID == s.ID() { continue } s.r.transport.AddPeer(m.ID, m.PeerURLs) } - plog.Info("finished adding peers from new cluster configuration into network...") + + if lg != nil { + lg.Info("added peers from new cluster 
configuration") + } else { + plog.Info("finished adding peers from new cluster configuration into network...") + } ep.appliedt = apply.snapshot.Metadata.Term ep.appliedi = apply.snapshot.Metadata.Index @@ -1010,7 +1348,15 @@ func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) { } firsti := apply.entries[0].Index if firsti > ep.appliedi+1 { - plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi) + if lg := s.getLogger(); lg != nil { + lg.Panic( + "unexpected committed entry index", + zap.Uint64("current-applied-index", ep.appliedi), + zap.Uint64("first-committed-entry-index", firsti), + ) + } else { + plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi) + } } var ents []raftpb.Entry if ep.appliedi+1-firsti < uint64(len(apply.entries)) { @@ -1026,17 +1372,28 @@ func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) { } func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) { - if ep.appliedi-ep.snapi <= s.Cfg.SnapCount { + if ep.appliedi-ep.snapi <= s.Cfg.SnapshotCount { return } - plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi) + if lg := s.getLogger(); lg != nil { + lg.Info( + "triggering snapshot", + zap.String("local-member-id", s.ID().String()), + zap.Uint64("local-member-applied-index", ep.appliedi), + zap.Uint64("local-member-snapshot-index", ep.snapi), + zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount), + ) + } else { + plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi) + } + s.snapshot(ep.appliedi, ep.confState) ep.snapi = ep.appliedi } -func (s *EtcdServer) isMultiNode() bool { - return s.cluster != nil && len(s.cluster.MemberIDs()) > 1 +func (s *EtcdServer) hasMultipleVotingMembers() bool { + return s.cluster != nil && len(s.cluster.VotingMemberIDs()) > 1 } func (s *EtcdServer) isLeader() bool { @@ -1045,10 +1402,24 @@ func (s *EtcdServer) isLeader() bool { // MoveLeader transfers the leader to the given transferee. 
func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error { + if !s.cluster.IsMemberExist(types.ID(transferee)) || s.cluster.Member(types.ID(transferee)).IsLearner { + return ErrBadLeaderTransferee + } + now := time.Now() interval := time.Duration(s.Cfg.TickMs) * time.Millisecond - plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee)) + if lg := s.getLogger(); lg != nil { + lg.Info( + "leadership transfer starting", + zap.String("local-member-id", s.ID().String()), + zap.String("current-leader-member-id", types.ID(lead).String()), + zap.String("transferee-member-id", types.ID(transferee).String()), + ) + } else { + plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee)) + } + s.r.TransferLeadership(ctx, lead, transferee) for s.Lead() != transferee { select { @@ -1059,24 +1430,49 @@ func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) er } // TODO: drain all requests, or drop all messages to the old leader - - plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now)) + if lg := s.getLogger(); lg != nil { + lg.Info( + "leadership transfer finished", + zap.String("local-member-id", s.ID().String()), + zap.String("old-leader-member-id", types.ID(lead).String()), + zap.String("new-leader-member-id", types.ID(transferee).String()), + zap.Duration("took", time.Since(now)), + ) + } else { + plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now)) + } return nil } // TransferLeadership transfers the leader to the chosen transferee. func (s *EtcdServer) TransferLeadership() error { if !s.isLeader() { - plog.Printf("skipped leadership transfer for stopping non-leader member") + if lg := s.getLogger(); lg != nil { + lg.Info( + "skipped leadership transfer; local server is not leader", + zap.String("local-member-id", s.ID().String()), + zap.String("current-leader-member-id", types.ID(s.Lead()).String()), + ) + } else { + plog.Printf("skipped leadership transfer for stopping non-leader member") + } return nil } - if !s.isMultiNode() { - plog.Printf("skipped leadership transfer for single member cluster") + if !s.hasMultipleVotingMembers() { + if lg := s.getLogger(); lg != nil { + lg.Info( + "skipped leadership transfer for single voting member cluster", + zap.String("local-member-id", s.ID().String()), + zap.String("current-leader-member-id", types.ID(s.Lead()).String()), + ) + } else { + plog.Printf("skipped leadership transfer for single voting member cluster") + } return nil } - transferee, ok := longestConnected(s.r.transport, s.cluster.MemberIDs()) + transferee, ok := longestConnected(s.r.transport, s.cluster.VotingMemberIDs()) if !ok { return ErrUnhealthy } @@ -1106,7 +1502,11 @@ func (s *EtcdServer) HardStop() { // Do and Process cannot be called after Stop has been invoked. 
func (s *EtcdServer) Stop() { if err := s.TransferLeadership(); err != nil { - plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err) + if lg := s.getLogger(); lg != nil { + lg.Warn("leadership transfer failed", zap.String("local-member-id", s.ID().String()), zap.Error(err)) + } else { + plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err) + } } s.HardStop() } @@ -1133,14 +1533,14 @@ func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done } func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() } func (s *EtcdServer) LeaderStats() []byte { - lead := atomic.LoadUint64(&s.r.lead) + lead := s.getLead() if lead != uint64(s.id) { return nil } return s.lstats.JSON() } -func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() } +func (s *EtcdServer) StoreStats() []byte { return s.v2store.JsonStats() } func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error { if s.authStore == nil { @@ -1168,31 +1568,67 @@ func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]* return nil, err } - if s.Cfg.StrictReconfigCheck { - // by default StrictReconfigCheck is enabled; reject new members if unhealthy - if !s.cluster.IsReadyToAddNewMember() { - plog.Warningf("not enough started members, rejecting member add %+v", memb) - return nil, ErrNotEnoughStartedMembers - } - if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) { - plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb) - return nil, ErrUnhealthy - } - } - // TODO: move Member to protobuf type b, err := json.Marshal(memb) if err != nil { return nil, err } + + // by default StrictReconfigCheck is enabled; reject new members if unhealthy. 
+ if err := s.mayAddMember(memb); err != nil { + return nil, err + } + cc := raftpb.ConfChange{ Type: raftpb.ConfChangeAddNode, NodeID: uint64(memb.ID), Context: b, } + + if memb.IsLearner { + cc.Type = raftpb.ConfChangeAddLearnerNode + } + return s.configure(ctx, cc) } +func (s *EtcdServer) mayAddMember(memb membership.Member) error { + if !s.Cfg.StrictReconfigCheck { + return nil + } + + // protect quorum when adding voting member + if !memb.IsLearner && !s.cluster.IsReadyToAddVotingMember() { + if lg := s.getLogger(); lg != nil { + lg.Warn( + "rejecting member add request; not enough healthy members", + zap.String("local-member-id", s.ID().String()), + zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), + zap.Error(ErrNotEnoughStartedMembers), + ) + } else { + plog.Warningf("not enough started members, rejecting member add %+v", memb) + } + return ErrNotEnoughStartedMembers + } + + if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.VotingMembers()) { + if lg := s.getLogger(); lg != nil { + lg.Warn( + "rejecting member add request; local member has not been connected to all peers, reconfigure breaks active quorum", + zap.String("local-member-id", s.ID().String()), + zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), + zap.Error(ErrUnhealthy), + ) + } else { + plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb) + } + return ErrUnhealthy + } + + return nil +} + func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) { if err := s.checkMembershipOperationPermission(ctx); err != nil { return nil, err @@ -1210,13 +1646,167 @@ func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership return s.configure(ctx, cc) } +// PromoteMember promotes a learner node to a voting node. +func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) { + // only raft leader has information on whether the to-be-promoted learner node is ready. If promoteMember call + // fails with ErrNotLeader, forward the request to leader node via HTTP. If promoteMember call fails with error + // other than ErrNotLeader, return the error. + resp, err := s.promoteMember(ctx, id) + if err == nil { + learnerPromoteSucceed.Inc() + return resp, nil + } + if err != ErrNotLeader { + learnerPromoteFailed.WithLabelValues(err.Error()).Inc() + return resp, err + } + + cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout()) + defer cancel() + // forward to leader + for cctx.Err() == nil { + leader, err := s.waitLeader(cctx) + if err != nil { + return nil, err + } + for _, url := range leader.PeerURLs { + resp, err := promoteMemberHTTP(cctx, url, id, s.peerRt) + if err == nil { + return resp, nil + } + // If member promotion failed, return early. Otherwise keep retry. + if err == ErrLearnerNotReady || err == membership.ErrIDNotFound || err == membership.ErrMemberNotLearner { + return nil, err + } + } + } + + if cctx.Err() == context.DeadlineExceeded { + return nil, ErrTimeout + } + return nil, ErrCanceled +} + +// promoteMember checks whether the to-be-promoted learner node is ready before sending the promote +// request to raft. +// The function returns ErrNotLeader if the local node is not raft leader (therefore does not have +// enough information to determine if the learner node is ready), returns ErrLearnerNotReady if the +// local node is leader (therefore has enough information) but decided the learner node is not ready +// to be promoted. 
+func (s *EtcdServer) promoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) { + if err := s.checkMembershipOperationPermission(ctx); err != nil { + return nil, err + } + + // check if we can promote this learner. + if err := s.mayPromoteMember(types.ID(id)); err != nil { + return nil, err + } + + // build the context for the promote confChange. mark IsLearner to false and IsPromote to true. + promoteChangeContext := membership.ConfigChangeContext{ + Member: membership.Member{ + ID: types.ID(id), + }, + IsPromote: true, + } + + b, err := json.Marshal(promoteChangeContext) + if err != nil { + return nil, err + } + + cc := raftpb.ConfChange{ + Type: raftpb.ConfChangeAddNode, + NodeID: id, + Context: b, + } + + return s.configure(ctx, cc) +} + +func (s *EtcdServer) mayPromoteMember(id types.ID) error { + err := s.isLearnerReady(uint64(id)) + if err != nil { + return err + } + + if !s.Cfg.StrictReconfigCheck { + return nil + } + if !s.cluster.IsReadyToPromoteMember(uint64(id)) { + if lg := s.getLogger(); lg != nil { + lg.Warn( + "rejecting member promote request; not enough healthy members", + zap.String("local-member-id", s.ID().String()), + zap.String("requested-member-remove-id", id.String()), + zap.Error(ErrNotEnoughStartedMembers), + ) + } else { + plog.Warningf("not enough started members, rejecting promote member %s", id) + } + return ErrNotEnoughStartedMembers + } + + return nil +} + +// check whether the learner catches up with leader or not. +// Note: it will return nil if member is not found in cluster or if member is not learner. +// These two conditions will be checked before apply phase later. +func (s *EtcdServer) isLearnerReady(id uint64) error { + rs := s.raftStatus() + + // leader's raftStatus.Progress is not nil + if rs.Progress == nil { + return ErrNotLeader + } + + var learnerMatch uint64 + isFound := false + leaderID := rs.ID + for memberID, progress := range rs.Progress { + if id == memberID { + // check its status + learnerMatch = progress.Match + isFound = true + break + } + } + + if isFound { + leaderMatch := rs.Progress[leaderID].Match + // the learner's Match not caught up with leader yet + if float64(learnerMatch) < float64(leaderMatch)*readyPercent { + return ErrLearnerNotReady + } + } + + return nil +} + func (s *EtcdServer) mayRemoveMember(id types.ID) error { if !s.Cfg.StrictReconfigCheck { return nil } - if !s.cluster.IsReadyToRemoveMember(uint64(id)) { - plog.Warningf("not enough started members, rejecting remove member %s", id) + isLearner := s.cluster.IsMemberExist(id) && s.cluster.Member(id).IsLearner + // no need to check quorum when removing non-voting member + if isLearner { + return nil + } + + if !s.cluster.IsReadyToRemoveVotingMember(uint64(id)) { + if lg := s.getLogger(); lg != nil { + lg.Warn( + "rejecting member remove request; not enough healthy members", + zap.String("local-member-id", s.ID().String()), + zap.String("requested-member-remove-id", id.String()), + zap.Error(ErrNotEnoughStartedMembers), + ) + } else { + plog.Warningf("not enough started members, rejecting remove member %s", id) + } return ErrNotEnoughStartedMembers } @@ -1226,10 +1816,20 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error { } // protect quorum if some members are down - m := s.cluster.Members() + m := s.cluster.VotingMembers() active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m) if (active - 1) < 1+((len(m)-1)/2) { - plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id) + if lg 
:= s.getLogger(); lg != nil { + lg.Warn( + "rejecting member remove request; local member has not been connected to all peers, reconfigure breaks active quorum", + zap.String("local-member-id", s.ID().String()), + zap.String("requested-member-remove", id.String()), + zap.Int("active-peers", active), + zap.Error(ErrUnhealthy), + ) + } else { + plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id) + } return ErrUnhealthy } @@ -1253,18 +1853,64 @@ func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ( return s.configure(ctx, cc) } -// Implement the RaftTimer interface +func (s *EtcdServer) setCommittedIndex(v uint64) { + atomic.StoreUint64(&s.committedIndex, v) +} + +func (s *EtcdServer) getCommittedIndex() uint64 { + return atomic.LoadUint64(&s.committedIndex) +} + +func (s *EtcdServer) setAppliedIndex(v uint64) { + atomic.StoreUint64(&s.appliedIndex, v) +} -func (s *EtcdServer) Index() uint64 { return atomic.LoadUint64(&s.r.index) } +func (s *EtcdServer) getAppliedIndex() uint64 { + return atomic.LoadUint64(&s.appliedIndex) +} -func (s *EtcdServer) Term() uint64 { return atomic.LoadUint64(&s.r.term) } +func (s *EtcdServer) setTerm(v uint64) { + atomic.StoreUint64(&s.term, v) +} -// Lead is only for testing purposes. -// TODO: add Raft server interface to expose raft related info: -// Index, Term, Lead, Committed, Applied, LastIndex, etc. -func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) } +func (s *EtcdServer) getTerm() uint64 { + return atomic.LoadUint64(&s.term) +} -func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) } +func (s *EtcdServer) setLead(v uint64) { + atomic.StoreUint64(&s.lead, v) +} + +func (s *EtcdServer) getLead() uint64 { + return atomic.LoadUint64(&s.lead) +} + +func (s *EtcdServer) leaderChangedNotify() <-chan struct{} { + s.leaderChangedMu.RLock() + defer s.leaderChangedMu.RUnlock() + return s.leaderChanged +} + +// RaftStatusGetter represents etcd server and Raft progress. 
+type RaftStatusGetter interface { + ID() types.ID + Leader() types.ID + CommittedIndex() uint64 + AppliedIndex() uint64 + Term() uint64 +} + +func (s *EtcdServer) ID() types.ID { return s.id } + +func (s *EtcdServer) Leader() types.ID { return types.ID(s.getLead()) } + +func (s *EtcdServer) Lead() uint64 { return s.getLead() } + +func (s *EtcdServer) CommittedIndex() uint64 { return s.getCommittedIndex() } + +func (s *EtcdServer) AppliedIndex() uint64 { return s.getAppliedIndex() } + +func (s *EtcdServer) Term() uint64 { return s.getTerm() } type confChangeResponse struct { membs []*membership.Member @@ -1277,21 +1923,37 @@ type confChangeResponse struct { func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) { cc.ID = s.reqIDGen.Next() ch := s.w.Register(cc.ID) + start := time.Now() if err := s.r.ProposeConfChange(ctx, cc); err != nil { s.w.Trigger(cc.ID, nil) return nil, err } + select { case x := <-ch: if x == nil { - plog.Panicf("configure trigger value should never be nil") + if lg := s.getLogger(); lg != nil { + lg.Panic("failed to configure") + } else { + plog.Panicf("configure trigger value should never be nil") + } } resp := x.(*confChangeResponse) + if lg := s.getLogger(); lg != nil { + lg.Info( + "applied a configuration change through raft", + zap.String("local-member-id", s.ID().String()), + zap.String("raft-conf-change", cc.Type.String()), + zap.String("raft-conf-change-node-id", types.ID(cc.NodeID).String()), + ) + } return resp.membs, resp.err + case <-ctx.Done(): s.w.Trigger(cc.ID, nil) // GC wait return nil, s.parseProposeCtxErr(ctx.Err(), start) + case <-s.stopping: return nil, ErrStopped } @@ -1321,10 +1983,20 @@ func (s *EtcdServer) sync(timeout time.Duration) { // static clientURLs of the server. // The function keeps attempting to register until it succeeds, // or its server is stopped. +// +// Use v2 store to encode member attributes, and apply through Raft +// but does not go through v2 API endpoint, which means even with v2 +// client handler disabled (e.g. 
--enable-v2=false), cluster can still +// process publish requests through rafthttp +// TODO: Deprecate v2 store func (s *EtcdServer) publish(timeout time.Duration) { b, err := json.Marshal(s.attributes) if err != nil { - plog.Panicf("json marshal error: %v", err) + if lg := s.getLogger(); lg != nil { + lg.Panic("failed to marshal JSON", zap.Error(err)) + } else { + plog.Panicf("json marshal error: %v", err) + } return } req := pb.Request{ @@ -1340,13 +2012,47 @@ func (s *EtcdServer) publish(timeout time.Duration) { switch err { case nil: close(s.readych) - plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID()) + if lg := s.getLogger(); lg != nil { + lg.Info( + "published local member to cluster through raft", + zap.String("local-member-id", s.ID().String()), + zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), + zap.String("request-path", req.Path), + zap.String("cluster-id", s.cluster.ID().String()), + zap.Duration("publish-timeout", timeout), + ) + } else { + plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID()) + } return + case ErrStopped: - plog.Infof("aborting publish because server is stopped") + if lg := s.getLogger(); lg != nil { + lg.Warn( + "stopped publish because server is stopped", + zap.String("local-member-id", s.ID().String()), + zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), + zap.Duration("publish-timeout", timeout), + zap.Error(err), + ) + } else { + plog.Infof("aborting publish because server is stopped") + } return + default: - plog.Errorf("publish error: %v", err) + if lg := s.getLogger(); lg != nil { + lg.Warn( + "failed to publish local member to cluster through raft", + zap.String("local-member-id", s.ID().String()), + zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), + zap.String("request-path", req.Path), + zap.Duration("publish-timeout", timeout), + zap.Error(err), + ) + } else { + plog.Errorf("publish error: %v", err) + } } } } @@ -1354,7 +2060,20 @@ func (s *EtcdServer) publish(timeout time.Duration) { func (s *EtcdServer) sendMergedSnap(merged snap.Message) { atomic.AddInt64(&s.inflightSnapshots, 1) + lg := s.getLogger() + fields := []zap.Field{ + zap.String("from", s.ID().String()), + zap.String("to", types.ID(merged.To).String()), + zap.Int64("bytes", merged.TotalSize), + zap.String("size", humanize.Bytes(uint64(merged.TotalSize))), + } + + now := time.Now() s.r.transport.SendSnapshot(merged) + if lg != nil { + lg.Info("sending merged snapshot", fields...) + } + s.goAttach(func() { select { case ok := <-merged.CloseNotify(): @@ -1368,8 +2087,17 @@ func (s *EtcdServer) sendMergedSnap(merged snap.Message) { case <-s.stopping: } } + atomic.AddInt64(&s.inflightSnapshots, -1) + + if lg != nil { + lg.Info("sent merged snapshot", append(fields, zap.Duration("took", time.Since(now)))...) + } + case <-s.stopping: + if lg != nil { + lg.Warn("canceled sending merged snapshot; server stopping", fields...) + } return } }) @@ -1378,12 +2106,18 @@ func (s *EtcdServer) sendMergedSnap(merged snap.Message) { // apply takes entries received from Raft (after it has been committed) and // applies them to the current state of the EtcdServer. // The given entries should not be empty. 
-func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) { +func (s *EtcdServer) apply( + es []raftpb.Entry, + confState *raftpb.ConfState, +) (appliedt uint64, appliedi uint64, shouldStop bool) { for i := range es { e := es[i] switch e.Type { case raftpb.EntryNormal: s.applyEntryNormal(&e) + s.setAppliedIndex(e.Index) + s.setTerm(e.Term) + case raftpb.EntryConfChange: // set the consistent index of current executing entry if e.Index > s.consistIndex.ConsistentIndex() { @@ -1393,15 +2127,21 @@ func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appl pbutil.MustUnmarshal(&cc, e.Data) removedSelf, err := s.applyConfChange(cc, confState) s.setAppliedIndex(e.Index) + s.setTerm(e.Term) shouldStop = shouldStop || removedSelf s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err}) + default: - plog.Panicf("entry type should be either EntryNormal or EntryConfChange") + if lg := s.getLogger(); lg != nil { + lg.Panic( + "unknown entry type; must be either EntryNormal or EntryConfChange", + zap.String("type", e.Type.String()), + ) + } else { + plog.Panicf("entry type should be either EntryNormal or EntryConfChange") + } } - atomic.StoreUint64(&s.r.index, e.Index) - atomic.StoreUint64(&s.r.term, e.Term) - appliedt = e.Term - appliedi = e.Index + appliedi, appliedt = e.Index, e.Term } return appliedt, appliedi, shouldStop } @@ -1414,7 +2154,6 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { s.consistIndex.setConsistentIndex(e.Index) shouldApplyV3 = true } - defer s.setAppliedIndex(e.Index) // raft state machine may generate noop entry when leader confirmation. // skip it in advance to avoid some potential bug in the future @@ -1473,7 +2212,17 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { return } - plog.Errorf("applying raft message exceeded backend quota") + if lg := s.getLogger(); lg != nil { + lg.Warn( + "message exceeded backend quota; raising alarm", + zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), + zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), + zap.Error(ar.err), + ) + } else { + plog.Errorf("applying raft message exceeded backend quota") + } + s.goAttach(func() { a := &pb.AlarmRequest{ MemberID: uint64(s.ID()), @@ -1493,20 +2242,49 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con s.r.ApplyConfChange(cc) return false, err } + + lg := s.getLogger() *confState = *s.r.ApplyConfChange(cc) switch cc.Type { - case raftpb.ConfChangeAddNode: - m := new(membership.Member) - if err := json.Unmarshal(cc.Context, m); err != nil { - plog.Panicf("unmarshal member should never fail: %v", err) + case raftpb.ConfChangeAddNode, raftpb.ConfChangeAddLearnerNode: + confChangeContext := new(membership.ConfigChangeContext) + if err := json.Unmarshal(cc.Context, confChangeContext); err != nil { + if lg != nil { + lg.Panic("failed to unmarshal member", zap.Error(err)) + } else { + plog.Panicf("unmarshal member should never fail: %v", err) + } } - if cc.NodeID != uint64(m.ID) { - plog.Panicf("nodeID should always be equal to member ID") + if cc.NodeID != uint64(confChangeContext.Member.ID) { + if lg != nil { + lg.Panic( + "got different member ID", + zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()), + zap.String("member-id-from-message", confChangeContext.Member.ID.String()), + ) + } else { + plog.Panicf("nodeID should always be equal to member ID") + } } - s.cluster.AddMember(m) - if m.ID 
!= s.id { - s.r.transport.AddPeer(m.ID, m.PeerURLs) + if confChangeContext.IsPromote { + s.cluster.PromoteMember(confChangeContext.Member.ID) + } else { + s.cluster.AddMember(&confChangeContext.Member) + + if confChangeContext.Member.ID != s.id { + s.r.transport.AddPeer(confChangeContext.Member.ID, confChangeContext.PeerURLs) + } } + + // update the isLearner metric when this server id is equal to the id in raft member confChange + if confChangeContext.Member.ID == s.id { + if cc.Type == raftpb.ConfChangeAddLearnerNode { + isLearner.Set(1) + } else { + isLearner.Set(0) + } + } + case raftpb.ConfChangeRemoveNode: id := types.ID(cc.NodeID) s.cluster.RemoveMember(id) @@ -1514,13 +2292,26 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con return true, nil } s.r.transport.RemovePeer(id) + case raftpb.ConfChangeUpdateNode: m := new(membership.Member) if err := json.Unmarshal(cc.Context, m); err != nil { - plog.Panicf("unmarshal member should never fail: %v", err) + if lg != nil { + lg.Panic("failed to unmarshal member", zap.Error(err)) + } else { + plog.Panicf("unmarshal member should never fail: %v", err) + } } if cc.NodeID != uint64(m.ID) { - plog.Panicf("nodeID should always be equal to member ID") + if lg != nil { + lg.Panic( + "got different member ID", + zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()), + zap.String("member-id-from-message", m.ID.String()), + ) + } else { + plog.Panicf("nodeID should always be equal to member ID") + } } s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes) if m.ID != s.id { @@ -1532,7 +2323,7 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con // TODO: non-blocking snapshot func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { - clone := s.store.Clone() + clone := s.v2store.Clone() // commit kv to write metadata (for example: consistent index) to disk. // KV().commit() updates the consistent index in backend. // All operations that update consistent index must be called sequentially @@ -1542,11 +2333,17 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { s.KV().Commit() s.goAttach(func() { + lg := s.getLogger() + d, err := clone.SaveNoCopy() // TODO: current store will never fail to do a snapshot // what should we do if the store might fail? if err != nil { - plog.Panicf("store save should never fail: %v", err) + if lg != nil { + lg.Panic("failed to save v2 store", zap.Error(err)) + } else { + plog.Panicf("store save should never fail: %v", err) + } } snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d) if err != nil { @@ -1555,17 +2352,28 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { if err == raft.ErrSnapOutOfDate { return } - plog.Panicf("unexpected create snapshot error %v", err) + if lg != nil { + lg.Panic("failed to create snapshot", zap.Error(err)) + } else { + plog.Panicf("unexpected create snapshot error %v", err) + } } // SaveSnap saves the snapshot and releases the locked wal files // to the snapshot index. 
if err = s.r.storage.SaveSnap(snap); err != nil { - plog.Fatalf("save snapshot error: %v", err) + if lg != nil { + lg.Panic("failed to save snapshot", zap.Error(err)) + } else { + plog.Fatalf("save snapshot error: %v", err) + } } - plog.Infof("saved snapshot at index %d", snap.Metadata.Index) - - if err = s.r.storage.Release(snap); err != nil { - plog.Panicf("failed to release wal %v", err) + if lg != nil { + lg.Info( + "saved snapshot", + zap.Uint64("snapshot-index", snap.Metadata.Index), + ) + } else { + plog.Infof("saved snapshot at index %d", snap.Metadata.Index) } // When sending a snapshot, etcd will pause compaction. @@ -1574,15 +2382,20 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { // the snapshot sent might already be compacted. It happens when the snapshot takes long time // to send and save. Pausing compaction avoids triggering a snapshot sending cycle. if atomic.LoadInt64(&s.inflightSnapshots) != 0 { - plog.Infof("skip compaction since there is an inflight snapshot") + if lg != nil { + lg.Info("skip compaction since there is an inflight snapshot") + } else { + plog.Infof("skip compaction since there is an inflight snapshot") + } return } // keep some in memory log entries for slow followers. compacti := uint64(1) - if snapi > numberOfCatchUpEntries { - compacti = snapi - numberOfCatchUpEntries + if snapi > s.Cfg.SnapshotCatchUpEntries { + compacti = snapi - s.Cfg.SnapshotCatchUpEntries } + err = s.r.raftStorage.Compact(compacti) if err != nil { // the compaction was done asynchronously with the progress of raft. @@ -1590,9 +2403,20 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { if err == raft.ErrCompacted { return } - plog.Panicf("unexpected compaction error %v", err) + if lg != nil { + lg.Panic("failed to compact", zap.Error(err)) + } else { + plog.Panicf("unexpected compaction error %v", err) + } + } + if lg != nil { + lg.Info( + "compacted Raft logs", + zap.Uint64("compact-index", compacti), + ) + } else { + plog.Infof("compacted raft log at %d", compacti) } - plog.Infof("compacted raft log at %d", compacti) }) } @@ -1640,7 +2464,7 @@ func (s *EtcdServer) monitorVersions() { continue } - v := decideClusterVersion(getVersions(s.cluster, s.id, s.peerRt)) + v := decideClusterVersion(s.getLogger(), getVersions(s.getLogger(), s.cluster, s.id, s.peerRt)) if v != nil { // only keep major.minor version for comparison v = &semver.Version{ @@ -1670,27 +2494,60 @@ func (s *EtcdServer) monitorVersions() { } func (s *EtcdServer) updateClusterVersion(ver string) { + lg := s.getLogger() + if s.cluster.Version() == nil { - plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver)) + if lg != nil { + lg.Info( + "setting up initial cluster version", + zap.String("cluster-version", version.Cluster(ver)), + ) + } else { + plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver)) + } } else { - plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver)) + if lg != nil { + lg.Info( + "updating cluster version", + zap.String("from", version.Cluster(s.cluster.Version().String())), + zap.String("to", version.Cluster(ver)), + ) + } else { + plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver)) + } } + req := pb.Request{ Method: "PUT", Path: membership.StoreClusterVersionKey(), Val: ver, } + ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout()) _, err := 
s.Do(ctx, req) cancel() + switch err { case nil: + if lg != nil { + lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver))) + } return + case ErrStopped: - plog.Infof("aborting update cluster version because server is stopped") + if lg != nil { + lg.Warn("aborting cluster version update; server is stopped", zap.Error(err)) + } else { + plog.Infof("aborting update cluster version because server is stopped") + } return + default: - plog.Errorf("error updating cluster version (%v)", err) + if lg != nil { + lg.Warn("failed to update cluster version", zap.Error(err)) + } else { + plog.Errorf("error updating cluster version (%v)", err) + } } } @@ -1698,6 +2555,7 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { switch err { case context.Canceled: return ErrCanceled + case context.DeadlineExceeded: s.leadTimeMu.RLock() curLeadElected := s.leadElectedTime @@ -1706,8 +2564,7 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { if start.After(prevLeadLost) && start.Before(curLeadElected) { return ErrTimeoutDueToLeaderFail } - - lead := types.ID(atomic.LoadUint64(&s.r.lead)) + lead := types.ID(s.getLead()) switch lead { case types.ID(raft.None): // TODO: return error to specify it happens because the cluster does not have leader now @@ -1720,8 +2577,8 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { return ErrTimeoutDueToConnectionLost } } - return ErrTimeout + default: return err } @@ -1738,7 +2595,7 @@ func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore } func (s *EtcdServer) restoreAlarms() error { s.applyV3 = s.newApplierV3() - as, err := alarm.NewAlarmStore(s) + as, err := v3alarm.NewAlarmStore(s) if err != nil { return err } @@ -1752,22 +2609,6 @@ func (s *EtcdServer) restoreAlarms() error { return nil } -func (s *EtcdServer) getAppliedIndex() uint64 { - return atomic.LoadUint64(&s.appliedIndex) -} - -func (s *EtcdServer) setAppliedIndex(v uint64) { - atomic.StoreUint64(&s.appliedIndex, v) -} - -func (s *EtcdServer) getCommittedIndex() uint64 { - return atomic.LoadUint64(&s.committedIndex) -} - -func (s *EtcdServer) setCommittedIndex(v uint64) { - atomic.StoreUint64(&s.committedIndex, v) -} - // goAttach creates a goroutine on a given function and tracks it using // the etcdserver waitgroup. func (s *EtcdServer) goAttach(f func()) { @@ -1775,7 +2616,11 @@ func (s *EtcdServer) goAttach(f func()) { defer s.wgMu.RUnlock() select { case <-s.stopping: - plog.Warning("server has stopped (skipping goAttach)") + if lg := s.getLogger(); lg != nil { + lg.Warn("server has stopped; skipping goAttach") + } else { + plog.Warning("server has stopped (skipping goAttach)") + } return default: } @@ -1791,3 +2636,22 @@ func (s *EtcdServer) goAttach(f func()) { func (s *EtcdServer) Alarms() []*pb.AlarmMember { return s.alarmStore.Get(pb.AlarmType_NONE) } + +func (s *EtcdServer) Logger() *zap.Logger { + return s.lg +} + +// IsLearner returns if the local member is raft learner +func (s *EtcdServer) IsLearner() bool { + return s.cluster.IsLocalMemberLearner() +} + +// IsMemberExist returns if the member with the given id exists in cluster. +func (s *EtcdServer) IsMemberExist(id types.ID) bool { + return s.cluster.IsMemberExist(id) +} + +// raftStatus returns the raft status of this etcd node. 
+func (s *EtcdServer) raftStatus() raft.Status { + return s.r.Node.Status() +} diff --git a/vendor/go.etcd.io/etcd/etcdserver/server_access_control.go b/vendor/go.etcd.io/etcd/etcdserver/server_access_control.go new file mode 100644 index 000000000..09e2255cc --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdserver/server_access_control.go @@ -0,0 +1,65 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import "sync" + +// AccessController controls etcd server HTTP request access. +type AccessController struct { + corsMu sync.RWMutex + CORS map[string]struct{} + hostWhitelistMu sync.RWMutex + HostWhitelist map[string]struct{} +} + +// NewAccessController returns a new "AccessController" with default "*" values. +func NewAccessController() *AccessController { + return &AccessController{ + CORS: map[string]struct{}{"*": {}}, + HostWhitelist: map[string]struct{}{"*": {}}, + } +} + +// OriginAllowed determines whether the server will allow a given CORS origin. +// If CORS is empty, allow all. +func (ac *AccessController) OriginAllowed(origin string) bool { + ac.corsMu.RLock() + defer ac.corsMu.RUnlock() + if len(ac.CORS) == 0 { // allow all + return true + } + _, ok := ac.CORS["*"] + if ok { + return true + } + _, ok = ac.CORS[origin] + return ok +} + +// IsHostWhitelisted returns true if the host is whitelisted. +// If whitelist is empty, allow all. +func (ac *AccessController) IsHostWhitelisted(host string) bool { + ac.hostWhitelistMu.RLock() + defer ac.hostWhitelistMu.RUnlock() + if len(ac.HostWhitelist) == 0 { // allow all + return true + } + _, ok := ac.HostWhitelist["*"] + if ok { + return true + } + _, ok = ac.HostWhitelist[host] + return ok +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go b/vendor/go.etcd.io/etcd/etcdserver/snapshot_merge.go similarity index 60% rename from vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go rename to vendor/go.etcd.io/etcd/etcdserver/snapshot_merge.go index 928aa95b6..417776813 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go +++ b/vendor/go.etcd.io/etcd/etcdserver/snapshot_merge.go @@ -17,9 +17,12 @@ package etcdserver import ( "io" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" + "go.etcd.io/etcd/etcdserver/api/snap" + "go.etcd.io/etcd/mvcc/backend" + "go.etcd.io/etcd/raft/raftpb" + + humanize "github.com/dustin/go-humanize" + "go.uber.org/zap" ) // createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf), @@ -27,17 +30,21 @@ import ( // as ReadCloser. 
func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message { // get a snapshot of v2 store as []byte - clone := s.store.Clone() + clone := s.v2store.Clone() d, err := clone.SaveNoCopy() if err != nil { - plog.Panicf("store save should never fail: %v", err) + if lg := s.getLogger(); lg != nil { + lg.Panic("failed to save v2 store data", zap.Error(err)) + } else { + plog.Panicf("store save should never fail: %v", err) + } } // commit kv to write metadata(for example: consistent index). s.KV().Commit() dbsnap := s.be.Snapshot() // get a snapshot of v3 KV as readCloser - rc := newSnapshotReaderCloser(dbsnap) + rc := newSnapshotReaderCloser(s.getLogger(), dbsnap) // put the []byte snapshot of store into raft snapshot and return the merged snapshot with // KV readCloser snapshot. @@ -54,19 +61,39 @@ func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi return *snap.NewMessage(m, rc, dbsnap.Size()) } -func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser { +func newSnapshotReaderCloser(lg *zap.Logger, snapshot backend.Snapshot) io.ReadCloser { pr, pw := io.Pipe() go func() { n, err := snapshot.WriteTo(pw) if err == nil { - plog.Infof("wrote database snapshot out [total bytes: %d]", n) + if lg != nil { + lg.Info( + "sent database snapshot to writer", + zap.Int64("bytes", n), + zap.String("size", humanize.Bytes(uint64(n))), + ) + } else { + plog.Infof("wrote database snapshot out [total bytes: %d]", n) + } } else { - plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err) + if lg != nil { + lg.Warn( + "failed to send database snapshot to writer", + zap.String("size", humanize.Bytes(uint64(n))), + zap.Error(err), + ) + } else { + plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err) + } } pw.CloseWithError(err) err = snapshot.Close() if err != nil { - plog.Panicf("failed to close database snapshot: %v", err) + if lg != nil { + lg.Panic("failed to close database snapshot", zap.Error(err)) + } else { + plog.Panicf("failed to close database snapshot: %v", err) + } } }() return pr diff --git a/vendor/github.com/coreos/etcd/etcdserver/storage.go b/vendor/go.etcd.io/etcd/etcdserver/storage.go similarity index 53% rename from vendor/github.com/coreos/etcd/etcdserver/storage.go rename to vendor/go.etcd.io/etcd/etcdserver/storage.go index 2cddb5cb2..d57b6f9a5 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/storage.go +++ b/vendor/go.etcd.io/etcd/etcdserver/storage.go @@ -17,13 +17,15 @@ package etcdserver import ( "io" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap" - "github.com/coreos/etcd/wal" - "github.com/coreos/etcd/wal/walpb" + "go.etcd.io/etcd/etcdserver/api/snap" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/pkg/pbutil" + "go.etcd.io/etcd/pkg/types" + "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/wal" + "go.etcd.io/etcd/wal/walpb" + + "go.uber.org/zap" ) type Storage interface { @@ -34,10 +36,6 @@ type Storage interface { SaveSnap(snap raftpb.Snapshot) error // Close closes the Storage and performs finalization. Close() error - // Release releases the locked wal files older than the provided snapshot. 
- Release(snap raftpb.Snapshot) error - // Sync WAL - Sync() error } type storage struct { @@ -49,35 +47,25 @@ func NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage { return &storage{w, s} } -// SaveSnap saves the snapshot file to disk and writes the WAL snapshot entry. +// SaveSnap saves the snapshot to disk and release the locked +// wal files since they will not be used. func (st *storage) SaveSnap(snap raftpb.Snapshot) error { walsnap := walpb.Snapshot{ Index: snap.Metadata.Index, Term: snap.Metadata.Term, } - - // save the snapshot file before writing the snapshot to the wal. - // This makes it possible for the snapshot file to become orphaned, but prevents - // a WAL snapshot entry from having no corresponding snapshot file. - err := st.Snapshotter.SaveSnap(snap) + err := st.WAL.SaveSnapshot(walsnap) if err != nil { return err } - - return st.WAL.SaveSnapshot(walsnap) -} - -// Release releases resources older than the given snap and are no longer needed: -// - releases the locks to the wal files that are older than the provided wal for the given snap. -// - deletes any .snap.db files that are older than the given snap. -func (st *storage) Release(snap raftpb.Snapshot) error { - if err := st.WAL.ReleaseLockTo(snap.Metadata.Index); err != nil { + err = st.Snapshotter.SaveSnap(snap) + if err != nil { return err } - return st.Snapshotter.ReleaseSnapDBs(snap) + return st.WAL.ReleaseLockTo(snap.Metadata.Index) } -func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) { +func readWAL(lg *zap.Logger, waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) { var ( err error wmetadata []byte @@ -85,19 +73,35 @@ func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, repaired := false for { - if w, err = wal.Open(waldir, snap); err != nil { - plog.Fatalf("open wal error: %v", err) + if w, err = wal.Open(lg, waldir, snap); err != nil { + if lg != nil { + lg.Fatal("failed to open WAL", zap.Error(err)) + } else { + plog.Fatalf("open wal error: %v", err) + } } if wmetadata, st, ents, err = w.ReadAll(); err != nil { w.Close() // we can only repair ErrUnexpectedEOF and we never repair twice. 
if repaired || err != io.ErrUnexpectedEOF { - plog.Fatalf("read wal error (%v) and cannot be repaired", err) + if lg != nil { + lg.Fatal("failed to read WAL, cannot be repaired", zap.Error(err)) + } else { + plog.Fatalf("read wal error (%v) and cannot be repaired", err) + } } - if !wal.Repair(waldir) { - plog.Fatalf("WAL error (%v) cannot be repaired", err) + if !wal.Repair(lg, waldir) { + if lg != nil { + lg.Fatal("failed to repair WAL", zap.Error(err)) + } else { + plog.Fatalf("WAL error (%v) cannot be repaired", err) + } } else { - plog.Infof("repaired WAL error (%v)", err) + if lg != nil { + lg.Info("repaired WAL", zap.Error(err)) + } else { + plog.Infof("repaired WAL error (%v)", err) + } repaired = true } continue diff --git a/vendor/github.com/coreos/etcd/etcdserver/util.go b/vendor/go.etcd.io/etcd/etcdserver/util.go similarity index 71% rename from vendor/github.com/coreos/etcd/etcdserver/util.go rename to vendor/go.etcd.io/etcd/etcdserver/util.go index eb11d5d0d..fe5024ef0 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/util.go +++ b/vendor/go.etcd.io/etcd/etcdserver/util.go @@ -20,11 +20,13 @@ import ( "strings" "time" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/rafthttp" "github.com/golang/protobuf/proto" + "go.etcd.io/etcd/etcdserver/api/membership" + "go.etcd.io/etcd/etcdserver/api/rafthttp" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/pkg/types" + + "go.uber.org/zap" ) // isConnectedToQuorumSince checks whether the local member is connected to the @@ -101,24 +103,15 @@ func (nc *notifier) notify(err error) { close(nc.c) } -func warnOfExpensiveRequest(now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) { +func warnOfExpensiveRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) { var resp string if !isNil(respMsg) { resp = fmt.Sprintf("size:%d", proto.Size(respMsg)) } - warnOfExpensiveGenericRequest(now, reqStringer, "", resp, err) + warnOfExpensiveGenericRequest(lg, now, reqStringer, "", resp, err) } -func warnOfFailedRequest(now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) { - var resp string - if !isNil(respMsg) { - resp = fmt.Sprintf("size:%d", proto.Size(respMsg)) - } - d := time.Since(now) - plog.Warningf("failed to apply request,took %v,request %s,resp %s,err is %v", d, reqStringer.String(), resp, err) -} - -func warnOfExpensiveReadOnlyTxnRequest(now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) { +func warnOfExpensiveReadOnlyTxnRequest(lg *zap.Logger, now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) { reqStringer := pb.NewLoggableTxnRequest(r) var resp string if !isNil(txnResponse) { @@ -133,28 +126,39 @@ func warnOfExpensiveReadOnlyTxnRequest(now time.Time, r *pb.TxnRequest, txnRespo } resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), proto.Size(txnResponse)) } - warnOfExpensiveGenericRequest(now, reqStringer, "read-only range ", resp, err) + warnOfExpensiveGenericRequest(lg, now, reqStringer, "read-only range ", resp, err) } -func warnOfExpensiveReadOnlyRangeRequest(now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) { +func warnOfExpensiveReadOnlyRangeRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) { var resp string if !isNil(rangeResponse) { resp = 
fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), proto.Size(rangeResponse)) } - warnOfExpensiveGenericRequest(now, reqStringer, "read-only range ", resp, err) + warnOfExpensiveGenericRequest(lg, now, reqStringer, "read-only range ", resp, err) } -func warnOfExpensiveGenericRequest(now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) { - // TODO: add metrics +func warnOfExpensiveGenericRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) { d := time.Since(now) if d > warnApplyDuration { - var result string - if err != nil { - result = fmt.Sprintf("error:%v", err) + if lg != nil { + lg.Warn( + "apply request took too long", + zap.Duration("took", d), + zap.Duration("expected-duration", warnApplyDuration), + zap.String("prefix", prefix), + zap.String("request", reqStringer.String()), + zap.String("response", resp), + zap.Error(err), + ) } else { - result = resp + var result string + if err != nil { + result = fmt.Sprintf("error:%v", err) + } else { + result = resp + } + plog.Warningf("%srequest %q with result %q took too long (%v) to execute", prefix, reqStringer.String(), result, d) } - plog.Warningf("%srequest %q with result %q took too long (%v) to execute", prefix, reqStringer.String(), result, d) slowApplies.Inc() } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go b/vendor/go.etcd.io/etcd/etcdserver/v2_server.go similarity index 94% rename from vendor/github.com/coreos/etcd/etcdserver/v2_server.go rename to vendor/go.etcd.io/etcd/etcdserver/v2_server.go index b458350c1..9238b2dc5 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go +++ b/vendor/go.etcd.io/etcd/etcdserver/v2_server.go @@ -18,8 +18,8 @@ import ( "context" "time" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/store" + "go.etcd.io/etcd/etcdserver/api/v2store" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" ) type RequestV2 pb.Request @@ -39,11 +39,11 @@ type reqV2HandlerEtcdServer struct { } type reqV2HandlerStore struct { - store store.Store + store v2store.Store applier ApplierV2 } -func NewStoreRequestV2Handler(s store.Store, applier ApplierV2) RequestV2Handler { +func NewStoreRequestV2Handler(s v2store.Store, applier ApplierV2) RequestV2Handler { return &reqV2HandlerStore{s, applier} } @@ -122,14 +122,14 @@ func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) { r.ID = s.reqIDGen.Next() h := &reqV2HandlerEtcdServer{ reqV2HandlerStore: reqV2HandlerStore{ - store: s.store, + store: s.v2store, applier: s.applyV2, }, s: s, } rp := &r resp, err := ((*RequestV2)(rp)).Handle(ctx, h) - resp.Term, resp.Index = s.Term(), s.Index() + resp.Term, resp.Index = s.Term(), s.CommittedIndex() return resp, err } diff --git a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go b/vendor/go.etcd.io/etcd/etcdserver/v3_server.go similarity index 84% rename from vendor/github.com/coreos/etcd/etcdserver/v3_server.go rename to vendor/go.etcd.io/etcd/etcdserver/v3_server.go index 76ca8dee0..bfe08ea35 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go +++ b/vendor/go.etcd.io/etcd/etcdserver/v3_server.go @@ -20,15 +20,17 @@ import ( "encoding/binary" "time" - "github.com/coreos/etcd/auth" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/lease/leasehttp" - "github.com/coreos/etcd/mvcc" - "github.com/coreos/etcd/raft" + 
"go.etcd.io/etcd/auth" + "go.etcd.io/etcd/etcdserver/api/membership" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/lease" + "go.etcd.io/etcd/lease/leasehttp" + "go.etcd.io/etcd/mvcc" + "go.etcd.io/etcd/pkg/traceutil" + "go.etcd.io/etcd/raft" "github.com/gogo/protobuf/proto" + "go.uber.org/zap" ) const ( @@ -37,6 +39,7 @@ const ( // However, if the committed entries are very heavy to apply, the gap might grow. // We should stop accepting new proposals if the gap growing to a certain point. maxGapBetweenApplyAndCommitIndex = 5000 + traceThreshold = 100 * time.Millisecond ) type RaftKV interface { @@ -84,14 +87,29 @@ type Authenticator interface { } func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { + trace := traceutil.New("range", + s.getLogger(), + traceutil.Field{Key: "range_begin", Value: string(r.Key)}, + traceutil.Field{Key: "range_end", Value: string(r.RangeEnd)}, + ) + ctx = context.WithValue(ctx, traceutil.TraceKey, trace) + var resp *pb.RangeResponse var err error defer func(start time.Time) { - warnOfExpensiveReadOnlyRangeRequest(start, r, resp, err) + warnOfExpensiveReadOnlyRangeRequest(s.getLogger(), start, r, resp, err) + if resp != nil { + trace.AddField( + traceutil.Field{Key: "response_count", Value: len(resp.Kvs)}, + traceutil.Field{Key: "response_revision", Value: resp.Header.Revision}, + ) + } + trace.LogIfLong(traceThreshold) }(time.Now()) if !r.Serializable { err = s.linearizableReadNotify(ctx) + trace.Step("agreement among raft nodes before linearized reading") if err != nil { return nil, err } @@ -100,7 +118,7 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) } - get := func() { resp, err = s.applyV3Base.Range(nil, r) } + get := func() { resp, err = s.applyV3Base.Range(ctx, nil, r) } if serr := s.doSerialize(ctx, chk, get); serr != nil { err = serr return nil, err @@ -109,6 +127,7 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe } func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { + ctx = context.WithValue(ctx, traceutil.StartTimeKey, time.Now()) resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r}) if err != nil { return nil, err @@ -139,7 +158,7 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse } defer func(start time.Time) { - warnOfExpensiveReadOnlyTxnRequest(start, r, resp, err) + warnOfExpensiveReadOnlyTxnRequest(s.getLogger(), start, r, resp, err) }(time.Now()) get := func() { resp, err = s.applyV3Base.Txn(r) } @@ -185,7 +204,18 @@ func isTxnReadonly(r *pb.TxnRequest) bool { } func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { + startTime := time.Now() result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r}) + trace := traceutil.TODO() + if result != nil && result.trace != nil { + trace = result.trace + defer func() { + trace.LogIfLong(traceThreshold) + }() + applyStart := result.trace.GetStartTime() + result.trace.SetStartTime(startTime) + trace.InsertStep(0, applyStart, "process raft request") + } if r.Physical && result != nil && result.physc != nil { <-result.physc // The compaction is done deleting keys; the hash is now settled @@ -194,6 +224,7 @@ func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb. // if the compaction resumes. 
Force the finished compaction to // commit so it won't resume following a crash. s.be.ForceCommit() + trace.Step("physically apply compaction") } if err != nil { return nil, err @@ -209,6 +240,7 @@ func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb. resp.Header = &pb.ResponseHeader{} } resp.Header.Revision = s.kv.Rev() + trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision}) return resp, nil } @@ -259,7 +291,11 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e } } } - return -1, ErrTimeout + + if cctx.Err() == context.DeadlineExceeded { + return -1, ErrTimeout + } + return -1, ErrCanceled } func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { @@ -302,7 +338,11 @@ func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveR } } } - return nil, ErrTimeout + + if cctx.Err() == context.DeadlineExceeded { + return nil, ErrTimeout + } + return nil, ErrCanceled } func (s *EtcdServer) LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) { @@ -363,12 +403,22 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest return nil, err } + lg := s.getLogger() + var resp proto.Message for { checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) if err != nil { if err != auth.ErrAuthNotEnabled { - plog.Errorf("invalid authentication request to user %s was issued", r.Name) + if lg != nil { + lg.Warn( + "invalid authentication was requested", + zap.String("user", r.Name), + zap.Error(err), + ) + } else { + plog.Errorf("invalid authentication request to user %s was issued", r.Name) + } } return nil, err } @@ -391,7 +441,12 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest if checkedRevision == s.AuthStore().Revision() { break } - plog.Infof("revision when password checked is obsolete, retrying") + + if lg != nil { + lg.Info("revision when password checked became stale; retrying") + } else { + plog.Infof("revision when password checked is obsolete, retrying") + } } return resp.(*pb.AuthenticateResponse), nil @@ -509,15 +564,30 @@ func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftReque if result.err != nil { return nil, result.err } + if startTime, ok := ctx.Value(traceutil.StartTimeKey).(time.Time); ok && result.trace != nil { + applyStart := result.trace.GetStartTime() + // The trace object is created in apply. Here reset the start time to trace + // the raft request time by the difference between the request start time + // and apply start time + result.trace.SetStartTime(startTime) + result.trace.InsertStep(0, applyStart, "process raft request") + result.trace.LogIfLong(traceThreshold) + } return result.resp, nil } func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { - return s.raftRequestOnce(ctx, r) + for { + resp, err := s.raftRequestOnce(ctx, r) + if err != auth.ErrAuthOldRevision { + return resp, err + } + } } // doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure. 
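// A minimal standalone sketch (not the vendored implementation) of the retry
// shape introduced in raftRequest above: re-issue the request for as long as
// the failure is the stale-auth-revision error, and return any other outcome
// as-is. errStale and callOnce below are hypothetical stand-ins for
// auth.ErrAuthOldRevision and raftRequestOnce.
package main

import (
	"errors"
	"fmt"
)

var errStale = errors.New("auth: revision in header is old")

func main() {
	attempt := 0
	// callOnce fails with a stale-revision error twice before succeeding,
	// simulating an auth store that was updated while the request was in flight.
	callOnce := func() (string, error) {
		attempt++
		if attempt < 3 {
			return "", errStale
		}
		return "ok", nil
	}

	var (
		resp string
		err  error
	)
	for {
		resp, err = callOnce()
		if !errors.Is(err, errStale) {
			break // success or a different error: stop retrying
		}
	}
	fmt.Println(resp, err, "after", attempt, "attempts") // ok <nil> after 3 attempts
}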
func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error { + trace := traceutil.Get(ctx) ai, err := s.AuthInfoFromCtx(ctx) if err != nil { return err @@ -529,6 +599,7 @@ func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) e if err = chk(ai); err != nil { return err } + trace.Step("get authentication metadata") // fetch response for serialized request get() // check for stale token revision in case the auth store was updated while @@ -578,7 +649,12 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In defer cancel() start := time.Now() - s.r.Propose(cctx, data) + err = s.r.Propose(cctx, data) + if err != nil { + proposalsFailed.Inc() + s.w.Trigger(id, nil) // GC wait + return nil, err + } proposalsPending.Inc() defer proposalsPending.Dec() @@ -604,7 +680,6 @@ func (s *EtcdServer) linearizableReadLoop() { ctxToSend := make([]byte, 8) id1 := s.reqIDGen.Next() binary.BigEndian.PutUint64(ctxToSend, id1) - leaderChangedNotifier := s.leaderChangedNotify() select { case <-leaderChangedNotifier: @@ -621,13 +696,18 @@ func (s *EtcdServer) linearizableReadLoop() { s.readNotifier = nextnr s.readMu.Unlock() + lg := s.getLogger() cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) if err := s.r.ReadIndex(cctx, ctxToSend); err != nil { cancel() if err == raft.ErrStopped { return } - plog.Errorf("failed to get read index from raft: %v", err) + if lg != nil { + lg.Warn("failed to get read index from Raft", zap.Error(err)) + } else { + plog.Errorf("failed to get read index from raft: %v", err) + } readIndexFailed.Inc() nr.notify(err) continue @@ -649,22 +729,31 @@ func (s *EtcdServer) linearizableReadLoop() { if len(rs.RequestCtx) == 8 { id2 = binary.BigEndian.Uint64(rs.RequestCtx) } - plog.Warningf("ignored out-of-date read index response; local node read indexes queueing up and waiting to be in sync with leader (request ID want %d, got %d)", id1, id2) + if lg != nil { + lg.Warn( + "ignored out-of-date read index response; local node read indexes queueing up and waiting to be in sync with leader", + zap.Uint64("sent-request-id", id1), + zap.Uint64("received-request-id", id2), + ) + } else { + plog.Warningf("ignored out-of-date read index response; local node read indexes queueing up and waiting to be in sync with leader (request ID want %d, got %d)", id1, id2) + } slowReadIndex.Inc() } - case <-leaderChangedNotifier: timeout = true readIndexFailed.Inc() // return a retryable error. 
nr.notify(ErrLeaderChanged) - case <-time.After(s.Cfg.ReqTimeout()): - plog.Warningf("timed out waiting for read index response (local node might have slow network)") + if lg != nil { + lg.Warn("timed out waiting for read index response (local node might have slow network)", zap.Duration("timeout", s.Cfg.ReqTimeout())) + } else { + plog.Warningf("timed out waiting for read index response (local node might have slow network)") + } nr.notify(ErrTimeout) timeout = true slowReadIndex.Inc() - case <-s.stopping: return } @@ -717,4 +806,5 @@ func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error } authInfo = s.AuthStore().AuthInfoFromTLS(ctx) return authInfo, nil + } diff --git a/vendor/github.com/coreos/etcd/lease/doc.go b/vendor/go.etcd.io/etcd/lease/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/lease/doc.go rename to vendor/go.etcd.io/etcd/lease/doc.go diff --git a/vendor/go.etcd.io/etcd/lease/lease_queue.go b/vendor/go.etcd.io/etcd/lease/lease_queue.go new file mode 100644 index 000000000..17ddb358e --- /dev/null +++ b/vendor/go.etcd.io/etcd/lease/lease_queue.go @@ -0,0 +1,106 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lease + +import "container/heap" + +// LeaseWithTime contains lease object with a time. +// For the lessor's lease heap, time identifies the lease expiration time. +// For the lessor's lease checkpoint heap, the time identifies the next lease checkpoint time. +type LeaseWithTime struct { + id LeaseID + // Unix nanos timestamp. + time int64 + index int +} + +type LeaseQueue []*LeaseWithTime + +func (pq LeaseQueue) Len() int { return len(pq) } + +func (pq LeaseQueue) Less(i, j int) bool { + return pq[i].time < pq[j].time +} + +func (pq LeaseQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +func (pq *LeaseQueue) Push(x interface{}) { + n := len(*pq) + item := x.(*LeaseWithTime) + item.index = n + *pq = append(*pq, item) +} + +func (pq *LeaseQueue) Pop() interface{} { + old := *pq + n := len(old) + item := old[n-1] + item.index = -1 // for safety + *pq = old[0 : n-1] + return item +} + +// LeaseExpiredNotifier is a queue used to notify lessor to revoke expired lease. +// Only save one item for a lease, `Register` will update time of the corresponding lease. 
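// A standalone sketch (not the vendored code) of the pattern behind LeaseQueue
// and LeaseExpiredNotifier above: a container/heap min-heap keyed by a
// unix-nano deadline, so the soonest-expiring entry is always q[0] and an
// existing entry can be re-scheduled with heap.Fix. The item/queue names here
// are illustrative, not the vendored types.
package main

import (
	"container/heap"
	"fmt"
)

type item struct {
	id    int64
	time  int64 // unix nanos; smallest value pops first
	index int   // maintained by Swap so heap.Fix can find the entry
}

type queue []*item

func (q queue) Len() int           { return len(q) }
func (q queue) Less(i, j int) bool { return q[i].time < q[j].time }
func (q queue) Swap(i, j int) {
	q[i], q[j] = q[j], q[i]
	q[i].index = i
	q[j].index = j
}
func (q *queue) Push(x interface{}) {
	it := x.(*item)
	it.index = len(*q)
	*q = append(*q, it)
}
func (q *queue) Pop() interface{} {
	old := *q
	n := len(old)
	it := old[n-1]
	it.index = -1
	*q = old[:n-1]
	return it
}

func main() {
	q := make(queue, 0)
	heap.Init(&q)
	heap.Push(&q, &item{id: 2, time: 200})
	heap.Push(&q, &item{id: 1, time: 100})
	heap.Push(&q, &item{id: 3, time: 300})

	// Peek at the earliest deadline without removing it (what Poll does).
	fmt.Println("next to expire:", q[0].id) // 1

	// Push lease 1's deadline into the future and restore heap order
	// (what RegisterOrUpdate does for an already-registered lease).
	q[0].time = 400
	heap.Fix(&q, q[0].index)
	fmt.Println("next to expire after update:", q[0].id) // 2
}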
+type LeaseExpiredNotifier struct { + m map[LeaseID]*LeaseWithTime + queue LeaseQueue +} + +func newLeaseExpiredNotifier() *LeaseExpiredNotifier { + return &LeaseExpiredNotifier{ + m: make(map[LeaseID]*LeaseWithTime), + queue: make(LeaseQueue, 0), + } +} + +func (mq *LeaseExpiredNotifier) Init() { + heap.Init(&mq.queue) + mq.m = make(map[LeaseID]*LeaseWithTime) + for _, item := range mq.queue { + mq.m[item.id] = item + } +} + +func (mq *LeaseExpiredNotifier) RegisterOrUpdate(item *LeaseWithTime) { + if old, ok := mq.m[item.id]; ok { + old.time = item.time + heap.Fix(&mq.queue, old.index) + } else { + heap.Push(&mq.queue, item) + mq.m[item.id] = item + } +} + +func (mq *LeaseExpiredNotifier) Unregister() *LeaseWithTime { + item := heap.Pop(&mq.queue).(*LeaseWithTime) + delete(mq.m, item.id) + return item +} + +func (mq *LeaseExpiredNotifier) Poll() *LeaseWithTime { + if mq.Len() == 0 { + return nil + } + return mq.queue[0] +} + +func (mq *LeaseExpiredNotifier) Len() int { + return len(mq.m) +} diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/doc.go b/vendor/go.etcd.io/etcd/lease/leasehttp/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/lease/leasehttp/doc.go rename to vendor/go.etcd.io/etcd/lease/leasehttp/doc.go diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go b/vendor/go.etcd.io/etcd/lease/leasehttp/http.go similarity index 92% rename from vendor/github.com/coreos/etcd/lease/leasehttp/http.go rename to vendor/go.etcd.io/etcd/lease/leasehttp/http.go index ac2e7880e..67e916dba 100644 --- a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go +++ b/vendor/go.etcd.io/etcd/lease/leasehttp/http.go @@ -23,10 +23,10 @@ import ( "net/http" "time" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/lease/leasepb" - "github.com/coreos/etcd/pkg/httputil" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/lease" + "go.etcd.io/etcd/lease/leasepb" + "go.etcd.io/etcd/pkg/httputil" ) var ( @@ -52,6 +52,7 @@ func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } + defer r.Body.Close() b, err := ioutil.ReadAll(r.Body) if err != nil { http.Error(w, "error reading body", http.StatusBadRequest) @@ -62,7 +63,7 @@ func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case LeasePrefix: lreq := pb.LeaseKeepAliveRequest{} - if err := lreq.Unmarshal(b); err != nil { + if uerr := lreq.Unmarshal(b); uerr != nil { http.Error(w, "error unmarshalling request", http.StatusBadRequest) return } @@ -72,14 +73,14 @@ func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout) return } - ttl, err := h.l.Renew(lease.LeaseID(lreq.ID)) - if err != nil { - if err == lease.ErrLeaseNotFound { - http.Error(w, err.Error(), http.StatusNotFound) + ttl, rerr := h.l.Renew(lease.LeaseID(lreq.ID)) + if rerr != nil { + if rerr == lease.ErrLeaseNotFound { + http.Error(w, rerr.Error(), http.StatusNotFound) return } - http.Error(w, err.Error(), http.StatusBadRequest) + http.Error(w, rerr.Error(), http.StatusBadRequest) return } // TODO: fill out ResponseHeader @@ -92,7 +93,7 @@ func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { case LeaseInternalPrefix: lreq := leasepb.LeaseInternalRequest{} - if err := lreq.Unmarshal(b); err != nil { + if lerr := lreq.Unmarshal(b); lerr != nil { http.Error(w, "error unmarshalling request", 
http.StatusBadRequest) return } diff --git a/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go b/vendor/go.etcd.io/etcd/lease/leasepb/lease.pb.go similarity index 58% rename from vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go rename to vendor/go.etcd.io/etcd/lease/leasepb/lease.pb.go index 433f0aad1..16637ee7e 100644 --- a/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go +++ b/vendor/go.etcd.io/etcd/lease/leasepb/lease.pb.go @@ -1,17 +1,31 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: lease.proto +/* + Package leasepb is a generated protocol buffer package. + + It is generated from these files: + lease.proto + + It has these top-level messages: + Lease + LeaseInternalRequest + LeaseInternalResponse +*/ package leasepb import ( - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" - etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" + + etcdserverpb "go.etcd.io/etcd/etcdserver/etcdserverpb" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -26,157 +40,43 @@ var _ = math.Inf const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type Lease struct { - ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` - TTL int64 `protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Lease) Reset() { *m = Lease{} } -func (m *Lease) String() string { return proto.CompactTextString(m) } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { - return fileDescriptor_3dd57e402472b33a, []int{0} -} -func (m *Lease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Lease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Lease) XXX_Merge(src proto.Message) { - xxx_messageInfo_Lease.Merge(m, src) -} -func (m *Lease) XXX_Size() int { - return m.Size() -} -func (m *Lease) XXX_DiscardUnknown() { - xxx_messageInfo_Lease.DiscardUnknown(m) + ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + TTL int64 `protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"` + RemainingTTL int64 `protobuf:"varint,3,opt,name=RemainingTTL,proto3" json:"RemainingTTL,omitempty"` } -var xxx_messageInfo_Lease proto.InternalMessageInfo +func (m *Lease) Reset() { *m = Lease{} } +func (m *Lease) String() string { return proto.CompactTextString(m) } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{0} } type LeaseInternalRequest struct { - LeaseTimeToLiveRequest *etcdserverpb.LeaseTimeToLiveRequest `protobuf:"bytes,1,opt,name=LeaseTimeToLiveRequest,proto3" json:"LeaseTimeToLiveRequest,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseInternalRequest) Reset() { *m = LeaseInternalRequest{} } -func (m *LeaseInternalRequest) String() string { return proto.CompactTextString(m) } -func (*LeaseInternalRequest) ProtoMessage() {} -func (*LeaseInternalRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_3dd57e402472b33a, []int{1} -} -func (m *LeaseInternalRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseInternalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseInternalRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseInternalRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseInternalRequest.Merge(m, src) -} -func (m *LeaseInternalRequest) XXX_Size() int { - return m.Size() -} -func (m *LeaseInternalRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseInternalRequest.DiscardUnknown(m) + LeaseTimeToLiveRequest *etcdserverpb.LeaseTimeToLiveRequest `protobuf:"bytes,1,opt,name=LeaseTimeToLiveRequest" json:"LeaseTimeToLiveRequest,omitempty"` } -var xxx_messageInfo_LeaseInternalRequest proto.InternalMessageInfo +func (m *LeaseInternalRequest) Reset() { *m = LeaseInternalRequest{} } +func (m *LeaseInternalRequest) String() string { return proto.CompactTextString(m) } +func (*LeaseInternalRequest) ProtoMessage() {} +func (*LeaseInternalRequest) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{1} } type LeaseInternalResponse struct { - LeaseTimeToLiveResponse *etcdserverpb.LeaseTimeToLiveResponse `protobuf:"bytes,1,opt,name=LeaseTimeToLiveResponse,proto3" json:"LeaseTimeToLiveResponse,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LeaseInternalResponse) Reset() { *m = LeaseInternalResponse{} } -func (m *LeaseInternalResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseInternalResponse) ProtoMessage() {} -func (*LeaseInternalResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3dd57e402472b33a, []int{2} -} -func (m *LeaseInternalResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseInternalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseInternalResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseInternalResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseInternalResponse.Merge(m, src) -} -func (m *LeaseInternalResponse) XXX_Size() int { - return m.Size() -} -func (m *LeaseInternalResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseInternalResponse.DiscardUnknown(m) + LeaseTimeToLiveResponse *etcdserverpb.LeaseTimeToLiveResponse `protobuf:"bytes,1,opt,name=LeaseTimeToLiveResponse" json:"LeaseTimeToLiveResponse,omitempty"` } -var xxx_messageInfo_LeaseInternalResponse proto.InternalMessageInfo +func (m *LeaseInternalResponse) Reset() { *m = LeaseInternalResponse{} } +func (m *LeaseInternalResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseInternalResponse) ProtoMessage() {} +func (*LeaseInternalResponse) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{2} } func init() { proto.RegisterType((*Lease)(nil), "leasepb.Lease") proto.RegisterType((*LeaseInternalRequest)(nil), "leasepb.LeaseInternalRequest") proto.RegisterType((*LeaseInternalResponse)(nil), "leasepb.LeaseInternalResponse") } - -func init() { proto.RegisterFile("lease.proto", fileDescriptor_3dd57e402472b33a) } - -var fileDescriptor_3dd57e402472b33a = 
[]byte{ - // 233 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c, - 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2, - 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x2d, 0xb5, 0x24, 0x39, 0x45, - 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, 0x92, - 0x21, 0xea, 0x94, 0x34, 0xb9, 0x58, 0x7d, 0x40, 0x06, 0x09, 0xf1, 0x71, 0x31, 0x79, 0xba, 0x48, - 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0x31, 0x79, 0xba, 0x08, 0x09, 0x70, 0x31, 0x87, 0x84, 0xf8, - 0x48, 0x30, 0x81, 0x05, 0x40, 0x4c, 0xa5, 0x12, 0x2e, 0x11, 0xb0, 0x52, 0xcf, 0xbc, 0x92, 0xd4, - 0xa2, 0xbc, 0xc4, 0x9c, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, 0xa1, 0x18, 0x2e, 0x31, 0xb0, - 0x78, 0x48, 0x66, 0x6e, 0x6a, 0x48, 0xbe, 0x4f, 0x66, 0x59, 0x2a, 0x54, 0x06, 0x6c, 0x1a, 0xb7, - 0x91, 0x8a, 0x1e, 0xb2, 0xdd, 0x7a, 0xd8, 0xd5, 0x06, 0xe1, 0x30, 0x43, 0xa9, 0x82, 0x4b, 0x14, - 0xcd, 0xd6, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0xa1, 0x78, 0x2e, 0x71, 0x0c, 0x2d, 0x10, 0x29, - 0xa8, 0xbd, 0xaa, 0x04, 0xec, 0x85, 0x28, 0x0e, 0xc2, 0x65, 0x8a, 0x93, 0xc4, 0x89, 0x87, 0x72, - 0x0c, 0x17, 0x1e, 0xca, 0x31, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, - 0x72, 0x8c, 0x33, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0xc3, 0xce, 0x18, 0x10, 0x00, 0x00, 0xff, - 0xff, 0x9f, 0xf2, 0x42, 0xe0, 0x91, 0x01, 0x00, 0x00, -} - func (m *Lease) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -184,36 +84,32 @@ func (m *Lease) Marshal() (dAtA []byte, err error) { } func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintLease(dAtA, i, uint64(m.ID)) } if m.TTL != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.TTL)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintLease(dAtA, i, uint64(m.TTL)) } - if m.ID != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 + if m.RemainingTTL != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintLease(dAtA, i, uint64(m.RemainingTTL)) } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseInternalRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -221,38 +117,27 @@ func (m *LeaseInternalRequest) Marshal() (dAtA []byte, err error) { } func (m *LeaseInternalRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseInternalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.LeaseTimeToLiveRequest != nil { - { - size, err := m.LeaseTimeToLiveRequest.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintLease(dAtA, i, 
uint64(m.LeaseTimeToLiveRequest.Size())) + n1, err := m.LeaseTimeToLiveRequest.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 } - return len(dAtA) - i, nil + return i, nil } func (m *LeaseInternalResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -260,49 +145,33 @@ func (m *LeaseInternalResponse) Marshal() (dAtA []byte, err error) { } func (m *LeaseInternalResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseInternalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } if m.LeaseTimeToLiveResponse != nil { - { - size, err := m.LeaseTimeToLiveResponse.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintLease(dAtA, i, uint64(m.LeaseTimeToLiveResponse.Size())) + n2, err := m.LeaseTimeToLiveResponse.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 } - return len(dAtA) - i, nil + return i, nil } func encodeVarintLease(dAtA []byte, offset int, v uint64) int { - offset -= sovLease(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *Lease) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ID != 0 { @@ -311,46 +180,41 @@ func (m *Lease) Size() (n int) { if m.TTL != 0 { n += 1 + sovLease(uint64(m.TTL)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.RemainingTTL != 0 { + n += 1 + sovLease(uint64(m.RemainingTTL)) } return n } func (m *LeaseInternalRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.LeaseTimeToLiveRequest != nil { l = m.LeaseTimeToLiveRequest.Size() n += 1 + l + sovLease(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *LeaseInternalResponse) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.LeaseTimeToLiveResponse != nil { l = m.LeaseTimeToLiveResponse.Size() n += 1 + l + sovLease(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func sovLease(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozLease(x uint64) (n int) { return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -370,7 +234,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -398,7 +262,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + m.ID |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -417,7 +281,26 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TTL |= int64(b&0x7F) << shift + m.TTL |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemainingTTL", wireType) + } + m.RemainingTTL = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RemainingTTL |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -431,13 +314,9 @@ func (m *Lease) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthLease } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthLease - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -462,7 +341,7 @@ func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -490,7 +369,7 @@ func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -499,9 +378,6 @@ func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthLease } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -521,13 +397,9 @@ func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthLease } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthLease - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -552,7 +424,7 @@ func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -580,7 +452,7 @@ func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -589,9 +461,6 @@ func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error { return ErrInvalidLengthLease } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -611,13 +480,9 @@ func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthLease } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthLease - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -681,11 +546,8 @@ func skipLease(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthLease - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthLease } return iNdEx, nil @@ -716,9 +578,6 @@ func skipLease(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthLease - } } return iNdEx, nil case 4: @@ -737,3 +596,25 @@ var ( ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowLease = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("lease.proto", fileDescriptorLease) } + +var fileDescriptorLease = []byte{ + // 253 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c, + 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2, + 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x2d, 0xb5, 0x24, 0x39, 0x45, + 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, 0x92, + 0x21, 0xea, 0x94, 0x7c, 0xb9, 0x58, 0x7d, 0x40, 0x06, 0x09, 0xf1, 0x71, 0x31, 0x79, 0xba, 0x48, + 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0x31, 0x79, 0xba, 0x08, 0x09, 0x70, 0x31, 0x87, 0x84, 0xf8, + 0x48, 0x30, 0x81, 0x05, 0x40, 0x4c, 0x21, 0x25, 0x2e, 0x9e, 0xa0, 0xd4, 0xdc, 0xc4, 0xcc, 0xbc, + 0xcc, 0xbc, 0x74, 0x90, 0x14, 0x33, 0x58, 0x0a, 0x45, 0x4c, 0xa9, 0x84, 0x4b, 0x04, 0x6c, 0x9c, + 0x67, 0x5e, 0x49, 0x6a, 0x51, 0x5e, 0x62, 0x4e, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x50, + 0x0c, 0x97, 0x18, 0x58, 0x3c, 0x24, 0x33, 0x37, 0x35, 0x24, 0xdf, 0x27, 0xb3, 0x2c, 0x15, 0x2a, + 0x03, 0xb6, 0x91, 0xdb, 0x48, 0x45, 0x0f, 0xd9, 0x7d, 0x7a, 0xd8, 0xd5, 0x06, 0xe1, 0x30, 0x43, + 0xa9, 0x82, 0x4b, 0x14, 0xcd, 0xd6, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0xa1, 0x78, 0x2e, 0x71, + 0x0c, 0x2d, 0x10, 0x29, 0xa8, 0xbd, 0xaa, 0x04, 0xec, 0x85, 0x28, 0x0e, 0xc2, 0x65, 0x8a, 0x93, + 0xc4, 0x89, 0x87, 0x72, 0x0c, 0x17, 0x1e, 0xca, 0x31, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, + 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x33, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0xc3, 0xd7, 0x18, + 0x10, 0x00, 0x00, 0xff, 0xff, 0xa9, 0x9f, 0x8b, 0x6c, 0xb5, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/lease/leasepb/lease.proto b/vendor/go.etcd.io/etcd/lease/leasepb/lease.proto similarity index 95% rename from vendor/github.com/coreos/etcd/lease/leasepb/lease.proto rename to vendor/go.etcd.io/etcd/lease/leasepb/lease.proto index be414b993..1169d9f10 100644 --- a/vendor/github.com/coreos/etcd/lease/leasepb/lease.proto +++ b/vendor/go.etcd.io/etcd/lease/leasepb/lease.proto @@ -13,6 +13,7 @@ option (gogoproto.goproto_enum_prefix_all) = false; message Lease { int64 ID = 1; int64 TTL = 2; + int64 RemainingTTL = 3; } message LeaseInternalRequest { diff --git a/vendor/github.com/coreos/etcd/lease/lessor.go b/vendor/go.etcd.io/etcd/lease/lessor.go similarity index 63% rename from vendor/github.com/coreos/etcd/lease/lessor.go rename to vendor/go.etcd.io/etcd/lease/lessor.go index 43f050353..b4437bd46 100644 --- a/vendor/github.com/coreos/etcd/lease/lessor.go +++ b/vendor/go.etcd.io/etcd/lease/lessor.go @@ -15,6 +15,8 @@ package lease import ( + "container/heap" + "context" "encoding/binary" "errors" "math" @@ -22,8 +24,10 @@ import ( "sync" "time" - "github.com/coreos/etcd/lease/leasepb" - "github.com/coreos/etcd/mvcc/backend" + pb 
"go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/lease/leasepb" + "go.etcd.io/etcd/mvcc/backend" + "go.uber.org/zap" ) // NoLease is a special LeaseID representing the absence of a lease. @@ -40,6 +44,18 @@ var ( // maximum number of leases to revoke per second; configurable for tests leaseRevokeRate = 1000 + // maximum number of lease checkpoints recorded to the consensus log per second; configurable for tests + leaseCheckpointRate = 1000 + + // the default interval of lease checkpoint + defaultLeaseCheckpointInterval = 5 * time.Minute + + // maximum number of lease checkpoints to batch into a single consensus log entry + maxLeaseCheckpointBatchSize = 1000 + + // the default interval to check if the expired lease is revoked + defaultExpiredleaseRetryInterval = 3 * time.Second + ErrNotPrimary = errors.New("not a primary lessor") ErrLeaseNotFound = errors.New("lease not found") ErrLeaseExists = errors.New("lease already exists") @@ -56,6 +72,10 @@ type TxnDelete interface { // RangeDeleter is a TxnDelete constructor. type RangeDeleter func() TxnDelete +// Checkpointer permits checkpointing of lease remaining TTLs to the consensus log. Defined here to +// avoid circular dependency with mvcc. +type Checkpointer func(ctx context.Context, lc *pb.LeaseCheckpointRequest) + type LeaseID int64 // Lessor owns leases. It can grant, revoke, renew and modify leases for lessee. @@ -65,6 +85,8 @@ type Lessor interface { // new TxnDeletes. SetRangeDeleter(rd RangeDeleter) + SetCheckpointer(cp Checkpointer) + // Grant grants a lease that expires at least after TTL seconds. Grant(id LeaseID, ttl int64) (*Lease, error) // Revoke revokes a lease with given ID. The item attached to the @@ -72,6 +94,10 @@ type Lessor interface { // will be returned. Revoke(id LeaseID) error + // Checkpoint applies the remainingTTL of a lease. The remainingTTL is used in Promote to set + // the expiry of leases to less than the full TTL when possible. + Checkpoint(id LeaseID, remainingTTL int64) error + // Attach attaches given leaseItem to the lease with given LeaseID. // If the lease does not exist, an error will be returned. Attach(id LeaseID, items []LeaseItem) error @@ -116,26 +142,25 @@ type Lessor interface { // lessor implements Lessor interface. // TODO: use clockwork for testability. type lessor struct { - mu sync.Mutex + mu sync.RWMutex // demotec is set when the lessor is the primary. // demotec will be closed if the lessor is demoted. demotec chan struct{} - // TODO: probably this should be a heap with a secondary - // id index. - // Now it is O(N) to loop over the leases to find expired ones. - // We want to make Grant, Revoke, and findExpiredLeases all O(logN) and - // Renew O(1). - // findExpiredLeases and Renew should be the most frequent operations. - leaseMap map[LeaseID]*Lease - - itemMap map[LeaseItem]LeaseID + leaseMap map[LeaseID]*Lease + leaseExpiredNotifier *LeaseExpiredNotifier + leaseCheckpointHeap LeaseQueue + itemMap map[LeaseItem]LeaseID // When a lease expires, the lessor will delete the // leased range (or key) by the RangeDeleter. rd RangeDeleter + // When a lease's deadline should be persisted to preserve the remaining TTL across leader + // elections and restarts, the lessor will checkpoint the lease by the Checkpointer. + cp Checkpointer + // backend to persist leases. We only persist lease ID and expiry for now. // The leased items can be recovered by iterating all the keys in kv. 
b backend.Backend @@ -149,22 +174,48 @@ type lessor struct { stopC chan struct{} // doneC is a channel whose closure indicates that the lessor is stopped. doneC chan struct{} + + lg *zap.Logger + + // Wait duration between lease checkpoints. + checkpointInterval time.Duration + // the interval to check if the expired lease is revoked + expiredLeaseRetryInterval time.Duration +} + +type LessorConfig struct { + MinLeaseTTL int64 + CheckpointInterval time.Duration + ExpiredLeasesRetryInterval time.Duration } -func NewLessor(b backend.Backend, minLeaseTTL int64) Lessor { - return newLessor(b, minLeaseTTL) +func NewLessor(lg *zap.Logger, b backend.Backend, cfg LessorConfig) Lessor { + return newLessor(lg, b, cfg) } -func newLessor(b backend.Backend, minLeaseTTL int64) *lessor { +func newLessor(lg *zap.Logger, b backend.Backend, cfg LessorConfig) *lessor { + checkpointInterval := cfg.CheckpointInterval + expiredLeaseRetryInterval := cfg.ExpiredLeasesRetryInterval + if checkpointInterval == 0 { + checkpointInterval = defaultLeaseCheckpointInterval + } + if expiredLeaseRetryInterval == 0 { + expiredLeaseRetryInterval = defaultExpiredleaseRetryInterval + } l := &lessor{ - leaseMap: make(map[LeaseID]*Lease), - itemMap: make(map[LeaseItem]LeaseID), - b: b, - minLeaseTTL: minLeaseTTL, + leaseMap: make(map[LeaseID]*Lease), + itemMap: make(map[LeaseItem]LeaseID), + leaseExpiredNotifier: newLeaseExpiredNotifier(), + leaseCheckpointHeap: make(LeaseQueue, 0), + b: b, + minLeaseTTL: cfg.MinLeaseTTL, + checkpointInterval: checkpointInterval, + expiredLeaseRetryInterval: expiredLeaseRetryInterval, // expiredC is a small buffered chan to avoid unnecessary blocking. expiredC: make(chan []*Lease, 16), stopC: make(chan struct{}), doneC: make(chan struct{}), + lg: lg, } l.initAndRecover() @@ -197,6 +248,13 @@ func (le *lessor) SetRangeDeleter(rd RangeDeleter) { le.rd = rd } +func (le *lessor) SetCheckpointer(cp Checkpointer) { + le.mu.Lock() + defer le.mu.Unlock() + + le.cp = cp +} + func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) { if id == NoLease { return nil, ErrLeaseNotFound @@ -233,8 +291,17 @@ func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) { } le.leaseMap[id] = l + item := &LeaseWithTime{id: l.ID, time: l.expiry.UnixNano()} + le.leaseExpiredNotifier.RegisterOrUpdate(item) l.persistTo(le.b) + leaseTotalTTLs.Observe(float64(l.ttl)) + leaseGranted.Inc() + + if le.isPrimary() { + le.scheduleCheckpointIfNeeded(l) + } + return l, nil } @@ -273,19 +340,33 @@ func (le *lessor) Revoke(id LeaseID) error { le.b.BatchTx().UnsafeDelete(leaseBucketName, int64ToBytes(int64(l.ID))) txn.End() + + leaseRevoked.Inc() return nil } -// Renew renews an existing lease. If the given lease does not exist or -// has expired, an error will be returned. -func (le *lessor) Renew(id LeaseID) (int64, error) { +func (le *lessor) Checkpoint(id LeaseID, remainingTTL int64) error { le.mu.Lock() + defer le.mu.Unlock() - unlock := func() { le.mu.Unlock() } - defer func() { unlock() }() + if l, ok := le.leaseMap[id]; ok { + // when checkpointing, we only update the remainingTTL, Promote is responsible for applying this to lease expiry + l.remainingTTL = remainingTTL + if le.isPrimary() { + // schedule the next checkpoint as needed + le.scheduleCheckpointIfNeeded(l) + } + } + return nil +} +// Renew renews an existing lease. If the given lease does not exist or +// has expired, an error will be returned. 
+func (le *lessor) Renew(id LeaseID) (int64, error) { + le.mu.RLock() if !le.isPrimary() { // forward renew request to primary instead of returning error. + le.mu.RUnlock() return -1, ErrNotPrimary } @@ -293,12 +374,14 @@ func (le *lessor) Renew(id LeaseID) (int64, error) { l := le.leaseMap[id] if l == nil { + le.mu.RUnlock() return -1, ErrLeaseNotFound } + // Clear remaining TTL when we renew if it is set + clearRemainingTTL := le.cp != nil && l.remainingTTL > 0 + le.mu.RUnlock() if l.expired() { - le.mu.Unlock() - unlock = func() {} select { // A expired lease might be pending for revoking or going through // quorum to be revoked. To be accurate, renew request must wait for the @@ -314,13 +397,26 @@ func (le *lessor) Renew(id LeaseID) (int64, error) { } } + // Clear remaining TTL when we renew if it is set + // By applying a RAFT entry only when the remainingTTL is already set, we limit the number + // of RAFT entries written per lease to a max of 2 per checkpoint interval. + if clearRemainingTTL { + le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: []*pb.LeaseCheckpoint{{ID: int64(l.ID), Remaining_TTL: 0}}}) + } + + le.mu.Lock() l.refresh(0) + item := &LeaseWithTime{id: l.ID, time: l.expiry.UnixNano()} + le.leaseExpiredNotifier.RegisterOrUpdate(item) + le.mu.Unlock() + + leaseRenewed.Inc() return l.ttl, nil } func (le *lessor) Lookup(id LeaseID) *Lease { - le.mu.Lock() - defer le.mu.Unlock() + le.mu.RLock() + defer le.mu.RUnlock() return le.leaseMap[id] } @@ -329,14 +425,14 @@ func (le *lessor) unsafeLeases() []*Lease { for _, l := range le.leaseMap { leases = append(leases, l) } - sort.Sort(leasesByExpiry(leases)) return leases } func (le *lessor) Leases() []*Lease { - le.mu.Lock() + le.mu.RLock() ls := le.unsafeLeases() - le.mu.Unlock() + le.mu.RUnlock() + sort.Sort(leasesByExpiry(ls)) return ls } @@ -349,6 +445,8 @@ func (le *lessor) Promote(extend time.Duration) { // refresh the expiries of all leases. for _, l := range le.leaseMap { l.refresh(extend) + item := &LeaseWithTime{id: l.ID, time: l.expiry.UnixNano()} + le.leaseExpiredNotifier.RegisterOrUpdate(item) } if len(le.leaseMap) < leaseRevokeRate { @@ -358,6 +456,7 @@ func (le *lessor) Promote(extend time.Duration) { // adjust expiries in case of overlap leases := le.unsafeLeases() + sort.Sort(leasesByExpiry(leases)) baseWindow := leases[0].Remaining() nextWindow := baseWindow + time.Second @@ -384,6 +483,9 @@ func (le *lessor) Promote(extend time.Duration) { delay := time.Duration(rateDelay) nextWindow = baseWindow + delay l.refresh(delay + extend) + item := &LeaseWithTime{id: l.ID, time: l.expiry.UnixNano()} + le.leaseExpiredNotifier.RegisterOrUpdate(item) + le.scheduleCheckpointIfNeeded(l) } } @@ -402,6 +504,8 @@ func (le *lessor) Demote() { l.forever() } + le.clearScheduledLeasesCheckpoints() + if le.demotec != nil { close(le.demotec) le.demotec = nil @@ -430,9 +534,9 @@ func (le *lessor) Attach(id LeaseID, items []LeaseItem) error { } func (le *lessor) GetLease(item LeaseItem) LeaseID { - le.mu.Lock() + le.mu.RLock() id := le.itemMap[item] - le.mu.Unlock() + le.mu.RUnlock() return id } @@ -480,45 +584,116 @@ func (le *lessor) runLoop() { defer close(le.doneC) for { - var ls []*Lease + le.revokeExpiredLeases() + le.checkpointScheduledLeases() - // rate limit - revokeLimit := leaseRevokeRate / 2 + select { + case <-time.After(500 * time.Millisecond): + case <-le.stopC: + return + } + } +} + +// revokeExpiredLeases finds all leases past their expiry and sends them to epxired channel for +// to be revoked. 
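// A minimal standalone sketch (not the vendored code) of the reshaped runLoop
// above: perform one revoke pass and one checkpoint pass, then either sleep
// for the 500ms interval or return as soon as the stop channel is closed.
// revoke and checkpoint are hypothetical stand-ins for revokeExpiredLeases and
// checkpointScheduledLeases.
package main

import (
	"fmt"
	"time"
)

func runLoop(stopC <-chan struct{}, revoke, checkpoint func()) {
	for {
		revoke()
		checkpoint()

		select {
		case <-time.After(500 * time.Millisecond):
		case <-stopC:
			return
		}
	}
}

func main() {
	stopC := make(chan struct{})
	done := make(chan struct{})
	pass := 0
	go func() {
		defer close(done)
		runLoop(stopC,
			func() { pass++; fmt.Println("revoke pass", pass) },
			func() { fmt.Println("checkpoint pass", pass) },
		)
	}()

	time.Sleep(1200 * time.Millisecond) // let a couple of passes run
	close(stopC)                        // runLoop exits on the next select
	<-done
}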
+func (le *lessor) revokeExpiredLeases() { + var ls []*Lease + // rate limit + revokeLimit := leaseRevokeRate / 2 + + le.mu.RLock() + if le.isPrimary() { + ls = le.findExpiredLeases(revokeLimit) + } + le.mu.RUnlock() + + if len(ls) != 0 { + select { + case <-le.stopC: + return + case le.expiredC <- ls: + default: + // the receiver of expiredC is probably busy handling + // other stuff + // let's try this next time after 500ms + } + } +} + +// checkpointScheduledLeases finds all scheduled lease checkpoints that are due and +// submits them to the checkpointer to persist them to the consensus log. +func (le *lessor) checkpointScheduledLeases() { + var cps []*pb.LeaseCheckpoint + + // rate limit + for i := 0; i < leaseCheckpointRate/2; i++ { le.mu.Lock() if le.isPrimary() { - ls = le.findExpiredLeases(revokeLimit) + cps = le.findDueScheduledCheckpoints(maxLeaseCheckpointBatchSize) } le.mu.Unlock() - if len(ls) != 0 { - select { - case <-le.stopC: - return - case le.expiredC <- ls: - default: - // the receiver of expiredC is probably busy handling - // other stuff - // let's try this next time after 500ms - } + if len(cps) != 0 { + le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: cps}) } - - select { - case <-time.After(500 * time.Millisecond): - case <-le.stopC: + if len(cps) < maxLeaseCheckpointBatchSize { return } } } +func (le *lessor) clearScheduledLeasesCheckpoints() { + le.leaseCheckpointHeap = make(LeaseQueue, 0) +} + +// expireExists returns true if expiry items exist. +// It pops only when expiry item exists. +// "next" is true, to indicate that it may exist in next attempt. +func (le *lessor) expireExists() (l *Lease, ok bool, next bool) { + if le.leaseExpiredNotifier.Len() == 0 { + return nil, false, false + } + + item := le.leaseExpiredNotifier.Poll() + l = le.leaseMap[item.id] + if l == nil { + // lease has expired or been revoked + // no need to revoke (nothing is expiry) + le.leaseExpiredNotifier.Unregister() // O(log N) + return nil, false, true + } + now := time.Now() + if now.UnixNano() < item.time /* expiration time */ { + // Candidate expirations are caught up, reinsert this item + // and no need to revoke (nothing is expiry) + return l, false, false + } + + // recheck if revoke is complete after retry interval + item.time = now.Add(le.expiredLeaseRetryInterval).UnixNano() + le.leaseExpiredNotifier.RegisterOrUpdate(item) + return l, true, false +} + // findExpiredLeases loops leases in the leaseMap until reaching expired limit // and returns the expired leases that needed to be revoked. func (le *lessor) findExpiredLeases(limit int) []*Lease { leases := make([]*Lease, 0, 16) - for _, l := range le.leaseMap { - // TODO: probably should change to <= 100-500 millisecond to - // make up committing latency. 
+ for { + l, ok, next := le.expireExists() + if !ok && !next { + break + } + if !ok { + continue + } + if next { + continue + } + if l.expired() { leases = append(leases, l) @@ -532,6 +707,61 @@ func (le *lessor) findExpiredLeases(limit int) []*Lease { return leases } +func (le *lessor) scheduleCheckpointIfNeeded(lease *Lease) { + if le.cp == nil { + return + } + + if lease.RemainingTTL() > int64(le.checkpointInterval.Seconds()) { + if le.lg != nil { + le.lg.Debug("Scheduling lease checkpoint", + zap.Int64("leaseID", int64(lease.ID)), + zap.Duration("intervalSeconds", le.checkpointInterval), + ) + } + heap.Push(&le.leaseCheckpointHeap, &LeaseWithTime{ + id: lease.ID, + time: time.Now().Add(le.checkpointInterval).UnixNano(), + }) + } +} + +func (le *lessor) findDueScheduledCheckpoints(checkpointLimit int) []*pb.LeaseCheckpoint { + if le.cp == nil { + return nil + } + + now := time.Now() + cps := []*pb.LeaseCheckpoint{} + for le.leaseCheckpointHeap.Len() > 0 && len(cps) < checkpointLimit { + lt := le.leaseCheckpointHeap[0] + if lt.time /* next checkpoint time */ > now.UnixNano() { + return cps + } + heap.Pop(&le.leaseCheckpointHeap) + var l *Lease + var ok bool + if l, ok = le.leaseMap[lt.id]; !ok { + continue + } + if !now.Before(l.expiry) { + continue + } + remainingTTL := int64(math.Ceil(l.expiry.Sub(now).Seconds())) + if remainingTTL >= l.ttl { + continue + } + if le.lg != nil { + le.lg.Debug("Checkpointing lease", + zap.Int64("leaseID", int64(lt.id)), + zap.Int64("remainingTTL", remainingTTL), + ) + } + cps = append(cps, &pb.LeaseCheckpoint{ID: int64(lt.id), Remaining_TTL: remainingTTL}) + } + return cps +} + func (le *lessor) initAndRecover() { tx := le.b.BatchTx() tx.Lock() @@ -560,14 +790,17 @@ func (le *lessor) initAndRecover() { revokec: make(chan struct{}), } } + le.leaseExpiredNotifier.Init() + heap.Init(&le.leaseCheckpointHeap) tx.Unlock() le.b.ForceCommit() } type Lease struct { - ID LeaseID - ttl int64 // time to live in seconds + ID LeaseID + ttl int64 // time to live of the lease in seconds + remainingTTL int64 // remaining time to live in seconds, if zero valued it is considered unset and the full ttl should be used // expiryMu protects concurrent accesses to expiry expiryMu sync.RWMutex // expiry is time when lease should expire. no expiration when expiry.IsZero() is true @@ -586,7 +819,7 @@ func (l *Lease) expired() bool { func (l *Lease) persistTo(b backend.Backend) { key := int64ToBytes(int64(l.ID)) - lpb := leasepb.Lease{ID: int64(l.ID), TTL: int64(l.ttl)} + lpb := leasepb.Lease{ID: int64(l.ID), TTL: l.ttl, RemainingTTL: l.remainingTTL} val, err := lpb.Marshal() if err != nil { panic("failed to marshal lease proto item") @@ -602,9 +835,18 @@ func (l *Lease) TTL() int64 { return l.ttl } +// RemainingTTL returns the last checkpointed remaining TTL of the lease. +// TODO(jpbetz): do not expose this utility method +func (l *Lease) RemainingTTL() int64 { + if l.remainingTTL > 0 { + return l.remainingTTL + } + return l.ttl +} + // refresh refreshes the expiry of the lease. 
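// A standalone sketch (not the vendored code) of the arithmetic that the
// checkpointing change introduces around refresh: a lease is extended by its
// last checkpointed remaining TTL when one is set, and by the full TTL
// otherwise, so a leader change no longer resets every lease to its full TTL.
// The lease type below is a hypothetical stand-in for lease.Lease.
package main

import (
	"fmt"
	"time"
)

type lease struct {
	ttl          int64 // full TTL in seconds
	remainingTTL int64 // last checkpointed remaining TTL; 0 means "unset"
	expiry       time.Time
}

// remaining mirrors Lease.RemainingTTL: fall back to the full TTL when no
// checkpoint has been recorded yet.
func (l *lease) remaining() int64 {
	if l.remainingTTL > 0 {
		return l.remainingTTL
	}
	return l.ttl
}

// refresh mirrors Lease.refresh: new expiry = now + extend + remaining TTL.
func (l *lease) refresh(extend time.Duration) {
	l.expiry = time.Now().Add(extend + time.Duration(l.remaining())*time.Second)
}

func main() {
	l := &lease{ttl: 60}
	l.refresh(0)
	fmt.Println("fresh lease expires in ~", time.Until(l.expiry).Round(time.Second))

	// After a checkpoint recorded 23s remaining, a refresh (e.g. on Promote)
	// keeps the shorter deadline instead of handing back the full 60s.
	l.remainingTTL = 23
	l.refresh(0)
	fmt.Println("checkpointed lease expires in ~", time.Until(l.expiry).Round(time.Second))
}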
func (l *Lease) refresh(extend time.Duration) { - newExpiry := time.Now().Add(extend + time.Duration(l.ttl)*time.Second) + newExpiry := time.Now().Add(extend + time.Duration(l.RemainingTTL())*time.Second) l.expiryMu.Lock() defer l.expiryMu.Unlock() l.expiry = newExpiry @@ -654,10 +896,14 @@ type FakeLessor struct{} func (fl *FakeLessor) SetRangeDeleter(dr RangeDeleter) {} +func (fl *FakeLessor) SetCheckpointer(cp Checkpointer) {} + func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) { return nil, nil } func (fl *FakeLessor) Revoke(id LeaseID) error { return nil } +func (fl *FakeLessor) Checkpoint(id LeaseID, remainingTTL int64) error { return nil } + func (fl *FakeLessor) Attach(id LeaseID, items []LeaseItem) error { return nil } func (fl *FakeLessor) GetLease(item LeaseItem) LeaseID { return 0 } @@ -678,3 +924,10 @@ func (fl *FakeLessor) ExpiredLeasesC() <-chan []*Lease { return nil } func (fl *FakeLessor) Recover(b backend.Backend, rd RangeDeleter) {} func (fl *FakeLessor) Stop() {} + +type FakeTxnDelete struct { + backend.BatchTx +} + +func (ftd *FakeTxnDelete) DeleteRange(key, end []byte) (n, rev int64) { return 0, 0 } +func (ftd *FakeTxnDelete) End() { ftd.Unlock() } diff --git a/vendor/go.etcd.io/etcd/lease/metrics.go b/vendor/go.etcd.io/etcd/lease/metrics.go new file mode 100644 index 000000000..06f8b5801 --- /dev/null +++ b/vendor/go.etcd.io/etcd/lease/metrics.go @@ -0,0 +1,59 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package lease + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +var ( + leaseGranted = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "lease", + Name: "granted_total", + Help: "The total number of granted leases.", + }) + + leaseRevoked = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "lease", + Name: "revoked_total", + Help: "The total number of revoked leases.", + }) + + leaseRenewed = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "lease", + Name: "renewed_total", + Help: "The number of renewed leases seen by the leader.", + }) + + leaseTotalTTLs = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: "etcd_debugging", + Subsystem: "lease", + Name: "ttl_total", + Help: "Bucketed histogram of lease TTLs.", + // 1 second -> 3 months + Buckets: prometheus.ExponentialBuckets(1, 2, 24), + }) +) + +func init() { + prometheus.MustRegister(leaseGranted) + prometheus.MustRegister(leaseRevoked) + prometheus.MustRegister(leaseRenewed) + prometheus.MustRegister(leaseTotalTTLs) +} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go b/vendor/go.etcd.io/etcd/mvcc/backend/backend.go similarity index 65% rename from vendor/github.com/coreos/etcd/mvcc/backend/backend.go rename to vendor/go.etcd.io/etcd/mvcc/backend/backend.go index 2229d9ce1..bffd74950 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go +++ b/vendor/go.etcd.io/etcd/mvcc/backend/backend.go @@ -25,8 +25,10 @@ import ( "sync/atomic" "time" - bolt "github.com/coreos/bbolt" "github.com/coreos/pkg/capnslog" + humanize "github.com/dustin/go-humanize" + bolt "go.etcd.io/bbolt" + "go.uber.org/zap" ) var ( @@ -40,24 +42,32 @@ var ( // This only works for linux. initialMmapSize = uint64(10 * 1024 * 1024 * 1024) - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc/backend") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "mvcc/backend") // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning. - minSnapshotWarningTimeout = time.Duration(30 * time.Second) + minSnapshotWarningTimeout = 30 * time.Second ) type Backend interface { + // ReadTx returns a read transaction. It is replaced by ConcurrentReadTx in the main data path, see #10523. ReadTx() ReadTx BatchTx() BatchTx + // ConcurrentReadTx returns a non-blocking read transaction. + ConcurrentReadTx() ReadTx Snapshot() Snapshot Hash(ignores map[IgnoreKey]struct{}) (uint32, error) - // Size returns the current size of the backend. + // Size returns the current size of the backend physically allocated. + // The backend can hold DB space that is not utilized at the moment, + // since it can conduct pre-allocation or spare unused space for recycling. + // Use SizeInUse() instead for the actual DB size. Size() int64 // SizeInUse returns the current size of the backend logically in use. // Since the backend can manage free space in a non-byte unit such as // number of pages, the returned value can be not exactly accurate in bytes. SizeInUse() int64 + // OpenReadTxN returns the number of currently open read transactions in the backend. 
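// A standalone sketch (not the vendored code) showing how the lease metrics
// defined in metrics.go above are typically exercised: counters bumped on
// grant/revoke and TTLs recorded into the exponential-bucket histogram. The
// metric names use an "example" namespace rather than the vendored
// etcd_debugging one, and registration goes to a private registry so the
// snippet stays self-contained.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	granted = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "example",
		Subsystem: "lease",
		Name:      "granted_total",
		Help:      "The total number of granted leases.",
	})
	ttls = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: "example",
		Subsystem: "lease",
		Name:      "ttl_total",
		Help:      "Bucketed histogram of lease TTLs.",
		Buckets:   prometheus.ExponentialBuckets(1, 2, 24), // 1s up to ~3 months
	})
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(granted, ttls)

	// Simulate granting a lease with a 60 second TTL.
	granted.Inc()
	ttls.Observe(60)

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName())
	}
}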
+ OpenReadTxN() int64 Defrag() error ForceCommit() Close() error @@ -76,14 +86,14 @@ type backend struct { // size and commits are used with atomic operations so they must be // 64-bit aligned, otherwise 32-bit tests will crash - // size is the number of bytes in the backend + // size is the number of bytes allocated in the backend size int64 - // sizeInUse is the number of bytes actually used in the backend sizeInUse int64 - // commits counts number of commits since start commits int64 + // openReadTxN is the number of currently open read transactions in the backend + openReadTxN int64 mu sync.RWMutex db *bolt.DB @@ -96,6 +106,8 @@ type backend struct { stopc chan struct{} donec chan struct{} + + lg *zap.Logger } type BackendConfig struct { @@ -105,8 +117,12 @@ type BackendConfig struct { BatchInterval time.Duration // BatchLimit is the maximum puts before flushing the BatchTx. BatchLimit int + // BackendFreelistType is the backend boltdb's freelist type. + BackendFreelistType bolt.FreelistType // MmapSize is the number of bytes to mmap for the backend. MmapSize uint64 + // Logger logs backend-side operations. + Logger *zap.Logger } func DefaultBackendConfig() BackendConfig { @@ -133,10 +149,15 @@ func newBackend(bcfg BackendConfig) *backend { *bopts = *boltOpenOptions } bopts.InitialMmapSize = bcfg.mmapSize() + bopts.FreelistType = bcfg.BackendFreelistType db, err := bolt.Open(bcfg.Path, 0600, bopts) if err != nil { - plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err) + if bcfg.Logger != nil { + bcfg.Logger.Panic("failed to open database", zap.String("path", bcfg.Path), zap.Error(err)) + } else { + plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err) + } } // In future, may want to make buffering optional for low-concurrency systems @@ -152,10 +173,13 @@ func newBackend(bcfg BackendConfig) *backend { txBuffer: txBuffer{make(map[string]*bucketBuffer)}, }, buckets: make(map[string]*bolt.Bucket), + txWg: new(sync.WaitGroup), }, stopc: make(chan struct{}), donec: make(chan struct{}), + + lg: bcfg.Logger, } b.batchTx = newBatchTxBuffered(b) go b.run() @@ -171,6 +195,24 @@ func (b *backend) BatchTx() BatchTx { func (b *backend) ReadTx() ReadTx { return b.readTx } +// ConcurrentReadTx creates and returns a new ReadTx, which: +// A) creates and keeps a copy of backend.readTx.txReadBuffer, +// B) references the boltdb read Tx (and its bucket cache) of current batch interval. +func (b *backend) ConcurrentReadTx() ReadTx { + b.readTx.RLock() + defer b.readTx.RUnlock() + // prevent boltdb read Tx from been rolled back until store read Tx is done. Needs to be called when holding readTx.RLock(). + b.readTx.txWg.Add(1) + // TODO: might want to copy the read buffer lazily - create copy when A) end of a write transaction B) end of a batch interval. + return &concurrentReadTx{ + buf: b.readTx.buf.unsafeCopy(), + tx: b.readTx.tx, + txMu: &b.readTx.txMu, + buckets: b.readTx.buckets, + txWg: b.readTx.txWg, + } +} + // ForceCommit forces the current batching tx to commit. 
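The backend.ConcurrentReadTx added above copies the read buffer and bumps a WaitGroup so the shared boltdb read transaction stays alive until every concurrent reader finishes. A small self-contained sketch of that WaitGroup pattern, using stand-in types rather than the vendored readTx/concurrentReadTx:

package main

import (
	"fmt"
	"sync"
)

// sharedReadTx stands in for backend.readTx: a buffer guarded by an RWMutex
// plus a WaitGroup that counts outstanding concurrent readers.
type sharedReadTx struct {
	mu   sync.RWMutex
	txWg *sync.WaitGroup
	data map[string]string
}

type concurrentRead struct {
	snapshot map[string]string
	txWg     *sync.WaitGroup
}

// begin mirrors ConcurrentReadTx: hold the read lock only long enough to copy
// the buffer and register with the WaitGroup.
func (s *sharedReadTx) begin() *concurrentRead {
	s.mu.RLock()
	defer s.mu.RUnlock()
	s.txWg.Add(1)
	copied := make(map[string]string, len(s.data))
	for k, v := range s.data {
		copied[k] = v
	}
	return &concurrentRead{snapshot: copied, txWg: s.txWg}
}

// end mirrors concurrentReadTx.RUnlock: signal that this reader is done.
func (c *concurrentRead) end() { c.txWg.Done() }

func main() {
	s := &sharedReadTx{txWg: new(sync.WaitGroup), data: map[string]string{"k": "v"}}
	r := s.begin()
	go func() {
		defer r.end()
		fmt.Println("read:", r.snapshot["k"])
	}()
	s.txWg.Wait() // only now is it safe to roll back the shared boltdb tx
	fmt.Println("all readers finished")
}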
func (b *backend) ForceCommit() { b.batchTx.Commit() @@ -183,7 +225,11 @@ func (b *backend) Snapshot() Snapshot { defer b.mu.RUnlock() tx, err := b.db.Begin(false) if err != nil { - plog.Fatalf("cannot begin tx (%s)", err) + if b.lg != nil { + b.lg.Fatal("failed to begin tx", zap.Error(err)) + } else { + plog.Fatalf("cannot begin tx (%s)", err) + } } stopc, donec := make(chan struct{}), make(chan struct{}) @@ -203,9 +249,19 @@ func (b *backend) Snapshot() Snapshot { for { select { case <-ticker.C: - plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1014), start) + if b.lg != nil { + b.lg.Warn( + "snapshotting taking too long to transfer", + zap.Duration("taking", time.Since(start)), + zap.Int64("bytes", dbBytes), + zap.String("size", humanize.Bytes(uint64(dbBytes))), + ) + } else { + plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1014), start) + } + case <-stopc: - snapshotDurations.Observe(time.Since(start).Seconds()) + snapshotTransferSec.Observe(time.Since(start).Seconds()) return } } @@ -270,7 +326,9 @@ func (b *backend) run() { b.batchTx.CommitAndStop() return } - b.batchTx.Commit() + if b.batchTx.safePending() != 0 { + b.batchTx.Commit() + } t.Reset(b.batchInterval) } } @@ -304,79 +362,99 @@ func (b *backend) defrag() error { defer b.mu.Unlock() // block concurrent read requests while resetting tx - b.readTx.mu.Lock() - defer b.readTx.mu.Unlock() + b.readTx.Lock() + defer b.readTx.Unlock() b.batchTx.unsafeCommit(true) + b.batchTx.tx = nil - // Create a temporary file to ensure we start with a clean slate. - // Snapshotter.cleanupSnapdir cleans up any of these that are found during startup. 
- dir := filepath.Dir(b.db.Path()) - temp, err := ioutil.TempFile(dir, "db.tmp.*") + tmpdb, err := bolt.Open(b.db.Path()+".tmp", 0600, boltOpenOptions) if err != nil { return err } - options := bolt.Options{} - if boltOpenOptions != nil { - options = *boltOpenOptions - } - options.OpenFile = func(path string, i int, mode os.FileMode) (file *os.File, err error) { - return temp, nil - } - tdbp := temp.Name() - tmpdb, err := bolt.Open(tdbp, 0600, &options) - if err != nil { - return err + + dbp := b.db.Path() + tdbp := tmpdb.Path() + size1, sizeInUse1 := b.Size(), b.SizeInUse() + if b.lg != nil { + b.lg.Info( + "defragmenting", + zap.String("path", dbp), + zap.Int64("current-db-size-bytes", size1), + zap.String("current-db-size", humanize.Bytes(uint64(size1))), + zap.Int64("current-db-size-in-use-bytes", sizeInUse1), + zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse1))), + ) } - // gofail: var defragBeforeCopy struct{} err = defragdb(b.db, tmpdb, defragLimit) - if err != nil { tmpdb.Close() - if rmErr := os.RemoveAll(tmpdb.Path()); rmErr != nil { - plog.Fatalf("failed to remove db.tmp after defragmentation completed: %v", rmErr) - } + os.RemoveAll(tmpdb.Path()) return err } - dbp := b.db.Path() - err = b.db.Close() if err != nil { - plog.Fatalf("cannot close database (%s)", err) + if b.lg != nil { + b.lg.Fatal("failed to close database", zap.Error(err)) + } else { + plog.Fatalf("cannot close database (%s)", err) + } } err = tmpdb.Close() if err != nil { - plog.Fatalf("cannot close database (%s)", err) + if b.lg != nil { + b.lg.Fatal("failed to close tmp database", zap.Error(err)) + } else { + plog.Fatalf("cannot close database (%s)", err) + } } - // gofail: var defragBeforeRename struct{} err = os.Rename(tdbp, dbp) if err != nil { - plog.Fatalf("cannot rename database (%s)", err) + if b.lg != nil { + b.lg.Fatal("failed to rename tmp database", zap.Error(err)) + } else { + plog.Fatalf("cannot rename database (%s)", err) + } } b.db, err = bolt.Open(dbp, 0600, boltOpenOptions) if err != nil { - plog.Panicf("cannot open database at %s (%v)", dbp, err) - } - b.batchTx.tx, err = b.db.Begin(true) - if err != nil { - plog.Fatalf("cannot begin tx (%s)", err) + if b.lg != nil { + b.lg.Fatal("failed to open database", zap.String("path", dbp), zap.Error(err)) + } else { + plog.Panicf("cannot open database at %s (%v)", dbp, err) + } } + b.batchTx.tx = b.unsafeBegin(true) b.readTx.reset() b.readTx.tx = b.unsafeBegin(false) size := b.readTx.tx.Size() - db := b.db + db := b.readTx.tx.DB() atomic.StoreInt64(&b.size, size) atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize))) took := time.Since(now) - defragDurations.Observe(took.Seconds()) - + defragSec.Observe(took.Seconds()) + + size2, sizeInUse2 := b.Size(), b.SizeInUse() + if b.lg != nil { + b.lg.Info( + "defragmented", + zap.String("path", dbp), + zap.Int64("current-db-size-bytes-diff", size2-size1), + zap.Int64("current-db-size-bytes", size2), + zap.String("current-db-size", humanize.Bytes(uint64(size2))), + zap.Int64("current-db-size-in-use-bytes-diff", sizeInUse2-sizeInUse1), + zap.Int64("current-db-size-in-use-bytes", sizeInUse2), + zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse2))), + zap.Duration("took", took), + ) + } return nil } @@ -439,8 +517,10 @@ func (b *backend) begin(write bool) *bolt.Tx { size := tx.Size() db := tx.DB() + stats := db.Stats() atomic.StoreInt64(&b.size, size) - atomic.StoreInt64(&b.sizeInUse, 
size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize))) + atomic.StoreInt64(&b.sizeInUse, size-(int64(stats.FreePageN)*int64(db.Info().PageSize))) + atomic.StoreInt64(&b.openReadTxN, int64(stats.OpenTxN)) return tx } @@ -448,16 +528,24 @@ func (b *backend) begin(write bool) *bolt.Tx { func (b *backend) unsafeBegin(write bool) *bolt.Tx { tx, err := b.db.Begin(write) if err != nil { - plog.Fatalf("cannot begin tx (%s)", err) + if b.lg != nil { + b.lg.Fatal("failed to begin tx", zap.Error(err)) + } else { + plog.Fatalf("cannot begin tx (%s)", err) + } } return tx } +func (b *backend) OpenReadTxN() int64 { + return atomic.LoadInt64(&b.openReadTxN) +} + // NewTmpBackend creates a backend implementation for testing. func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, string) { dir, err := ioutil.TempDir(os.TempDir(), "etcd_backend_test") if err != nil { - plog.Fatal(err) + panic(err) } tmpPath := filepath.Join(dir, "database") bcfg := DefaultBackendConfig() diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go b/vendor/go.etcd.io/etcd/mvcc/backend/batch_tx.go similarity index 67% rename from vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go rename to vendor/go.etcd.io/etcd/mvcc/backend/batch_tx.go index aed6893e4..d5c8a88c3 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go +++ b/vendor/go.etcd.io/etcd/mvcc/backend/batch_tx.go @@ -21,7 +21,8 @@ import ( "sync/atomic" "time" - bolt "github.com/coreos/bbolt" + bolt "go.etcd.io/bbolt" + "go.uber.org/zap" ) type BatchTx interface { @@ -44,10 +45,41 @@ type batchTx struct { pending int } +func (t *batchTx) Lock() { + t.Mutex.Lock() +} + +func (t *batchTx) Unlock() { + if t.pending >= t.backend.batchLimit { + t.commit(false) + } + t.Mutex.Unlock() +} + +// BatchTx interface embeds ReadTx interface. But RLock() and RUnlock() do not +// have appropriate semantics in BatchTx interface. Therefore should not be called. +// TODO: might want to decouple ReadTx and BatchTx + +func (t *batchTx) RLock() { + panic("unexpected RLock") +} + +func (t *batchTx) RUnlock() { + panic("unexpected RUnlock") +} + func (t *batchTx) UnsafeCreateBucket(name []byte) { _, err := t.tx.CreateBucket(name) if err != nil && err != bolt.ErrBucketExists { - plog.Fatalf("cannot create bucket %s (%v)", name, err) + if t.backend.lg != nil { + t.backend.lg.Fatal( + "failed to create a bucket", + zap.String("bucket-name", string(name)), + zap.Error(err), + ) + } else { + plog.Fatalf("cannot create bucket %s (%v)", name, err) + } } t.pending++ } @@ -65,7 +97,14 @@ func (t *batchTx) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) { func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq bool) { bucket := t.tx.Bucket(bucketName) if bucket == nil { - plog.Fatalf("bucket %s does not exist", bucketName) + if t.backend.lg != nil { + t.backend.lg.Fatal( + "failed to find a bucket", + zap.String("bucket-name", string(bucketName)), + ) + } else { + plog.Fatalf("bucket %s does not exist", bucketName) + } } if seq { // it is useful to increase fill percent when the workloads are mostly append-only. 
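The batchTx hunks above relocate the commit-on-Unlock logic, add panicking RLock/RUnlock stubs (a BatchTx must not be used as a read lock), and introduce safePending so the background loop can skip commits when nothing is pending. A compact sketch of that pattern with a simplified batch type, not the vendored one:

package main

import (
	"fmt"
	"sync"
)

type batch struct {
	sync.Mutex
	pending    int
	batchLimit int
	commits    int
}

func (b *batch) put() {
	b.Lock()
	b.pending++
	b.unlock()
}

// unlock mirrors batchTx.Unlock: flush before releasing the lock once the
// batch limit has been reached.
func (b *batch) unlock() {
	if b.pending >= b.batchLimit {
		b.commit()
	}
	b.Mutex.Unlock()
}

func (b *batch) commit() {
	b.commits++
	b.pending = 0
}

// safePending mirrors batchTx.safePending: read pending under the lock so a
// background loop can decide whether a periodic commit is needed at all.
func (b *batch) safePending() int {
	b.Lock()
	defer b.Mutex.Unlock()
	return b.pending
}

func main() {
	b := &batch{batchLimit: 2}
	b.put()
	fmt.Println("pending after one put:", b.safePending(), "commits:", b.commits)
	b.put() // hits the limit, commits on unlock
	fmt.Println("pending after two puts:", b.safePending(), "commits:", b.commits)
}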
@@ -73,7 +112,15 @@ func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq boo bucket.FillPercent = 0.9 } if err := bucket.Put(key, value); err != nil { - plog.Fatalf("cannot put key into bucket (%v)", err) + if t.backend.lg != nil { + t.backend.lg.Fatal( + "failed to write to a bucket", + zap.String("bucket-name", string(bucketName)), + zap.Error(err), + ) + } else { + plog.Fatalf("cannot put key into bucket (%v)", err) + } } t.pending++ } @@ -82,7 +129,14 @@ func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq boo func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { bucket := t.tx.Bucket(bucketName) if bucket == nil { - plog.Fatalf("bucket %s does not exist", bucketName) + if t.backend.lg != nil { + t.backend.lg.Fatal( + "failed to find a bucket", + zap.String("bucket-name", string(bucketName)), + ) + } else { + plog.Fatalf("bucket %s does not exist", bucketName) + } } return unsafeRange(bucket.Cursor(), key, endKey, limit) } @@ -98,6 +152,7 @@ func unsafeRange(c *bolt.Cursor, key, endKey []byte, limit int64) (keys [][]byte isMatch = func(b []byte) bool { return bytes.Equal(b, key) } limit = 1 } + for ck, cv := c.Seek(key); ck != nil && isMatch(ck); ck, cv = c.Next() { vs = append(vs, cv) keys = append(keys, ck) @@ -112,11 +167,26 @@ func unsafeRange(c *bolt.Cursor, key, endKey []byte, limit int64) (keys [][]byte func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) { bucket := t.tx.Bucket(bucketName) if bucket == nil { - plog.Fatalf("bucket %s does not exist", bucketName) + if t.backend.lg != nil { + t.backend.lg.Fatal( + "failed to find a bucket", + zap.String("bucket-name", string(bucketName)), + ) + } else { + plog.Fatalf("bucket %s does not exist", bucketName) + } } err := bucket.Delete(key) if err != nil { - plog.Fatalf("cannot delete key from bucket (%v)", err) + if t.backend.lg != nil { + t.backend.lg.Fatal( + "failed to delete a key", + zap.String("bucket-name", string(bucketName)), + zap.Error(err), + ) + } else { + plog.Fatalf("cannot delete key from bucket (%v)", err) + } } t.pending++ } @@ -147,11 +217,10 @@ func (t *batchTx) CommitAndStop() { t.Unlock() } -func (t *batchTx) Unlock() { - if t.pending >= t.backend.batchLimit { - t.commit(false) - } - t.Mutex.Unlock() +func (t *batchTx) safePending() int { + t.Mutex.Lock() + defer t.Mutex.Unlock() + return t.pending } func (t *batchTx) commit(stop bool) { @@ -167,12 +236,19 @@ func (t *batchTx) commit(stop bool) { err := t.tx.Commit() // gofail: var afterCommit struct{} - commitDurations.Observe(time.Since(start).Seconds()) + rebalanceSec.Observe(t.tx.Stats().RebalanceTime.Seconds()) + spillSec.Observe(t.tx.Stats().SpillTime.Seconds()) + writeSec.Observe(t.tx.Stats().WriteTime.Seconds()) + commitSec.Observe(time.Since(start).Seconds()) atomic.AddInt64(&t.backend.commits, 1) t.pending = 0 if err != nil { - plog.Fatalf("cannot commit tx (%s)", err) + if t.backend.lg != nil { + t.backend.lg.Fatal("failed to commit tx", zap.Error(err)) + } else { + plog.Fatalf("cannot commit tx (%s)", err) + } } } if !stop { @@ -199,9 +275,9 @@ func newBatchTxBuffered(backend *backend) *batchTxBuffered { func (t *batchTxBuffered) Unlock() { if t.pending != 0 { - t.backend.readTx.mu.Lock() + t.backend.readTx.Lock() // blocks txReadBuffer for writing. 
t.buf.writeback(&t.backend.readTx.buf) - t.backend.readTx.mu.Unlock() + t.backend.readTx.Unlock() if t.pending >= t.backend.batchLimit { t.commit(false) } @@ -223,16 +299,25 @@ func (t *batchTxBuffered) CommitAndStop() { func (t *batchTxBuffered) commit(stop bool) { // all read txs must be closed to acquire boltdb commit rwlock - t.backend.readTx.mu.Lock() + t.backend.readTx.Lock() t.unsafeCommit(stop) - t.backend.readTx.mu.Unlock() + t.backend.readTx.Unlock() } func (t *batchTxBuffered) unsafeCommit(stop bool) { if t.backend.readTx.tx != nil { - if err := t.backend.readTx.tx.Rollback(); err != nil { - plog.Fatalf("cannot rollback tx (%s)", err) - } + // wait all store read transactions using the current boltdb tx to finish, + // then close the boltdb tx + go func(tx *bolt.Tx, wg *sync.WaitGroup) { + wg.Wait() + if err := tx.Rollback(); err != nil { + if t.backend.lg != nil { + t.backend.lg.Fatal("failed to rollback tx", zap.Error(err)) + } else { + plog.Fatalf("cannot rollback tx (%s)", err) + } + } + }(t.backend.readTx.tx, t.backend.readTx.txWg) t.backend.readTx.reset() } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go b/vendor/go.etcd.io/etcd/mvcc/backend/config_default.go similarity index 90% rename from vendor/github.com/coreos/etcd/mvcc/backend/config_default.go rename to vendor/go.etcd.io/etcd/mvcc/backend/config_default.go index edfed0025..f15f030f8 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go +++ b/vendor/go.etcd.io/etcd/mvcc/backend/config_default.go @@ -16,8 +16,8 @@ package backend -import bolt "github.com/coreos/bbolt" +import bolt "go.etcd.io/bbolt" -var boltOpenOptions *bolt.Options = nil +var boltOpenOptions *bolt.Options func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go b/vendor/go.etcd.io/etcd/mvcc/backend/config_linux.go similarity index 97% rename from vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go rename to vendor/go.etcd.io/etcd/mvcc/backend/config_linux.go index b01785f3b..f712671af 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go +++ b/vendor/go.etcd.io/etcd/mvcc/backend/config_linux.go @@ -17,7 +17,7 @@ package backend import ( "syscall" - bolt "github.com/coreos/bbolt" + bolt "go.etcd.io/bbolt" ) // syscall.MAP_POPULATE on linux 2.6.23+ does sequential read-ahead diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go b/vendor/go.etcd.io/etcd/mvcc/backend/config_windows.go similarity index 95% rename from vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go rename to vendor/go.etcd.io/etcd/mvcc/backend/config_windows.go index 71d02700b..c6500592c 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go +++ b/vendor/go.etcd.io/etcd/mvcc/backend/config_windows.go @@ -16,7 +16,7 @@ package backend -import bolt "github.com/coreos/bbolt" +import bolt "go.etcd.io/bbolt" var boltOpenOptions *bolt.Options = nil diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/doc.go b/vendor/go.etcd.io/etcd/mvcc/backend/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/mvcc/backend/doc.go rename to vendor/go.etcd.io/etcd/mvcc/backend/doc.go diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go b/vendor/go.etcd.io/etcd/mvcc/backend/metrics.go similarity index 50% rename from vendor/github.com/coreos/etcd/mvcc/backend/metrics.go rename to vendor/go.etcd.io/etcd/mvcc/backend/metrics.go index 341570804..d9641af7a 100644 --- 
a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go +++ b/vendor/go.etcd.io/etcd/mvcc/backend/metrics.go @@ -17,7 +17,7 @@ package backend import "github.com/prometheus/client_golang/prometheus" var ( - commitDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + commitSec = prometheus.NewHistogram(prometheus.HistogramOpts{ Namespace: "etcd", Subsystem: "disk", Name: "backend_commit_duration_seconds", @@ -28,7 +28,40 @@ var ( Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), }) - defragDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + rebalanceSec = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "etcd_debugging", + Subsystem: "disk", + Name: "backend_commit_rebalance_duration_seconds", + Help: "The latency distributions of commit.rebalance called by bboltdb backend.", + + // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 + // highest bucket start of 0.001 sec * 2^13 == 8.192 sec + Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), + }) + + spillSec = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "etcd_debugging", + Subsystem: "disk", + Name: "backend_commit_spill_duration_seconds", + Help: "The latency distributions of commit.spill called by bboltdb backend.", + + // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 + // highest bucket start of 0.001 sec * 2^13 == 8.192 sec + Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), + }) + + writeSec = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "etcd_debugging", + Subsystem: "disk", + Name: "backend_commit_write_duration_seconds", + Help: "The latency distributions of commit.write called by bboltdb backend.", + + // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 + // highest bucket start of 0.001 sec * 2^13 == 8.192 sec + Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), + }) + + defragSec = prometheus.NewHistogram(prometheus.HistogramOpts{ Namespace: "etcd", Subsystem: "disk", Name: "backend_defrag_duration_seconds", @@ -40,7 +73,7 @@ var ( Buckets: prometheus.ExponentialBuckets(.1, 2, 13), }) - snapshotDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + snapshotTransferSec = prometheus.NewHistogram(prometheus.HistogramOpts{ Namespace: "etcd", Subsystem: "disk", Name: "backend_snapshot_duration_seconds", @@ -53,7 +86,10 @@ var ( ) func init() { - prometheus.MustRegister(commitDurations) - prometheus.MustRegister(defragDurations) - prometheus.MustRegister(snapshotDurations) + prometheus.MustRegister(commitSec) + prometheus.MustRegister(rebalanceSec) + prometheus.MustRegister(spillSec) + prometheus.MustRegister(writeSec) + prometheus.MustRegister(defragSec) + prometheus.MustRegister(snapshotTransferSec) } diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/read_tx.go b/vendor/go.etcd.io/etcd/mvcc/backend/read_tx.go new file mode 100644 index 000000000..91fe72ec5 --- /dev/null +++ b/vendor/go.etcd.io/etcd/mvcc/backend/read_tx.go @@ -0,0 +1,210 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "bytes" + "math" + "sync" + + bolt "go.etcd.io/bbolt" +) + +// safeRangeBucket is a hack to avoid inadvertently reading duplicate keys; +// overwrites on a bucket should only fetch with limit=1, but safeRangeBucket +// is known to never overwrite any key so range is safe. +var safeRangeBucket = []byte("key") + +type ReadTx interface { + Lock() + Unlock() + RLock() + RUnlock() + + UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) + UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error +} + +type readTx struct { + // mu protects accesses to the txReadBuffer + mu sync.RWMutex + buf txReadBuffer + + // TODO: group and encapsulate {txMu, tx, buckets, txWg}, as they share the same lifecycle. + // txMu protects accesses to buckets and tx on Range requests. + txMu sync.RWMutex + tx *bolt.Tx + buckets map[string]*bolt.Bucket + // txWg protects tx from being rolled back at the end of a batch interval until all reads using this tx are done. + txWg *sync.WaitGroup +} + +func (rt *readTx) Lock() { rt.mu.Lock() } +func (rt *readTx) Unlock() { rt.mu.Unlock() } +func (rt *readTx) RLock() { rt.mu.RLock() } +func (rt *readTx) RUnlock() { rt.mu.RUnlock() } + +func (rt *readTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + if endKey == nil { + // forbid duplicates for single keys + limit = 1 + } + if limit <= 0 { + limit = math.MaxInt64 + } + if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) { + panic("do not use unsafeRange on non-keys bucket") + } + keys, vals := rt.buf.Range(bucketName, key, endKey, limit) + if int64(len(keys)) == limit { + return keys, vals + } + + // find/cache bucket + bn := string(bucketName) + rt.txMu.RLock() + bucket, ok := rt.buckets[bn] + rt.txMu.RUnlock() + if !ok { + rt.txMu.Lock() + bucket = rt.tx.Bucket(bucketName) + rt.buckets[bn] = bucket + rt.txMu.Unlock() + } + + // ignore missing bucket since may have been created in this batch + if bucket == nil { + return keys, vals + } + rt.txMu.Lock() + c := bucket.Cursor() + rt.txMu.Unlock() + + k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys))) + return append(k2, keys...), append(v2, vals...) +} + +func (rt *readTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error { + dups := make(map[string]struct{}) + getDups := func(k, v []byte) error { + dups[string(k)] = struct{}{} + return nil + } + visitNoDup := func(k, v []byte) error { + if _, ok := dups[string(k)]; ok { + return nil + } + return visitor(k, v) + } + if err := rt.buf.ForEach(bucketName, getDups); err != nil { + return err + } + rt.txMu.Lock() + err := unsafeForEach(rt.tx, bucketName, visitNoDup) + rt.txMu.Unlock() + if err != nil { + return err + } + return rt.buf.ForEach(bucketName, visitor) +} + +func (rt *readTx) reset() { + rt.buf.reset() + rt.buckets = make(map[string]*bolt.Bucket) + rt.tx = nil + rt.txWg = new(sync.WaitGroup) +} + +// TODO: create a base type for readTx and concurrentReadTx to avoid duplicated function implementation? +type concurrentReadTx struct { + buf txReadBuffer + txMu *sync.RWMutex + tx *bolt.Tx + buckets map[string]*bolt.Bucket + txWg *sync.WaitGroup +} + +func (rt *concurrentReadTx) Lock() {} +func (rt *concurrentReadTx) Unlock() {} + +// RLock is no-op. concurrentReadTx does not need to be locked after it is created. 
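Both readTx.UnsafeRange and concurrentReadTx.UnsafeRange above answer from the in-memory write-back buffer first and only fall through to boltdb for whatever the limit still allows. A toy illustration of that lookup order, using plain maps instead of the buffer and bolt bucket:

package main

import "fmt"

type rangeReader struct {
	buffer  map[string]string // recent, not-yet-committed writes
	backing map[string]string // committed state (bolt in the real code)
}

// get checks the buffer first; the backing store is consulted only if the
// limit has not already been satisfied, as in UnsafeRange above.
func (r *rangeReader) get(key string, limit int) []string {
	var vals []string
	if v, ok := r.buffer[key]; ok {
		vals = append(vals, v)
	}
	if len(vals) >= limit {
		return vals
	}
	if v, ok := r.backing[key]; ok {
		vals = append(vals, v)
	}
	return vals
}

func main() {
	r := &rangeReader{
		buffer:  map[string]string{"a": "new"},
		backing: map[string]string{"a": "old", "b": "stored"},
	}
	fmt.Println(r.get("a", 1)) // [new]  -- the buffered write wins within the limit
	fmt.Println(r.get("b", 1)) // [stored]
}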
+func (rt *concurrentReadTx) RLock() {} + +// RUnlock signals the end of concurrentReadTx. +func (rt *concurrentReadTx) RUnlock() { rt.txWg.Done() } + +func (rt *concurrentReadTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error { + dups := make(map[string]struct{}) + getDups := func(k, v []byte) error { + dups[string(k)] = struct{}{} + return nil + } + visitNoDup := func(k, v []byte) error { + if _, ok := dups[string(k)]; ok { + return nil + } + return visitor(k, v) + } + if err := rt.buf.ForEach(bucketName, getDups); err != nil { + return err + } + rt.txMu.Lock() + err := unsafeForEach(rt.tx, bucketName, visitNoDup) + rt.txMu.Unlock() + if err != nil { + return err + } + return rt.buf.ForEach(bucketName, visitor) +} + +func (rt *concurrentReadTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + if endKey == nil { + // forbid duplicates for single keys + limit = 1 + } + if limit <= 0 { + limit = math.MaxInt64 + } + if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) { + panic("do not use unsafeRange on non-keys bucket") + } + keys, vals := rt.buf.Range(bucketName, key, endKey, limit) + if int64(len(keys)) == limit { + return keys, vals + } + + // find/cache bucket + bn := string(bucketName) + rt.txMu.RLock() + bucket, ok := rt.buckets[bn] + rt.txMu.RUnlock() + if !ok { + rt.txMu.Lock() + bucket = rt.tx.Bucket(bucketName) + rt.buckets[bn] = bucket + rt.txMu.Unlock() + } + + // ignore missing bucket since may have been created in this batch + if bucket == nil { + return keys, vals + } + rt.txMu.Lock() + c := bucket.Cursor() + rt.txMu.Unlock() + + k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys))) + return append(k2, keys...), append(v2, vals...) +} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go b/vendor/go.etcd.io/etcd/mvcc/backend/tx_buffer.go similarity index 88% rename from vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go rename to vendor/go.etcd.io/etcd/mvcc/backend/tx_buffer.go index 56e885dbf..d73463823 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go +++ b/vendor/go.etcd.io/etcd/mvcc/backend/tx_buffer.go @@ -88,6 +88,19 @@ func (txr *txReadBuffer) ForEach(bucketName []byte, visitor func(k, v []byte) er return nil } +// unsafeCopy returns a copy of txReadBuffer, caller should acquire backend.readTx.RLock() +func (txr *txReadBuffer) unsafeCopy() txReadBuffer { + txrCopy := txReadBuffer{ + txBuffer: txBuffer{ + buckets: make(map[string]*bucketBuffer, len(txr.txBuffer.buckets)), + }, + } + for bucketName, bucket := range txr.txBuffer.buckets { + txrCopy.txBuffer.buckets[bucketName] = bucket.Copy() + } + return txrCopy +} + type kv struct { key []byte val []byte @@ -179,3 +192,12 @@ func (bb *bucketBuffer) Less(i, j int) bool { return bytes.Compare(bb.buf[i].key, bb.buf[j].key) < 0 } func (bb *bucketBuffer) Swap(i, j int) { bb.buf[i], bb.buf[j] = bb.buf[j], bb.buf[i] } + +func (bb *bucketBuffer) Copy() *bucketBuffer { + bbCopy := bucketBuffer{ + buf: make([]kv, len(bb.buf)), + used: bb.used, + } + copy(bbCopy.buf, bb.buf) + return &bbCopy +} diff --git a/vendor/github.com/coreos/etcd/mvcc/doc.go b/vendor/go.etcd.io/etcd/mvcc/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/mvcc/doc.go rename to vendor/go.etcd.io/etcd/mvcc/doc.go diff --git a/vendor/github.com/coreos/etcd/mvcc/index.go b/vendor/go.etcd.io/etcd/mvcc/index.go similarity index 81% rename from vendor/github.com/coreos/etcd/mvcc/index.go rename to vendor/go.etcd.io/etcd/mvcc/index.go 
index b27a9e543..f8cc6df88 100644 --- a/vendor/github.com/coreos/etcd/mvcc/index.go +++ b/vendor/go.etcd.io/etcd/mvcc/index.go @@ -19,6 +19,7 @@ import ( "sync" "github.com/google/btree" + "go.uber.org/zap" ) type index interface { @@ -39,11 +40,13 @@ type index interface { type treeIndex struct { sync.RWMutex tree *btree.BTree + lg *zap.Logger } -func newTreeIndex() index { +func newTreeIndex(lg *zap.Logger) index { return &treeIndex{ tree: btree.New(32), + lg: lg, } } @@ -54,12 +57,12 @@ func (ti *treeIndex) Put(key []byte, rev revision) { defer ti.Unlock() item := ti.tree.Get(keyi) if item == nil { - keyi.put(rev.main, rev.sub) + keyi.put(ti.lg, rev.main, rev.sub) ti.tree.ReplaceOrInsert(keyi) return } okeyi := item.(*keyIndex) - okeyi.put(rev.main, rev.sub) + okeyi.put(ti.lg, rev.main, rev.sub) } func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) { @@ -69,7 +72,7 @@ func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, v if keyi = ti.keyIndex(keyi); keyi == nil { return revision{}, revision{}, 0, ErrRevisionNotFound } - return keyi.get(atRev) + return keyi.get(ti.lg, atRev) } func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex { @@ -109,7 +112,7 @@ func (ti *treeIndex) Revisions(key, end []byte, atRev int64) (revs []revision) { return []revision{rev} } ti.visit(key, end, func(ki *keyIndex) { - if rev, _, _, err := ki.get(atRev); err == nil { + if rev, _, _, err := ki.get(ti.lg, atRev); err == nil { revs = append(revs, rev) } }) @@ -125,7 +128,7 @@ func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs [] return [][]byte{key}, []revision{rev} } ti.visit(key, end, func(ki *keyIndex) { - if rev, _, _, err := ki.get(atRev); err == nil { + if rev, _, _, err := ki.get(ti.lg, atRev); err == nil { revs = append(revs, rev) keys = append(keys, ki.key) } @@ -144,7 +147,7 @@ func (ti *treeIndex) Tombstone(key []byte, rev revision) error { } ki := item.(*keyIndex) - return ki.tombstone(rev.main, rev.sub) + return ki.tombstone(ti.lg, rev.main, rev.sub) } // RangeSince returns all revisions from key(including) to end(excluding) @@ -162,7 +165,7 @@ func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision { return nil } keyi = item.(*keyIndex) - return keyi.since(rev) + return keyi.since(ti.lg, rev) } endi := &keyIndex{key: end} @@ -172,7 +175,7 @@ func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision { return false } curKeyi := item.(*keyIndex) - revs = append(revs, curKeyi.since(rev)...) + revs = append(revs, curKeyi.since(ti.lg, rev)...) return true }) sort.Sort(revisions(revs)) @@ -182,19 +185,34 @@ func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision { func (ti *treeIndex) Compact(rev int64) map[revision]struct{} { available := make(map[revision]struct{}) - var emptyki []*keyIndex - plog.Printf("store.index: compact %d", rev) - // TODO: do not hold the lock for long time? - // This is probably OK. Compacting 10M keys takes O(10ms). 
+ if ti.lg != nil { + ti.lg.Info("compact tree index", zap.Int64("revision", rev)) + } else { + plog.Printf("store.index: compact %d", rev) + } ti.Lock() - defer ti.Unlock() - ti.tree.Ascend(compactIndex(rev, available, &emptyki)) - for _, ki := range emptyki { - item := ti.tree.Delete(ki) - if item == nil { - plog.Panic("store.index: unexpected delete failure during compaction") + clone := ti.tree.Clone() + ti.Unlock() + + clone.Ascend(func(item btree.Item) bool { + keyi := item.(*keyIndex) + //Lock is needed here to prevent modification to the keyIndex while + //compaction is going on or revision added to empty before deletion + ti.Lock() + keyi.compact(ti.lg, rev, available) + if keyi.isEmpty() { + item := ti.tree.Delete(keyi) + if item == nil { + if ti.lg != nil { + ti.lg.Panic("failed to delete during compaction") + } else { + plog.Panic("store.index: unexpected delete failure during compaction") + } + } } - } + ti.Unlock() + return true + }) return available } @@ -211,17 +229,6 @@ func (ti *treeIndex) Keep(rev int64) map[revision]struct{} { return available } -func compactIndex(rev int64, available map[revision]struct{}, emptyki *[]*keyIndex) func(i btree.Item) bool { - return func(i btree.Item) bool { - keyi := i.(*keyIndex) - keyi.compact(rev, available) - if keyi.isEmpty() { - *emptyki = append(*emptyki, keyi) - } - return true - } -} - func (ti *treeIndex) Equal(bi index) bool { b := bi.(*treeIndex) diff --git a/vendor/github.com/coreos/etcd/mvcc/key_index.go b/vendor/go.etcd.io/etcd/mvcc/key_index.go similarity index 75% rename from vendor/github.com/coreos/etcd/mvcc/key_index.go rename to vendor/go.etcd.io/etcd/mvcc/key_index.go index 805922bfc..cf77cb438 100644 --- a/vendor/github.com/coreos/etcd/mvcc/key_index.go +++ b/vendor/go.etcd.io/etcd/mvcc/key_index.go @@ -20,6 +20,7 @@ import ( "fmt" "github.com/google/btree" + "go.uber.org/zap" ) var ( @@ -73,11 +74,21 @@ type keyIndex struct { } // put puts a revision to the keyIndex. -func (ki *keyIndex) put(main int64, sub int64) { +func (ki *keyIndex) put(lg *zap.Logger, main int64, sub int64) { rev := revision{main: main, sub: sub} if !rev.GreaterThan(ki.modified) { - plog.Panicf("store.keyindex: put with unexpected smaller revision [%v / %v]", rev, ki.modified) + if lg != nil { + lg.Panic( + "'put' with an unexpected smaller revision", + zap.Int64("given-revision-main", rev.main), + zap.Int64("given-revision-sub", rev.sub), + zap.Int64("modified-revision-main", ki.modified.main), + zap.Int64("modified-revision-sub", ki.modified.sub), + ) + } else { + plog.Panicf("store.keyindex: put with unexpected smaller revision [%v / %v]", rev, ki.modified) + } } if len(ki.generations) == 0 { ki.generations = append(ki.generations, generation{}) @@ -92,9 +103,16 @@ func (ki *keyIndex) put(main int64, sub int64) { ki.modified = rev } -func (ki *keyIndex) restore(created, modified revision, ver int64) { +func (ki *keyIndex) restore(lg *zap.Logger, created, modified revision, ver int64) { if len(ki.generations) != 0 { - plog.Panicf("store.keyindex: cannot restore non-empty keyIndex") + if lg != nil { + lg.Panic( + "'restore' got an unexpected non-empty generations", + zap.Int("generations-size", len(ki.generations)), + ) + } else { + plog.Panicf("store.keyindex: cannot restore non-empty keyIndex") + } } ki.modified = modified @@ -106,14 +124,21 @@ func (ki *keyIndex) restore(created, modified revision, ver int64) { // tombstone puts a revision, pointing to a tombstone, to the keyIndex. // It also creates a new empty generation in the keyIndex. 
// It returns ErrRevisionNotFound when tombstone on an empty generation. -func (ki *keyIndex) tombstone(main int64, sub int64) error { +func (ki *keyIndex) tombstone(lg *zap.Logger, main int64, sub int64) error { if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected tombstone on empty keyIndex %s", string(ki.key)) + if lg != nil { + lg.Panic( + "'tombstone' got an unexpected empty keyIndex", + zap.String("key", string(ki.key)), + ) + } else { + plog.Panicf("store.keyindex: unexpected tombstone on empty keyIndex %s", string(ki.key)) + } } if ki.generations[len(ki.generations)-1].isEmpty() { return ErrRevisionNotFound } - ki.put(main, sub) + ki.put(lg, main, sub) ki.generations = append(ki.generations, generation{}) keysGauge.Dec() return nil @@ -121,9 +146,16 @@ func (ki *keyIndex) tombstone(main int64, sub int64) error { // get gets the modified, created revision and version of the key that satisfies the given atRev. // Rev must be higher than or equal to the given atRev. -func (ki *keyIndex) get(atRev int64) (modified, created revision, ver int64, err error) { +func (ki *keyIndex) get(lg *zap.Logger, atRev int64) (modified, created revision, ver int64, err error) { if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) + if lg != nil { + lg.Panic( + "'get' got an unexpected empty keyIndex", + zap.String("key", string(ki.key)), + ) + } else { + plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) + } } g := ki.findGeneration(atRev) if g.isEmpty() { @@ -141,9 +173,16 @@ func (ki *keyIndex) get(atRev int64) (modified, created revision, ver int64, err // since returns revisions since the given rev. Only the revision with the // largest sub revision will be returned if multiple revisions have the same // main revision. -func (ki *keyIndex) since(rev int64) []revision { +func (ki *keyIndex) since(lg *zap.Logger, rev int64) []revision { if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) + if lg != nil { + lg.Panic( + "'since' got an unexpected empty keyIndex", + zap.String("key", string(ki.key)), + ) + } else { + plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key)) + } } since := revision{rev, 0} var gi int @@ -182,9 +221,16 @@ func (ki *keyIndex) since(rev int64) []revision { // revision than the given atRev except the largest one (If the largest one is // a tombstone, it will not be kept). // If a generation becomes empty during compaction, it will be removed. 
-func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) { +func (ki *keyIndex) compact(lg *zap.Logger, atRev int64, available map[revision]struct{}) { if ki.isEmpty() { - plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key)) + if lg != nil { + lg.Panic( + "'compact' got an unexpected empty keyIndex", + zap.String("key", string(ki.key)), + ) + } else { + plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key)) + } } genIdx, revIndex := ki.doCompact(atRev, available) @@ -278,22 +324,22 @@ func (ki *keyIndex) findGeneration(rev int64) *generation { return nil } -func (a *keyIndex) Less(b btree.Item) bool { - return bytes.Compare(a.key, b.(*keyIndex).key) == -1 +func (ki *keyIndex) Less(b btree.Item) bool { + return bytes.Compare(ki.key, b.(*keyIndex).key) == -1 } -func (a *keyIndex) equal(b *keyIndex) bool { - if !bytes.Equal(a.key, b.key) { +func (ki *keyIndex) equal(b *keyIndex) bool { + if !bytes.Equal(ki.key, b.key) { return false } - if a.modified != b.modified { + if ki.modified != b.modified { return false } - if len(a.generations) != len(b.generations) { + if len(ki.generations) != len(b.generations) { return false } - for i := range a.generations { - ag, bg := a.generations[i], b.generations[i] + for i := range ki.generations { + ag, bg := ki.generations[i], b.generations[i] if !ag.equal(bg) { return false } @@ -338,16 +384,16 @@ func (g *generation) String() string { return fmt.Sprintf("g: created[%d] ver[%d], revs %#v\n", g.created, g.ver, g.revs) } -func (a generation) equal(b generation) bool { - if a.ver != b.ver { +func (g generation) equal(b generation) bool { + if g.ver != b.ver { return false } - if len(a.revs) != len(b.revs) { + if len(g.revs) != len(b.revs) { return false } - for i := range a.revs { - ar, br := a.revs[i], b.revs[i] + for i := range g.revs { + ar, br := g.revs[i], b.revs[i] if ar != br { return false } diff --git a/vendor/github.com/coreos/etcd/mvcc/kv.go b/vendor/go.etcd.io/etcd/mvcc/kv.go similarity index 94% rename from vendor/github.com/coreos/etcd/mvcc/kv.go rename to vendor/go.etcd.io/etcd/mvcc/kv.go index 2dad3ad8e..c057f9261 100644 --- a/vendor/github.com/coreos/etcd/mvcc/kv.go +++ b/vendor/go.etcd.io/etcd/mvcc/kv.go @@ -15,9 +15,10 @@ package mvcc import ( - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/lease" + "go.etcd.io/etcd/mvcc/backend" + "go.etcd.io/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/pkg/traceutil" ) type RangeOptions struct { @@ -102,10 +103,10 @@ type KV interface { WriteView // Read creates a read transaction. - Read() TxnRead + Read(trace *traceutil.Trace) TxnRead // Write creates a write transaction. - Write() TxnWrite + Write(trace *traceutil.Trace) TxnWrite // Hash computes the hash of the KV's backend. Hash() (hash uint32, revision int64, err error) @@ -114,7 +115,7 @@ type KV interface { HashByRev(rev int64) (hash uint32, revision int64, compactRev int64, err error) // Compact frees all superseded keys with revisions less than rev. - Compact(rev int64) (<-chan struct{}, error) + Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) // Commit commits outstanding txns into the underlying backend. 
Commit() diff --git a/vendor/github.com/coreos/etcd/mvcc/kv_view.go b/vendor/go.etcd.io/etcd/mvcc/kv_view.go similarity index 83% rename from vendor/github.com/coreos/etcd/mvcc/kv_view.go rename to vendor/go.etcd.io/etcd/mvcc/kv_view.go index f40ba8edc..d4f0ca688 100644 --- a/vendor/github.com/coreos/etcd/mvcc/kv_view.go +++ b/vendor/go.etcd.io/etcd/mvcc/kv_view.go @@ -15,25 +15,26 @@ package mvcc import ( - "github.com/coreos/etcd/lease" + "go.etcd.io/etcd/lease" + "go.etcd.io/etcd/pkg/traceutil" ) type readView struct{ kv KV } func (rv *readView) FirstRev() int64 { - tr := rv.kv.Read() + tr := rv.kv.Read(traceutil.TODO()) defer tr.End() return tr.FirstRev() } func (rv *readView) Rev() int64 { - tr := rv.kv.Read() + tr := rv.kv.Read(traceutil.TODO()) defer tr.End() return tr.Rev() } func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - tr := rv.kv.Read() + tr := rv.kv.Read(traceutil.TODO()) defer tr.End() return tr.Range(key, end, ro) } @@ -41,13 +42,13 @@ func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err type writeView struct{ kv KV } func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) { - tw := wv.kv.Write() + tw := wv.kv.Write(traceutil.TODO()) defer tw.End() return tw.DeleteRange(key, end) } func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - tw := wv.kv.Write() + tw := wv.kv.Write(traceutil.TODO()) defer tw.End() return tw.Put(key, value, lease) } diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore.go b/vendor/go.etcd.io/etcd/mvcc/kvstore.go similarity index 75% rename from vendor/github.com/coreos/etcd/mvcc/kvstore.go rename to vendor/go.etcd.io/etcd/mvcc/kvstore.go index 2a8a0361a..ed05bc288 100644 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore.go +++ b/vendor/go.etcd.io/etcd/mvcc/kvstore.go @@ -18,17 +18,21 @@ import ( "context" "encoding/binary" "errors" + "fmt" "hash/crc32" "math" "sync" "sync/atomic" "time" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" - "github.com/coreos/etcd/pkg/schedule" + "go.etcd.io/etcd/lease" + "go.etcd.io/etcd/mvcc/backend" + "go.etcd.io/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/pkg/schedule" + "go.etcd.io/etcd/pkg/traceutil" + "github.com/coreos/pkg/capnslog" + "go.uber.org/zap" ) var ( @@ -44,7 +48,7 @@ var ( ErrCanceled = errors.New("mvcc: watcher is canceled") ErrClosed = errors.New("mvcc: closed") - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "mvcc") ) const ( @@ -57,6 +61,7 @@ const ( ) var restoreChunkKeys = 10000 // non-const for testing +var defaultCompactBatchLimit = 1000 // ConsistentIndexGetter is an interface that wraps the Get method. // Consistent index is the offset of an entry in a consistent replicated log. @@ -65,6 +70,10 @@ type ConsistentIndexGetter interface { ConsistentIndex() uint64 } +type StoreConfig struct { + CompactionBatchLimit int +} + type store struct { ReadView WriteView @@ -73,6 +82,8 @@ type store struct { // through atomics so must be 64-bit aligned. consistentIndex uint64 + cfg StoreConfig + // mu read locks for txns and write locks for non-txn store changes. mu sync.RWMutex @@ -99,15 +110,21 @@ type store struct { fifoSched schedule.Scheduler stopc chan struct{} + + lg *zap.Logger } // NewStore returns a new store. It is useful to create a store inside // mvcc pkg. It should only be used for testing externally. 
-func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store { +func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter, cfg StoreConfig) *store { + if cfg.CompactionBatchLimit == 0 { + cfg.CompactionBatchLimit = defaultCompactBatchLimit + } s := &store{ + cfg: cfg, b: b, ig: ig, - kvindex: newTreeIndex(), + kvindex: newTreeIndex(lg), le: le, @@ -118,11 +135,13 @@ func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *sto fifoSched: schedule.NewFIFOScheduler(), stopc: make(chan struct{}), + + lg: lg, } s.ReadView = &readView{s} s.WriteView = &writeView{s} if s.le != nil { - s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) + s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) }) } tx := s.b.BatchTx() @@ -132,6 +151,8 @@ func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *sto tx.Unlock() s.b.ForceCommit() + s.mu.Lock() + defer s.mu.Unlock() if err := s.restore(); err != nil { // TODO: return the error instead of panic here? panic("failed to recover store from backend") @@ -142,18 +163,14 @@ func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *sto func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { if ctx == nil || ctx.Err() != nil { + s.mu.Lock() select { case <-s.stopc: default: - // fix deadlock in mvcc,for more information, please refer to pr 11817. - // s.stopc is only updated in restore operation, which is called by apply - // snapshot call, compaction and apply snapshot requests are serialized by - // raft, and do not happen at the same time. - s.mu.Lock() f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } s.fifoSched.Schedule(f) - s.mu.Unlock() } + s.mu.Unlock() return } close(ch) @@ -165,7 +182,7 @@ func (s *store) Hash() (hash uint32, revision int64, err error) { s.b.ForceCommit() h, err := s.b.Hash(DefaultIgnores) - hashDurations.Observe(time.Since(start).Seconds()) + hashSec.Observe(time.Since(start).Seconds()) return h, s.currentRev, err } @@ -191,8 +208,8 @@ func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev keep := s.kvindex.Keep(rev) tx := s.b.ReadTx() - tx.Lock() - defer tx.Unlock() + tx.RLock() + defer tx.RUnlock() s.mu.RUnlock() upper := revision{main: rev + 1} @@ -218,28 +235,24 @@ func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev }) hash = h.Sum32() - hashRevDurations.Observe(time.Since(start).Seconds()) + hashRevSec.Observe(time.Since(start).Seconds()) return hash, currentRev, compactRev, err } -func (s *store) Compact(rev int64) (<-chan struct{}, error) { - s.mu.Lock() - defer s.mu.Unlock() +func (s *store) updateCompactRev(rev int64) (<-chan struct{}, error) { s.revMu.Lock() - defer s.revMu.Unlock() - if rev <= s.compactMainRev { ch := make(chan struct{}) f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } s.fifoSched.Schedule(f) + s.revMu.Unlock() return ch, ErrCompacted } if rev > s.currentRev { + s.revMu.Unlock() return nil, ErrFutureRev } - start := time.Now() - s.compactMainRev = rev rbytes := newRevBytes() @@ -252,7 +265,15 @@ func (s *store) Compact(rev int64) (<-chan struct{}, error) { // ensure that desired compaction is persisted s.b.ForceCommit() + s.revMu.Unlock() + + return nil, nil +} + +func (s *store) compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) { + start := time.Now() keep := s.kvindex.Compact(rev) + trace.Step("compact in-memory index tree") ch := make(chan 
struct{}) var j = func(ctx context.Context) { if ctx.Err() != nil { @@ -268,10 +289,34 @@ func (s *store) Compact(rev int64) (<-chan struct{}, error) { s.fifoSched.Schedule(j) - indexCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond)) + indexCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond)) + trace.Step("schedule compaction") return ch, nil } +func (s *store) compactLockfree(rev int64) (<-chan struct{}, error) { + ch, err := s.updateCompactRev(rev) + if nil != err { + return ch, err + } + + return s.compact(traceutil.TODO(), rev) +} + +func (s *store) Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) { + s.mu.Lock() + + ch, err := s.updateCompactRev(rev) + trace.Step("check and update compact revision") + if err != nil { + s.mu.Unlock() + return ch, err + } + s.mu.Unlock() + + return s.compact(trace, rev) +} + // DefaultIgnores is a map of keys to ignore in hash checking. var DefaultIgnores map[backend.IgnoreKey]struct{} @@ -303,7 +348,7 @@ func (s *store) Restore(b backend.Backend) error { atomic.StoreUint64(&s.consistentIndex, 0) s.b = b - s.kvindex = newTreeIndex() + s.kvindex = newTreeIndex(s.lg) s.currentRev = 1 s.compactMainRev = -1 s.fifoSched = schedule.NewFIFOScheduler() @@ -328,7 +373,17 @@ func (s *store) restore() error { _, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0) if len(finishedCompactBytes) != 0 { s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main - plog.Printf("restore compact to %d", s.compactMainRev) + + if s.lg != nil { + s.lg.Info( + "restored last compact revision", + zap.String("meta-bucket-name", string(metaBucketName)), + zap.String("meta-bucket-name-key", string(finishedCompactKeyName)), + zap.Int64("restored-compact-revision", s.compactMainRev), + ) + } else { + plog.Printf("restore compact to %d", s.compactMainRev) + } } _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) scheduledCompact := int64(0) @@ -338,7 +393,7 @@ func (s *store) restore() error { // index keys concurrently as they're loaded in from tx keysGauge.Set(0) - rkvc, revc := restoreIntoIndex(s.kvindex) + rkvc, revc := restoreIntoIndex(s.lg, s.kvindex) for { keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys)) if len(keys) == 0 { @@ -346,7 +401,7 @@ func (s *store) restore() error { } // rkvc blocks if the total pending keys exceeds the restore // chunk size to keep keys from consuming too much memory. 
- restoreChunk(rkvc, keys, vals, keyToLease) + restoreChunk(s.lg, rkvc, keys, vals, keyToLease) if len(keys) < restoreChunkKeys { // partial set implies final set break @@ -375,15 +430,33 @@ func (s *store) restore() error { } err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}}) if err != nil { - plog.Errorf("unexpected Attach error: %v", err) + if s.lg != nil { + s.lg.Warn( + "failed to attach a lease", + zap.String("lease-id", fmt.Sprintf("%016x", lid)), + zap.Error(err), + ) + } else { + plog.Errorf("unexpected Attach error: %v", err) + } } } tx.Unlock() if scheduledCompact != 0 { - s.Compact(scheduledCompact) - plog.Printf("resume scheduled compaction at %d", scheduledCompact) + s.compactLockfree(scheduledCompact) + + if s.lg != nil { + s.lg.Info( + "resume scheduled compaction", + zap.String("meta-bucket-name", string(metaBucketName)), + zap.String("meta-bucket-name-key", string(scheduledCompactKeyName)), + zap.Int64("scheduled-compact-revision", scheduledCompact), + ) + } else { + plog.Printf("resume scheduled compaction at %d", scheduledCompact) + } } return nil @@ -395,7 +468,7 @@ type revKeyValue struct { kstr string } -func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) { +func restoreIntoIndex(lg *zap.Logger, idx index) (chan<- revKeyValue, <-chan int64) { rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1) go func() { currentRev := int64(1) @@ -426,12 +499,12 @@ func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) { currentRev = rev.main if ok { if isTombstone(rkv.key) { - ki.tombstone(rev.main, rev.sub) + ki.tombstone(lg, rev.main, rev.sub) continue } - ki.put(rev.main, rev.sub) + ki.put(lg, rev.main, rev.sub) } else if !isTombstone(rkv.key) { - ki.restore(revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version) + ki.restore(lg, revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version) idx.Insert(ki) kiCache[rkv.kstr] = ki } @@ -440,11 +513,15 @@ func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) { return rkvc, revc } -func restoreChunk(kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) { +func restoreChunk(lg *zap.Logger, kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) { for i, key := range keys { rkv := revKeyValue{key: key} if err := rkv.kv.Unmarshal(vals[i]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) + if lg != nil { + lg.Fatal("failed to unmarshal mvccpb.KeyValue", zap.Error(err)) + } else { + plog.Fatalf("cannot unmarshal event: %v", err) + } } rkv.kstr = string(rkv.kv.Key) if isTombstone(key) { @@ -498,9 +575,15 @@ func (s *store) setupMetricsReporter() { reportDbTotalSizeInBytesMu.Lock() reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) } reportDbTotalSizeInBytesMu.Unlock() + reportDbTotalSizeInBytesDebugMu.Lock() + reportDbTotalSizeInBytesDebug = func() float64 { return float64(b.Size()) } + reportDbTotalSizeInBytesDebugMu.Unlock() reportDbTotalSizeInUseInBytesMu.Lock() reportDbTotalSizeInUseInBytes = func() float64 { return float64(b.SizeInUse()) } reportDbTotalSizeInUseInBytesMu.Unlock() + reportDbOpenReadTxNMu.Lock() + reportDbOpenReadTxN = func() float64 { return float64(b.OpenReadTxN()) } + reportDbOpenReadTxNMu.Unlock() reportCurrentRevMu.Lock() reportCurrentRev = func() float64 { s.revMu.RLock() @@ -518,9 +601,17 @@ func (s *store) setupMetricsReporter() { } // appendMarkTombstone appends tombstone mark to normal revision bytes. 
-func appendMarkTombstone(b []byte) []byte { +func appendMarkTombstone(lg *zap.Logger, b []byte) []byte { if len(b) != revBytesLen { - plog.Panicf("cannot append mark to non normal revision bytes") + if lg != nil { + lg.Panic( + "cannot append tombstone mark to non-normal revision bytes", + zap.Int("expected-revision-bytes-size", revBytesLen), + zap.Int("given-revision-bytes-size", len(b)), + ) + } else { + plog.Panicf("cannot append mark to non normal revision bytes") + } } return append(b, markTombstone) } diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go b/vendor/go.etcd.io/etcd/mvcc/kvstore_compaction.go similarity index 67% rename from vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go rename to vendor/go.etcd.io/etcd/mvcc/kvstore_compaction.go index 082a33f0e..2adb49854 100644 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go +++ b/vendor/go.etcd.io/etcd/mvcc/kvstore_compaction.go @@ -17,51 +17,61 @@ package mvcc import ( "encoding/binary" "time" + + "go.uber.org/zap" ) func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool { totalStart := time.Now() - defer func() { dbCompactionTotalDurations.Observe(float64(time.Since(totalStart) / time.Millisecond)) }() + defer func() { dbCompactionTotalMs.Observe(float64(time.Since(totalStart) / time.Millisecond)) }() keyCompactions := 0 defer func() { dbCompactionKeysCounter.Add(float64(keyCompactions)) }() end := make([]byte, 8) binary.BigEndian.PutUint64(end, uint64(compactMainRev+1)) - batchsize := int64(10000) last := make([]byte, 8+1+8) for { var rev revision start := time.Now() + tx := s.b.BatchTx() tx.Lock() - - keys, _ := tx.UnsafeRange(keyBucketName, last, end, batchsize) + keys, _ := tx.UnsafeRange(keyBucketName, last, end, int64(s.cfg.CompactionBatchLimit)) for _, key := range keys { rev = bytesToRev(key) if _, ok := keep[rev]; !ok { tx.UnsafeDelete(keyBucketName, key) - keyCompactions++ } } - if len(keys) < int(batchsize) { + if len(keys) < s.cfg.CompactionBatchLimit { rbytes := make([]byte, 8+1+8) revToBytes(revision{main: compactMainRev}, rbytes) tx.UnsafePut(metaBucketName, finishedCompactKeyName, rbytes) tx.Unlock() - plog.Printf("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart)) + if s.lg != nil { + s.lg.Info( + "finished scheduled compaction", + zap.Int64("compact-revision", compactMainRev), + zap.Duration("took", time.Since(totalStart)), + ) + } else { + plog.Infof("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart)) + } return true } // update last revToBytes(revision{main: rev.main, sub: rev.sub + 1}, last) tx.Unlock() - dbCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond)) + // Immediately commit the compaction deletes instead of letting them accumulate in the write buffer + s.b.ForceCommit() + dbCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond)) select { - case <-time.After(100 * time.Millisecond): + case <-time.After(10 * time.Millisecond): case <-s.stopc: return false } diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go b/vendor/go.etcd.io/etcd/mvcc/kvstore_txn.go similarity index 64% rename from vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go rename to vendor/go.etcd.io/etcd/mvcc/kvstore_txn.go index 8896fb86d..716a6d82f 100644 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go +++ b/vendor/go.etcd.io/etcd/mvcc/kvstore_txn.go @@ -15,9 +15,11 @@ package mvcc import ( - "github.com/coreos/etcd/lease" - 
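The scheduleCompaction rewrite above replaces the hard-coded batch of 10000 keys with the configurable CompactionBatchLimit, force-commits after every batch, and shortens the pause between batches to 10ms. A rough sketch of that loop shape, with a hypothetical deleteBatch callback standing in for the UnsafeDelete/ForceCommit work:

package main

import (
	"fmt"
	"time"
)

// compact deletes keys in fixed-size batches, flushing each batch immediately
// and pausing briefly between batches so normal traffic is not starved.
func compact(keys []string, batchLimit int, deleteBatch func([]string)) {
	for len(keys) > 0 {
		n := batchLimit
		if len(keys) < n {
			n = len(keys)
		}
		deleteBatch(keys[:n]) // delete + force-commit one batch
		keys = keys[n:]
		if len(keys) > 0 {
			time.Sleep(10 * time.Millisecond) // matches the shortened pause above
		}
	}
}

func main() {
	var batches int
	compact(make([]string, 2500), 1000, func(b []string) {
		batches++
		fmt.Printf("committed batch %d of %d keys\n", batches, len(b))
	})
}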
"github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/lease" + "go.etcd.io/etcd/mvcc/backend" + "go.etcd.io/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/pkg/traceutil" + "go.uber.org/zap" ) type storeTxnRead struct { @@ -26,16 +28,20 @@ type storeTxnRead struct { firstRev int64 rev int64 + + trace *traceutil.Trace } -func (s *store) Read() TxnRead { +func (s *store) Read(trace *traceutil.Trace) TxnRead { s.mu.RLock() - tx := s.b.ReadTx() s.revMu.RLock() - tx.Lock() + // backend holds b.readTx.RLock() only when creating the concurrentReadTx. After + // ConcurrentReadTx is created, it will not block write transaction. + tx := s.b.ConcurrentReadTx() + tx.RLock() // RLock is no-op. concurrentReadTx does not need to be locked after it is created. firstRev, rev := s.compactMainRev, s.currentRev s.revMu.RUnlock() - return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev}) + return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev, trace}) } func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev } @@ -46,7 +52,7 @@ func (tr *storeTxnRead) Range(key, end []byte, ro RangeOptions) (r *RangeResult, } func (tr *storeTxnRead) End() { - tr.tx.Unlock() + tr.tx.RUnlock() // RUnlock signals the end of concurrentReadTx. tr.s.mu.RUnlock() } @@ -58,12 +64,12 @@ type storeTxnWrite struct { changes []mvccpb.KeyValue } -func (s *store) Write() TxnWrite { +func (s *store) Write(trace *traceutil.Trace) TxnWrite { s.mu.RLock() tx := s.b.BatchTx() tx.Lock() tw := &storeTxnWrite{ - storeTxnRead: storeTxnRead{s, tx, 0, 0}, + storeTxnRead: storeTxnRead{s, tx, 0, 0, trace}, tx: tx, beginRev: s.currentRev, changes: make([]mvccpb.KeyValue, 0, 4), @@ -83,14 +89,14 @@ func (tw *storeTxnWrite) Range(key, end []byte, ro RangeOptions) (r *RangeResult func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) { if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 { - return n, int64(tw.beginRev + 1) + return n, tw.beginRev + 1 } - return 0, int64(tw.beginRev) + return 0, tw.beginRev } func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 { tw.put(key, value, lease) - return int64(tw.beginRev + 1) + return tw.beginRev + 1 } func (tw *storeTxnWrite) End() { @@ -120,7 +126,8 @@ func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted } - revpairs := tr.s.kvindex.Revisions(key, end, int64(rev)) + revpairs := tr.s.kvindex.Revisions(key, end, rev) + tr.trace.Step("range keys from in-memory index tree") if len(revpairs) == 0 { return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil } @@ -139,12 +146,28 @@ func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions revToBytes(revpair, revBytes) _, vs := tr.tx.UnsafeRange(keyBucketName, revBytes, nil, 0) if len(vs) != 1 { - plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) + if tr.s.lg != nil { + tr.s.lg.Fatal( + "range failed to find revision pair", + zap.Int64("revision-main", revpair.main), + zap.Int64("revision-sub", revpair.sub), + ) + } else { + plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) + } } if err := kvs[i].Unmarshal(vs[0]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) + if tr.s.lg != nil { + tr.s.lg.Fatal( + "failed to unmarshal mvccpb.KeyValue", + zap.Error(err), + ) + } else { + plog.Fatalf("cannot unmarshal event: %v", err) + } } } + tr.trace.Step("range keys from bolt db") return &RangeResult{KVs: kvs, Count: 
len(revpairs), Rev: curRev}, nil } @@ -160,7 +183,7 @@ func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { c = created.main oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)}) } - + tw.trace.Step("get key's previous created_revision and leaseID") ibytes := newRevBytes() idxRev := revision{main: rev, sub: int64(len(tw.changes))} revToBytes(idxRev, ibytes) @@ -177,12 +200,21 @@ func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { d, err := kv.Marshal() if err != nil { - plog.Fatalf("cannot marshal event: %v", err) + if tw.storeTxnRead.s.lg != nil { + tw.storeTxnRead.s.lg.Fatal( + "failed to marshal mvccpb.KeyValue", + zap.Error(err), + ) + } else { + plog.Fatalf("cannot marshal event: %v", err) + } } + tw.trace.Step("marshal mvccpb.KeyValue") tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) tw.s.kvindex.Put(key, idxRev) tw.changes = append(tw.changes, kv) + tw.trace.Step("store kv pair into bolt db") if oldLease != lease.NoLease { if tw.s.le == nil { @@ -190,7 +222,14 @@ func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { } err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) if err != nil { - plog.Errorf("unexpected error from lease detach: %v", err) + if tw.storeTxnRead.s.lg != nil { + tw.storeTxnRead.s.lg.Fatal( + "failed to detach old lease from a key", + zap.Error(err), + ) + } else { + plog.Errorf("unexpected error from lease detach: %v", err) + } } } if leaseID != lease.NoLease { @@ -202,40 +241,62 @@ func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { panic("unexpected error from lease Attach") } } + tw.trace.Step("attach lease to kv pair") } func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 { rrev := tw.beginRev if len(tw.changes) > 0 { - rrev += 1 + rrev++ } - keys, revs := tw.s.kvindex.Range(key, end, rrev) + keys, _ := tw.s.kvindex.Range(key, end, rrev) if len(keys) == 0 { return 0 } - for i, key := range keys { - tw.delete(key, revs[i]) + for _, key := range keys { + tw.delete(key) } return int64(len(keys)) } -func (tw *storeTxnWrite) delete(key []byte, rev revision) { +func (tw *storeTxnWrite) delete(key []byte) { ibytes := newRevBytes() idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))} revToBytes(idxRev, ibytes) - ibytes = appendMarkTombstone(ibytes) + + if tw.storeTxnRead.s != nil && tw.storeTxnRead.s.lg != nil { + ibytes = appendMarkTombstone(tw.storeTxnRead.s.lg, ibytes) + } else { + // TODO: remove this in v3.5 + ibytes = appendMarkTombstone(nil, ibytes) + } kv := mvccpb.KeyValue{Key: key} d, err := kv.Marshal() if err != nil { - plog.Fatalf("cannot marshal event: %v", err) + if tw.storeTxnRead.s.lg != nil { + tw.storeTxnRead.s.lg.Fatal( + "failed to marshal mvccpb.KeyValue", + zap.Error(err), + ) + } else { + plog.Fatalf("cannot marshal event: %v", err) + } } tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) err = tw.s.kvindex.Tombstone(key, idxRev) if err != nil { - plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) + if tw.storeTxnRead.s.lg != nil { + tw.storeTxnRead.s.lg.Fatal( + "failed to tombstone an existing key", + zap.String("key", string(key)), + zap.Error(err), + ) + } else { + plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) + } } tw.changes = append(tw.changes, kv) @@ -245,7 +306,14 @@ func (tw *storeTxnWrite) delete(key []byte, rev revision) { if leaseID != lease.NoLease { err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item}) if err != nil { - plog.Errorf("cannot detach %v", 
err) + if tw.storeTxnRead.s.lg != nil { + tw.storeTxnRead.s.lg.Fatal( + "failed to detach old lease from a key", + zap.Error(err), + ) + } else { + plog.Errorf("cannot detach %v", err) + } } } } diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics.go b/vendor/go.etcd.io/etcd/mvcc/metrics.go similarity index 72% rename from vendor/github.com/coreos/etcd/mvcc/metrics.go rename to vendor/go.etcd.io/etcd/mvcc/metrics.go index b46fd782e..7526ee4b5 100644 --- a/vendor/github.com/coreos/etcd/mvcc/metrics.go +++ b/vendor/go.etcd.io/etcd/mvcc/metrics.go @@ -22,6 +22,13 @@ import ( var ( rangeCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "mvcc", + Name: "range_total", + Help: "Total number of ranges seen by this member.", + }) + rangeCounterDebug = prometheus.NewCounter( prometheus.CounterOpts{ Namespace: "etcd_debugging", Subsystem: "mvcc", @@ -30,6 +37,14 @@ var ( }) putCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "mvcc", + Name: "put_total", + Help: "Total number of puts seen by this member.", + }) + // TODO: remove in 3.5 release + putCounterDebug = prometheus.NewCounter( prometheus.CounterOpts{ Namespace: "etcd_debugging", Subsystem: "mvcc", @@ -38,6 +53,14 @@ var ( }) deleteCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "mvcc", + Name: "delete_total", + Help: "Total number of deletes seen by this member.", + }) + // TODO: remove in 3.5 release + deleteCounterDebug = prometheus.NewCounter( prometheus.CounterOpts{ Namespace: "etcd_debugging", Subsystem: "mvcc", @@ -46,6 +69,13 @@ var ( }) txnCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "mvcc", + Name: "txn_total", + Help: "Total number of txns seen by this member.", + }) + txnCounterDebug = prometheus.NewCounter( prometheus.CounterOpts{ Namespace: "etcd_debugging", Subsystem: "mvcc", @@ -101,33 +131,39 @@ var ( Help: "Total number of pending events to be sent.", }) - indexCompactionPauseDurations = prometheus.NewHistogram( + indexCompactionPauseMs = prometheus.NewHistogram( prometheus.HistogramOpts{ Namespace: "etcd_debugging", Subsystem: "mvcc", Name: "index_compaction_pause_duration_milliseconds", Help: "Bucketed histogram of index compaction pause duration.", - // 0.5ms -> 1second - Buckets: prometheus.ExponentialBuckets(0.5, 2, 12), + + // lowest bucket start of upper bound 0.5 ms with factor 2 + // highest bucket start of 0.5 ms * 2^13 == 4.096 sec + Buckets: prometheus.ExponentialBuckets(0.5, 2, 14), }) - dbCompactionPauseDurations = prometheus.NewHistogram( + dbCompactionPauseMs = prometheus.NewHistogram( prometheus.HistogramOpts{ Namespace: "etcd_debugging", Subsystem: "mvcc", Name: "db_compaction_pause_duration_milliseconds", Help: "Bucketed histogram of db compaction pause duration.", - // 1ms -> 4second + + // lowest bucket start of upper bound 1 ms with factor 2 + // highest bucket start of 1 ms * 2^12 == 4.096 sec Buckets: prometheus.ExponentialBuckets(1, 2, 13), }) - dbCompactionTotalDurations = prometheus.NewHistogram( + dbCompactionTotalMs = prometheus.NewHistogram( prometheus.HistogramOpts{ Namespace: "etcd_debugging", Subsystem: "mvcc", Name: "db_compaction_total_duration_milliseconds", Help: "Bucketed histogram of db compaction total duration.", - // 100ms -> 800second + + // lowest bucket start of upper bound 100 ms with factor 2 + // highest bucket start of 100 ms * 2^13 == 8.192 sec Buckets: prometheus.ExponentialBuckets(100, 2, 14), 
}) @@ -139,11 +175,11 @@ var ( Help: "Total number of db keys compacted.", }) - dbTotalSizeDebugging = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Namespace: "etcd_debugging", + dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: "etcd", Subsystem: "mvcc", Name: "db_total_size_in_bytes", - Help: "Total size of the underlying database physically allocated in bytes. Use etcd_mvcc_db_total_size_in_bytes", + Help: "Total size of the underlying database physically allocated in bytes.", }, func() float64 { reportDbTotalSizeInBytesMu.RLock() @@ -151,21 +187,26 @@ var ( return reportDbTotalSizeInBytes() }, ) - dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ - Namespace: "etcd", + // overridden by mvcc initialization + reportDbTotalSizeInBytesMu sync.RWMutex + reportDbTotalSizeInBytes = func() float64 { return 0 } + + // TODO: remove this in v3.5 + dbTotalSizeDebug = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: "etcd_debugging", Subsystem: "mvcc", Name: "db_total_size_in_bytes", Help: "Total size of the underlying database physically allocated in bytes.", }, func() float64 { - reportDbTotalSizeInBytesMu.RLock() - defer reportDbTotalSizeInBytesMu.RUnlock() - return reportDbTotalSizeInBytes() + reportDbTotalSizeInBytesDebugMu.RLock() + defer reportDbTotalSizeInBytesDebugMu.RUnlock() + return reportDbTotalSizeInBytesDebug() }, ) // overridden by mvcc initialization - reportDbTotalSizeInBytesMu sync.RWMutex - reportDbTotalSizeInBytes = func() float64 { return 0 } + reportDbTotalSizeInBytesDebugMu sync.RWMutex + reportDbTotalSizeInBytesDebug = func() float64 { return 0 } dbTotalSizeInUse = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Namespace: "etcd", @@ -181,9 +222,26 @@ var ( ) // overridden by mvcc initialization reportDbTotalSizeInUseInBytesMu sync.RWMutex - reportDbTotalSizeInUseInBytes func() float64 = func() float64 { return 0 } + reportDbTotalSizeInUseInBytes = func() float64 { return 0 } + + dbOpenReadTxN = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "mvcc", + Name: "db_open_read_transactions", + Help: "The number of currently open read transactions", + }, - hashDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + func() float64 { + reportDbOpenReadTxNMu.RLock() + defer reportDbOpenReadTxNMu.RUnlock() + return reportDbOpenReadTxN() + }, + ) + // overridden by mvcc initialization + reportDbOpenReadTxNMu sync.RWMutex + reportDbOpenReadTxN = func() float64 { return 0 } + + hashSec = prometheus.NewHistogram(prometheus.HistogramOpts{ Namespace: "etcd", Subsystem: "mvcc", Name: "hash_duration_seconds", @@ -195,7 +253,7 @@ var ( Buckets: prometheus.ExponentialBuckets(.01, 2, 15), }) - hashRevDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + hashRevSec = prometheus.NewHistogram(prometheus.HistogramOpts{ Namespace: "etcd", Subsystem: "mvcc", Name: "hash_rev_duration_seconds", @@ -238,39 +296,35 @@ var ( // overridden by mvcc initialization reportCompactRevMu sync.RWMutex reportCompactRev = func() float64 { return 0 } - - totalPutSizeGauge = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "total_put_size_in_bytes", - Help: "The total size of put kv pairs seen by this member.", - }) ) func init() { prometheus.MustRegister(rangeCounter) + prometheus.MustRegister(rangeCounterDebug) prometheus.MustRegister(putCounter) + prometheus.MustRegister(putCounterDebug) prometheus.MustRegister(deleteCounter) + 
prometheus.MustRegister(deleteCounterDebug) prometheus.MustRegister(txnCounter) + prometheus.MustRegister(txnCounterDebug) prometheus.MustRegister(keysGauge) prometheus.MustRegister(watchStreamGauge) prometheus.MustRegister(watcherGauge) prometheus.MustRegister(slowWatcherGauge) prometheus.MustRegister(totalEventsCounter) prometheus.MustRegister(pendingEventsGauge) - prometheus.MustRegister(indexCompactionPauseDurations) - prometheus.MustRegister(dbCompactionPauseDurations) - prometheus.MustRegister(dbCompactionTotalDurations) + prometheus.MustRegister(indexCompactionPauseMs) + prometheus.MustRegister(dbCompactionPauseMs) + prometheus.MustRegister(dbCompactionTotalMs) prometheus.MustRegister(dbCompactionKeysCounter) - prometheus.MustRegister(dbTotalSizeDebugging) prometheus.MustRegister(dbTotalSize) + prometheus.MustRegister(dbTotalSizeDebug) prometheus.MustRegister(dbTotalSizeInUse) - prometheus.MustRegister(hashDurations) - prometheus.MustRegister(hashRevDurations) + prometheus.MustRegister(dbOpenReadTxN) + prometheus.MustRegister(hashSec) + prometheus.MustRegister(hashRevSec) prometheus.MustRegister(currentRev) prometheus.MustRegister(compactRev) - prometheus.MustRegister(totalPutSizeGauge) } // ReportEventReceived reports that an event is received. diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go b/vendor/go.etcd.io/etcd/mvcc/metrics_txn.go similarity index 71% rename from vendor/github.com/coreos/etcd/mvcc/metrics_txn.go rename to vendor/go.etcd.io/etcd/mvcc/metrics_txn.go index a305aa531..64b629c78 100644 --- a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go +++ b/vendor/go.etcd.io/etcd/mvcc/metrics_txn.go @@ -14,24 +14,21 @@ package mvcc -import ( - "github.com/coreos/etcd/lease" -) +import "go.etcd.io/etcd/lease" type metricsTxnWrite struct { TxnWrite ranges uint puts uint deletes uint - putSize int64 } func newMetricsTxnRead(tr TxnRead) TxnRead { - return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0, 0} + return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0} } func newMetricsTxnWrite(tw TxnWrite) TxnWrite { - return &metricsTxnWrite{tw, 0, 0, 0, 0} + return &metricsTxnWrite{tw, 0, 0, 0} } func (tw *metricsTxnWrite) Range(key, end []byte, ro RangeOptions) (*RangeResult, error) { @@ -46,8 +43,6 @@ func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) { func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { tw.puts++ - size := int64(len(key) + len(value)) - tw.putSize += size return tw.TxnWrite.Put(key, value, lease) } @@ -55,9 +50,18 @@ func (tw *metricsTxnWrite) End() { defer tw.TxnWrite.End() if sum := tw.ranges + tw.puts + tw.deletes; sum > 1 { txnCounter.Inc() + txnCounterDebug.Inc() // TODO: remove in 3.5 release } - rangeCounter.Add(float64(tw.ranges)) - putCounter.Add(float64(tw.puts)) - totalPutSizeGauge.Add(float64(tw.putSize)) - deleteCounter.Add(float64(tw.deletes)) + + ranges := float64(tw.ranges) + rangeCounter.Add(ranges) + rangeCounterDebug.Add(ranges) // TODO: remove in 3.5 release + + puts := float64(tw.puts) + putCounter.Add(puts) + putCounterDebug.Add(puts) // TODO: remove in 3.5 release + + deletes := float64(tw.deletes) + deleteCounter.Add(deletes) + deleteCounterDebug.Add(deletes) // TODO: remove in 3.5 release } diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go similarity index 75% rename from vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go rename to vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go index 4679da505..23fe337a5 100644 --- 
a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go +++ b/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go @@ -1,16 +1,28 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: kv.proto +/* + Package mvccpb is a generated protocol buffer package. + + It is generated from these files: + kv.proto + + It has these top-level messages: + KeyValue + Event +*/ package mvccpb import ( - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -35,7 +47,6 @@ var Event_EventType_name = map[int32]string{ 0: "PUT", 1: "DELETE", } - var Event_EventType_value = map[string]int32{ "PUT": 0, "DELETE": 1, @@ -44,10 +55,7 @@ var Event_EventType_value = map[string]int32{ func (x Event_EventType) String() string { return proto.EnumName(Event_EventType_name, int32(x)) } - -func (Event_EventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_2216fe83c9c12408, []int{1, 0} -} +func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} } type KeyValue struct { // key is the key in bytes. An empty key is not allowed. @@ -65,44 +73,13 @@ type KeyValue struct { // lease is the ID of the lease that attached to key. // When the attached lease expires, the key will be deleted. // If lease is 0, then no lease is attached to the key. - Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_2216fe83c9c12408, []int{0} -} -func (m *KeyValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KeyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyValue.Merge(m, src) -} -func (m *KeyValue) XXX_Size() int { - return m.Size() -} -func (m *KeyValue) XXX_DiscardUnknown() { - xxx_messageInfo_KeyValue.DiscardUnknown(m) + Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"` } -var xxx_messageInfo_KeyValue proto.InternalMessageInfo +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (m *KeyValue) String() string { return proto.CompactTextString(m) } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} } type Event struct { // type is the kind of event. If type is a PUT, it indicates @@ -114,82 +91,25 @@ type Event struct { // A PUT event with kv.Version=1 indicates the creation of a key. // A DELETE/EXPIRE event contains the deleted key with // its modification revision set to the revision of deletion. - Kv *KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"` + Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"` // prev_kv holds the key-value pair before the event happens. 
- PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_2216fe83c9c12408, []int{1} -} -func (m *Event) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Event.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Event.Merge(m, src) -} -func (m *Event) XXX_Size() int { - return m.Size() -} -func (m *Event) XXX_DiscardUnknown() { - xxx_messageInfo_Event.DiscardUnknown(m) + PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"` } -var xxx_messageInfo_Event proto.InternalMessageInfo +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} } func init() { - proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value) proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue") proto.RegisterType((*Event)(nil), "mvccpb.Event") + proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value) } - -func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) } - -var fileDescriptor_2216fe83c9c12408 = []byte{ - // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, - 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, - 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, - 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, - 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, - 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, - 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, - 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, - 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, - 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, - 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, - 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, - 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34, - 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, - 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, - 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, - 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 
0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, - 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, - 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, -} - func (m *KeyValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -197,60 +117,49 @@ func (m *KeyValue) Marshal() (dAtA []byte, err error) { } func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) } - if m.Lease != 0 { - i = encodeVarintKv(dAtA, i, uint64(m.Lease)) - i-- - dAtA[i] = 0x30 + if m.CreateRevision != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision)) } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x2a + if m.ModRevision != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.ModRevision)) } if m.Version != 0 { - i = encodeVarintKv(dAtA, i, uint64(m.Version)) - i-- dAtA[i] = 0x20 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Version)) } - if m.ModRevision != 0 { - i = encodeVarintKv(dAtA, i, uint64(m.ModRevision)) - i-- - dAtA[i] = 0x18 - } - if m.CreateRevision != 0 { - i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision)) - i-- - dAtA[i] = 0x10 + if len(m.Value) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintKv(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintKv(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa + if m.Lease != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Lease)) } - return len(dAtA) - i, nil + return i, nil } func (m *Event) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -258,66 +167,48 @@ func (m *Event) Marshal() (dAtA []byte, err error) { } func (m *Event) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.PrevKv != nil { - { - size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintKv(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Type)) } if m.Kv != nil { - { - size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintKv(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintKv(dAtA, i, uint64(m.Kv.Size())) + n1, err := m.Kv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 } - if m.Type != 0 { - i = encodeVarintKv(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 
+ if m.PrevKv != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size())) + n2, err := m.PrevKv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 } - return len(dAtA) - i, nil + return i, nil } func encodeVarintKv(dAtA []byte, offset int, v uint64) int { - offset -= sovKv(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *KeyValue) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Key) @@ -340,16 +231,10 @@ func (m *KeyValue) Size() (n int) { if m.Lease != 0 { n += 1 + sovKv(uint64(m.Lease)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func (m *Event) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Type != 0 { @@ -363,14 +248,18 @@ func (m *Event) Size() (n int) { l = m.PrevKv.Size() n += 1 + l + sovKv(uint64(l)) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } return n } func sovKv(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozKv(x uint64) (n int) { return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -390,7 +279,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -418,7 +307,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -427,9 +316,6 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthKv } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthKv - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -452,7 +338,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CreateRevision |= int64(b&0x7F) << shift + m.CreateRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -471,7 +357,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ModRevision |= int64(b&0x7F) << shift + m.ModRevision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -490,7 +376,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Version |= int64(b&0x7F) << shift + m.Version |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -509,7 +395,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -518,9 +404,6 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthKv } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthKv - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -543,7 +426,7 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lease |= int64(b&0x7F) << shift + m.Lease |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -557,13 +440,9 @@ func (m *KeyValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthKv } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthKv - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -588,7 +467,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -616,7 +495,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= Event_EventType(b&0x7F) << shift + m.Type |= (Event_EventType(b) & 0x7F) << shift if b < 0x80 { break } @@ -635,7 +514,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -644,9 +523,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthKv } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthKv - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -671,7 +547,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -680,9 +556,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthKv } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthKv - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -702,13 +575,9 @@ func (m *Event) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthKv } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthKv - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -772,11 +641,8 @@ func skipKv(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthKv - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthKv } return iNdEx, nil @@ -807,9 +673,6 @@ func skipKv(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthKv - } } return iNdEx, nil case 4: @@ -828,3 +691,28 @@ var ( ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowKv = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) } + +var fileDescriptorKv = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, + 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, + 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, + 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, + 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3, + 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae, + 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7, + 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3, + 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d, + 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b, + 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23, + 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36, + 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 
0x8e, 0x29, 0x34, + 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad, + 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30, + 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a, + 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94, + 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff, + 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto b/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto similarity index 100% rename from vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto rename to vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto diff --git a/vendor/github.com/coreos/etcd/mvcc/revision.go b/vendor/go.etcd.io/etcd/mvcc/revision.go similarity index 96% rename from vendor/github.com/coreos/etcd/mvcc/revision.go rename to vendor/go.etcd.io/etcd/mvcc/revision.go index 5fa35a1c2..d6213866f 100644 --- a/vendor/github.com/coreos/etcd/mvcc/revision.go +++ b/vendor/go.etcd.io/etcd/mvcc/revision.go @@ -27,7 +27,7 @@ type revision struct { // main is the main revision of a set of changes that happen atomically. main int64 - // sub is the the sub revision of a change in a set of changes that happen + // sub is the sub revision of a change in a set of changes that happen // atomically. Each change has different increasing sub revision in that // set. sub int64 diff --git a/vendor/github.com/coreos/etcd/mvcc/util.go b/vendor/go.etcd.io/etcd/mvcc/util.go similarity index 91% rename from vendor/github.com/coreos/etcd/mvcc/util.go rename to vendor/go.etcd.io/etcd/mvcc/util.go index 8a0df0bfc..032621aed 100644 --- a/vendor/github.com/coreos/etcd/mvcc/util.go +++ b/vendor/go.etcd.io/etcd/mvcc/util.go @@ -16,9 +16,10 @@ package mvcc import ( "encoding/binary" + "fmt" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/mvcc/backend" + "go.etcd.io/etcd/mvcc/mvccpb" ) func UpdateConsistentIndex(be backend.Backend, index uint64) { @@ -47,7 +48,7 @@ func WriteKV(be backend.Backend, kv mvccpb.KeyValue) { d, err := kv.Marshal() if err != nil { - plog.Fatalf("cannot marshal event: %v", err) + panic(fmt.Errorf("cannot marshal event: %v", err)) } be.BatchTx().Lock() diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go b/vendor/go.etcd.io/etcd/mvcc/watchable_store.go similarity index 90% rename from vendor/github.com/coreos/etcd/mvcc/watchable_store.go rename to vendor/go.etcd.io/etcd/mvcc/watchable_store.go index 8f12bddba..a51e5aa52 100644 --- a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go +++ b/vendor/go.etcd.io/etcd/mvcc/watchable_store.go @@ -18,10 +18,11 @@ import ( "sync" "time" - "github.com/coreos/etcd/auth" - "github.com/coreos/etcd/lease" - "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/lease" + "go.etcd.io/etcd/mvcc/backend" + "go.etcd.io/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/pkg/traceutil" + "go.uber.org/zap" ) // non-const so modifiable by tests @@ -68,13 +69,13 @@ type watchableStore struct { // cancel operations. 
type cancelFunc func() -func New(b backend.Backend, le lease.Lessor, as auth.AuthStore, ig ConsistentIndexGetter) ConsistentWatchableKV { - return newWatchableStore(b, le, as, ig) +func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter, cfg StoreConfig) ConsistentWatchableKV { + return newWatchableStore(lg, b, le, ig, cfg) } -func newWatchableStore(b backend.Backend, le lease.Lessor, as auth.AuthStore, ig ConsistentIndexGetter) *watchableStore { +func newWatchableStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter, cfg StoreConfig) *watchableStore { s := &watchableStore{ - store: NewStore(b, le, ig), + store: NewStore(lg, b, le, ig, cfg), victimc: make(chan struct{}, 1), unsynced: newWatcherGroup(), synced: newWatcherGroup(), @@ -84,11 +85,7 @@ func newWatchableStore(b backend.Backend, le lease.Lessor, as auth.AuthStore, ig s.store.WriteView = &writeView{s} if s.le != nil { // use this store as the deleter so revokes trigger watch events - s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) - } - if as != nil { - // TODO: encapsulating consistentindex into a separate package - as.SetConsistentIndexSyncer(s.store.saveIndex) + s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) }) } s.wg.Add(2) go s.syncWatchersLoop() @@ -350,10 +347,16 @@ func (s *watchableStore) syncWatchers() int { // UnsafeRange returns keys and values. And in boltdb, keys are revisions. // values are actual key-value pairs in backend. tx := s.store.b.ReadTx() - tx.Lock() + tx.RLock() revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0) - evs := kvsToEvents(wg, revs, vs) - tx.Unlock() + var evs []mvccpb.Event + if s.store != nil && s.store.lg != nil { + evs = kvsToEvents(s.store.lg, wg, revs, vs) + } else { + // TODO: remove this in v3.5 + evs = kvsToEvents(nil, wg, revs, vs) + } + tx.RUnlock() var victims watcherBatch wb := newWatcherBatch(wg, evs) @@ -404,11 +407,15 @@ func (s *watchableStore) syncWatchers() int { } // kvsToEvents gets all events for the watchers from all key-value pairs -func kvsToEvents(wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) { +func kvsToEvents(lg *zap.Logger, wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) { for i, v := range vals { var kv mvccpb.KeyValue if err := kv.Unmarshal(v); err != nil { - plog.Panicf("cannot unmarshal event: %v", err) + if lg != nil { + lg.Panic("failed to unmarshal mvccpb.KeyValue", zap.Error(err)) + } else { + plog.Panicf("cannot unmarshal event: %v", err) + } } if !wg.contains(string(kv.Key)) { @@ -432,7 +439,14 @@ func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) { var victim watcherBatch for w, eb := range newWatcherBatch(&s.synced, evs) { if eb.revs != 1 { - plog.Panicf("unexpected multiple revisions in notification") + if s.store != nil && s.store.lg != nil { + s.store.lg.Panic( + "unexpected multiple revisions in watch notification", + zap.Int("number-of-revisions", eb.revs), + ) + } else { + plog.Panicf("unexpected multiple revisions in notification") + } } if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) { pendingEventsGauge.Add(float64(len(eb.evs))) diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go b/vendor/go.etcd.io/etcd/mvcc/watchable_store_txn.go similarity index 87% rename from vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go rename to vendor/go.etcd.io/etcd/mvcc/watchable_store_txn.go index 5c5bfda13..70b12983d 100644 --- 
a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go +++ b/vendor/go.etcd.io/etcd/mvcc/watchable_store_txn.go @@ -15,7 +15,8 @@ package mvcc import ( - "github.com/coreos/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/pkg/traceutil" ) func (tw *watchableStoreTxnWrite) End() { @@ -50,4 +51,6 @@ type watchableStoreTxnWrite struct { s *watchableStore } -func (s *watchableStore) Write() TxnWrite { return &watchableStoreTxnWrite{s.store.Write(), s} } +func (s *watchableStore) Write(trace *traceutil.Trace) TxnWrite { + return &watchableStoreTxnWrite{s.store.Write(trace), s} +} diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher.go b/vendor/go.etcd.io/etcd/mvcc/watcher.go similarity index 79% rename from vendor/github.com/coreos/etcd/mvcc/watcher.go rename to vendor/go.etcd.io/etcd/mvcc/watcher.go index bc0c6322f..2846d62a5 100644 --- a/vendor/github.com/coreos/etcd/mvcc/watcher.go +++ b/vendor/go.etcd.io/etcd/mvcc/watcher.go @@ -19,11 +19,17 @@ import ( "errors" "sync" - "github.com/coreos/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/mvcc/mvccpb" ) +// AutoWatchID is the watcher ID passed in WatchStream.Watch when no +// user-provided ID is available. If pass, an ID will automatically be assigned. +const AutoWatchID WatchID = 0 + var ( - ErrWatcherNotExist = errors.New("mvcc: watcher does not exist") + ErrWatcherNotExist = errors.New("mvcc: watcher does not exist") + ErrEmptyWatcherRange = errors.New("mvcc: watcher range is empty") + ErrWatcherDuplicateID = errors.New("mvcc: duplicate watch ID provided on the WatchStream") ) type WatchID int64 @@ -36,12 +42,13 @@ type WatchStream interface { // happened on the given key or range [key, end) from the given startRev. // // The whole event history can be watched unless compacted. - // If `startRev` <=0, watch observes events after currentRev. + // If "startRev" <=0, watch observes events after currentRev. // - // The returned `id` is the ID of this watcher. It appears as WatchID + // The returned "id" is the ID of this watcher. It appears as WatchID // in events that are sent to the created watcher through stream channel. - // - Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID + // The watch ID is used when it's not equal to AutoWatchID. Otherwise, + // an auto-generated watch ID is returned. + Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error) // Chan returns a chan. All watch response will be sent to the returned chan. Chan() <-chan WatchResponse @@ -98,28 +105,34 @@ type watchStream struct { } // Watch creates a new watcher in the stream and returns its WatchID. -// TODO: return error if ws is closed? -func (ws *watchStream) Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID { +func (ws *watchStream) Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error) { // prevent wrong range where key >= end lexicographically // watch request with 'WithFromKey' has empty-byte range end if len(end) != 0 && bytes.Compare(key, end) != -1 { - return -1 + return -1, ErrEmptyWatcherRange } ws.mu.Lock() defer ws.mu.Unlock() if ws.closed { - return -1 + return -1, ErrEmptyWatcherRange } - id := ws.nextID - ws.nextID++ + if id == AutoWatchID { + for ws.watchers[ws.nextID] != nil { + ws.nextID++ + } + id = ws.nextID + ws.nextID++ + } else if _, ok := ws.watchers[id]; ok { + return -1, ErrWatcherDuplicateID + } w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...) 
ws.cancels[id] = c ws.watchers[id] = w - return id + return id, nil } func (ws *watchStream) Chan() <-chan WatchResponse { diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go b/vendor/go.etcd.io/etcd/mvcc/watcher_group.go similarity index 98% rename from vendor/github.com/coreos/etcd/mvcc/watcher_group.go rename to vendor/go.etcd.io/etcd/mvcc/watcher_group.go index b569d04b3..151f0de71 100644 --- a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go +++ b/vendor/go.etcd.io/etcd/mvcc/watcher_group.go @@ -18,8 +18,8 @@ import ( "fmt" "math" - "github.com/coreos/etcd/mvcc/mvccpb" - "github.com/coreos/etcd/pkg/adt" + "go.etcd.io/etcd/mvcc/mvccpb" + "go.etcd.io/etcd/pkg/adt" ) var ( diff --git a/vendor/github.com/coreos/etcd/pkg/adt/README.md b/vendor/go.etcd.io/etcd/pkg/adt/README.md similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/adt/README.md rename to vendor/go.etcd.io/etcd/pkg/adt/README.md diff --git a/vendor/github.com/coreos/etcd/pkg/adt/doc.go b/vendor/go.etcd.io/etcd/pkg/adt/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/adt/doc.go rename to vendor/go.etcd.io/etcd/pkg/adt/doc.go diff --git a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go b/vendor/go.etcd.io/etcd/pkg/adt/interval_tree.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go rename to vendor/go.etcd.io/etcd/pkg/adt/interval_tree.go diff --git a/vendor/github.com/coreos/etcd/pkg/contention/contention.go b/vendor/go.etcd.io/etcd/pkg/contention/contention.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/contention/contention.go rename to vendor/go.etcd.io/etcd/pkg/contention/contention.go diff --git a/vendor/github.com/coreos/etcd/pkg/contention/doc.go b/vendor/go.etcd.io/etcd/pkg/contention/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/contention/doc.go rename to vendor/go.etcd.io/etcd/pkg/contention/doc.go diff --git a/vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go b/vendor/go.etcd.io/etcd/pkg/cpuutil/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go rename to vendor/go.etcd.io/etcd/pkg/cpuutil/doc.go diff --git a/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go b/vendor/go.etcd.io/etcd/pkg/cpuutil/endian.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go rename to vendor/go.etcd.io/etcd/pkg/cpuutil/endian.go diff --git a/vendor/github.com/coreos/etcd/pkg/crc/crc.go b/vendor/go.etcd.io/etcd/pkg/crc/crc.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/crc/crc.go rename to vendor/go.etcd.io/etcd/pkg/crc/crc.go diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go b/vendor/go.etcd.io/etcd/pkg/debugutil/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/debugutil/doc.go rename to vendor/go.etcd.io/etcd/pkg/debugutil/doc.go diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go b/vendor/go.etcd.io/etcd/pkg/debugutil/pprof.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go rename to vendor/go.etcd.io/etcd/pkg/debugutil/pprof.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go 
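The Watch signature in the hunk above now takes an explicit watch ID and returns an error; below is a minimal, illustrative sketch of a call site, assuming ws is an existing mvcc.WatchStream and go.etcd.io/etcd/mvcc is imported (the key, start revision, and helper name watchFoo are made up for the example):

	func watchFoo(ws mvcc.WatchStream) (mvcc.WatchID, error) {
		// Passing AutoWatchID lets the stream assign the next free ID;
		// a caller-chosen ID is used verbatim and may fail with ErrWatcherDuplicateID.
		id, err := ws.Watch(mvcc.AutoWatchID, []byte("foo"), nil, 1)
		if err != nil {
			return 0, err
		}
		go func() {
			// Responses for every watcher on this stream share one channel;
			// match them to watchers via resp.WatchID.
			for resp := range ws.Chan() {
				_ = resp.Events
			}
		}()
		return id, nil
	}
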
b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/doc.go b/vendor/go.etcd.io/etcd/pkg/fileutil/doc.go new file mode 100644 index 000000000..69dde5a7d --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fileutil implements utility functions related to files and paths. +package fileutil diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go similarity index 84% rename from vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go index fce5126c6..5d9fb5303 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package fileutil implements utility functions related to files and paths. package fileutil import ( @@ -21,7 +20,6 @@ import ( "io/ioutil" "os" "path/filepath" - "sort" "github.com/coreos/pkg/capnslog" ) @@ -33,9 +31,7 @@ const ( PrivateDirMode = 0700 ) -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil") -) +var plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "pkg/fileutil") // IsDirWriteable checks if dir is writable by writing and removing a file // to dir. It returns nil if dir is writable. @@ -47,21 +43,6 @@ func IsDirWriteable(dir string) error { return os.Remove(f) } -// ReadDir returns the filenames in the given directory in sorted order. -func ReadDir(dirpath string) ([]string, error) { - dir, err := os.Open(dirpath) - if err != nil { - return nil, err - } - defer dir.Close() - names, err := dir.Readdirnames(-1) - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - // TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory // does not exists. TouchDirAll also ensures the given directory is writable. func TouchDirAll(dir string) error { @@ -93,6 +74,7 @@ func CreateDirAll(dir string) error { return err } +// Exist returns true if a file or directory exists. 
func Exist(name string) bool { _, err := os.Stat(name) return err == nil diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/lock.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/lock.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go similarity index 92% rename from vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go index 939fea623..b0abc98ee 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go @@ -17,6 +17,7 @@ package fileutil import ( + "fmt" "io" "os" "syscall" @@ -62,7 +63,7 @@ func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { f, err := os.OpenFile(path, flag, perm) if err != nil { - return nil, err + return nil, fmt.Errorf("ofdTryLockFile failed to open %q (%v)", path, err) } flock := wrlck @@ -83,15 +84,14 @@ func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { f, err := os.OpenFile(path, flag, perm) if err != nil { - return nil, err + return nil, fmt.Errorf("ofdLockFile failed to open %q (%v)", path, err) } flock := wrlck err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock) - if err != nil { f.Close() return nil, err } - return &LockedFile{f}, err + return &LockedFile{f}, nil } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go similarity index 100% rename from 
vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go b/vendor/go.etcd.io/etcd/pkg/fileutil/purge.go similarity index 66% rename from vendor/github.com/coreos/etcd/pkg/fileutil/purge.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/purge.go index c97368069..fda96c371 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/purge.go @@ -20,26 +20,18 @@ import ( "sort" "strings" "time" -) -func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { - return purgeFile(dirname, suffix, max, interval, stop, nil, nil) -} + "go.uber.org/zap" +) -func PurgeFileWithDoneNotify(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) { - doneC := make(chan struct{}) - errC := purgeFile(dirname, suffix, max, interval, stop, nil, doneC) - return doneC, errC +func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { + return purgeFile(lg, dirname, suffix, max, interval, stop, nil) } // purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil. -// if donec is non-nil, the function closes it to notify its exit. 
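PurgeFile, shown above, now takes a *zap.Logger as its first argument; a minimal sketch of a call site, assuming go.etcd.io/etcd/pkg/fileutil, go.uber.org/zap, and time are imported (the directory, suffix, retention count, and interval are illustrative only):

	stop := make(chan struct{})
	// Keep at most 5 files with the "wal" suffix, checking every 30 seconds;
	// the returned channel delivers the first purge error, if any.
	errc := fileutil.PurgeFile(zap.NewExample(), "/var/etcd/wal", "wal", 5, 30*time.Second, stop)
	go func() {
		if err := <-errc; err != nil {
			// the purge loop has exited with an error
		}
	}()
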
-func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error { +func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error { errC := make(chan error, 1) go func() { - if donec != nil { - defer close(donec) - } for { fnames, err := ReadDir(dirname) if err != nil { @@ -65,11 +57,19 @@ func purgeFile(dirname string, suffix string, max uint, interval time.Duration, return } if err = l.Close(); err != nil { - plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err) + if lg != nil { + lg.Warn("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err)) + } else { + plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err) + } errC <- err return } - plog.Infof("purged file %s successfully", f) + if lg != nil { + lg.Info("purged", zap.String("path", f)) + } else { + plog.Infof("purged file %s successfully", f) + } newfnames = newfnames[1:] } if purgec != nil { diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go b/vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go new file mode 100644 index 000000000..2eeaa89bc --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go @@ -0,0 +1,70 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileutil + +import ( + "os" + "path/filepath" + "sort" +) + +// ReadDirOp represents an read-directory operation. +type ReadDirOp struct { + ext string +} + +// ReadDirOption configures archiver operations. +type ReadDirOption func(*ReadDirOp) + +// WithExt filters file names by their extensions. +// (e.g. WithExt(".wal") to list only WAL files) +func WithExt(ext string) ReadDirOption { + return func(op *ReadDirOp) { op.ext = ext } +} + +func (op *ReadDirOp) applyOpts(opts []ReadDirOption) { + for _, opt := range opts { + opt(op) + } +} + +// ReadDir returns the filenames in the given directory in sorted order. 
+func ReadDir(d string, opts ...ReadDirOption) ([]string, error) { + op := &ReadDirOp{} + op.applyOpts(opts) + + dir, err := os.Open(d) + if err != nil { + return nil, err + } + defer dir.Close() + + names, err := dir.Readdirnames(-1) + if err != nil { + return nil, err + } + sort.Strings(names) + + if op.ext != "" { + tss := make([]string, 0) + for _, v := range names { + if filepath.Ext(v) == op.ext { + tss = append(tss, v) + } + } + names = tss + } + return names, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go b/vendor/go.etcd.io/etcd/pkg/fileutil/sync.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/sync.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/sync.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go rename to vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go diff --git a/vendor/go.etcd.io/etcd/pkg/flags/flag.go b/vendor/go.etcd.io/etcd/pkg/flags/flag.go new file mode 100644 index 000000000..215902cf8 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/flags/flag.go @@ -0,0 +1,121 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package flags implements command-line flag parsing. +package flags + +import ( + "flag" + "fmt" + "os" + "strings" + + "github.com/coreos/pkg/capnslog" + "github.com/spf13/pflag" +) + +var plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "pkg/flags") + +// SetFlagsFromEnv parses all registered flags in the given flagset, +// and if they are not already set it attempts to set their values from +// environment variables. Environment variables take the name of the flag but +// are UPPERCASE, have the given prefix and any dashes are replaced by +// underscores - for example: some-flag => ETCD_SOME_FLAG +func SetFlagsFromEnv(prefix string, fs *flag.FlagSet) error { + var err error + alreadySet := make(map[string]bool) + fs.Visit(func(f *flag.Flag) { + alreadySet[FlagToEnv(prefix, f.Name)] = true + }) + usedEnvKey := make(map[string]bool) + fs.VisitAll(func(f *flag.Flag) { + if serr := setFlagFromEnv(fs, prefix, f.Name, usedEnvKey, alreadySet, true); serr != nil { + err = serr + } + }) + verifyEnv(prefix, usedEnvKey, alreadySet) + return err +} + +// SetPflagsFromEnv is similar to SetFlagsFromEnv. However, the accepted flagset type is pflag.FlagSet +// and it does not do any logging. 
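The newly vendored fileutil.ReadDir accepts functional options such as WithExt. A small usage sketch (the directory path and extension are illustrative):

package main

import (
	"fmt"
	"log"

	"go.etcd.io/etcd/pkg/fileutil"
)

func main() {
	// List only *.wal files in a member directory; names come back sorted.
	names, err := fileutil.ReadDir("/var/etcd/data/member/wal", fileutil.WithExt(".wal"))
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range names {
		fmt.Println(n)
	}
}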
+func SetPflagsFromEnv(prefix string, fs *pflag.FlagSet) error { + var err error + alreadySet := make(map[string]bool) + usedEnvKey := make(map[string]bool) + fs.VisitAll(func(f *pflag.Flag) { + if f.Changed { + alreadySet[FlagToEnv(prefix, f.Name)] = true + } + if serr := setFlagFromEnv(fs, prefix, f.Name, usedEnvKey, alreadySet, false); serr != nil { + err = serr + } + }) + verifyEnv(prefix, usedEnvKey, alreadySet) + return err +} + +// FlagToEnv converts flag string to upper-case environment variable key string. +func FlagToEnv(prefix, name string) string { + return prefix + "_" + strings.ToUpper(strings.Replace(name, "-", "_", -1)) +} + +func verifyEnv(prefix string, usedEnvKey, alreadySet map[string]bool) { + for _, env := range os.Environ() { + kv := strings.SplitN(env, "=", 2) + if len(kv) != 2 { + plog.Warningf("found invalid env %s", env) + } + if usedEnvKey[kv[0]] { + continue + } + if alreadySet[kv[0]] { + plog.Fatalf("conflicting environment variable %q is shadowed by corresponding command-line flag (either unset environment variable or disable flag)", kv[0]) + } + if strings.HasPrefix(env, prefix+"_") { + plog.Warningf("unrecognized environment variable %s", env) + } + } +} + +type flagSetter interface { + Set(fk string, fv string) error +} + +func setFlagFromEnv(fs flagSetter, prefix, fname string, usedEnvKey, alreadySet map[string]bool, log bool) error { + key := FlagToEnv(prefix, fname) + if !alreadySet[key] { + val := os.Getenv(key) + if val != "" { + usedEnvKey[key] = true + if serr := fs.Set(fname, val); serr != nil { + return fmt.Errorf("invalid value %q for %s: %v", val, key, serr) + } + if log { + plog.Infof("recognized and used environment variable %s=%s", key, val) + } + } + } + return nil +} + +func IsSet(fs *flag.FlagSet, name string) bool { + set := false + fs.Visit(func(f *flag.Flag) { + if f.Name == name { + set = true + } + }) + return set +} diff --git a/vendor/go.etcd.io/etcd/pkg/flags/ignored.go b/vendor/go.etcd.io/etcd/pkg/flags/ignored.go new file mode 100644 index 000000000..995304900 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/flags/ignored.go @@ -0,0 +1,36 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flags + +// IgnoredFlag encapsulates a flag that may have been previously valid but is +// now ignored. If an IgnoredFlag is set, a warning is printed and +// operation continues. 
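SetPflagsFromEnv maps each unset pflag to PREFIX_FLAG_NAME in the environment (dashes become underscores). A minimal sketch with an illustrative "ETCDBR" prefix and flag name:

package main

import (
	"fmt"
	"log"

	"github.com/spf13/pflag"
	"go.etcd.io/etcd/pkg/flags"
)

func main() {
	fs := pflag.NewFlagSet("backup-restore", pflag.ExitOnError)
	endpoints := fs.String("endpoints", "http://127.0.0.1:2379", "etcd endpoints")

	// ETCDBR_ENDPOINTS=https://etcd:2379 would override the default above,
	// unless --endpoints was already set on the command line.
	if err := flags.SetPflagsFromEnv("ETCDBR", fs); err != nil {
		log.Fatal(err)
	}
	fmt.Println(*endpoints)
}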
+type IgnoredFlag struct { + Name string +} + +// IsBoolFlag is defined to allow the flag to be defined without an argument +func (f *IgnoredFlag) IsBoolFlag() bool { + return true +} + +func (f *IgnoredFlag) Set(s string) error { + plog.Warningf(`flag "-%s" is no longer supported - ignoring.`, f.Name) + return nil +} + +func (f *IgnoredFlag) String() string { + return "" +} diff --git a/vendor/go.etcd.io/etcd/pkg/flags/selective_string.go b/vendor/go.etcd.io/etcd/pkg/flags/selective_string.go new file mode 100644 index 000000000..4b90fbf4b --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/flags/selective_string.go @@ -0,0 +1,114 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flags + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// SelectiveStringValue implements the flag.Value interface. +type SelectiveStringValue struct { + v string + valids map[string]struct{} +} + +// Set verifies the argument to be a valid member of the allowed values +// before setting the underlying flag value. +func (ss *SelectiveStringValue) Set(s string) error { + if _, ok := ss.valids[s]; ok { + ss.v = s + return nil + } + return errors.New("invalid value") +} + +// String returns the set value (if any) of the SelectiveStringValue +func (ss *SelectiveStringValue) String() string { + return ss.v +} + +// Valids returns the list of valid strings. +func (ss *SelectiveStringValue) Valids() []string { + s := make([]string, 0, len(ss.valids)) + for k := range ss.valids { + s = append(s, k) + } + sort.Strings(s) + return s +} + +// NewSelectiveStringValue creates a new string flag +// for which any one of the given strings is a valid value, +// and any other value is an error. +// +// valids[0] will be default value. Caller must be sure +// len(valids) != 0 or it will panic. +func NewSelectiveStringValue(valids ...string) *SelectiveStringValue { + vm := make(map[string]struct{}) + for _, v := range valids { + vm[v] = struct{}{} + } + return &SelectiveStringValue{valids: vm, v: valids[0]} +} + +// SelectiveStringsValue implements the flag.Value interface. +type SelectiveStringsValue struct { + vs []string + valids map[string]struct{} +} + +// Set verifies the argument to be a valid member of the allowed values +// before setting the underlying flag value. +func (ss *SelectiveStringsValue) Set(s string) error { + vs := strings.Split(s, ",") + for i := range vs { + if _, ok := ss.valids[vs[i]]; ok { + ss.vs = append(ss.vs, vs[i]) + } else { + return fmt.Errorf("invalid value %q", vs[i]) + } + } + sort.Strings(ss.vs) + return nil +} + +// String returns the set value (if any) of the SelectiveStringsValue. +func (ss *SelectiveStringsValue) String() string { + return strings.Join(ss.vs, ",") +} + +// Valids returns the list of valid strings. 
+func (ss *SelectiveStringsValue) Valids() []string { + s := make([]string, 0, len(ss.valids)) + for k := range ss.valids { + s = append(s, k) + } + sort.Strings(s) + return s +} + +// NewSelectiveStringsValue creates a new string slice flag +// for which any one of the given strings is a valid value, +// and any other value is an error. +func NewSelectiveStringsValue(valids ...string) *SelectiveStringsValue { + vm := make(map[string]struct{}) + for _, v := range valids { + vm[v] = struct{}{} + } + return &SelectiveStringsValue{valids: vm, vs: []string{}} +} diff --git a/vendor/go.etcd.io/etcd/pkg/flags/strings.go b/vendor/go.etcd.io/etcd/pkg/flags/strings.go new file mode 100644 index 000000000..3e47fb38e --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/flags/strings.go @@ -0,0 +1,52 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flags + +import ( + "flag" + "sort" + "strings" +) + +// StringsValue wraps "sort.StringSlice". +type StringsValue sort.StringSlice + +// Set parses a command line set of strings, separated by comma. +// Implements "flag.Value" interface. +func (ss *StringsValue) Set(s string) error { + *ss = strings.Split(s, ",") + return nil +} + +// String implements "flag.Value" interface. +func (ss *StringsValue) String() string { return strings.Join(*ss, ",") } + +// NewStringsValue implements string slice as "flag.Value" interface. +// Given value is to be separated by comma. +func NewStringsValue(s string) (ss *StringsValue) { + if s == "" { + return &StringsValue{} + } + ss = new(StringsValue) + if err := ss.Set(s); err != nil { + plog.Panicf("new StringsValue should never fail: %v", err) + } + return ss +} + +// StringsFromFlag returns a string slice from the flag. +func StringsFromFlag(fs *flag.FlagSet, flagName string) []string { + return []string(*fs.Lookup(flagName).Value.(*StringsValue)) +} diff --git a/vendor/go.etcd.io/etcd/pkg/flags/unique_strings.go b/vendor/go.etcd.io/etcd/pkg/flags/unique_strings.go new file mode 100644 index 000000000..e220ee07a --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/flags/unique_strings.go @@ -0,0 +1,76 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flags + +import ( + "flag" + "sort" + "strings" +) + +// UniqueStringsValue wraps a list of unique strings. +// The values are set in order. +type UniqueStringsValue struct { + Values map[string]struct{} +} + +// Set parses a command line set of strings, separated by comma. 
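A short sketch of the flag helpers added above: SelectiveStringValue restricts a flag to an allowed set (the first valid is the default), and StringsValue backs a comma-separated list flag. Flag names and values are illustrative:

package main

import (
	"flag"
	"fmt"

	"go.etcd.io/etcd/pkg/flags"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ExitOnError)

	// Only "periodic" or "revision" are accepted; "periodic" is the default.
	compaction := flags.NewSelectiveStringValue("periodic", "revision")
	fs.Var(compaction, "compaction-mode", "auto-compaction mode")

	// Comma-separated list flag backed by flags.StringsValue.
	fs.Var(flags.NewStringsValue("a,b"), "labels", "comma-separated labels")

	_ = fs.Parse([]string{"--compaction-mode=revision"})
	fmt.Println(compaction.String())                // revision
	fmt.Println(flags.StringsFromFlag(fs, "labels")) // [a b]
}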
+// Implements "flag.Value" interface. +// The values are set in order. +func (us *UniqueStringsValue) Set(s string) error { + us.Values = make(map[string]struct{}) + for _, v := range strings.Split(s, ",") { + us.Values[v] = struct{}{} + } + return nil +} + +// String implements "flag.Value" interface. +func (us *UniqueStringsValue) String() string { + return strings.Join(us.stringSlice(), ",") +} + +func (us *UniqueStringsValue) stringSlice() []string { + ss := make([]string, 0, len(us.Values)) + for v := range us.Values { + ss = append(ss, v) + } + sort.Strings(ss) + return ss +} + +// NewUniqueStringsValue implements string slice as "flag.Value" interface. +// Given value is to be separated by comma. +// The values are set in order. +func NewUniqueStringsValue(s string) (us *UniqueStringsValue) { + us = &UniqueStringsValue{Values: make(map[string]struct{})} + if s == "" { + return us + } + if err := us.Set(s); err != nil { + plog.Panicf("new UniqueStringsValue should never fail: %v", err) + } + return us +} + +// UniqueStringsFromFlag returns a string slice from the flag. +func UniqueStringsFromFlag(fs *flag.FlagSet, flagName string) []string { + return (*fs.Lookup(flagName).Value.(*UniqueStringsValue)).stringSlice() +} + +// UniqueStringsMapFromFlag returns a map of strings from the flag. +func UniqueStringsMapFromFlag(fs *flag.FlagSet, flagName string) map[string]struct{} { + return (*fs.Lookup(flagName).Value.(*UniqueStringsValue)).Values +} diff --git a/vendor/go.etcd.io/etcd/pkg/flags/unique_urls.go b/vendor/go.etcd.io/etcd/pkg/flags/unique_urls.go new file mode 100644 index 000000000..9b4178c3a --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/flags/unique_urls.go @@ -0,0 +1,92 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flags + +import ( + "flag" + "net/url" + "sort" + "strings" + + "go.etcd.io/etcd/pkg/types" +) + +// UniqueURLs contains unique URLs +// with non-URL exceptions. +type UniqueURLs struct { + Values map[string]struct{} + uss []url.URL + Allowed map[string]struct{} +} + +// Set parses a command line set of URLs formatted like: +// http://127.0.0.1:2380,http://10.1.1.2:80 +// Implements "flag.Value" interface. +func (us *UniqueURLs) Set(s string) error { + if _, ok := us.Values[s]; ok { + return nil + } + if _, ok := us.Allowed[s]; ok { + us.Values[s] = struct{}{} + return nil + } + ss, err := types.NewURLs(strings.Split(s, ",")) + if err != nil { + return err + } + us.Values = make(map[string]struct{}) + us.uss = make([]url.URL, 0) + for _, v := range ss { + us.Values[v.String()] = struct{}{} + us.uss = append(us.uss, v) + } + return nil +} + +// String implements "flag.Value" interface. +func (us *UniqueURLs) String() string { + all := make([]string, 0, len(us.Values)) + for u := range us.Values { + all = append(all, u) + } + sort.Strings(all) + return strings.Join(all, ",") +} + +// NewUniqueURLsWithExceptions implements "url.URL" slice as flag.Value interface. +// Given value is to be separated by comma. 
+func NewUniqueURLsWithExceptions(s string, exceptions ...string) *UniqueURLs { + us := &UniqueURLs{Values: make(map[string]struct{}), Allowed: make(map[string]struct{})} + for _, v := range exceptions { + us.Allowed[v] = struct{}{} + } + if s == "" { + return us + } + if err := us.Set(s); err != nil { + plog.Panicf("new UniqueURLs should never fail: %v", err) + } + return us +} + +// UniqueURLsFromFlag returns a slice from urls got from the flag. +func UniqueURLsFromFlag(fs *flag.FlagSet, urlsFlagName string) []url.URL { + return (*fs.Lookup(urlsFlagName).Value.(*UniqueURLs)).uss +} + +// UniqueURLsMapFromFlag returns a map from url strings got from the flag. +func UniqueURLsMapFromFlag(fs *flag.FlagSet, urlsFlagName string) map[string]struct{} { + return (*fs.Lookup(urlsFlagName).Value.(*UniqueURLs)).Values +} diff --git a/vendor/go.etcd.io/etcd/pkg/flags/urls.go b/vendor/go.etcd.io/etcd/pkg/flags/urls.go new file mode 100644 index 000000000..ca90970c2 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/flags/urls.go @@ -0,0 +1,65 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package flags + +import ( + "flag" + "net/url" + "strings" + + "go.etcd.io/etcd/pkg/types" +) + +// URLsValue wraps "types.URLs". +type URLsValue types.URLs + +// Set parses a command line set of URLs formatted like: +// http://127.0.0.1:2380,http://10.1.1.2:80 +// Implements "flag.Value" interface. +func (us *URLsValue) Set(s string) error { + ss, err := types.NewURLs(strings.Split(s, ",")) + if err != nil { + return err + } + *us = URLsValue(ss) + return nil +} + +// String implements "flag.Value" interface. +func (us *URLsValue) String() string { + all := make([]string, len(*us)) + for i, u := range *us { + all[i] = u.String() + } + return strings.Join(all, ",") +} + +// NewURLsValue implements "url.URL" slice as flag.Value interface. +// Given value is to be separated by comma. +func NewURLsValue(s string) *URLsValue { + if s == "" { + return &URLsValue{} + } + v := &URLsValue{} + if err := v.Set(s); err != nil { + plog.Panicf("new URLsValue should never fail: %v", err) + } + return v +} + +// URLsFromFlag returns a slices from url got from the flag. +func URLsFromFlag(fs *flag.FlagSet, urlsFlagName string) []url.URL { + return []url.URL(*fs.Lookup(urlsFlagName).Value.(*URLsValue)) +} diff --git a/vendor/go.etcd.io/etcd/pkg/httputil/httputil.go b/vendor/go.etcd.io/etcd/pkg/httputil/httputil.go new file mode 100644 index 000000000..3bf58a3a1 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/httputil/httputil.go @@ -0,0 +1,50 @@ +// Copyright 2018 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httputil provides HTTP utility functions. +package httputil + +import ( + "io" + "io/ioutil" + "net" + "net/http" +) + +// GracefulClose drains http.Response.Body until it hits EOF +// and closes it. This prevents TCP/TLS connections from closing, +// therefore available for reuse. +// Borrowed from golang/net/context/ctxhttp/cancelreq.go. +func GracefulClose(resp *http.Response) { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() +} + +// GetHostname returns the hostname from request Host field. +// It returns empty string, if Host field contains invalid +// value (e.g. "localhost:::" with too many colons). +func GetHostname(req *http.Request) string { + if req == nil { + return "" + } + h, _, err := net.SplitHostPort(req.Host) + if err != nil { + return req.Host + } + return h +} diff --git a/vendor/github.com/coreos/etcd/pkg/idutil/id.go b/vendor/go.etcd.io/etcd/pkg/idutil/id.go similarity index 94% rename from vendor/github.com/coreos/etcd/pkg/idutil/id.go rename to vendor/go.etcd.io/etcd/pkg/idutil/id.go index 2da210626..63a02cd73 100644 --- a/vendor/github.com/coreos/etcd/pkg/idutil/id.go +++ b/vendor/go.etcd.io/etcd/pkg/idutil/id.go @@ -18,7 +18,7 @@ package idutil import ( "math" - "sync" + "sync/atomic" "time" ) @@ -47,7 +47,6 @@ const ( // id generated after restart is unique because etcd throughput is << // 256req/ms(250k reqs/second). type Generator struct { - mu sync.Mutex // high order 2 bytes prefix uint64 // low order 6 bytes @@ -66,10 +65,8 @@ func NewGenerator(memberID uint16, now time.Time) *Generator { // Next generates a id that is unique. func (g *Generator) Next() uint64 { - g.mu.Lock() - defer g.mu.Unlock() - g.suffix++ - id := g.prefix | lowbit(g.suffix, suffixLen) + suffix := atomic.AddUint64(&g.suffix, 1) + id := g.prefix | lowbit(suffix, suffixLen) return id } diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go b/vendor/go.etcd.io/etcd/pkg/ioutil/pagewriter.go similarity index 90% rename from vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go rename to vendor/go.etcd.io/etcd/pkg/ioutil/pagewriter.go index cf9a8dc66..72de1593d 100644 --- a/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go +++ b/vendor/go.etcd.io/etcd/pkg/ioutil/pagewriter.go @@ -95,23 +95,12 @@ func (pw *PageWriter) Write(p []byte) (n int, err error) { return n, werr } -// Flush flushes buffered data. func (pw *PageWriter) Flush() error { - _, err := pw.flush() - return err -} - -// FlushN flushes buffered data and returns the number of written bytes. 
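The httputil helpers vendored above are small but easy to misuse; a sketch of draining a response body for connection reuse and extracting a hostname, assuming an illustrative etcd endpoint is reachable:

package main

import (
	"fmt"
	"log"
	"net/http"

	"go.etcd.io/etcd/pkg/httputil"
)

func main() {
	resp, err := http.Get("http://127.0.0.1:2379/health")
	if err != nil {
		log.Fatal(err)
	}
	// Drain and close the body so the underlying TCP/TLS connection can be reused.
	defer httputil.GracefulClose(resp)
	fmt.Println(resp.Status)

	req, _ := http.NewRequest(http.MethodGet, "http://etcd.example.com:2379/health", nil)
	fmt.Println(httputil.GetHostname(req)) // etcd.example.com
}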
-func (pw *PageWriter) FlushN() (int, error) { - return pw.flush() -} - -func (pw *PageWriter) flush() (int, error) { if pw.bufferedBytes == 0 { - return 0, nil + return nil } - n, err := pw.w.Write(pw.buf[:pw.bufferedBytes]) + _, err := pw.w.Write(pw.buf[:pw.bufferedBytes]) pw.pageOffset = (pw.pageOffset + pw.bufferedBytes) % pw.pageBytes pw.bufferedBytes = 0 - return n, err + return err } diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go b/vendor/go.etcd.io/etcd/pkg/ioutil/readcloser.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go rename to vendor/go.etcd.io/etcd/pkg/ioutil/readcloser.go diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go b/vendor/go.etcd.io/etcd/pkg/ioutil/reader.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/ioutil/reader.go rename to vendor/go.etcd.io/etcd/pkg/ioutil/reader.go diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/util.go b/vendor/go.etcd.io/etcd/pkg/ioutil/util.go similarity index 96% rename from vendor/github.com/coreos/etcd/pkg/ioutil/util.go rename to vendor/go.etcd.io/etcd/pkg/ioutil/util.go index 192ad888c..6a6746e0b 100644 --- a/vendor/github.com/coreos/etcd/pkg/ioutil/util.go +++ b/vendor/go.etcd.io/etcd/pkg/ioutil/util.go @@ -18,7 +18,7 @@ import ( "io" "os" - "github.com/coreos/etcd/pkg/fileutil" + "go.etcd.io/etcd/pkg/fileutil" ) // WriteAndSyncFile behaves just like ioutil.WriteFile in the standard library, diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go rename to vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/doc.go b/vendor/go.etcd.io/etcd/pkg/logutil/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/logutil/doc.go rename to vendor/go.etcd.io/etcd/pkg/logutil/doc.go diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go b/vendor/go.etcd.io/etcd/pkg/logutil/log_level.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/logutil/log_level.go rename to vendor/go.etcd.io/etcd/pkg/logutil/log_level.go diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/logger.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/logutil/logger.go rename to vendor/go.etcd.io/etcd/pkg/logutil/logger.go diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go rename to vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go similarity index 97% rename from vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go rename to vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go index 378bee0e3..729cbdb57 100644 --- a/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go +++ b/vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go @@ -27,7 +27,7 @@ var _ Logger = &packageLogger{} // For example: // // var defaultLogger Logger -// defaultLogger = NewPackageLogger("github.com/coreos/etcd", "snapshot") +// defaultLogger = NewPackageLogger("go.etcd.io/etcd", "snapshot") // func NewPackageLogger(repo, pkg string) Logger { return 
&packageLogger{p: capnslog.NewPackageLogger(repo, pkg)} diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap.go b/vendor/go.etcd.io/etcd/pkg/logutil/zap.go similarity index 88% rename from vendor/github.com/coreos/etcd/pkg/logutil/zap.go rename to vendor/go.etcd.io/etcd/pkg/logutil/zap.go index 2f692233a..8fc6e03b7 100644 --- a/vendor/github.com/coreos/etcd/pkg/logutil/zap.go +++ b/vendor/go.etcd.io/etcd/pkg/logutil/zap.go @@ -53,15 +53,12 @@ var DefaultZapLoggerConfig = zap.Config{ ErrorOutputPaths: []string{"stderr"}, } -// AddOutputPaths adds output paths to the existing output paths, resolving conflicts. -func AddOutputPaths(cfg zap.Config, outputPaths, errorOutputPaths []string) zap.Config { +// MergeOutputPaths merges logging output paths, resolving conflicts. +func MergeOutputPaths(cfg zap.Config) zap.Config { outputs := make(map[string]struct{}) for _, v := range cfg.OutputPaths { outputs[v] = struct{}{} } - for _, v := range outputPaths { - outputs[v] = struct{}{} - } outputSlice := make([]string, 0) if _, ok := outputs["/dev/null"]; ok { // "/dev/null" to discard all @@ -78,9 +75,6 @@ func AddOutputPaths(cfg zap.Config, outputPaths, errorOutputPaths []string) zap. for _, v := range cfg.ErrorOutputPaths { errOutputs[v] = struct{}{} } - for _, v := range errorOutputPaths { - errOutputs[v] = struct{}{} - } errOutputSlice := make([]string, 0) if _, ok := errOutputs["/dev/null"]; ok { // "/dev/null" to discard all diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go b/vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go rename to vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go b/vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go similarity index 98% rename from vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go rename to vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go index b1788bc83..fcd390381 100644 --- a/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go +++ b/vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go @@ -24,7 +24,7 @@ import ( "os" "path/filepath" - "github.com/coreos/etcd/pkg/systemd" + "go.etcd.io/etcd/pkg/systemd" "github.com/coreos/go-systemd/journal" "go.uber.org/zap/zapcore" diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go b/vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go similarity index 98% rename from vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go rename to vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go index 012d688d2..f016b3054 100644 --- a/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go +++ b/vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go @@ -17,7 +17,7 @@ package logutil import ( "errors" - "github.com/coreos/etcd/raft" + "go.etcd.io/etcd/raft" "go.uber.org/zap" "go.uber.org/zap/zapcore" diff --git a/vendor/github.com/coreos/etcd/store/doc.go b/vendor/go.etcd.io/etcd/pkg/netutil/doc.go similarity index 82% rename from vendor/github.com/coreos/etcd/store/doc.go rename to vendor/go.etcd.io/etcd/pkg/netutil/doc.go index 612df9279..5d92d03a6 100644 --- a/vendor/github.com/coreos/etcd/store/doc.go +++ b/vendor/go.etcd.io/etcd/pkg/netutil/doc.go @@ -1,4 +1,4 @@ -// Copyright 2015 The etcd Authors +// Copyright 2018 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -12,5 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package store defines etcd's in-memory key/value store. -package store +// Package netutil implements network-related utility functions. +package netutil diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go b/vendor/go.etcd.io/etcd/pkg/netutil/isolate_linux.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go rename to vendor/go.etcd.io/etcd/pkg/netutil/isolate_linux.go diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/isolate_stub.go b/vendor/go.etcd.io/etcd/pkg/netutil/isolate_stub.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/netutil/isolate_stub.go rename to vendor/go.etcd.io/etcd/pkg/netutil/isolate_stub.go diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go b/vendor/go.etcd.io/etcd/pkg/netutil/netutil.go similarity index 74% rename from vendor/github.com/coreos/etcd/pkg/netutil/netutil.go rename to vendor/go.etcd.io/etcd/pkg/netutil/netutil.go index e3db8c50a..faef6466e 100644 --- a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go +++ b/vendor/go.etcd.io/etcd/pkg/netutil/netutil.go @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package netutil implements network-related utility functions. package netutil import ( @@ -24,17 +23,14 @@ import ( "sort" "time" - "github.com/coreos/etcd/pkg/types" - "github.com/coreos/pkg/capnslog" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/netutil") + "go.etcd.io/etcd/pkg/types" - // indirection for testing - resolveTCPAddr = resolveTCPAddrDefault + "go.uber.org/zap" ) +// indirection for testing +var resolveTCPAddr = resolveTCPAddrDefault + const retryInterval = time.Second // taken from go's ResolveTCP code but uses configurable ctx @@ -67,7 +63,7 @@ func resolveTCPAddrDefault(ctx context.Context, addr string) (*net.TCPAddr, erro // resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr. // resolveTCPAddrs return a new set of url.URLs, in which all DNS hostnames // are resolved. 
-func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error) { +func resolveTCPAddrs(ctx context.Context, lg *zap.Logger, urls [][]url.URL) ([][]url.URL, error) { newurls := make([][]url.URL, 0) for _, us := range urls { nus := make([]url.URL, len(us)) @@ -79,7 +75,7 @@ func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error) nus[i] = *nu } for i, u := range nus { - h, err := resolveURL(ctx, u) + h, err := resolveURL(ctx, lg, u) if err != nil { return nil, fmt.Errorf("failed to resolve %q (%v)", u.String(), err) } @@ -92,14 +88,19 @@ func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error) return newurls, nil } -func resolveURL(ctx context.Context, u url.URL) (string, error) { +func resolveURL(ctx context.Context, lg *zap.Logger, u url.URL) (string, error) { if u.Scheme == "unix" || u.Scheme == "unixs" { // unix sockets don't resolve over TCP return "", nil } host, _, err := net.SplitHostPort(u.Host) if err != nil { - plog.Errorf("could not parse url %s during tcp resolving", u.Host) + lg.Warn( + "failed to parse URL Host while resolving URL", + zap.String("url", u.String()), + zap.String("host", u.Host), + zap.Error(err), + ) return "", err } if host == "localhost" || net.ParseIP(host) != nil { @@ -108,13 +109,32 @@ func resolveURL(ctx context.Context, u url.URL) (string, error) { for ctx.Err() == nil { tcpAddr, err := resolveTCPAddr(ctx, u.Host) if err == nil { - plog.Infof("resolving %s to %s", u.Host, tcpAddr.String()) + lg.Info( + "resolved URL Host", + zap.String("url", u.String()), + zap.String("host", u.Host), + zap.String("resolved-addr", tcpAddr.String()), + ) return tcpAddr.String(), nil } - plog.Warningf("failed resolving host %s (%v); retrying in %v", u.Host, err, retryInterval) + + lg.Warn( + "failed to resolve URL Host", + zap.String("url", u.String()), + zap.String("host", u.Host), + zap.Duration("retry-interval", retryInterval), + zap.Error(err), + ) + select { case <-ctx.Done(): - plog.Errorf("could not resolve host %s", u.Host) + lg.Warn( + "failed to resolve URL Host; returning", + zap.String("url", u.String()), + zap.String("host", u.Host), + zap.Duration("retry-interval", retryInterval), + zap.Error(err), + ) return "", err case <-time.After(retryInterval): } @@ -124,11 +144,11 @@ func resolveURL(ctx context.Context, u url.URL) (string, error) { // urlsEqual checks equality of url.URLS between two arrays. // This check pass even if an URL is in hostname and opposite is in IP address. -func urlsEqual(ctx context.Context, a []url.URL, b []url.URL) (bool, error) { +func urlsEqual(ctx context.Context, lg *zap.Logger, a []url.URL, b []url.URL) (bool, error) { if len(a) != len(b) { return false, fmt.Errorf("len(%q) != len(%q)", urlsToStrings(a), urlsToStrings(b)) } - urls, err := resolveTCPAddrs(ctx, [][]url.URL{a, b}) + urls, err := resolveTCPAddrs(ctx, lg, [][]url.URL{a, b}) if err != nil { return false, err } @@ -150,7 +170,7 @@ func urlsEqual(ctx context.Context, a []url.URL, b []url.URL) (bool, error) { // URLStringsEqual returns "true" if given URLs are valid // and resolved to same IP addresses. Otherwise, return "false" // and error, if any. 
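URLStringsEqual, whose updated signature follows immediately below, now threads a *zap.Logger through the resolution path. A caller-side sketch with illustrative URLs (DNS resolution happens, so a bounded context is used):

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"go.etcd.io/etcd/pkg/netutil"
	"go.uber.org/zap"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	lg, _ := zap.NewProduction()
	// Reports true only when both URL lists resolve to the same TCP addresses.
	equal, err := netutil.URLStringsEqual(ctx, lg,
		[]string{"https://etcd-main-0.example.com:2380"},
		[]string{"https://10.0.0.10:2380"},
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(equal)
}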
-func URLStringsEqual(ctx context.Context, a []string, b []string) (bool, error) { +func URLStringsEqual(ctx context.Context, lg *zap.Logger, a []string, b []string) (bool, error) { if len(a) != len(b) { return false, fmt.Errorf("len(%q) != len(%q)", a, b) } @@ -170,7 +190,13 @@ func URLStringsEqual(ctx context.Context, a []string, b []string) (bool, error) } urlsB = append(urlsB, *u) } - return urlsEqual(ctx, urlsA, urlsB) + if lg == nil { + lg, _ = zap.NewProduction() + if lg == nil { + lg = zap.NewExample() + } + } + return urlsEqual(ctx, lg, urlsA, urlsB) } func urlsToStrings(us []url.URL) []string { diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/routes.go b/vendor/go.etcd.io/etcd/pkg/netutil/routes.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/netutil/routes.go rename to vendor/go.etcd.io/etcd/pkg/netutil/routes.go diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go b/vendor/go.etcd.io/etcd/pkg/netutil/routes_linux.go similarity index 99% rename from vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go rename to vendor/go.etcd.io/etcd/pkg/netutil/routes_linux.go index 797baeb48..5118d3dac 100644 --- a/vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go +++ b/vendor/go.etcd.io/etcd/pkg/netutil/routes_linux.go @@ -24,7 +24,7 @@ import ( "sort" "syscall" - "github.com/coreos/etcd/pkg/cpuutil" + "go.etcd.io/etcd/pkg/cpuutil" ) var errNoDefaultRoute = fmt.Errorf("could not find default route") diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/path.go b/vendor/go.etcd.io/etcd/pkg/pathutil/path.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/pathutil/path.go rename to vendor/go.etcd.io/etcd/pkg/pathutil/path.go diff --git a/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go b/vendor/go.etcd.io/etcd/pkg/pbutil/pbutil.go similarity index 95% rename from vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go rename to vendor/go.etcd.io/etcd/pkg/pbutil/pbutil.go index d70f98dd8..53167ffa5 100644 --- a/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go +++ b/vendor/go.etcd.io/etcd/pkg/pbutil/pbutil.go @@ -18,7 +18,7 @@ package pbutil import "github.com/coreos/pkg/capnslog" var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/pbutil") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "pkg/pbutil") ) type Marshaler interface { diff --git a/vendor/github.com/coreos/etcd/pkg/runtime/fds_linux.go b/vendor/go.etcd.io/etcd/pkg/runtime/fds_linux.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/runtime/fds_linux.go rename to vendor/go.etcd.io/etcd/pkg/runtime/fds_linux.go diff --git a/vendor/github.com/coreos/etcd/pkg/runtime/fds_other.go b/vendor/go.etcd.io/etcd/pkg/runtime/fds_other.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/runtime/fds_other.go rename to vendor/go.etcd.io/etcd/pkg/runtime/fds_other.go diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/doc.go b/vendor/go.etcd.io/etcd/pkg/schedule/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/schedule/doc.go rename to vendor/go.etcd.io/etcd/pkg/schedule/doc.go diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go b/vendor/go.etcd.io/etcd/pkg/schedule/schedule.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/schedule/schedule.go rename to vendor/go.etcd.io/etcd/pkg/schedule/schedule.go diff --git a/vendor/github.com/coreos/etcd/pkg/srv/srv.go b/vendor/go.etcd.io/etcd/pkg/srv/srv.go similarity index 79% rename from 
vendor/github.com/coreos/etcd/pkg/srv/srv.go rename to vendor/go.etcd.io/etcd/pkg/srv/srv.go index 600061ce8..c3560026d 100644 --- a/vendor/github.com/coreos/etcd/pkg/srv/srv.go +++ b/vendor/go.etcd.io/etcd/pkg/srv/srv.go @@ -21,7 +21,7 @@ import ( "net/url" "strings" - "github.com/coreos/etcd/pkg/types" + "go.etcd.io/etcd/pkg/types" ) var ( @@ -32,7 +32,7 @@ var ( // GetCluster gets the cluster information via DNS discovery. // Also sees each entry as a separate instance. -func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) { +func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([]string, error) { tempName := int(0) tcp2ap := make(map[string]url.URL) @@ -83,20 +83,9 @@ func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) return nil } - failCount := 0 - err := updateNodeMap(service+"-ssl", "https") - srvErr := make([]string, 2) + err := updateNodeMap(service, serviceScheme) if err != nil { - srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err) - failCount++ - } - err = updateNodeMap(service, "http") - if err != nil { - srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err) - failCount++ - } - if failCount == 2 { - return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1]) + return nil, fmt.Errorf("error querying DNS SRV records for _%s %s", service, err) } return stringParts, nil } @@ -107,7 +96,7 @@ type SRVClients struct { } // GetClient looks up the client endpoints for a service and domain. -func GetClient(service, domain string) (*SRVClients, error) { +func GetClient(service, domain string, serviceName string) (*SRVClients, error) { var urls []*url.URL var srvs []*net.SRV @@ -126,8 +115,8 @@ func GetClient(service, domain string) (*SRVClients, error) { return nil } - errHTTPS := updateURLs(service+"-ssl", "https") - errHTTP := updateURLs(service, "http") + errHTTPS := updateURLs(GetSRVService(service, serviceName, "https"), "https") + errHTTP := updateURLs(GetSRVService(service, serviceName, "http"), "http") if errHTTPS != nil && errHTTP != nil { return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) @@ -139,3 +128,15 @@ func GetClient(service, domain string) (*SRVClients, error) { } return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil } + +// GetSRVService generates a SRV service including an optional suffix. 
+func GetSRVService(service, serviceName string, scheme string) (SRVService string) { + if scheme == "https" { + service = fmt.Sprintf("%s-ssl", service) + } + + if serviceName != "" { + return fmt.Sprintf("%s-%s", service, serviceName) + } + return service +} diff --git a/vendor/github.com/coreos/etcd/pkg/systemd/doc.go b/vendor/go.etcd.io/etcd/pkg/systemd/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/systemd/doc.go rename to vendor/go.etcd.io/etcd/pkg/systemd/doc.go diff --git a/vendor/github.com/coreos/etcd/pkg/systemd/journal.go b/vendor/go.etcd.io/etcd/pkg/systemd/journal.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/systemd/journal.go rename to vendor/go.etcd.io/etcd/pkg/systemd/journal.go diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/cipher_suites.go b/vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/tlsutil/cipher_suites.go rename to vendor/go.etcd.io/etcd/pkg/tlsutil/cipher_suites.go diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go b/vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go rename to vendor/go.etcd.io/etcd/pkg/tlsutil/doc.go diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go b/vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go similarity index 99% rename from vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go rename to vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go index 79b1f632e..3a5aef089 100644 --- a/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go +++ b/vendor/go.etcd.io/etcd/pkg/tlsutil/tlsutil.go @@ -41,6 +41,7 @@ func NewCertPool(CAFiles []string) (*x509.CertPool, error) { if err != nil { return nil, err } + certPool.AddCert(cert) } } diff --git a/vendor/go.etcd.io/etcd/pkg/traceutil/trace.go b/vendor/go.etcd.io/etcd/pkg/traceutil/trace.go new file mode 100644 index 000000000..2d247dd9a --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/traceutil/trace.go @@ -0,0 +1,172 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package traceutil implements tracing utilities using "context". +package traceutil + +import ( + "bytes" + "context" + "fmt" + "math/rand" + "time" + + "go.uber.org/zap" +) + +const ( + TraceKey = "trace" + StartTimeKey = "startTime" +) + +// Field is a kv pair to record additional details of the trace. 
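GetSRVService simply derives the SRV service name from the scheme and an optional suffix; a sketch with illustrative service and suffix names:

package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/srv"
)

func main() {
	// https scheme appends "-ssl" to the base service name.
	fmt.Println(srv.GetSRVService("etcd-server", "", "https")) // etcd-server-ssl
	// An optional per-cluster suffix is appended after the scheme suffix.
	fmt.Println(srv.GetSRVService("etcd-server", "prod", "https")) // etcd-server-ssl-prod
	// Plain http keeps the base name.
	fmt.Println(srv.GetSRVService("etcd-server", "prod", "http")) // etcd-server-prod
}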
+type Field struct { + Key string + Value interface{} +} + +func (f *Field) format() string { + return fmt.Sprintf("%s:%v; ", f.Key, f.Value) +} + +func writeFields(fields []Field) string { + if len(fields) == 0 { + return "" + } + var buf bytes.Buffer + buf.WriteString("{") + for _, f := range fields { + buf.WriteString(f.format()) + } + buf.WriteString("}") + return buf.String() +} + +type Trace struct { + operation string + lg *zap.Logger + fields []Field + startTime time.Time + steps []step + stepDisabled bool +} + +type step struct { + time time.Time + msg string + fields []Field +} + +func New(op string, lg *zap.Logger, fields ...Field) *Trace { + return &Trace{operation: op, lg: lg, startTime: time.Now(), fields: fields} +} + +// TODO returns a non-nil, empty Trace +func TODO() *Trace { + return &Trace{} +} + +func Get(ctx context.Context) *Trace { + if trace, ok := ctx.Value(TraceKey).(*Trace); ok && trace != nil { + return trace + } + return TODO() +} + +func (t *Trace) GetStartTime() time.Time { + return t.startTime +} + +func (t *Trace) SetStartTime(time time.Time) { + t.startTime = time +} + +func (t *Trace) InsertStep(at int, time time.Time, msg string, fields ...Field) { + newStep := step{time, msg, fields} + if at < len(t.steps) { + t.steps = append(t.steps[:at+1], t.steps[at:]...) + t.steps[at] = newStep + } else { + t.steps = append(t.steps, newStep) + } +} + +// Step adds step to trace +func (t *Trace) Step(msg string, fields ...Field) { + if !t.stepDisabled { + t.steps = append(t.steps, step{time: time.Now(), msg: msg, fields: fields}) + } +} + +// DisableStep sets the flag to prevent the trace from adding steps +func (t *Trace) DisableStep() { + t.stepDisabled = true +} + +// EnableStep re-enable the trace to add steps +func (t *Trace) EnableStep() { + t.stepDisabled = false +} + +func (t *Trace) AddField(fields ...Field) { + for _, f := range fields { + t.fields = append(t.fields, f) + } +} + +// Log dumps all steps in the Trace +func (t *Trace) Log() { + t.LogWithStepThreshold(0) +} + +// LogIfLong dumps logs if the duration is longer than threshold +func (t *Trace) LogIfLong(threshold time.Duration) { + if time.Since(t.startTime) > threshold { + stepThreshold := threshold / time.Duration(len(t.steps)+1) + t.LogWithStepThreshold(stepThreshold) + } +} + +// LogWithStepThreshold only dumps step whose duration is longer than step threshold +func (t *Trace) LogWithStepThreshold(threshold time.Duration) { + msg, fs := t.logInfo(threshold) + if t.lg != nil { + t.lg.Info(msg, fs...) 
+ } +} + +func (t *Trace) logInfo(threshold time.Duration) (string, []zap.Field) { + endTime := time.Now() + totalDuration := endTime.Sub(t.startTime) + traceNum := rand.Int31() + msg := fmt.Sprintf("trace[%d] %s", traceNum, t.operation) + + var steps []string + lastStepTime := t.startTime + for _, step := range t.steps { + stepDuration := step.time.Sub(lastStepTime) + if stepDuration > threshold { + steps = append(steps, fmt.Sprintf("trace[%d] '%v' %s (duration: %v)", + traceNum, step.msg, writeFields(step.fields), stepDuration)) + } + lastStepTime = step.time + } + + fs := []zap.Field{zap.String("detail", writeFields(t.fields)), + zap.Duration("duration", totalDuration), + zap.Time("start", t.startTime), + zap.Time("end", endTime), + zap.Strings("steps", steps)} + return msg, fs +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/doc.go b/vendor/go.etcd.io/etcd/pkg/transport/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/doc.go rename to vendor/go.etcd.io/etcd/pkg/transport/doc.go diff --git a/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go b/vendor/go.etcd.io/etcd/pkg/transport/keepalive_listener.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go rename to vendor/go.etcd.io/etcd/pkg/transport/keepalive_listener.go diff --git a/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go b/vendor/go.etcd.io/etcd/pkg/transport/limit_listen.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go rename to vendor/go.etcd.io/etcd/pkg/transport/limit_listen.go diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/go.etcd.io/etcd/pkg/transport/listener.go similarity index 53% rename from vendor/github.com/coreos/etcd/pkg/transport/listener.go rename to vendor/go.etcd.io/etcd/pkg/transport/listener.go index 60a2f7a33..80e35bda5 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener.go +++ b/vendor/go.etcd.io/etcd/pkg/transport/listener.go @@ -31,14 +31,17 @@ import ( "strings" "time" - "github.com/coreos/etcd/pkg/tlsutil" + "go.etcd.io/etcd/pkg/tlsutil" + + "go.uber.org/zap" ) +// NewListener creates a new listner. func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { if l, err = newListener(addr, scheme); err != nil { return nil, err } - return wrapTLS(addr, scheme, tlsinfo, l) + return wrapTLS(scheme, tlsinfo, l) } func newListener(addr string, scheme string) (net.Listener, error) { @@ -49,7 +52,7 @@ func newListener(addr string, scheme string) (net.Listener, error) { return net.Listen("tcp", addr) } -func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { +func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { if scheme != "https" && scheme != "unixs" { return l, nil } @@ -60,14 +63,12 @@ func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listene } type TLSInfo struct { - CertFile string - KeyFile string - CAFile string // TODO: deprecate this in v4 - TrustedCAFile string - ClientCertAuth bool - CRLFile string - InsecureSkipVerify bool - + CertFile string + KeyFile string + TrustedCAFile string + ClientCertAuth bool + CRLFile string + InsecureSkipVerify bool SkipClientSANVerify bool // ServerName ensures the cert matches the given host in case of discovery / virtual hosting @@ -90,20 +91,33 @@ type TLSInfo struct { // AllowedCN is a CN which must be provided by a client. 
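The traceutil package added above records timed steps and logs them only when an operation is slow. A minimal sketch of the API as it appears in this diff (operation name, fields and thresholds are illustrative):

package main

import (
	"time"

	"go.etcd.io/etcd/pkg/traceutil"
	"go.uber.org/zap"
)

func main() {
	lg, _ := zap.NewProduction()
	trace := traceutil.New("range", lg, traceutil.Field{Key: "key", Value: "foo"})

	time.Sleep(20 * time.Millisecond) // stand-in for real work
	trace.Step("agreement among raft nodes before linearized reading")

	time.Sleep(20 * time.Millisecond)
	trace.Step("range keys from the backend")

	// Emit the trace only when the whole operation exceeded 10ms; steps faster
	// than the derived per-step threshold are suppressed in the output.
	trace.LogIfLong(10 * time.Millisecond)
}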
AllowedCN string + + // AllowedHostname is an IP address or hostname that must match the TLS + // certificate provided by a client. + AllowedHostname string + + // Logger logs TLS errors. + // If nil, all logs are discarded. + Logger *zap.Logger + + // EmptyCN indicates that the cert must have empty CN. + // If true, ClientConfig() will return an error for a cert with non empty CN. + EmptyCN bool } func (info TLSInfo) String() string { - return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile) + return fmt.Sprintf("cert = %s, key = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile) } func (info TLSInfo) Empty() bool { return info.CertFile == "" && info.KeyFile == "" } -func SelfCert(dirpath string, hosts []string, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) { +func SelfCert(lg *zap.Logger, dirpath string, hosts []string, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) { if err = os.MkdirAll(dirpath, 0700); err != nil { return } + info.Logger = lg certPath := filepath.Join(dirpath, "cert.pem") keyPath := filepath.Join(dirpath, "key.pem") @@ -119,6 +133,12 @@ func SelfCert(dirpath string, hosts []string, additionalUsages ...x509.ExtKeyUsa serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot generate random number", + zap.Error(err), + ) + } return } @@ -144,20 +164,40 @@ func SelfCert(dirpath string, hosts []string, additionalUsages ...x509.ExtKeyUsa priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot generate ECDSA key", + zap.Error(err), + ) + } return } derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot generate x509 certificate", + zap.Error(err), + ) + } return } certOut, err := os.Create(certPath) if err != nil { + info.Logger.Warn( + "cannot cert file", + zap.String("path", certPath), + zap.Error(err), + ) return } pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) certOut.Close() + if info.Logger != nil { + info.Logger.Info("created cert file", zap.String("path", certPath)) + } b, err := x509.MarshalECPrivateKey(priv) if err != nil { @@ -165,18 +205,50 @@ func SelfCert(dirpath string, hosts []string, additionalUsages ...x509.ExtKeyUsa } keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "cannot key file", + zap.String("path", keyPath), + zap.Error(err), + ) + } return } pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) keyOut.Close() - - return SelfCert(dirpath, hosts) + if info.Logger != nil { + info.Logger.Info("created key file", zap.String("path", keyPath)) + } + return SelfCert(lg, dirpath, hosts) } +// baseConfig is called on initial TLS handshake start. +// +// Previously, +// 1. Server has non-empty (*tls.Config).Certificates on client hello +// 2. 
Server calls (*tls.Config).GetCertificate iff: +// - Server's (*tls.Config).Certificates is not empty, or +// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName +// +// When (*tls.Config).Certificates is always populated on initial handshake, +// client is expected to provide a valid matching SNI to pass the TLS +// verification, thus trigger server (*tls.Config).GetCertificate to reload +// TLS assets. However, a cert whose SAN field does not include domain names +// but only IP addresses, has empty (*tls.ClientHelloInfo).ServerName, thus +// it was never able to trigger TLS reload on initial handshake; first +// ceritifcate object was being used, never being updated. +// +// Now, (*tls.Config).Certificates is created empty on initial TLS client +// handshake, in order to trigger (*tls.Config).GetCertificate and populate +// rest of the certificates on every new TLS connection, even when client +// SNI is empty (e.g. cert only includes IPs). func (info TLSInfo) baseConfig() (*tls.Config, error) { if info.KeyFile == "" || info.CertFile == "" { return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile) } + if info.Logger == nil { + info.Logger = zap.NewNop() + } _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) if err != nil { @@ -192,26 +264,82 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { cfg.CipherSuites = info.CipherSuites } + // Client certificates may be verified by either an exact match on the CN, + // or a more general check of the CN and SANs. + var verifyCertificate func(*x509.Certificate) bool if info.AllowedCN != "" { + if info.AllowedHostname != "" { + return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname) + } + verifyCertificate = func(cert *x509.Certificate) bool { + return info.AllowedCN == cert.Subject.CommonName + } + } + if info.AllowedHostname != "" { + verifyCertificate = func(cert *x509.Certificate) bool { + return cert.VerifyHostname(info.AllowedHostname) == nil + } + } + if verifyCertificate != nil { cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { for _, chains := range verifiedChains { if len(chains) != 0 { - if info.AllowedCN == chains[0].Subject.CommonName { + if verifyCertificate(chains[0]) { return nil } } } - return errors.New("CommonName authentication failed") + return errors.New("client certificate authentication failed") } } // this only reloads certs when there's a client request // TODO: support server-side refresh (e.g. 
inotify, SIGHUP), caching - cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { - return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) { + cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if os.IsNotExist(err) { + if info.Logger != nil { + info.Logger.Warn( + "failed to find peer cert files", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } else if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "failed to create peer certificate", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } + return cert, err } - cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) { - return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (cert *tls.Certificate, err error) { + cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + if os.IsNotExist(err) { + if info.Logger != nil { + info.Logger.Warn( + "failed to find client cert files", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } else if err != nil { + if info.Logger != nil { + info.Logger.Warn( + "failed to create client certificate", + zap.String("cert-file", info.CertFile), + zap.String("key-file", info.KeyFile), + zap.Error(err), + ) + } + } + return cert, err } return cfg, nil } @@ -219,9 +347,6 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { // cafiles returns a list of CA file paths. func (info TLSInfo) cafiles() []string { cs := make([]string, 0) - if info.CAFile != "" { - cs = append(cs, info.CAFile) - } if info.TrustedCAFile != "" { cs = append(cs, info.TrustedCAFile) } @@ -236,13 +361,13 @@ func (info TLSInfo) ServerConfig() (*tls.Config, error) { } cfg.ClientAuth = tls.NoClientCert - if info.CAFile != "" || info.ClientCertAuth { + if info.TrustedCAFile != "" || info.ClientCertAuth { cfg.ClientAuth = tls.RequireAndVerifyClientCert } - CAFiles := info.cafiles() - if len(CAFiles) > 0 { - cp, err := tlsutil.NewCertPool(CAFiles) + cs := info.cafiles() + if len(cs) > 0 { + cp, err := tlsutil.NewCertPool(cs) if err != nil { return nil, err } @@ -270,9 +395,9 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) { } cfg.InsecureSkipVerify = info.InsecureSkipVerify - CAFiles := info.cafiles() - if len(CAFiles) > 0 { - cfg.RootCAs, err = tlsutil.NewCertPool(CAFiles) + cs := info.cafiles() + if len(cs) > 0 { + cfg.RootCAs, err = tlsutil.NewCertPool(cs) if err != nil { return nil, err } @@ -281,6 +406,28 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) { if info.selfCert { cfg.InsecureSkipVerify = true } + + if info.EmptyCN { + hasNonEmptyCN := false + cn := "" + tlsutil.NewCert(info.CertFile, info.KeyFile, func(certPEMBlock []byte, keyPEMBlock []byte) (tls.Certificate, error) { + var block *pem.Block + block, _ = pem.Decode(certPEMBlock) + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return tls.Certificate{}, err + } + if len(cert.Subject.CommonName) != 0 { + hasNonEmptyCN = true + cn = cert.Subject.CommonName + } + return tls.X509KeyPair(certPEMBlock, keyPEMBlock) + }) + if hasNonEmptyCN { + return nil, fmt.Errorf("cert has non empty Common Name (%s)", cn) + } + } + return cfg, nil } diff --git 
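For orientation, a hedged sketch of how the reworked TLSInfo options above (AllowedHostname, TrustedCAFile in place of the removed CAFile, EmptyCN, the zap Logger) might be consumed by calling code. The file paths, hostname, and logger wiring are illustrative assumptions, not values taken from this repository.

```go
package tlsexample

import (
	"crypto/tls"

	"go.etcd.io/etcd/pkg/transport"
	"go.uber.org/zap"
)

// serverAndClientTLS builds server- and client-side tls.Configs from one
// TLSInfo, mirroring the fields introduced or renamed in the hunk above.
func serverAndClientTLS(lg *zap.Logger) (*tls.Config, *tls.Config, error) {
	info := transport.TLSInfo{
		CertFile:       "/var/etcd/tls/server.crt", // hypothetical paths
		KeyFile:        "/var/etcd/tls/server.key",
		TrustedCAFile:  "/var/etcd/tls/ca.crt", // CAFile was removed in favor of this field
		ClientCertAuth: true,
		// AllowedHostname pins peer certificates by SAN/CN hostname; it is
		// mutually exclusive with AllowedCN, as enforced in baseConfig.
		AllowedHostname: "backup-restore.example.com",
		Logger:          lg,
	}

	// TrustedCAFile/ClientCertAuth being set makes ServerConfig require and
	// verify client certificates.
	srv, err := info.ServerConfig()
	if err != nil {
		return nil, nil, err
	}

	// With EmptyCN set, ClientConfig rejects a certificate whose Common Name
	// is non-empty, per the check added at the end of the hunk.
	info.EmptyCN = true
	cli, err := info.ClientConfig()
	if err != nil {
		return nil, nil, err
	}
	return srv, cli, nil
}
```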
a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go b/vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go rename to vendor/go.etcd.io/etcd/pkg/transport/listener_tls.go diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_conn.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go rename to vendor/go.etcd.io/etcd/pkg/transport/timeout_conn.go diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_dialer.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go rename to vendor/go.etcd.io/etcd/pkg/transport/timeout_dialer.go diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go similarity index 96% rename from vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go rename to vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go index b35e04955..273e99fe0 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go +++ b/vendor/go.etcd.io/etcd/pkg/transport/timeout_listener.go @@ -32,7 +32,7 @@ func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd rdtimeoutd: rdtimeoutd, wtimeoutd: wtimeoutd, } - if ln, err = wrapTLS(addr, scheme, tlsinfo, ln); err != nil { + if ln, err = wrapTLS(scheme, tlsinfo, ln); err != nil { return nil, err } return ln, nil diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go b/vendor/go.etcd.io/etcd/pkg/transport/timeout_transport.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go rename to vendor/go.etcd.io/etcd/pkg/transport/timeout_transport.go diff --git a/vendor/github.com/coreos/etcd/pkg/transport/tls.go b/vendor/go.etcd.io/etcd/pkg/transport/tls.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/tls.go rename to vendor/go.etcd.io/etcd/pkg/transport/tls.go diff --git a/vendor/github.com/coreos/etcd/pkg/transport/transport.go b/vendor/go.etcd.io/etcd/pkg/transport/transport.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/transport.go rename to vendor/go.etcd.io/etcd/pkg/transport/transport.go diff --git a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go b/vendor/go.etcd.io/etcd/pkg/transport/unix_listener.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go rename to vendor/go.etcd.io/etcd/pkg/transport/unix_listener.go diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/go.etcd.io/etcd/pkg/types/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/types/doc.go rename to vendor/go.etcd.io/etcd/pkg/types/doc.go diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/go.etcd.io/etcd/pkg/types/id.go similarity index 98% rename from vendor/github.com/coreos/etcd/pkg/types/id.go rename to vendor/go.etcd.io/etcd/pkg/types/id.go index 1b042d9ce..ae00388dd 100644 --- a/vendor/github.com/coreos/etcd/pkg/types/id.go +++ b/vendor/go.etcd.io/etcd/pkg/types/id.go @@ -14,9 +14,7 @@ package types -import ( - "strconv" -) +import "strconv" // ID represents a generic identifier which is canonically // stored as a uint64 but is typically represented as a diff --git 
a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/go.etcd.io/etcd/pkg/types/set.go similarity index 89% rename from vendor/github.com/coreos/etcd/pkg/types/set.go rename to vendor/go.etcd.io/etcd/pkg/types/set.go index c111b0c0c..e7a3cdc9a 100644 --- a/vendor/github.com/coreos/etcd/pkg/types/set.go +++ b/vendor/go.etcd.io/etcd/pkg/types/set.go @@ -148,6 +148,14 @@ func (ts *tsafeSet) Contains(value string) (exists bool) { func (ts *tsafeSet) Equals(other Set) bool { ts.m.RLock() defer ts.m.RUnlock() + + // If ts and other represent the same variable, avoid calling + // ts.us.Equals(other), to avoid double RLock bug + if _other, ok := other.(*tsafeSet); ok { + if _other == ts { + return true + } + } return ts.us.Equals(other) } @@ -173,6 +181,15 @@ func (ts *tsafeSet) Copy() Set { func (ts *tsafeSet) Sub(other Set) Set { ts.m.RLock() defer ts.m.RUnlock() + + // If ts and other represent the same variable, avoid calling + // ts.us.Sub(other), to avoid double RLock bug + if _other, ok := other.(*tsafeSet); ok { + if _other == ts { + usResult := NewUnsafeSet() + return &tsafeSet{usResult, sync.RWMutex{}} + } + } usResult := ts.us.Sub(other).(*unsafeSet) return &tsafeSet{usResult, sync.RWMutex{}} } diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/go.etcd.io/etcd/pkg/types/slice.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/types/slice.go rename to vendor/go.etcd.io/etcd/pkg/types/slice.go diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/go.etcd.io/etcd/pkg/types/urls.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/types/urls.go rename to vendor/go.etcd.io/etcd/pkg/types/urls.go diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/go.etcd.io/etcd/pkg/types/urlsmap.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/types/urlsmap.go rename to vendor/go.etcd.io/etcd/pkg/types/urlsmap.go diff --git a/vendor/github.com/coreos/etcd/pkg/wait/wait.go b/vendor/go.etcd.io/etcd/pkg/wait/wait.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/wait/wait.go rename to vendor/go.etcd.io/etcd/pkg/wait/wait.go diff --git a/vendor/github.com/coreos/etcd/pkg/wait/wait_time.go b/vendor/go.etcd.io/etcd/pkg/wait/wait_time.go similarity index 100% rename from vendor/github.com/coreos/etcd/pkg/wait/wait_time.go rename to vendor/go.etcd.io/etcd/pkg/wait/wait_time.go diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go similarity index 98% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go rename to vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go index 33dc91f01..59dbe6b0e 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go +++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go @@ -17,7 +17,7 @@ package adapter import ( "context" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" grpc "google.golang.org/grpc" ) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/chan_stream.go similarity index 96% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go rename to vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/chan_stream.go index 82e341193..1af514b1f 100644 --- 
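As a side note on the set.go guards above: calling Equals or Sub with the receiver itself used to re-acquire the same RWMutex read lock through the inner unsafeSet, which can deadlock once a writer is queued between the two acquisitions. A hedged illustration follows; NewThreadsafeSet is assumed to be the package's thread-safe constructor, since it is not part of this hunk.

```go
package setexample

import (
	"fmt"

	"go.etcd.io/etcd/pkg/types"
)

func selfCompare() {
	s := types.NewThreadsafeSet("a", "b")

	// With the new guards, self-comparison short-circuits instead of taking
	// the read lock a second time via the inner unsafe set.
	fmt.Println(s.Equals(s))       // true
	fmt.Println(s.Sub(s).Length()) // 0
}
```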
a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go +++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/chan_stream.go @@ -18,7 +18,9 @@ import ( "context" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) // chanServerStream implements grpc.ServerStream with a chanStream @@ -120,7 +122,7 @@ func (s *chanStream) RecvMsg(m interface{}) error { select { case msg, ok := <-s.recvc: if !ok { - return grpc.ErrClientConnClosing + return status.Error(codes.Canceled, "the client connection is closing") } if err, ok := msg.(error); ok { return err diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go similarity index 86% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go rename to vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go index 6c0340998..73a6fdfcb 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go +++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go @@ -17,7 +17,7 @@ package adapter import ( "context" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "google.golang.org/grpc" ) @@ -43,3 +43,7 @@ func (s *cls2clc) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest, o func (s *cls2clc) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest, opts ...grpc.CallOption) (*pb.MemberRemoveResponse, error) { return s.cls.MemberRemove(ctx, r) } + +func (s *cls2clc) MemberPromote(ctx context.Context, r *pb.MemberPromoteRequest, opts ...grpc.CallOption) (*pb.MemberPromoteResponse, error) { + return s.cls.MemberPromote(ctx, r) +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/doc.go similarity index 100% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go rename to vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/doc.go diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/election_client_adapter.go similarity index 97% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go rename to vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/election_client_adapter.go index a2ebf138f..4722be040 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go +++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/election_client_adapter.go @@ -17,7 +17,7 @@ package adapter import ( "context" - "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + "go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb" "google.golang.org/grpc" ) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go similarity index 96% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go rename to vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go index acd5673d0..b1a782099 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go +++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go @@ -17,7 +17,7 @@ package adapter import ( "context" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb 
"go.etcd.io/etcd/etcdserver/etcdserverpb" grpc "google.golang.org/grpc" ) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go similarity index 98% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go rename to vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go index 84c48b591..a58408f9f 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go +++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go @@ -17,7 +17,7 @@ package adapter import ( "context" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "google.golang.org/grpc" ) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go similarity index 95% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go rename to vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go index 9ce7913a3..65b5641d3 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go +++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go @@ -17,7 +17,7 @@ package adapter import ( "context" - "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" + "go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb" "google.golang.org/grpc" ) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go similarity index 98% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go rename to vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go index 92d9dfd20..4a8781b13 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go +++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go @@ -17,7 +17,7 @@ package adapter import ( "context" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "google.golang.org/grpc" ) diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go similarity index 97% rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go rename to vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go index afe61e837..2f629cc15 100644 --- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go +++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go @@ -18,7 +18,7 @@ import ( "context" "errors" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" "google.golang.org/grpc" ) diff --git a/vendor/go.etcd.io/etcd/raft/OWNERS b/vendor/go.etcd.io/etcd/raft/OWNERS new file mode 100644 index 000000000..ab781066e --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/OWNERS @@ -0,0 +1,19 @@ +approvers: +- heyitsanthony +- philips +- fanminshi +- gyuho +- mitake +- jpbetz +- xiang90 +- bdarnell +reviewers: +- heyitsanthony +- philips +- fanminshi +- gyuho +- mitake +- jpbetz +- xiang90 +- bdarnell +- tschottdorf diff --git a/vendor/github.com/coreos/etcd/raft/README.md 
b/vendor/go.etcd.io/etcd/raft/README.md similarity index 91% rename from vendor/github.com/coreos/etcd/raft/README.md rename to vendor/go.etcd.io/etcd/raft/README.md index fde22b165..83cf04035 100644 --- a/vendor/github.com/coreos/etcd/raft/README.md +++ b/vendor/go.etcd.io/etcd/raft/README.md @@ -3,7 +3,7 @@ Raft is a protocol with which a cluster of nodes can maintain a replicated state machine. The state machine is kept in sync through the use of a replicated log. For more details on Raft, see "In Search of an Understandable Consensus Algorithm" -(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout. +(https://raft.github.io/raft.pdf) by Diego Ongaro and John Ousterhout. This Raft library is stable and feature complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, and more. @@ -13,7 +13,7 @@ To keep the codebase small as well as provide flexibility, the library only impl In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine. The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output. -A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/coreos/etcd/tree/master/contrib/raftexample +A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/etcd-io/etcd/tree/master/contrib/raftexample # Features @@ -21,7 +21,7 @@ This raft implementation is a full feature implementation of Raft protocol. Feat - Leader election - Log replication -- Log compaction +- Log compaction - Membership changes - Leadership transfer extension - Efficient linearizable read-only queries served by both the leader and followers @@ -40,13 +40,14 @@ This raft implementation also includes a few optional enhancements: - Batching log entries to reduce disk synchronized I/O - Writing to leader's disk in parallel - Internal proposal redirection from followers to leader -- Automatic stepping down when the leader loses quorum +- Automatic stepping down when the leader loses quorum +- Protection against unbounded log growth when quorum is lost ## Notable Users - [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database - [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database -- [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store +- [etcd](https://github.com/etcd-io/etcd) A distributed reliable key-value store - [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft - [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale. 
- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks @@ -140,7 +141,7 @@ The total state machine handling loop will look something like this: case <-s.Ticker: n.Tick() case rd := <-s.Node.Ready(): - saveToStorage(rd.State, rd.Entries, rd.Snapshot) + saveToStorage(rd.HardState, rd.Entries, rd.Snapshot) send(rd.Messages) if !raft.IsEmptySnap(rd.Snapshot) { processSnapshot(rd.Snapshot) @@ -166,7 +167,7 @@ To propose changes to the state machine from the node to take application data, n.Propose(ctx, data) ``` -If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. +If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. To add or remove node in a cluster, build ConfChange struct 'cc' and call: @@ -189,7 +190,7 @@ may be reused. Node IDs must be non-zero. ## Implementation notes -This implementation is up to date with the final Raft thesis (https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap. +This implementation is up to date with the final Raft thesis (https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap. To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log. diff --git a/vendor/go.etcd.io/etcd/raft/bootstrap.go b/vendor/go.etcd.io/etcd/raft/bootstrap.go new file mode 100644 index 000000000..bd82b2041 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/bootstrap.go @@ -0,0 +1,80 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
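The README hunk above changes the persisted field to rd.HardState; as a point of reference, here is a hedged, compilable restatement of that handling loop against the vendored go.etcd.io/etcd/raft API. The ticker interval and the send/process helpers are placeholders an application would supply, not code from this repository.

```go
package raftloop

import (
	"context"
	"time"

	"go.etcd.io/etcd/raft"
	pb "go.etcd.io/etcd/raft/raftpb"
)

func run(n raft.Node, storage *raft.MemoryStorage) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			n.Tick()
		case rd := <-n.Ready():
			// Persist HardState, entries, and snapshot before sending messages.
			if !raft.IsEmptyHardState(rd.HardState) {
				storage.SetHardState(rd.HardState)
			}
			storage.Append(rd.Entries)
			if !raft.IsEmptySnap(rd.Snapshot) {
				storage.ApplySnapshot(rd.Snapshot)
			}
			send(rd.Messages) // application-defined transport
			for _, entry := range rd.CommittedEntries {
				process(entry) // application-defined state machine
				if entry.Type == pb.EntryConfChange {
					var cc pb.ConfChange
					cc.Unmarshal(entry.Data)
					n.ApplyConfChange(cc)
				}
			}
			n.Advance()
		}
	}
}

// send and process stand in for the application's transport and state machine.
func send(msgs []pb.Message) {}
func process(entry pb.Entry) {}

func propose(ctx context.Context, n raft.Node, data []byte) error {
	// Proposals can be dropped silently; callers are expected to retry.
	return n.Propose(ctx, data)
}
```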
+ +package raft + +import ( + "errors" + + pb "go.etcd.io/etcd/raft/raftpb" +) + +// Bootstrap initializes the RawNode for first use by appending configuration +// changes for the supplied peers. This method returns an error if the Storage +// is nonempty. +// +// It is recommended that instead of calling this method, applications bootstrap +// their state manually by setting up a Storage that has a first index > 1 and +// which stores the desired ConfState as its InitialState. +func (rn *RawNode) Bootstrap(peers []Peer) error { + if len(peers) == 0 { + return errors.New("must provide at least one peer to Bootstrap") + } + lastIndex, err := rn.raft.raftLog.storage.LastIndex() + if err != nil { + return err + } + + if lastIndex != 0 { + return errors.New("can't bootstrap a nonempty Storage") + } + + // We've faked out initial entries above, but nothing has been + // persisted. Start with an empty HardState (thus the first Ready will + // emit a HardState update for the app to persist). + rn.prevHardSt = emptyState + + // TODO(tbg): remove StartNode and give the application the right tools to + // bootstrap the initial membership in a cleaner way. + rn.raft.becomeFollower(1, None) + ents := make([]pb.Entry, len(peers)) + for i, peer := range peers { + cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} + data, err := cc.Marshal() + if err != nil { + return err + } + + ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data} + } + rn.raft.raftLog.append(ents...) + + // Now apply them, mainly so that the application can call Campaign + // immediately after StartNode in tests. Note that these nodes will + // be added to raft twice: here and when the application's Ready + // loop calls ApplyConfChange. The calls to addNode must come after + // all calls to raftLog.append so progress.next is set after these + // bootstrapping entries (it is an error if we try to append these + // entries since they have already been committed). + // We do not set raftLog.applied so the application will be able + // to observe all conf changes via Ready.CommittedEntries. + // + // TODO(bdarnell): These entries are still unstable; do we need to preserve + // the invariant that committed < unstable? + rn.raft.raftLog.committed = uint64(len(ents)) + for _, peer := range peers { + rn.raft.applyConfChange(pb.ConfChange{NodeID: peer.ID, Type: pb.ConfChangeAddNode}.AsV2()) + } + return nil +} diff --git a/vendor/go.etcd.io/etcd/raft/confchange/confchange.go b/vendor/go.etcd.io/etcd/raft/confchange/confchange.go new file mode 100644 index 000000000..a0dc486df --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/confchange/confchange.go @@ -0,0 +1,425 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package confchange + +import ( + "errors" + "fmt" + "strings" + + "go.etcd.io/etcd/raft/quorum" + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" +) + +// Changer facilitates configuration changes. 
It exposes methods to handle +// simple and joint consensus while performing the proper validation that allows +// refusing invalid configuration changes before they affect the active +// configuration. +type Changer struct { + Tracker tracker.ProgressTracker + LastIndex uint64 +} + +// EnterJoint verifies that the outgoing (=right) majority config of the joint +// config is empty and initializes it with a copy of the incoming (=left) +// majority config. That is, it transitions from +// +// (1 2 3)&&() +// to +// (1 2 3)&&(1 2 3). +// +// The supplied changes are then applied to the incoming majority config, +// resulting in a joint configuration that in terms of the Raft thesis[1] +// (Section 4.3) corresponds to `C_{new,old}`. +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +func (c Changer) EnterJoint(autoLeave bool, ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) { + cfg, prs, err := c.checkAndCopy() + if err != nil { + return c.err(err) + } + if joint(cfg) { + err := errors.New("config is already joint") + return c.err(err) + } + if len(incoming(cfg.Voters)) == 0 { + // We allow adding nodes to an empty config for convenience (testing and + // bootstrap), but you can't enter a joint state. + err := errors.New("can't make a zero-voter config joint") + return c.err(err) + } + // Clear the outgoing config. + *outgoingPtr(&cfg.Voters) = quorum.MajorityConfig{} + // Copy incoming to outgoing. + for id := range incoming(cfg.Voters) { + outgoing(cfg.Voters)[id] = struct{}{} + } + + if err := c.apply(&cfg, prs, ccs...); err != nil { + return c.err(err) + } + cfg.AutoLeave = autoLeave + return checkAndReturn(cfg, prs) +} + +// LeaveJoint transitions out of a joint configuration. It is an error to call +// this method if the configuration is not joint, i.e. if the outgoing majority +// config Voters[1] is empty. +// +// The outgoing majority config of the joint configuration will be removed, +// that is, the incoming config is promoted as the sole decision maker. In the +// notation of the Raft thesis[1] (Section 4.3), this method transitions from +// `C_{new,old}` into `C_new`. +// +// At the same time, any staged learners (LearnersNext) the addition of which +// was held back by an overlapping voter in the former outgoing config will be +// inserted into Learners. +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +func (c Changer) LeaveJoint() (tracker.Config, tracker.ProgressMap, error) { + cfg, prs, err := c.checkAndCopy() + if err != nil { + return c.err(err) + } + if !joint(cfg) { + err := errors.New("can't leave a non-joint config") + return c.err(err) + } + if len(outgoing(cfg.Voters)) == 0 { + err := fmt.Errorf("configuration is not joint: %v", cfg) + return c.err(err) + } + for id := range cfg.LearnersNext { + nilAwareAdd(&cfg.Learners, id) + prs[id].IsLearner = true + } + cfg.LearnersNext = nil + + for id := range outgoing(cfg.Voters) { + _, isVoter := incoming(cfg.Voters)[id] + _, isLearner := cfg.Learners[id] + + if !isVoter && !isLearner { + delete(prs, id) + } + } + *outgoingPtr(&cfg.Voters) = nil + cfg.AutoLeave = false + + return checkAndReturn(cfg, prs) +} + +// Simple carries out a series of configuration changes that (in aggregate) +// mutates the incoming majority config Voters[0] by at most one. This method +// will return an error if that is not the case, if the resulting quorum is +// zero, or if the configuration is in a joint state (i.e. 
if there is an +// outgoing configuration). +func (c Changer) Simple(ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) { + cfg, prs, err := c.checkAndCopy() + if err != nil { + return c.err(err) + } + if joint(cfg) { + err := errors.New("can't apply simple config change in joint config") + return c.err(err) + } + if err := c.apply(&cfg, prs, ccs...); err != nil { + return c.err(err) + } + if n := symdiff(incoming(c.Tracker.Voters), incoming(cfg.Voters)); n > 1 { + return tracker.Config{}, nil, errors.New("more than one voter changed without entering joint config") + } + if err := checkInvariants(cfg, prs); err != nil { + return tracker.Config{}, tracker.ProgressMap{}, nil + } + + return checkAndReturn(cfg, prs) +} + +// apply a change to the configuration. By convention, changes to voters are +// always made to the incoming majority config Voters[0]. Voters[1] is either +// empty or preserves the outgoing majority configuration while in a joint state. +func (c Changer) apply(cfg *tracker.Config, prs tracker.ProgressMap, ccs ...pb.ConfChangeSingle) error { + for _, cc := range ccs { + if cc.NodeID == 0 { + // etcd replaces the NodeID with zero if it decides (downstream of + // raft) to not apply a change, so we have to have explicit code + // here to ignore these. + continue + } + switch cc.Type { + case pb.ConfChangeAddNode: + c.makeVoter(cfg, prs, cc.NodeID) + case pb.ConfChangeAddLearnerNode: + c.makeLearner(cfg, prs, cc.NodeID) + case pb.ConfChangeRemoveNode: + c.remove(cfg, prs, cc.NodeID) + case pb.ConfChangeUpdateNode: + default: + return fmt.Errorf("unexpected conf type %d", cc.Type) + } + } + if len(incoming(cfg.Voters)) == 0 { + return errors.New("removed all voters") + } + return nil +} + +// makeVoter adds or promotes the given ID to be a voter in the incoming +// majority config. +func (c Changer) makeVoter(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { + pr := prs[id] + if pr == nil { + c.initProgress(cfg, prs, id, false /* isLearner */) + return + } + + pr.IsLearner = false + nilAwareDelete(&cfg.Learners, id) + nilAwareDelete(&cfg.LearnersNext, id) + incoming(cfg.Voters)[id] = struct{}{} + return +} + +// makeLearner makes the given ID a learner or stages it to be a learner once +// an active joint configuration is exited. +// +// The former happens when the peer is not a part of the outgoing config, in +// which case we either add a new learner or demote a voter in the incoming +// config. +// +// The latter case occurs when the configuration is joint and the peer is a +// voter in the outgoing config. In that case, we do not want to add the peer +// as a learner because then we'd have to track a peer as a voter and learner +// simultaneously. Instead, we add the learner to LearnersNext, so that it will +// be added to Learners the moment the outgoing config is removed by +// LeaveJoint(). +func (c Changer) makeLearner(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { + pr := prs[id] + if pr == nil { + c.initProgress(cfg, prs, id, true /* isLearner */) + return + } + if pr.IsLearner { + return + } + // Remove any existing voter in the incoming config... + c.remove(cfg, prs, id) + // ... but save the Progress. + prs[id] = pr + // Use LearnersNext if we can't add the learner to Learners directly, i.e. + // if the peer is still tracked as a voter in the outgoing config. It will + // be turned into a learner in LeaveJoint(). + // + // Otherwise, add a regular learner right away. 
+ if _, onRight := outgoing(cfg.Voters)[id]; onRight { + nilAwareAdd(&cfg.LearnersNext, id) + } else { + pr.IsLearner = true + nilAwareAdd(&cfg.Learners, id) + } +} + +// remove this peer as a voter or learner from the incoming config. +func (c Changer) remove(cfg *tracker.Config, prs tracker.ProgressMap, id uint64) { + if _, ok := prs[id]; !ok { + return + } + + delete(incoming(cfg.Voters), id) + nilAwareDelete(&cfg.Learners, id) + nilAwareDelete(&cfg.LearnersNext, id) + + // If the peer is still a voter in the outgoing config, keep the Progress. + if _, onRight := outgoing(cfg.Voters)[id]; !onRight { + delete(prs, id) + } +} + +// initProgress initializes a new progress for the given node or learner. +func (c Changer) initProgress(cfg *tracker.Config, prs tracker.ProgressMap, id uint64, isLearner bool) { + if !isLearner { + incoming(cfg.Voters)[id] = struct{}{} + } else { + nilAwareAdd(&cfg.Learners, id) + } + prs[id] = &tracker.Progress{ + // Initializing the Progress with the last index means that the follower + // can be probed (with the last index). + // + // TODO(tbg): seems awfully optimistic. Using the first index would be + // better. The general expectation here is that the follower has no log + // at all (and will thus likely need a snapshot), though the app may + // have applied a snapshot out of band before adding the replica (thus + // making the first index the better choice). + Next: c.LastIndex, + Match: 0, + Inflights: tracker.NewInflights(c.Tracker.MaxInflight), + IsLearner: isLearner, + // When a node is first added, we should mark it as recently active. + // Otherwise, CheckQuorum may cause us to step down if it is invoked + // before the added node has had a chance to communicate with us. + RecentActive: true, + } +} + +// checkInvariants makes sure that the config and progress are compatible with +// each other. This is used to check both what the Changer is initialized with, +// as well as what it returns. +func checkInvariants(cfg tracker.Config, prs tracker.ProgressMap) error { + // NB: intentionally allow the empty config. In production we'll never see a + // non-empty config (we prevent it from being created) but we will need to + // be able to *create* an initial config, for example during bootstrap (or + // during tests). Instead of having to hand-code this, we allow + // transitioning from an empty config into any other legal and non-empty + // config. + for _, ids := range []map[uint64]struct{}{ + cfg.Voters.IDs(), + cfg.Learners, + cfg.LearnersNext, + } { + for id := range ids { + if _, ok := prs[id]; !ok { + return fmt.Errorf("no progress for %d", id) + } + } + } + + // Any staged learner was staged because it could not be directly added due + // to a conflicting voter in the outgoing config. + for id := range cfg.LearnersNext { + if _, ok := outgoing(cfg.Voters)[id]; !ok { + return fmt.Errorf("%d is in LearnersNext, but not Voters[1]", id) + } + if prs[id].IsLearner { + return fmt.Errorf("%d is in LearnersNext, but is already marked as learner", id) + } + } + // Conversely Learners and Voters doesn't intersect at all. + for id := range cfg.Learners { + if _, ok := outgoing(cfg.Voters)[id]; ok { + return fmt.Errorf("%d is in Learners and Voters[1]", id) + } + if _, ok := incoming(cfg.Voters)[id]; ok { + return fmt.Errorf("%d is in Learners and Voters[0]", id) + } + if !prs[id].IsLearner { + return fmt.Errorf("%d is in Learners, but is not marked as learner", id) + } + } + + if !joint(cfg) { + // We enforce that empty maps are nil instead of zero. 
+ if outgoing(cfg.Voters) != nil { + return fmt.Errorf("Voters[1] must be nil when not joint") + } + if cfg.LearnersNext != nil { + return fmt.Errorf("LearnersNext must be nil when not joint") + } + if cfg.AutoLeave { + return fmt.Errorf("AutoLeave must be false when not joint") + } + } + + return nil +} + +// checkAndCopy copies the tracker's config and progress map (deeply enough for +// the purposes of the Changer) and returns those copies. It returns an error +// if checkInvariants does. +func (c Changer) checkAndCopy() (tracker.Config, tracker.ProgressMap, error) { + cfg := c.Tracker.Config.Clone() + prs := tracker.ProgressMap{} + + for id, pr := range c.Tracker.Progress { + // A shallow copy is enough because we only mutate the Learner field. + ppr := *pr + prs[id] = &ppr + } + return checkAndReturn(cfg, prs) +} + +// checkAndReturn calls checkInvariants on the input and returns either the +// resulting error or the input. +func checkAndReturn(cfg tracker.Config, prs tracker.ProgressMap) (tracker.Config, tracker.ProgressMap, error) { + if err := checkInvariants(cfg, prs); err != nil { + return tracker.Config{}, tracker.ProgressMap{}, err + } + return cfg, prs, nil +} + +// err returns zero values and an error. +func (c Changer) err(err error) (tracker.Config, tracker.ProgressMap, error) { + return tracker.Config{}, nil, err +} + +// nilAwareAdd populates a map entry, creating the map if necessary. +func nilAwareAdd(m *map[uint64]struct{}, id uint64) { + if *m == nil { + *m = map[uint64]struct{}{} + } + (*m)[id] = struct{}{} +} + +// nilAwareDelete deletes from a map, nil'ing the map itself if it is empty after. +func nilAwareDelete(m *map[uint64]struct{}, id uint64) { + if *m == nil { + return + } + delete(*m, id) + if len(*m) == 0 { + *m = nil + } +} + +// symdiff returns the count of the symmetric difference between the sets of +// uint64s, i.e. len( (l - r) \union (r - l)). +func symdiff(l, r map[uint64]struct{}) int { + var n int + pairs := [][2]quorum.MajorityConfig{ + {l, r}, // count elems in l but not in r + {r, l}, // count elems in r but not in l + } + for _, p := range pairs { + for id := range p[0] { + if _, ok := p[1][id]; !ok { + n++ + } + } + } + return n +} + +func joint(cfg tracker.Config) bool { + return len(outgoing(cfg.Voters)) > 0 +} + +func incoming(voters quorum.JointConfig) quorum.MajorityConfig { return voters[0] } +func outgoing(voters quorum.JointConfig) quorum.MajorityConfig { return voters[1] } +func outgoingPtr(voters *quorum.JointConfig) *quorum.MajorityConfig { return &voters[1] } + +// Describe prints the type and NodeID of the configuration changes as a +// space-delimited string. +func Describe(ccs ...pb.ConfChangeSingle) string { + var buf strings.Builder + for _, cc := range ccs { + if buf.Len() > 0 { + buf.WriteByte(' ') + } + fmt.Fprintf(&buf, "%s(%d)", cc.Type, cc.NodeID) + } + return buf.String() +} diff --git a/vendor/go.etcd.io/etcd/raft/confchange/restore.go b/vendor/go.etcd.io/etcd/raft/confchange/restore.go new file mode 100644 index 000000000..724068da0 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/confchange/restore.go @@ -0,0 +1,155 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package confchange + +import ( + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" +) + +// toConfChangeSingle translates a conf state into 1) a slice of operations creating +// first the config that will become the outgoing one, and then the incoming one, and +// b) another slice that, when applied to the config resulted from 1), represents the +// ConfState. +func toConfChangeSingle(cs pb.ConfState) (out []pb.ConfChangeSingle, in []pb.ConfChangeSingle) { + // Example to follow along this code: + // voters=(1 2 3) learners=(5) outgoing=(1 2 4 6) learners_next=(4) + // + // This means that before entering the joint config, the configuration + // had voters (1 2 4) and perhaps some learners that are already gone. + // The new set of voters is (1 2 3), i.e. (1 2) were kept around, and (4 6) + // are no longer voters; however 4 is poised to become a learner upon leaving + // the joint state. + // We can't tell whether 5 was a learner before entering the joint config, + // but it doesn't matter (we'll pretend that it wasn't). + // + // The code below will construct + // outgoing = add 1; add 2; add 4; add 6 + // incoming = remove 1; remove 2; remove 4; remove 6 + // add 1; add 2; add 3; + // add-learner 5; + // add-learner 4; + // + // So, when starting with an empty config, after applying 'outgoing' we have + // + // quorum=(1 2 4 6) + // + // From which we enter a joint state via 'incoming' + // + // quorum=(1 2 3)&&(1 2 4 6) learners=(5) learners_next=(4) + // + // as desired. + + for _, id := range cs.VotersOutgoing { + // If there are outgoing voters, first add them one by one so that the + // (non-joint) config has them all. + out = append(out, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddNode, + NodeID: id, + }) + + } + + // We're done constructing the outgoing slice, now on to the incoming one + // (which will apply on top of the config created by the outgoing slice). + + // First, we'll remove all of the outgoing voters. + for _, id := range cs.VotersOutgoing { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeRemoveNode, + NodeID: id, + }) + } + // Then we'll add the incoming voters and learners. + for _, id := range cs.Voters { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddNode, + NodeID: id, + }) + } + for _, id := range cs.Learners { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddLearnerNode, + NodeID: id, + }) + } + // Same for LearnersNext; these are nodes we want to be learners but which + // are currently voters in the outgoing config. 
+ for _, id := range cs.LearnersNext { + in = append(in, pb.ConfChangeSingle{ + Type: pb.ConfChangeAddLearnerNode, + NodeID: id, + }) + } + return out, in +} + +func chain(chg Changer, ops ...func(Changer) (tracker.Config, tracker.ProgressMap, error)) (tracker.Config, tracker.ProgressMap, error) { + for _, op := range ops { + cfg, prs, err := op(chg) + if err != nil { + return tracker.Config{}, nil, err + } + chg.Tracker.Config = cfg + chg.Tracker.Progress = prs + } + return chg.Tracker.Config, chg.Tracker.Progress, nil +} + +// Restore takes a Changer (which must represent an empty configuration), and +// runs a sequence of changes enacting the configuration described in the +// ConfState. +// +// TODO(tbg) it's silly that this takes a Changer. Unravel this by making sure +// the Changer only needs a ProgressMap (not a whole Tracker) at which point +// this can just take LastIndex and MaxInflight directly instead and cook up +// the results from that alone. +func Restore(chg Changer, cs pb.ConfState) (tracker.Config, tracker.ProgressMap, error) { + outgoing, incoming := toConfChangeSingle(cs) + + var ops []func(Changer) (tracker.Config, tracker.ProgressMap, error) + + if len(outgoing) == 0 { + // No outgoing config, so just apply the incoming changes one by one. + for _, cc := range incoming { + cc := cc // loop-local copy + ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) { + return chg.Simple(cc) + }) + } + } else { + // The ConfState describes a joint configuration. + // + // First, apply all of the changes of the outgoing config one by one, so + // that it temporarily becomes the incoming active config. For example, + // if the config is (1 2 3)&(2 3 4), this will establish (2 3 4)&(). + for _, cc := range outgoing { + cc := cc // loop-local copy + ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) { + return chg.Simple(cc) + }) + } + // Now enter the joint state, which rotates the above additions into the + // outgoing config, and adds the incoming config in. Continuing the + // example above, we'd get (1 2 3)&(2 3 4), i.e. the incoming operations + // would be removing 2,3,4 and then adding in 1,2,3 while transitioning + // into a joint state. + ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) { + return chg.EnterJoint(cs.AutoLeave, incoming...) + }) + } + + return chain(chg, ops...) +} diff --git a/vendor/github.com/coreos/etcd/raft/design.md b/vendor/go.etcd.io/etcd/raft/design.md similarity index 100% rename from vendor/github.com/coreos/etcd/raft/design.md rename to vendor/go.etcd.io/etcd/raft/design.md diff --git a/vendor/github.com/coreos/etcd/raft/doc.go b/vendor/go.etcd.io/etcd/raft/doc.go similarity index 96% rename from vendor/github.com/coreos/etcd/raft/doc.go rename to vendor/go.etcd.io/etcd/raft/doc.go index b55c591ff..68fe6f0a6 100644 --- a/vendor/github.com/coreos/etcd/raft/doc.go +++ b/vendor/go.etcd.io/etcd/raft/doc.go @@ -19,11 +19,11 @@ defined in the raftpb package. Raft is a protocol with which a cluster of nodes can maintain a replicated state machine. The state machine is kept in sync through the use of a replicated log. For more details on Raft, see "In Search of an Understandable Consensus Algorithm" -(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout. +(https://raft.github.io/raft.pdf) by Diego Ongaro and John Ousterhout. 
A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: -https://github.com/coreos/etcd/tree/master/contrib/raftexample +https://github.com/etcd-io/etcd/tree/master/contrib/raftexample Usage @@ -87,7 +87,7 @@ large). Note: Marshalling messages is not thread-safe; it is important that you make sure that no new entries are persisted while marshalling. -The easiest way to achieve this is to serialise the messages directly inside +The easiest way to achieve this is to serialize the messages directly inside your main raft loop. 3. Apply Snapshot (if any) and CommittedEntries to the state machine. @@ -153,7 +153,7 @@ If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; you may have to re-propose after a timeout. -To add or remove node in a cluster, build ConfChange struct 'cc' and call: +To add or remove a node in a cluster, build ConfChange struct 'cc' and call: n.ProposeConfChange(ctx, cc) @@ -172,7 +172,7 @@ may be reused. Node IDs must be non-zero. Implementation notes This implementation is up to date with the final Raft thesis -(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our +(https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although our implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the @@ -260,7 +260,7 @@ stale log entries: 'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election protocol. When Config.PreVote is true, a pre-election is carried out first (using the same rules as a regular election), and no node increases its term - number unless the pre-election indicates that the campaigining node would win. + number unless the pre-election indicates that the campaigning node would win. This minimizes disruption when a partitioned node rejoins the cluster. 'MsgSnap' requests to install a snapshot message. When a node has just diff --git a/vendor/github.com/coreos/etcd/raft/log.go b/vendor/go.etcd.io/etcd/raft/log.go similarity index 92% rename from vendor/github.com/coreos/etcd/raft/log.go rename to vendor/go.etcd.io/etcd/raft/log.go index c3036d3c9..77eedfccb 100644 --- a/vendor/github.com/coreos/etcd/raft/log.go +++ b/vendor/go.etcd.io/etcd/raft/log.go @@ -18,7 +18,7 @@ import ( "fmt" "log" - pb "github.com/coreos/etcd/raft/raftpb" + pb "go.etcd.io/etcd/raft/raftpb" ) type raftLog struct { @@ -38,17 +38,29 @@ type raftLog struct { applied uint64 logger Logger + + // maxNextEntsSize is the maximum number aggregate byte size of the messages + // returned from calls to nextEnts. + maxNextEntsSize uint64 } -// newLog returns log using the given storage. It recovers the log to the state -// that it just commits and applies the latest snapshot. +// newLog returns log using the given storage and default options. It +// recovers the log to the state that it just commits and applies the +// latest snapshot. func newLog(storage Storage, logger Logger) *raftLog { + return newLogWithSize(storage, logger, noLimit) +} + +// newLogWithSize returns a log using the given storage and max +// message size. 
+func newLogWithSize(storage Storage, logger Logger, maxNextEntsSize uint64) *raftLog { if storage == nil { log.Panic("storage must not be nil") } log := &raftLog{ - storage: storage, - logger: logger, + storage: storage, + logger: logger, + maxNextEntsSize: maxNextEntsSize, } firstIndex, err := storage.FirstIndex() if err != nil { @@ -139,7 +151,7 @@ func (l *raftLog) unstableEntries() []pb.Entry { func (l *raftLog) nextEnts() (ents []pb.Entry) { off := max(l.applied+1, l.firstIndex()) if l.committed+1 > off { - ents, err := l.slice(off, l.committed+1, noLimit) + ents, err := l.slice(off, l.committed+1, l.maxNextEntsSize) if err != nil { l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err) } @@ -320,8 +332,10 @@ func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) { if hi > l.unstable.offset { unstable := l.unstable.slice(max(lo, l.unstable.offset), hi) if len(ents) > 0 { - ents = append([]pb.Entry{}, ents...) - ents = append(ents, unstable...) + combined := make([]pb.Entry, len(ents)+len(unstable)) + n := copy(combined, ents) + copy(combined[n:], unstable) + ents = combined } else { ents = unstable } diff --git a/vendor/github.com/coreos/etcd/raft/log_unstable.go b/vendor/go.etcd.io/etcd/raft/log_unstable.go similarity index 96% rename from vendor/github.com/coreos/etcd/raft/log_unstable.go rename to vendor/go.etcd.io/etcd/raft/log_unstable.go index 263af9ce4..1bff5a7bd 100644 --- a/vendor/github.com/coreos/etcd/raft/log_unstable.go +++ b/vendor/go.etcd.io/etcd/raft/log_unstable.go @@ -14,7 +14,7 @@ package raft -import pb "github.com/coreos/etcd/raft/raftpb" +import pb "go.etcd.io/etcd/raft/raftpb" // unstable.entries[i] has raft log position i+unstable.offset. // Note that unstable.offset may be less than the highest log @@ -55,10 +55,7 @@ func (u *unstable) maybeLastIndex() (uint64, bool) { // is any. 
func (u *unstable) maybeTerm(i uint64) (uint64, bool) { if i < u.offset { - if u.snapshot == nil { - return 0, false - } - if u.snapshot.Metadata.Index == i { + if u.snapshot != nil && u.snapshot.Metadata.Index == i { return u.snapshot.Metadata.Term, true } return 0, false @@ -71,6 +68,7 @@ func (u *unstable) maybeTerm(i uint64) (uint64, bool) { if i > last { return 0, false } + return u.entries[i-u.offset].Term, true } @@ -147,7 +145,7 @@ func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry { return u.entries[lo-u.offset : hi-u.offset] } -// u.offset <= lo <= hi <= u.offset+len(u.offset) +// u.offset <= lo <= hi <= u.offset+len(u.entries) func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) { if lo > hi { u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi) diff --git a/vendor/github.com/coreos/etcd/raft/logger.go b/vendor/go.etcd.io/etcd/raft/logger.go similarity index 96% rename from vendor/github.com/coreos/etcd/raft/logger.go rename to vendor/go.etcd.io/etcd/raft/logger.go index 426a77d34..6d8962965 100644 --- a/vendor/github.com/coreos/etcd/raft/logger.go +++ b/vendor/go.etcd.io/etcd/raft/logger.go @@ -19,6 +19,7 @@ import ( "io/ioutil" "log" "os" + "sync" ) type Logger interface { @@ -41,11 +42,16 @@ type Logger interface { Panicf(format string, v ...interface{}) } -func SetLogger(l Logger) { raftLogger = l } +func SetLogger(l Logger) { + raftLoggerMu.Lock() + raftLogger = l + raftLoggerMu.Unlock() +} var ( defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)} discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)} + raftLoggerMu sync.Mutex raftLogger = Logger(defaultLogger) ) diff --git a/vendor/github.com/coreos/etcd/raft/node.go b/vendor/go.etcd.io/etcd/raft/node.go similarity index 65% rename from vendor/github.com/coreos/etcd/raft/node.go rename to vendor/go.etcd.io/etcd/raft/node.go index 33a9db840..ab6185b99 100644 --- a/vendor/github.com/coreos/etcd/raft/node.go +++ b/vendor/go.etcd.io/etcd/raft/node.go @@ -18,7 +18,7 @@ import ( "context" "errors" - pb "github.com/coreos/etcd/raft/raftpb" + pb "go.etcd.io/etcd/raft/raftpb" ) type SnapshotStatus int @@ -109,6 +109,19 @@ func (rd Ready) containsUpdates() bool { len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0 } +// appliedCursor extracts from the Ready the highest index the client has +// applied (once the Ready is confirmed via Advance). If no information is +// contained in the Ready, returns zero. +func (rd Ready) appliedCursor() uint64 { + if n := len(rd.CommittedEntries); n > 0 { + return rd.CommittedEntries[n-1].Index + } + if index := rd.Snapshot.Metadata.Index; index > 0 { + return index + } + return 0 +} + // Node represents a node in a raft cluster. type Node interface { // Tick increments the internal logical clock for the Node by a single tick. Election @@ -116,12 +129,23 @@ type Node interface { Tick() // Campaign causes the Node to transition to candidate state and start campaigning to become leader. Campaign(ctx context.Context) error - // Propose proposes that data be appended to the log. + // Propose proposes that data be appended to the log. Note that proposals can be lost without + // notice, therefore it is user's job to ensure proposal retries. Propose(ctx context.Context, data []byte) error - // ProposeConfChange proposes config change. - // At most one ConfChange can be in the process of going through consensus. - // Application needs to call ApplyConfChange when applying EntryConfChange type entry. 
- ProposeConfChange(ctx context.Context, cc pb.ConfChange) error + // ProposeConfChange proposes a configuration change. Like any proposal, the + // configuration change may be dropped with or without an error being + // returned. In particular, configuration changes are dropped unless the + // leader has certainty that there is no prior unapplied configuration + // change in its log. + // + // The method accepts either a pb.ConfChange (deprecated) or pb.ConfChangeV2 + // message. The latter allows arbitrary configuration changes via joint + // consensus, notably including replacing a voter. Passing a ConfChangeV2 + // message is only allowed if all Nodes participating in the cluster run a + // version of this library aware of the V2 API. See pb.ConfChangeV2 for + // usage details and semantics. + ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error + // Step advances the state machine using the given message. ctx.Err() will be returned, if any. Step(ctx context.Context, msg pb.Message) error @@ -142,11 +166,13 @@ type Node interface { // a long time to apply the snapshot data. To continue receiving Ready without blocking raft // progress, it can call Advance before finishing applying the last ready. Advance() - // ApplyConfChange applies config change to the local node. - // Returns an opaque ConfState protobuf which must be recorded - // in snapshots. Will never return nil; it returns a pointer only - // to match MemoryStorage.Compact. - ApplyConfChange(cc pb.ConfChange) *pb.ConfState + // ApplyConfChange applies a config change (previously passed to + // ProposeConfChange) to the node. This must be called whenever a config + // change is observed in Ready.CommittedEntries. + // + // Returns an opaque non-nil ConfState protobuf which must be recorded in + // snapshots. + ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState // TransferLeadership attempts to transfer leadership to the given transferee. TransferLeadership(ctx context.Context, lead, transferee uint64) @@ -161,7 +187,16 @@ type Node interface { Status() Status // ReportUnreachable reports the given node is not reachable for the last send. ReportUnreachable(id uint64) - // ReportSnapshot reports the status of the sent snapshot. + // ReportSnapshot reports the status of the sent snapshot. The id is the raft ID of the follower + // who is meant to receive the snapshot, and the status is SnapshotFinish or SnapshotFailure. + // Calling ReportSnapshot with SnapshotFinish is a no-op. But, any failure in applying a + // snapshot (for e.g., while streaming it from leader to follower), should be reported to the + // leader with SnapshotFailure. When leader sends a snapshot to a follower, it pauses any raft + // log probes until the follower can apply the snapshot and advance its state. If the follower + // can't do that, for e.g., due to a crash, it could end up in a limbo, never getting any + // updates from the leader. Therefore, it is crucial that the application ensures that any + // failure in snapshot sending is caught and reported back to the leader; so it can resume raft + // log probing in the follower. ReportSnapshot(id uint64, status SnapshotStatus) // Stop performs any necessary termination of the Node. Stop() @@ -174,40 +209,21 @@ type Peer struct { // StartNode returns a new Node given configuration and a list of raft peers. // It appends a ConfChangeAddNode entry for each given peer to the initial log. +// +// Peers must not be zero length; call RestartNode in that case. 
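// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored diff): the Node interface above is
// consumed through a Ready/Advance loop. This minimal example assumes a
// single-voter cluster backed by raft.MemoryStorage (defined elsewhere in the
// raft package); `send` stands in for a real transport, error handling is
// elided, and a real loop would also drive n.Tick() from a timer.
// ---------------------------------------------------------------------------
package main

import (
	"go.etcd.io/etcd/raft"
	pb "go.etcd.io/etcd/raft/raftpb"
)

func send(msgs []pb.Message) {} // placeholder: ship these to the peers they address

func main() {
	storage := raft.NewMemoryStorage()
	n := raft.StartNode(&raft.Config{
		ID:              1,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   1 << 20,
		MaxInflightMsgs: 256,
	}, []raft.Peer{{ID: 1}})
	defer n.Stop()

	for rd := range n.Ready() {
		// Persist state and entries before handing messages to the transport.
		if !raft.IsEmptyHardState(rd.HardState) {
			storage.SetHardState(rd.HardState)
		}
		storage.Append(rd.Entries)
		if !raft.IsEmptySnap(rd.Snapshot) {
			storage.ApplySnapshot(rd.Snapshot)
		}
		send(rd.Messages)

		for _, ent := range rd.CommittedEntries {
			if ent.Type == pb.EntryConfChange {
				var cc pb.ConfChange
				cc.Unmarshal(ent.Data)
				// Config changes observed here must be fed back via ApplyConfChange.
				n.ApplyConfChange(cc)
			}
			// apply normal entries to the application state machine here
		}
		n.Advance()
	}
}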
func StartNode(c *Config, peers []Peer) Node { - r := newRaft(c) - // become the follower at term 1 and apply initial configuration - // entries of term 1 - r.becomeFollower(1, None) - for _, peer := range peers { - cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} - d, err := cc.Marshal() - if err != nil { - panic("unexpected marshal error") - } - e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d} - r.raftLog.append(e) + if len(peers) == 0 { + panic("no peers given; use RestartNode instead") } - // Mark these initial entries as committed. - // TODO(bdarnell): These entries are still unstable; do we need to preserve - // the invariant that committed < unstable? - r.raftLog.committed = r.raftLog.lastIndex() - // Now apply them, mainly so that the application can call Campaign - // immediately after StartNode in tests. Note that these nodes will - // be added to raft twice: here and when the application's Ready - // loop calls ApplyConfChange. The calls to addNode must come after - // all calls to raftLog.append so progress.next is set after these - // bootstrapping entries (it is an error if we try to append these - // entries since they have already been committed). - // We do not set raftLog.applied so the application will be able - // to observe all conf changes via Ready.CommittedEntries. - for _, peer := range peers { - r.addNode(peer.ID) + rn, err := NewRawNode(c) + if err != nil { + panic(err) } + rn.Bootstrap(peers) - n := newNode() - n.logger = c.Logger - go n.run(r) + n := newNode(rn) + + go n.run() return &n } @@ -216,19 +232,25 @@ func StartNode(c *Config, peers []Peer) Node { // If the caller has an existing state machine, pass in the last log index that // has been applied to it; otherwise use zero. 
func RestartNode(c *Config) Node { - r := newRaft(c) - - n := newNode() - n.logger = c.Logger - go n.run(r) + rn, err := NewRawNode(c) + if err != nil { + panic(err) + } + n := newNode(rn) + go n.run() return &n } +type msgWithResult struct { + m pb.Message + result chan error +} + // node is the canonical implementation of the Node interface type node struct { - propc chan pb.Message + propc chan msgWithResult recvc chan pb.Message - confc chan pb.ConfChange + confc chan pb.ConfChangeV2 confstatec chan pb.ConfState readyc chan Ready advancec chan struct{} @@ -237,14 +259,14 @@ type node struct { stop chan struct{} status chan chan Status - logger Logger + rn *RawNode } -func newNode() node { +func newNode(rn *RawNode) node { return node{ - propc: make(chan pb.Message), + propc: make(chan msgWithResult), recvc: make(chan pb.Message), - confc: make(chan pb.ConfChange), + confc: make(chan pb.ConfChangeV2), confstatec: make(chan pb.ConfState), readyc: make(chan Ready), advancec: make(chan struct{}), @@ -255,6 +277,7 @@ func newNode() node { done: make(chan struct{}), stop: make(chan struct{}), status: make(chan chan Status), + rn: rn, } } @@ -270,29 +293,30 @@ func (n *node) Stop() { <-n.done } -func (n *node) run(r *raft) { - var propc chan pb.Message +func (n *node) run() { + var propc chan msgWithResult var readyc chan Ready var advancec chan struct{} - var prevLastUnstablei, prevLastUnstablet uint64 - var havePrevLastUnstablei bool - var prevSnapi uint64 var rd Ready + r := n.rn.raft + lead := None - prevSoftSt := r.softState() - prevHardSt := emptyState for { if advancec != nil { readyc = nil - } else { - rd = newReady(r, prevSoftSt, prevHardSt) - if rd.containsUpdates() { - readyc = n.readyc - } else { - readyc = nil - } + } else if n.rn.HasReady() { + // Populate a Ready. Note that this Ready is not guaranteed to + // actually be handled. We will arm readyc, but there's no guarantee + // that we will actually send on it. It's possible that we will + // service another channel instead, loop around, and then populate + // the Ready again. We could instead force the previous Ready to be + // handled first, but it's generally good to emit larger Readys plus + // it simplifies testing (by emitting less frequently and more + // predictably). + rd = n.rn.readyWithoutAccept() + readyc = n.readyc } if lead != r.lead { @@ -314,74 +338,56 @@ func (n *node) run(r *raft) { // TODO: maybe buffer the config propose if there exists one (the way // described in raft dissertation) // Currently it is dropped in Step silently. - case m := <-propc: + case pm := <-propc: + m := pm.m m.From = r.id - r.Step(m) + err := r.Step(m) + if pm.result != nil { + pm.result <- err + close(pm.result) + } case m := <-n.recvc: // filter out response message from unknown From. - if pr := r.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) { - r.Step(m) // raft never returns an error + if pr := r.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) { + r.Step(m) } case cc := <-n.confc: - if cc.NodeID == None { - r.resetPendingConf() - select { - case n.confstatec <- pb.ConfState{Nodes: r.nodes()}: - case <-n.done: + _, okBefore := r.prs.Progress[r.id] + cs := r.applyConfChange(cc) + // If the node was removed, block incoming proposals. Note that we + // only do this if the node was in the config before. Nodes may be + // a member of the group without knowing this (when they're catching + // up on the log and don't have the latest config) and we don't want + // to block the proposal channel in that case. 
+ // + // NB: propc is reset when the leader changes, which, if we learn + // about it, sort of implies that we got readded, maybe? This isn't + // very sound and likely has bugs. + if _, okAfter := r.prs.Progress[r.id]; okBefore && !okAfter { + var found bool + for _, sl := range [][]uint64{cs.Voters, cs.VotersOutgoing} { + for _, id := range sl { + if id == r.id { + found = true + } + } } - break - } - switch cc.Type { - case pb.ConfChangeAddNode: - r.addNode(cc.NodeID) - case pb.ConfChangeAddLearnerNode: - r.addLearner(cc.NodeID) - case pb.ConfChangeRemoveNode: - // block incoming proposal when local node is - // removed - if cc.NodeID == r.id { + if !found { propc = nil } - r.removeNode(cc.NodeID) - case pb.ConfChangeUpdateNode: - r.resetPendingConf() - default: - panic("unexpected conf type") } select { - case n.confstatec <- pb.ConfState{Nodes: r.nodes()}: + case n.confstatec <- cs: case <-n.done: } case <-n.tickc: - r.tick() + n.rn.Tick() case readyc <- rd: - if rd.SoftState != nil { - prevSoftSt = rd.SoftState - } - if len(rd.Entries) > 0 { - prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index - prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term - havePrevLastUnstablei = true - } - if !IsEmptyHardState(rd.HardState) { - prevHardSt = rd.HardState - } - if !IsEmptySnap(rd.Snapshot) { - prevSnapi = rd.Snapshot.Metadata.Index - } - - r.msgs = nil - r.readStates = nil + n.rn.acceptReady(rd) advancec = n.advancec case <-advancec: - if prevHardSt.Commit != 0 { - r.raftLog.appliedTo(prevHardSt.Commit) - } - if havePrevLastUnstablei { - r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet) - havePrevLastUnstablei = false - } - r.raftLog.stableSnapTo(prevSnapi) + n.rn.Advance(rd) + rd = Ready{} advancec = nil case c := <-n.status: c <- getStatus(r) @@ -399,14 +405,14 @@ func (n *node) Tick() { case n.tickc <- struct{}{}: case <-n.done: default: - n.logger.Warningf("A tick missed to fire. Node blocks too long!") + n.rn.raft.logger.Warningf("%x (leader %v) A tick missed to fire. Node blocks too long!", n.rn.raft.id, n.rn.raft.id == n.rn.raft.lead) } } func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) } func (n *node) Propose(ctx context.Context, data []byte) error { - return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}}) + return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}}) } func (n *node) Step(ctx context.Context, m pb.Message) error { @@ -418,30 +424,69 @@ func (n *node) Step(ctx context.Context, m pb.Message) error { return n.step(ctx, m) } -func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error { - data, err := cc.Marshal() +func confChangeToMsg(c pb.ConfChangeI) (pb.Message, error) { + typ, data, err := pb.MarshalConfChange(c) + if err != nil { + return pb.Message{}, err + } + return pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: typ, Data: data}}}, nil +} + +func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error { + msg, err := confChangeToMsg(cc) if err != nil { return err } - return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}}) + return n.Step(ctx, msg) +} + +func (n *node) step(ctx context.Context, m pb.Message) error { + return n.stepWithWaitOption(ctx, m, false) +} + +func (n *node) stepWait(ctx context.Context, m pb.Message) error { + return n.stepWithWaitOption(ctx, m, true) } // Step advances the state machine using msgs. 
The ctx.Err() will be returned, // if any. -func (n *node) step(ctx context.Context, m pb.Message) error { - ch := n.recvc - if m.Type == pb.MsgProp { - ch = n.propc +func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error { + if m.Type != pb.MsgProp { + select { + case n.recvc <- m: + return nil + case <-ctx.Done(): + return ctx.Err() + case <-n.done: + return ErrStopped + } + } + ch := n.propc + pm := msgWithResult{m: m} + if wait { + pm.result = make(chan error, 1) } - select { - case ch <- m: - return nil + case ch <- pm: + if !wait { + return nil + } + case <-ctx.Done(): + return ctx.Err() + case <-n.done: + return ErrStopped + } + select { + case err := <-pm.result: + if err != nil { + return err + } case <-ctx.Done(): return ctx.Err() case <-n.done: return ErrStopped } + return nil } func (n *node) Ready() <-chan Ready { return n.readyc } @@ -453,10 +498,10 @@ func (n *node) Advance() { } } -func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState { +func (n *node) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState { var cs pb.ConfState select { - case n.confc <- cc: + case n.confc <- cc.AsV2(): case <-n.done: } select { @@ -523,7 +568,7 @@ func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready { if len(r.readStates) != 0 { rd.ReadStates = r.readStates } - rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries)) + rd.MustSync = MustSync(r.hardState(), prevHardSt, len(rd.Entries)) return rd } diff --git a/vendor/go.etcd.io/etcd/raft/quorum/joint.go b/vendor/go.etcd.io/etcd/raft/quorum/joint.go new file mode 100644 index 000000000..e3741e0b0 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/quorum/joint.go @@ -0,0 +1,75 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quorum + +// JointConfig is a configuration of two groups of (possibly overlapping) +// majority configurations. Decisions require the support of both majorities. +type JointConfig [2]MajorityConfig + +func (c JointConfig) String() string { + if len(c[1]) > 0 { + return c[0].String() + "&&" + c[1].String() + } + return c[0].String() +} + +// IDs returns a newly initialized map representing the set of voters present +// in the joint configuration. +func (c JointConfig) IDs() map[uint64]struct{} { + m := map[uint64]struct{}{} + for _, cc := range c { + for id := range cc { + m[id] = struct{}{} + } + } + return m +} + +// Describe returns a (multi-line) representation of the commit indexes for the +// given lookuper. +func (c JointConfig) Describe(l AckedIndexer) string { + return MajorityConfig(c.IDs()).Describe(l) +} + +// CommittedIndex returns the largest committed index for the given joint +// quorum. An index is jointly committed if it is committed in both constituent +// majorities. 
+func (c JointConfig) CommittedIndex(l AckedIndexer) Index { + idx0 := c[0].CommittedIndex(l) + idx1 := c[1].CommittedIndex(l) + if idx0 < idx1 { + return idx0 + } + return idx1 +} + +// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns +// a result indicating whether the vote is pending, lost, or won. A joint quorum +// requires both majority quorums to vote in favor. +func (c JointConfig) VoteResult(votes map[uint64]bool) VoteResult { + r1 := c[0].VoteResult(votes) + r2 := c[1].VoteResult(votes) + + if r1 == r2 { + // If they agree, return the agreed state. + return r1 + } + if r1 == VoteLost || r2 == VoteLost { + // If either config has lost, loss is the only possible outcome. + return VoteLost + } + // One side won, the other one is pending, so the whole outcome is. + return VotePending +} diff --git a/vendor/go.etcd.io/etcd/raft/quorum/majority.go b/vendor/go.etcd.io/etcd/raft/quorum/majority.go new file mode 100644 index 000000000..8858a36b6 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/quorum/majority.go @@ -0,0 +1,210 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quorum + +import ( + "fmt" + "math" + "sort" + "strings" +) + +// MajorityConfig is a set of IDs that uses majority quorums to make decisions. +type MajorityConfig map[uint64]struct{} + +func (c MajorityConfig) String() string { + sl := make([]uint64, 0, len(c)) + for id := range c { + sl = append(sl, id) + } + sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] }) + var buf strings.Builder + buf.WriteByte('(') + for i := range sl { + if i > 0 { + buf.WriteByte(' ') + } + fmt.Fprint(&buf, sl[i]) + } + buf.WriteByte(')') + return buf.String() +} + +// Describe returns a (multi-line) representation of the commit indexes for the +// given lookuper. +func (c MajorityConfig) Describe(l AckedIndexer) string { + if len(c) == 0 { + return "" + } + type tup struct { + id uint64 + idx Index + ok bool // idx found? + bar int // length of bar displayed for this tup + } + + // Below, populate .bar so that the i-th largest commit index has bar i (we + // plot this as sort of a progress bar). The actual code is a bit more + // complicated and also makes sure that equal index => equal bar. + + n := len(c) + info := make([]tup, 0, n) + for id := range c { + idx, ok := l.AckedIndex(id) + info = append(info, tup{id: id, idx: idx, ok: ok}) + } + + // Sort by index + sort.Slice(info, func(i, j int) bool { + if info[i].idx == info[j].idx { + return info[i].id < info[j].id + } + return info[i].idx < info[j].idx + }) + + // Populate .bar. + for i := range info { + if i > 0 && info[i-1].idx < info[i].idx { + info[i].bar = i + } + } + + // Sort by ID. + sort.Slice(info, func(i, j int) bool { + return info[i].id < info[j].id + }) + + var buf strings.Builder + + // Print. 
+ fmt.Fprint(&buf, strings.Repeat(" ", n)+" idx\n") + for i := range info { + bar := info[i].bar + if !info[i].ok { + fmt.Fprint(&buf, "?"+strings.Repeat(" ", n)) + } else { + fmt.Fprint(&buf, strings.Repeat("x", bar)+">"+strings.Repeat(" ", n-bar)) + } + fmt.Fprintf(&buf, " %5d (id=%d)\n", info[i].idx, info[i].id) + } + return buf.String() +} + +// Slice returns the MajorityConfig as a sorted slice. +func (c MajorityConfig) Slice() []uint64 { + var sl []uint64 + for id := range c { + sl = append(sl, id) + } + sort.Slice(sl, func(i, j int) bool { return sl[i] < sl[j] }) + return sl +} + +func insertionSort(sl []uint64) { + a, b := 0, len(sl) + for i := a + 1; i < b; i++ { + for j := i; j > a && sl[j] < sl[j-1]; j-- { + sl[j], sl[j-1] = sl[j-1], sl[j] + } + } +} + +// CommittedIndex computes the committed index from those supplied via the +// provided AckedIndexer (for the active config). +func (c MajorityConfig) CommittedIndex(l AckedIndexer) Index { + n := len(c) + if n == 0 { + // This plays well with joint quorums which, when one half is the zero + // MajorityConfig, should behave like the other half. + return math.MaxUint64 + } + + // Use an on-stack slice to collect the committed indexes when n <= 7 + // (otherwise we alloc). The alternative is to stash a slice on + // MajorityConfig, but this impairs usability (as is, MajorityConfig is just + // a map, and that's nice). The assumption is that running with a + // replication factor of >7 is rare, and in cases in which it happens + // performance is a lesser concern (additionally the performance + // implications of an allocation here are far from drastic). + var stk [7]uint64 + var srt []uint64 + if len(stk) >= n { + srt = stk[:n] + } else { + srt = make([]uint64, n) + } + + { + // Fill the slice with the indexes observed. Any unused slots will be + // left as zero; these correspond to voters that may report in, but + // haven't yet. We fill from the right (since the zeroes will end up on + // the left after sorting below anyway). + i := n - 1 + for id := range c { + if idx, ok := l.AckedIndex(id); ok { + srt[i] = uint64(idx) + i-- + } + } + } + + // Sort by index. Use a bespoke algorithm (copied from the stdlib's sort + // package) to keep srt on the stack. + insertionSort(srt) + + // The smallest index into the array for which the value is acked by a + // quorum. In other words, from the end of the slice, move n/2+1 to the + // left (accounting for zero-indexing). + pos := n - (n/2 + 1) + return Index(srt[pos]) +} + +// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns +// a result indicating whether the vote is pending (i.e. neither a quorum of +// yes/no has been reached), won (a quorum of yes has been reached), or lost (a +// quorum of no has been reached). +func (c MajorityConfig) VoteResult(votes map[uint64]bool) VoteResult { + if len(c) == 0 { + // By convention, the elections on an empty config win. This comes in + // handy with joint quorums because it'll make a half-populated joint + // quorum behave like a majority quorum. 
+ return VoteWon + } + + ny := [2]int{} // vote counts for no and yes, respectively + + var missing int + for id := range c { + v, ok := votes[id] + if !ok { + missing++ + continue + } + if v { + ny[1]++ + } else { + ny[0]++ + } + } + + q := len(c)/2 + 1 + if ny[1] >= q { + return VoteWon + } + if ny[1]+missing >= q { + return VotePending + } + return VoteLost +} diff --git a/vendor/go.etcd.io/etcd/raft/quorum/quorum.go b/vendor/go.etcd.io/etcd/raft/quorum/quorum.go new file mode 100644 index 000000000..2899e46c9 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/quorum/quorum.go @@ -0,0 +1,58 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package quorum + +import ( + "math" + "strconv" +) + +// Index is a Raft log position. +type Index uint64 + +func (i Index) String() string { + if i == math.MaxUint64 { + return "∞" + } + return strconv.FormatUint(uint64(i), 10) +} + +// AckedIndexer allows looking up a commit index for a given ID of a voter +// from a corresponding MajorityConfig. +type AckedIndexer interface { + AckedIndex(voterID uint64) (idx Index, found bool) +} + +type mapAckIndexer map[uint64]Index + +func (m mapAckIndexer) AckedIndex(id uint64) (Index, bool) { + idx, ok := m[id] + return idx, ok +} + +// VoteResult indicates the outcome of a vote. +// +//go:generate stringer -type=VoteResult +type VoteResult uint8 + +const ( + // VotePending indicates that the decision of the vote depends on future + // votes, i.e. neither "yes" or "no" has reached quorum yet. + VotePending VoteResult = 1 + iota + // VoteLost indicates that the quorum has voted "no". + VoteLost + // VoteWon indicates that the quorum has voted "yes". + VoteWon +) diff --git a/vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go b/vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go new file mode 100644 index 000000000..9eca8fd0c --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/quorum/voteresult_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=VoteResult"; DO NOT EDIT. + +package quorum + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
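// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored diff): it exercises the new quorum
// package above. CommittedIndex returns the highest index acknowledged by a
// majority, and a JointConfig requires both constituent majorities. ackMap is
// a local helper implementing the exported AckedIndexer interface.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/quorum"
)

type ackMap map[uint64]quorum.Index

func (m ackMap) AckedIndex(id uint64) (quorum.Index, bool) {
	idx, ok := m[id]
	return idx, ok
}

func main() {
	// Three voters; voter 3 has not acknowledged anything yet.
	c := quorum.MajorityConfig{1: {}, 2: {}, 3: {}}
	fmt.Println(c.CommittedIndex(ackMap{1: 5, 2: 3})) // 3: the majority {1,2} has acked up to 3

	// A vote is won once a majority says yes, even with votes still missing.
	fmt.Println(c.VoteResult(map[uint64]bool{1: true, 2: true})) // VoteWon

	// Joint configuration: both halves must reach quorum independently.
	j := quorum.JointConfig{
		quorum.MajorityConfig{1: {}, 2: {}, 3: {}},
		quorum.MajorityConfig{4: {}, 5: {}, 6: {}},
	}
	fmt.Println(j.CommittedIndex(ackMap{1: 5, 2: 5, 4: 2, 5: 2})) // 2: the minimum of the two majorities
}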
+ var x [1]struct{} + _ = x[VotePending-1] + _ = x[VoteLost-2] + _ = x[VoteWon-3] +} + +const _VoteResult_name = "VotePendingVoteLostVoteWon" + +var _VoteResult_index = [...]uint8{0, 11, 19, 26} + +func (i VoteResult) String() string { + i -= 1 + if i >= VoteResult(len(_VoteResult_index)-1) { + return "VoteResult(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _VoteResult_name[_VoteResult_index[i]:_VoteResult_index[i+1]] +} diff --git a/vendor/github.com/coreos/etcd/raft/raft.go b/vendor/go.etcd.io/etcd/raft/raft.go similarity index 59% rename from vendor/github.com/coreos/etcd/raft/raft.go rename to vendor/go.etcd.io/etcd/raft/raft.go index 22ff138e9..d3c3f4257 100644 --- a/vendor/github.com/coreos/etcd/raft/raft.go +++ b/vendor/go.etcd.io/etcd/raft/raft.go @@ -25,7 +25,10 @@ import ( "sync" "time" - pb "github.com/coreos/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/confchange" + "go.etcd.io/etcd/raft/quorum" + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" ) // None is a placeholder node ID used when there is no leader. @@ -67,9 +70,13 @@ const ( campaignTransfer CampaignType = "CampaignTransfer" ) +// ErrProposalDropped is returned when the proposal is ignored by some cases, +// so that the proposer can be notified and fail fast. +var ErrProposalDropped = errors.New("raft proposal dropped") + // lockedRand is a small wrapper around rand.Rand to provide -// synchronization. Only the methods needed by the code are exposed -// (e.g. Intn). +// synchronization among multiple raft groups. Only the methods needed +// by the code are exposed (e.g. Intn). type lockedRand struct { mu sync.Mutex rand *rand.Rand @@ -116,8 +123,9 @@ type Config struct { // used for testing right now. peers []uint64 - // learners contains the IDs of all leaner nodes (including self if the local node is a leaner) in the raft cluster. - // learners only receives entries from the leader node. It does not vote or promote itself. + // learners contains the IDs of all learner nodes (including self if the + // local node is a learner) in the raft cluster. learners only receives + // entries from the leader node. It does not vote or promote itself. learners []uint64 // ElectionTick is the number of Node.Tick invocations that must pass between @@ -143,12 +151,20 @@ type Config struct { // applied entries. This is a very application dependent configuration. Applied uint64 - // MaxSizePerMsg limits the max size of each append message. Smaller value - // lowers the raft recovery cost(initial probing and message lost during normal - // operation). On the other side, it might affect the throughput during normal - // replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per - // message. + // MaxSizePerMsg limits the max byte size of each append message. Smaller + // value lowers the raft recovery cost(initial probing and message lost + // during normal operation). On the other side, it might affect the + // throughput during normal replication. Note: math.MaxUint64 for unlimited, + // 0 for at most one entry per message. MaxSizePerMsg uint64 + // MaxCommittedSizePerReady limits the size of the committed entries which + // can be applied. + MaxCommittedSizePerReady uint64 + // MaxUncommittedEntriesSize limits the aggregate byte size of the + // uncommitted entries that may be appended to a leader's log. Once this + // limit is exceeded, proposals will begin to return ErrProposalDropped + // errors. Note: 0 for no limit. 
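// ---------------------------------------------------------------------------
// Editor's sketch (not part of the vendored diff): with an uncommitted-entry
// size limit in place, Node.Propose can now fail with raft.ErrProposalDropped
// (surfaced through the stepWait path shown earlier), so callers are expected
// to retry. The backoff duration below is an arbitrary placeholder.
// ---------------------------------------------------------------------------
package proposer

import (
	"context"
	"time"

	"go.etcd.io/etcd/raft"
)

// ProposeWithRetry keeps re-proposing data while raft reports the proposal as
// dropped; any other error (context cancellation, ErrStopped, ...) is returned.
func ProposeWithRetry(ctx context.Context, n raft.Node, data []byte) error {
	for {
		err := n.Propose(ctx, data)
		if err != raft.ErrProposalDropped {
			return err // nil on success, or a terminal error
		}
		select {
		case <-time.After(100 * time.Millisecond): // placeholder backoff
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}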
+ MaxUncommittedEntriesSize uint64 // MaxInflightMsgs limits the max number of in-flight append messages during // optimistic replication phase. The application transportation layer usually // has its own sending buffer over TCP/UDP. Setting MaxInflightMsgs to avoid @@ -187,7 +203,7 @@ type Config struct { // this feature would be in a situation where the Raft leader is used to // compute the data of a proposal, for example, adding a timestamp from a // hybrid logical clock to data in a monotonically increasing way. Forwarding - // should be disabled to prevent a follower with an innaccurate hybrid + // should be disabled to prevent a follower with an inaccurate hybrid // logical clock from assigning the timestamp and then forwarding the data // to the leader. DisableProposalForwarding bool @@ -210,6 +226,16 @@ func (c *Config) validate() error { return errors.New("storage cannot be nil") } + if c.MaxUncommittedEntriesSize == 0 { + c.MaxUncommittedEntriesSize = noLimit + } + + // default MaxCommittedSizePerReady to MaxSizePerMsg because they were + // previously the same parameter. + if c.MaxCommittedSizePerReady == 0 { + c.MaxCommittedSizePerReady = c.MaxSizePerMsg + } + if c.MaxInflightMsgs <= 0 { return errors.New("max inflight messages must be greater than 0") } @@ -236,18 +262,16 @@ type raft struct { // the log raftLog *raftLog - maxInflight int - maxMsgSize uint64 - prs map[uint64]*Progress - learnerPrs map[uint64]*Progress + maxMsgSize uint64 + maxUncommittedSize uint64 + // TODO(tbg): rename to trk. + prs tracker.ProgressTracker state StateType // isLearner is true if the local raft node is a learner. isLearner bool - votes map[uint64]bool - msgs []pb.Message // the leader id @@ -255,8 +279,17 @@ type raft struct { // leadTransferee is id of the leader transfer target when its value is not zero. // Follow the procedure defined in raft thesis 3.10. leadTransferee uint64 - // New configuration is ignored if there exists unapplied configuration. - pendingConf bool + // Only one conf change may be pending (in the log, but not yet + // applied) at a time. This is enforced via pendingConfIndex, which + // is set to a value >= the log index of the latest pending + // configuration change (if any). Config changes are only allowed to + // be proposed if the leader's applied index is greater than this + // value. + pendingConfIndex uint64 + // an estimate of the size of the uncommitted tail of the Raft log. Used to + // prevent unbounded log growth. Only maintained by the leader. Reset on + // term changes. + uncommittedSize uint64 readOnly *readOnly @@ -291,32 +324,31 @@ func newRaft(c *Config) *raft { if err := c.validate(); err != nil { panic(err.Error()) } - raftlog := newLog(c.Storage, c.Logger) + raftlog := newLogWithSize(c.Storage, c.Logger, c.MaxCommittedSizePerReady) hs, cs, err := c.Storage.InitialState() if err != nil { panic(err) // TODO(bdarnell) } - peers := c.peers - learners := c.learners - if len(cs.Nodes) > 0 || len(cs.Learners) > 0 { - if len(peers) > 0 || len(learners) > 0 { + + if len(c.peers) > 0 || len(c.learners) > 0 { + if len(cs.Voters) > 0 || len(cs.Learners) > 0 { // TODO(bdarnell): the peers argument is always nil except in // tests; the argument should be removed and these tests should be // updated to specify their nodes through a snapshot. 
- panic("cannot specify both newRaft(peers, learners) and ConfState.(Nodes, Learners)") + panic("cannot specify both newRaft(peers, learners) and ConfState.(Voters, Learners)") } - peers = cs.Nodes - learners = cs.Learners + cs.Voters = c.peers + cs.Learners = c.learners } + r := &raft{ id: c.ID, lead: None, isLearner: false, raftLog: raftlog, maxMsgSize: c.MaxSizePerMsg, - maxInflight: c.MaxInflightMsgs, - prs: make(map[uint64]*Progress), - learnerPrs: make(map[uint64]*Progress), + maxUncommittedSize: c.MaxUncommittedEntriesSize, + prs: tracker.MakeProgressTracker(c.MaxInflightMsgs), electionTimeout: c.ElectionTick, heartbeatTimeout: c.HeartbeatTick, logger: c.Logger, @@ -325,20 +357,17 @@ func newRaft(c *Config) *raft { readOnly: newReadOnly(c.ReadOnlyOption), disableProposalForwarding: c.DisableProposalForwarding, } - for _, p := range peers { - r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)} - } - for _, p := range learners { - if _, ok := r.prs[p]; ok { - panic(fmt.Sprintf("node %x is in both learner and peer list", p)) - } - r.learnerPrs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight), IsLearner: true} - if r.id == p { - r.isLearner = true - } + + cfg, prs, err := confchange.Restore(confchange.Changer{ + Tracker: r.prs, + LastIndex: raftlog.lastIndex(), + }, cs) + if err != nil { + panic(err) } + assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, prs)) - if !isHardStateEqual(hs, emptyState) { + if !IsEmptyHardState(hs) { r.loadState(hs) } if c.Applied > 0 { @@ -347,7 +376,7 @@ func newRaft(c *Config) *raft { r.becomeFollower(r.Term, None) var nodesStrs []string - for _, n := range r.nodes() { + for _, n := range r.prs.VoterNodes() { nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n)) } @@ -368,20 +397,6 @@ func (r *raft) hardState() pb.HardState { } } -func (r *raft) quorum() int { return len(r.prs)/2 + 1 } - -func (r *raft) nodes() []uint64 { - nodes := make([]uint64, 0, len(r.prs)+len(r.learnerPrs)) - for id := range r.prs { - nodes = append(nodes, id) - } - for id := range r.learnerPrs { - nodes = append(nodes, id) - } - sort.Sort(uint64Slice(nodes)) - return nodes -} - // send persists state to stable storage and then sends to its mailbox. func (r *raft) send(m pb.Message) { m.From = r.id @@ -416,30 +431,35 @@ func (r *raft) send(m pb.Message) { r.msgs = append(r.msgs, m) } -func (r *raft) getProgress(id uint64) *Progress { - if pr, ok := r.prs[id]; ok { - return pr - } - - return r.learnerPrs[id] +// sendAppend sends an append RPC with new entries (if any) and the +// current commit index to the given peer. +func (r *raft) sendAppend(to uint64) { + r.maybeSendAppend(to, true) } -// sendAppend sends RPC, with entries to the given peer. -func (r *raft) sendAppend(to uint64) { - pr := r.getProgress(to) +// maybeSendAppend sends an append RPC with new entries to the given peer, +// if necessary. Returns true if a message was sent. The sendIfEmpty +// argument controls whether messages with no entries will be sent +// ("empty" messages are useful to convey updated Commit indexes, but +// are undesirable when we're sending multiple messages in a batch). 
+func (r *raft) maybeSendAppend(to uint64, sendIfEmpty bool) bool { + pr := r.prs.Progress[to] if pr.IsPaused() { - return + return false } m := pb.Message{} m.To = to term, errt := r.raftLog.term(pr.Next - 1) ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize) + if len(ents) == 0 && !sendIfEmpty { + return false + } if errt != nil || erre != nil { // send snapshot if we failed to get term or entries if !pr.RecentActive { r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to) - return + return false } m.Type = pb.MsgSnap @@ -447,7 +467,7 @@ func (r *raft) sendAppend(to uint64) { if err != nil { if err == ErrSnapshotTemporarilyUnavailable { r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to) - return + return false } panic(err) // TODO(bdarnell) } @@ -458,7 +478,7 @@ func (r *raft) sendAppend(to uint64) { sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]", r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr) - pr.becomeSnapshot(sindex) + pr.BecomeSnapshot(sindex) r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr) } else { m.Type = pb.MsgApp @@ -468,22 +488,23 @@ func (r *raft) sendAppend(to uint64) { m.Commit = r.raftLog.committed if n := len(m.Entries); n != 0 { switch pr.State { - // optimistically increase the next when in ProgressStateReplicate - case ProgressStateReplicate: + // optimistically increase the next when in StateReplicate + case tracker.StateReplicate: last := m.Entries[n-1].Index - pr.optimisticUpdate(last) - pr.ins.add(last) - case ProgressStateProbe: - pr.pause() + pr.OptimisticUpdate(last) + pr.Inflights.Add(last) + case tracker.StateProbe: + pr.ProbeSent = true default: r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State) } } } r.send(m) + return true } -// sendHeartbeat sends an empty MsgApp +// sendHeartbeat sends a heartbeat RPC to the given peer. func (r *raft) sendHeartbeat(to uint64, ctx []byte) { // Attach the commit as min(to.matched, r.committed). // When the leader sends out heartbeat message, @@ -491,7 +512,7 @@ func (r *raft) sendHeartbeat(to uint64, ctx []byte) { // or it might not have all the committed entries. // The leader MUST NOT forward the follower's commit to // an unmatched index. - commit := min(r.getProgress(to).Match, r.raftLog.committed) + commit := min(r.prs.Progress[to].Match, r.raftLog.committed) m := pb.Message{ To: to, Type: pb.MsgHeartbeat, @@ -502,24 +523,13 @@ func (r *raft) sendHeartbeat(to uint64, ctx []byte) { r.send(m) } -func (r *raft) forEachProgress(f func(id uint64, pr *Progress)) { - for id, pr := range r.prs { - f(id, pr) - } - - for id, pr := range r.learnerPrs { - f(id, pr) - } -} - // bcastAppend sends RPC, with entries to all peers that are not up-to-date // according to the progress recorded in r.prs. 
func (r *raft) bcastAppend() { - r.forEachProgress(func(id uint64, _ *Progress) { + r.prs.Visit(func(id uint64, _ *tracker.Progress) { if id == r.id { return } - r.sendAppend(id) }) } @@ -535,7 +545,7 @@ func (r *raft) bcastHeartbeat() { } func (r *raft) bcastHeartbeatWithCtx(ctx []byte) { - r.forEachProgress(func(id uint64, _ *Progress) { + r.prs.Visit(func(id uint64, _ *tracker.Progress) { if id == r.id { return } @@ -543,17 +553,51 @@ func (r *raft) bcastHeartbeatWithCtx(ctx []byte) { }) } +func (r *raft) advance(rd Ready) { + // If entries were applied (or a snapshot), update our cursor for + // the next Ready. Note that if the current HardState contains a + // new Commit index, this does not mean that we're also applying + // all of the new entries due to commit pagination by size. + if index := rd.appliedCursor(); index > 0 { + r.raftLog.appliedTo(index) + if r.prs.Config.AutoLeave && index >= r.pendingConfIndex && r.state == StateLeader { + // If the current (and most recent, at least for this leader's term) + // configuration should be auto-left, initiate that now. + ccdata, err := (&pb.ConfChangeV2{}).Marshal() + if err != nil { + panic(err) + } + ent := pb.Entry{ + Type: pb.EntryConfChangeV2, + Data: ccdata, + } + if !r.appendEntry(ent) { + // If we could not append the entry, bump the pending conf index + // so that we'll try again later. + // + // TODO(tbg): test this case. + r.pendingConfIndex = r.raftLog.lastIndex() + } else { + r.logger.Infof("initiating automatic transition out of joint configuration %s", r.prs.Config) + } + } + } + r.reduceUncommittedSize(rd.CommittedEntries) + + if len(rd.Entries) > 0 { + e := rd.Entries[len(rd.Entries)-1] + r.raftLog.stableTo(e.Index, e.Term) + } + if !IsEmptySnap(rd.Snapshot) { + r.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index) + } +} + // maybeCommit attempts to advance the commit index. Returns true if // the commit index changed (in which case the caller should call // r.bcastAppend). func (r *raft) maybeCommit() bool { - // TODO(bmizerany): optimize.. Currently naive - mis := make(uint64Slice, 0, len(r.prs)) - for _, p := range r.prs { - mis = append(mis, p.Match) - } - sort.Sort(sort.Reverse(mis)) - mci := mis[r.quorum()-1] + mci := r.prs.Committed() return r.raftLog.maybeCommit(mci, r.Term) } @@ -570,28 +614,45 @@ func (r *raft) reset(term uint64) { r.abortLeaderTransfer() - r.votes = make(map[uint64]bool) - r.forEachProgress(func(id uint64, pr *Progress) { - *pr = Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight), IsLearner: pr.IsLearner} + r.prs.ResetVotes() + r.prs.Visit(func(id uint64, pr *tracker.Progress) { + *pr = tracker.Progress{ + Match: 0, + Next: r.raftLog.lastIndex() + 1, + Inflights: tracker.NewInflights(r.prs.MaxInflight), + IsLearner: pr.IsLearner, + } if id == r.id { pr.Match = r.raftLog.lastIndex() } }) - r.pendingConf = false + r.pendingConfIndex = 0 + r.uncommittedSize = 0 r.readOnly = newReadOnly(r.readOnly.option) } -func (r *raft) appendEntry(es ...pb.Entry) { +func (r *raft) appendEntry(es ...pb.Entry) (accepted bool) { li := r.raftLog.lastIndex() for i := range es { es[i].Term = r.Term es[i].Index = li + 1 + uint64(i) } - r.raftLog.append(es...) - r.getProgress(r.id).maybeUpdate(r.raftLog.lastIndex()) + // Track the size of this uncommitted proposal. + if !r.increaseUncommittedSize(es) { + r.logger.Debugf( + "%x appending new entries to log would exceed uncommitted entry size limit; dropping proposal", + r.id, + ) + // Drop the proposal. 
+ return false + } + // use latest "last" index after truncate/append + li = r.raftLog.append(es...) + r.prs.Progress[r.id].MaybeUpdate(li) // Regardless of maybeCommit's return, our caller will call bcastAppend. r.maybeCommit() + return true } // tickElection is run by followers and candidates after r.electionTimeout. @@ -661,7 +722,7 @@ func (r *raft) becomePreCandidate() { // but doesn't change anything else. In particular it does not increase // r.Term or change r.Vote. r.step = stepCandidate - r.votes = make(map[uint64]bool) + r.prs.ResetVotes() r.tick = r.tickElection r.lead = None r.state = StatePreCandidate @@ -678,24 +739,40 @@ func (r *raft) becomeLeader() { r.tick = r.tickHeartbeat r.lead = r.id r.state = StateLeader - ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit) - if err != nil { - r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err) - } - - nconf := numOfPendingConf(ents) - if nconf > 1 { - panic("unexpected multiple uncommitted config entry") - } - if nconf == 1 { - r.pendingConf = true - } - - r.appendEntry(pb.Entry{Data: nil}) + // Followers enter replicate mode when they've been successfully probed + // (perhaps after having received a snapshot as a result). The leader is + // trivially in this state. Note that r.reset() has initialized this + // progress with the last index already. + r.prs.Progress[r.id].BecomeReplicate() + + // Conservatively set the pendingConfIndex to the last index in the + // log. There may or may not be a pending config change, but it's + // safe to delay any future proposals until we commit all our + // pending log entries, and scanning the entire tail of the log + // could be expensive. + r.pendingConfIndex = r.raftLog.lastIndex() + + emptyEnt := pb.Entry{Data: nil} + if !r.appendEntry(emptyEnt) { + // This won't happen because we just called reset() above. + r.logger.Panic("empty entry was dropped") + } + // As a special case, don't count the initial empty entry towards the + // uncommitted log quota. This is because we want to preserve the + // behavior of allowing one entry larger than quota if the current + // usage is zero. + r.reduceUncommittedSize([]pb.Entry{emptyEnt}) r.logger.Infof("%x became leader at term %d", r.id, r.Term) } +// campaign transitions the raft instance to candidate state. This must only be +// called after verifying that this is a legitimate transition. func (r *raft) campaign(t CampaignType) { + if !r.promotable() { + // This path should not be hit (callers are supposed to check), but + // better safe than sorry. + r.logger.Warningf("%x is unpromotable; campaign() should have been called", r.id) + } var term uint64 var voteMsg pb.MessageType if t == campaignPreElection { @@ -708,7 +785,7 @@ func (r *raft) campaign(t CampaignType) { voteMsg = pb.MsgVote term = r.Term } - if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) { + if _, _, res := r.poll(r.id, voteRespMsgType(voteMsg), true); res == quorum.VoteWon { // We won the election after voting for ourselves (which must mean that // this is a single-node cluster). Advance to the next state. 
if t == campaignPreElection { @@ -718,7 +795,16 @@ func (r *raft) campaign(t CampaignType) { } return } - for id := range r.prs { + var ids []uint64 + { + idMap := r.prs.Voters.IDs() + ids = make([]uint64, 0, len(idMap)) + for id := range idMap { + ids = append(ids, id) + } + sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] }) + } + for _, id := range ids { if id == r.id { continue } @@ -733,21 +819,14 @@ func (r *raft) campaign(t CampaignType) { } } -func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) { +func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int, rejected int, result quorum.VoteResult) { if v { r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term) } else { r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term) } - if _, ok := r.votes[id]; !ok { - r.votes[id] = v - } - for _, vv := range r.votes { - if vv { - granted++ - } - } - return granted + r.prs.RecordVote(id, v) + return r.prs.TallyVotes() } func (r *raft) Step(m pb.Message) error { @@ -787,7 +866,7 @@ func (r *raft) Step(m pb.Message) error { } case m.Term < r.Term: - if r.checkQuorum && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) { + if (r.checkQuorum || r.preVote) && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) { // We have received messages from a leader at a lower term. It is possible // that these messages were simply delayed in the network, but this could // also mean that this node has advanced its term number during a network @@ -800,8 +879,23 @@ func (r *raft) Step(m pb.Message) error { // nodes that have been removed from the cluster's configuration: a // removed node will send MsgVotes (or MsgPreVotes) which will be ignored, // but it will not receive MsgApp or MsgHeartbeat, so it will not create - // disruptive term increases + // disruptive term increases, by notifying leader of this node's activeness. + // The above comments also true for Pre-Vote + // + // When follower gets isolated, it soon starts an election ending + // up with a higher term than leader, although it won't receive enough + // votes to win the election. When it regains connectivity, this response + // with "pb.MsgAppResp" of higher term would force leader to step down. + // However, this disruption is inevitable to free this stuck node with + // fresh election. This can be prevented with Pre-Vote phase. r.send(pb.Message{To: m.From, Type: pb.MsgAppResp}) + } else if m.Type == pb.MsgPreVote { + // Before Pre-Vote enable, there may have candidate with higher term, + // but less log. After update to Pre-Vote, the cluster may deadlock if + // we drop messages with a lower term. 
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d", + r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) + r.send(pb.Message{To: m.From, Term: r.Term, Type: pb.MsgPreVoteResp, Reject: true}) } else { // ignore other cases r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]", @@ -813,6 +907,10 @@ func (r *raft) Step(m pb.Message) error { switch m.Type { case pb.MsgHup: if r.state != StateLeader { + if !r.promotable() { + r.logger.Warningf("%x is unpromotable and can not campaign; ignoring MsgHup", r.id) + return nil + } ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit) if err != nil { r.logger.Panicf("unexpected error getting unapplied entries (%v)", err) @@ -833,21 +931,38 @@ func (r *raft) Step(m pb.Message) error { } case pb.MsgVote, pb.MsgPreVote: - if r.isLearner { - // TODO: learner may need to vote, in case of node down when confchange. - r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: learner can not vote", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) - return nil - } - // The m.Term > r.Term clause is for MsgPreVote. For MsgVote m.Term should - // always equal r.Term. - if (r.Vote == None || m.Term > r.Term || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) { + // We can vote if this is a repeat of a vote we've already cast... + canVote := r.Vote == m.From || + // ...we haven't voted and we don't think there's a leader yet in this term... + (r.Vote == None && r.lead == None) || + // ...or this is a PreVote for a future term... + (m.Type == pb.MsgPreVote && m.Term > r.Term) + // ...and we believe the candidate is up to date. + if canVote && r.raftLog.isUpToDate(m.Index, m.LogTerm) { + // Note: it turns out that that learners must be allowed to cast votes. + // This seems counter- intuitive but is necessary in the situation in which + // a learner has been promoted (i.e. is now a voter) but has not learned + // about this yet. + // For example, consider a group in which id=1 is a learner and id=2 and + // id=3 are voters. A configuration change promoting 1 can be committed on + // the quorum `{2,3}` without the config change being appended to the + // learner's log. If the leader (say 2) fails, there are de facto two + // voters remaining. Only 3 can win an election (due to its log containing + // all committed entries), but to do so it will need 1 to vote. But 1 + // considers itself a learner and will continue to do so until 3 has + // stepped up as leader, replicates the conf change to 1, and 1 applies it. + // Ultimately, by receiving a request to vote, the learner realizes that + // the candidate believes it to be a voter, and that it should act + // accordingly. The candidate's config may be stale, too; but in that case + // it won't win the election, at least in the absence of the bug discussed + // in: + // https://github.com/etcd-io/etcd/issues/7625#issuecomment-488798263. r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d", r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) // When responding to Msg{Pre,}Vote messages we include the term - // from the message, not the local term. To see why consider the + // from the message, not the local term. 
To see why, consider the // case where a single node was previously partitioned away and - // it's local term is now of date. If we include the local term + // it's local term is now out of date. If we include the local term // (recall that for pre-votes we don't update the local term), the // (pre-)campaigning node on the other end will proceed to ignore // the message (it ignores all out of date messages). @@ -866,57 +981,110 @@ func (r *raft) Step(m pb.Message) error { } default: - r.step(r, m) + err := r.step(r, m) + if err != nil { + return err + } } return nil } -type stepFunc func(r *raft, m pb.Message) +type stepFunc func(r *raft, m pb.Message) error -func stepLeader(r *raft, m pb.Message) { +func stepLeader(r *raft, m pb.Message) error { // These message types do not require any progress for m.From. switch m.Type { case pb.MsgBeat: r.bcastHeartbeat() - return + return nil case pb.MsgCheckQuorum: - if !r.checkQuorumActive() { + // The leader should always see itself as active. As a precaution, handle + // the case in which the leader isn't in the configuration any more (for + // example if it just removed itself). + // + // TODO(tbg): I added a TODO in removeNode, it doesn't seem that the + // leader steps down when removing itself. I might be missing something. + if pr := r.prs.Progress[r.id]; pr != nil { + pr.RecentActive = true + } + if !r.prs.QuorumActive() { r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id) r.becomeFollower(r.Term, None) } - return + // Mark everyone (but ourselves) as inactive in preparation for the next + // CheckQuorum. + r.prs.Visit(func(id uint64, pr *tracker.Progress) { + if id != r.id { + pr.RecentActive = false + } + }) + return nil case pb.MsgProp: if len(m.Entries) == 0 { r.logger.Panicf("%x stepped empty MsgProp", r.id) } - if _, ok := r.prs[r.id]; !ok { + if r.prs.Progress[r.id] == nil { // If we are not currently a member of the range (i.e. this node // was removed from the configuration while serving as leader), // drop any new proposals. 
- return + return ErrProposalDropped } if r.leadTransferee != None { r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee) - return + return ErrProposalDropped } - for i, e := range m.Entries { + for i := range m.Entries { + e := &m.Entries[i] + var cc pb.ConfChangeI if e.Type == pb.EntryConfChange { - if r.pendingConf { - r.logger.Infof("propose conf %s ignored since pending unapplied configuration", e.String()) + var ccc pb.ConfChange + if err := ccc.Unmarshal(e.Data); err != nil { + panic(err) + } + cc = ccc + } else if e.Type == pb.EntryConfChangeV2 { + var ccc pb.ConfChangeV2 + if err := ccc.Unmarshal(e.Data); err != nil { + panic(err) + } + cc = ccc + } + if cc != nil { + alreadyPending := r.pendingConfIndex > r.raftLog.applied + alreadyJoint := len(r.prs.Config.Voters[1]) > 0 + wantsLeaveJoint := len(cc.AsV2().Changes) == 0 + + var refused string + if alreadyPending { + refused = fmt.Sprintf("possible unapplied conf change at index %d (applied to %d)", r.pendingConfIndex, r.raftLog.applied) + } else if alreadyJoint && !wantsLeaveJoint { + refused = "must transition out of joint config first" + } else if !alreadyJoint && wantsLeaveJoint { + refused = "not in joint state; refusing empty conf change" + } + + if refused != "" { + r.logger.Infof("%x ignoring conf change %v at config %s: %s", r.id, cc, r.prs.Config, refused) m.Entries[i] = pb.Entry{Type: pb.EntryNormal} + } else { + r.pendingConfIndex = r.raftLog.lastIndex() + uint64(i) + 1 } - r.pendingConf = true } } - r.appendEntry(m.Entries...) + + if !r.appendEntry(m.Entries...) { + return ErrProposalDropped + } r.bcastAppend() - return + return nil case pb.MsgReadIndex: - if r.quorum() > 1 { + // If more than the local vote is needed, go through a full broadcast, + // otherwise optimize. + if !r.prs.IsSingleton() { if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term { // Reject read only request when this leader has not committed any log entry at its term. - return + return nil } // thinking: use an interally defined context instead of the user given context. @@ -925,62 +1093,85 @@ func stepLeader(r *raft, m pb.Message) { switch r.readOnly.option { case ReadOnlySafe: r.readOnly.addRequest(r.raftLog.committed, m) + // The local node automatically acks the request. + r.readOnly.recvAck(r.id, m.Entries[0].Data) r.bcastHeartbeatWithCtx(m.Entries[0].Data) case ReadOnlyLeaseBased: ri := r.raftLog.committed if m.From == None || m.From == r.id { // from local member - r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data}) + r.readStates = append(r.readStates, ReadState{Index: ri, RequestCtx: m.Entries[0].Data}) } else { r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries}) } } - } else { - r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data}) + } else { // only one voting member (the leader) in the cluster + if m.From == None || m.From == r.id { // from leader itself + r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data}) + } else { // from learner member + r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: r.raftLog.committed, Entries: m.Entries}) + } } - return + return nil } // All other message types require a progress for m.From (pr). 
- pr := r.getProgress(m.From) + pr := r.prs.Progress[m.From] if pr == nil { r.logger.Debugf("%x no progress available for %x", r.id, m.From) - return + return nil } switch m.Type { case pb.MsgAppResp: pr.RecentActive = true if m.Reject { - r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d", + r.logger.Debugf("%x received MsgAppResp(MsgApp was rejected, lastindex: %d) from %x for index %d", r.id, m.RejectHint, m.From, m.Index) - if pr.maybeDecrTo(m.Index, m.RejectHint) { + if pr.MaybeDecrTo(m.Index, m.RejectHint) { r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr) - if pr.State == ProgressStateReplicate { - pr.becomeProbe() + if pr.State == tracker.StateReplicate { + pr.BecomeProbe() } r.sendAppend(m.From) } } else { oldPaused := pr.IsPaused() - if pr.maybeUpdate(m.Index) { + if pr.MaybeUpdate(m.Index) { switch { - case pr.State == ProgressStateProbe: - pr.becomeReplicate() - case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort(): - r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr) - pr.becomeProbe() - case pr.State == ProgressStateReplicate: - pr.ins.freeTo(m.Index) + case pr.State == tracker.StateProbe: + pr.BecomeReplicate() + case pr.State == tracker.StateSnapshot && pr.Match >= pr.PendingSnapshot: + // TODO(tbg): we should also enter this branch if a snapshot is + // received that is below pr.PendingSnapshot but which makes it + // possible to use the log again. + r.logger.Debugf("%x recovered from needing snapshot, resumed sending replication messages to %x [%s]", r.id, m.From, pr) + // Transition back to replicating state via probing state + // (which takes the snapshot into account). If we didn't + // move to replicating state, that would only happen with + // the next round of appends (but there may not be a next + // round for a while, exposing an inconsistent RaftStatus). + pr.BecomeProbe() + pr.BecomeReplicate() + case pr.State == tracker.StateReplicate: + pr.Inflights.FreeLE(m.Index) } if r.maybeCommit() { r.bcastAppend() } else if oldPaused { - // update() reset the wait state on this node. If we had delayed sending - // an update before, send it now. + // If we were paused before, this node may be missing the + // latest commit index, so send it. r.sendAppend(m.From) } + // We've updated flow control information above, which may + // allow us to send multiple (size-limited) in-flight messages + // at once (such as when transitioning from probe to + // replicate, or when freeTo() covers multiple messages). If + // we have more entries to send, send as many messages as we + // can (without sending empty messages for the commit index) + for r.maybeSendAppend(m.From, false) { + } // Transfer leadership is in progress. if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() { r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From) @@ -990,23 +1181,22 @@ func stepLeader(r *raft, m pb.Message) { } case pb.MsgHeartbeatResp: pr.RecentActive = true - pr.resume() + pr.ProbeSent = false // free one slot for the full inflights window to allow progress. 
- if pr.State == ProgressStateReplicate && pr.ins.full() { - pr.ins.freeFirstOne() + if pr.State == tracker.StateReplicate && pr.Inflights.Full() { + pr.Inflights.FreeFirstOne() } if pr.Match < r.raftLog.lastIndex() { r.sendAppend(m.From) } if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 { - return + return nil } - ackCount := r.readOnly.recvAck(m) - if ackCount < r.quorum() { - return + if r.prs.Voters.VoteResult(r.readOnly.recvAck(m.From, m.Context)) != quorum.VoteWon { + return nil } rss := r.readOnly.advance(m) @@ -1019,32 +1209,38 @@ func stepLeader(r *raft, m pb.Message) { } } case pb.MsgSnapStatus: - if pr.State != ProgressStateSnapshot { - return + if pr.State != tracker.StateSnapshot { + return nil } + // TODO(tbg): this code is very similar to the snapshot handling in + // MsgAppResp above. In fact, the code there is more correct than the + // code here and should likely be updated to match (or even better, the + // logic pulled into a newly created Progress state machine handler). if !m.Reject { - pr.becomeProbe() + pr.BecomeProbe() r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr) } else { - pr.snapshotFailure() - pr.becomeProbe() + // NB: the order here matters or we'll be probing erroneously from + // the snapshot index, but the snapshot never applied. + pr.PendingSnapshot = 0 + pr.BecomeProbe() r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr) } - // If snapshot finish, wait for the msgAppResp from the remote node before sending - // out the next msgApp. + // If snapshot finish, wait for the MsgAppResp from the remote node before sending + // out the next MsgApp. // If snapshot failure, wait for a heartbeat interval before next try - pr.pause() + pr.ProbeSent = true case pb.MsgUnreachable: // During optimistic replication, if the remote becomes unreachable, // there is huge probability that a MsgApp is lost. - if pr.State == ProgressStateReplicate { - pr.becomeProbe() + if pr.State == tracker.StateReplicate { + pr.BecomeProbe() } r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr) case pb.MsgTransferLeader: if pr.IsLearner { r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id) - return + return nil } leadTransferee := m.From lastLeadTransferee := r.leadTransferee @@ -1052,14 +1248,14 @@ func stepLeader(r *raft, m pb.Message) { if lastLeadTransferee == leadTransferee { r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x", r.id, r.Term, leadTransferee, leadTransferee) - return + return nil } r.abortLeaderTransfer() r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee) } if leadTransferee == r.id { r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id) - return + return nil } // Transfer leadership to third party. r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee) @@ -1073,11 +1269,12 @@ func stepLeader(r *raft, m pb.Message) { r.sendAppend(leadTransferee) } } + return nil } // stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is // whether they respond to MsgVoteResp or MsgPreVoteResp. 
-func stepCandidate(r *raft, m pb.Message) { +func stepCandidate(r *raft, m pb.Message) error { // Only handle vote responses corresponding to our candidacy (while in // StateCandidate, we may get stale MsgPreVoteResp messages in this term from // our pre-candidate state). @@ -1090,44 +1287,47 @@ func stepCandidate(r *raft, m pb.Message) { switch m.Type { case pb.MsgProp: r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) - return + return ErrProposalDropped case pb.MsgApp: - r.becomeFollower(r.Term, m.From) + r.becomeFollower(m.Term, m.From) // always m.Term == r.Term r.handleAppendEntries(m) case pb.MsgHeartbeat: - r.becomeFollower(r.Term, m.From) + r.becomeFollower(m.Term, m.From) // always m.Term == r.Term r.handleHeartbeat(m) case pb.MsgSnap: - r.becomeFollower(m.Term, m.From) + r.becomeFollower(m.Term, m.From) // always m.Term == r.Term r.handleSnapshot(m) case myVoteRespType: - gr := r.poll(m.From, m.Type, !m.Reject) - r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr) - switch r.quorum() { - case gr: + gr, rj, res := r.poll(m.From, m.Type, !m.Reject) + r.logger.Infof("%x has received %d %s votes and %d vote rejections", r.id, gr, m.Type, rj) + switch res { + case quorum.VoteWon: if r.state == StatePreCandidate { r.campaign(campaignElection) } else { r.becomeLeader() r.bcastAppend() } - case len(r.votes) - gr: + case quorum.VoteLost: + // pb.MsgPreVoteResp contains future term of pre-candidate + // m.Term > r.Term; reuse r.Term r.becomeFollower(r.Term, None) } case pb.MsgTimeoutNow: r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From) } + return nil } -func stepFollower(r *raft, m pb.Message) { +func stepFollower(r *raft, m pb.Message) error { switch m.Type { case pb.MsgProp: if r.lead == None { r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) - return + return ErrProposalDropped } else if r.disableProposalForwarding { r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term) - return + return ErrProposalDropped } m.To = r.lead r.send(m) @@ -1146,7 +1346,7 @@ func stepFollower(r *raft, m pb.Message) { case pb.MsgTransferLeader: if r.lead == None { r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term) - return + return nil } m.To = r.lead r.send(m) @@ -1163,17 +1363,18 @@ func stepFollower(r *raft, m pb.Message) { case pb.MsgReadIndex: if r.lead == None { r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term) - return + return nil } m.To = r.lead r.send(m) case pb.MsgReadIndexResp: if len(m.Entries) != 1 { r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries)) - return + return nil } r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data}) } + return nil } func (r *raft) handleAppendEntries(m pb.Message) { @@ -1185,7 +1386,7 @@ func (r *raft) handleAppendEntries(m pb.Message) { if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok { r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex}) } else { - r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x", + r.logger.Debugf("%x [logterm: %d, index: %d] rejected MsgApp [logterm: %d, index: %d] from %x", r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, 
m.LogTerm, m.Index, m.From) r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()}) } @@ -1210,139 +1411,167 @@ func (r *raft) handleSnapshot(m pb.Message) { } // restore recovers the state machine from a snapshot. It restores the log and the -// configuration of state machine. +// configuration of state machine. If this method returns false, the snapshot was +// ignored, either because it was obsolete or because of an error. func (r *raft) restore(s pb.Snapshot) bool { if s.Metadata.Index <= r.raftLog.committed { return false } - if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) { - r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) - r.raftLog.commitTo(s.Metadata.Index) + if r.state != StateFollower { + // This is defense-in-depth: if the leader somehow ended up applying a + // snapshot, it could move into a new term without moving into a + // follower state. This should never fire, but if it did, we'd have + // prevented damage by returning early, so log only a loud warning. + // + // At the time of writing, the instance is guaranteed to be in follower + // state when this method is called. + r.logger.Warningf("%x attempted to restore snapshot as leader; should never happen", r.id) + r.becomeFollower(r.Term+1, None) return false } - // The normal peer can't become learner. - if !r.isLearner { - for _, id := range s.Metadata.ConfState.Learners { + // More defense-in-depth: throw away snapshot if recipient is not in the + // config. This shouuldn't ever happen (at the time of writing) but lots of + // code here and there assumes that r.id is in the progress tracker. + found := false + cs := s.Metadata.ConfState + for _, set := range [][]uint64{ + cs.Voters, + cs.Learners, + } { + for _, id := range set { if id == r.id { - r.logger.Errorf("%x can't become learner when restores snapshot [index: %d, term: %d]", r.id, s.Metadata.Index, s.Metadata.Term) - return false + found = true + break } } } + if !found { + r.logger.Warningf( + "%x attempted to restore snapshot but it is not in the ConfState %v; should never happen", + r.id, cs, + ) + return false + } - r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) + // Now go ahead and actually restore. 
+ + if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) { + r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) + r.raftLog.commitTo(s.Metadata.Index) + return false + } r.raftLog.restore(s) - r.prs = make(map[uint64]*Progress) - r.learnerPrs = make(map[uint64]*Progress) - r.restoreNode(s.Metadata.ConfState.Nodes, false) - r.restoreNode(s.Metadata.ConfState.Learners, true) - return true -} -func (r *raft) restoreNode(nodes []uint64, isLearner bool) { - for _, n := range nodes { - match, next := uint64(0), r.raftLog.lastIndex()+1 - if n == r.id { - match = next - 1 - r.isLearner = isLearner - } - r.setProgress(n, match, next, isLearner) - r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.getProgress(n)) + // Reset the configuration and add the (potentially updated) peers in anew. + r.prs = tracker.MakeProgressTracker(r.prs.MaxInflight) + cfg, prs, err := confchange.Restore(confchange.Changer{ + Tracker: r.prs, + LastIndex: r.raftLog.lastIndex(), + }, cs) + + if err != nil { + // This should never happen. Either there's a bug in our config change + // handling or the client corrupted the conf change. + panic(fmt.Sprintf("unable to restore config %+v: %s", cs, err)) } + + assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, prs)) + + pr := r.prs.Progress[r.id] + pr.MaybeUpdate(pr.Next - 1) // TODO(tbg): this is untested and likely unneeded + + r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] restored snapshot [index: %d, term: %d]", + r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) + return true } // promotable indicates whether state machine can be promoted to leader, // which is true when its own id is in progress list. func (r *raft) promotable() bool { - _, ok := r.prs[r.id] - return ok + pr := r.prs.Progress[r.id] + return pr != nil && !pr.IsLearner } -func (r *raft) addNode(id uint64) { - r.addNodeOrLearnerNode(id, false) -} - -func (r *raft) addLearner(id uint64) { - r.addNodeOrLearnerNode(id, true) -} - -func (r *raft) addNodeOrLearnerNode(id uint64, isLearner bool) { - r.pendingConf = false - pr := r.getProgress(id) - if pr == nil { - r.setProgress(id, 0, r.raftLog.lastIndex()+1, isLearner) - } else { - if isLearner && !pr.IsLearner { - // can only change Learner to Voter - r.logger.Infof("%x ignored addLeaner: do not support changing %x from raft peer to learner.", r.id, id) - return +func (r *raft) applyConfChange(cc pb.ConfChangeV2) pb.ConfState { + cfg, prs, err := func() (tracker.Config, tracker.ProgressMap, error) { + changer := confchange.Changer{ + Tracker: r.prs, + LastIndex: r.raftLog.lastIndex(), } - - if isLearner == pr.IsLearner { - // Ignore any redundant addNode calls (which can happen because the - // initial bootstrapping entries are applied twice). - return + if cc.LeaveJoint() { + return changer.LeaveJoint() + } else if autoLeave, ok := cc.EnterJoint(); ok { + return changer.EnterJoint(autoLeave, cc.Changes...) } + return changer.Simple(cc.Changes...) + }() - // change Learner to Voter, use origin Learner progress - delete(r.learnerPrs, id) - pr.IsLearner = false - r.prs[id] = pr - } - - if r.id == id { - r.isLearner = isLearner + if err != nil { + // TODO(tbg): return the error to the caller. + panic(err) } - // When a node is first added, we should mark it as recently active. 
- // Otherwise, CheckQuorum may cause us to step down if it is invoked - // before the added node has a chance to communicate with us. - pr = r.getProgress(id) - pr.RecentActive = true + return r.switchToConfig(cfg, prs) } -func (r *raft) removeNode(id uint64) { - r.delProgress(id) - r.pendingConf = false - - // do not try to commit or abort transferring if there is no nodes in the cluster. - if len(r.prs) == 0 && len(r.learnerPrs) == 0 { - return +// switchToConfig reconfigures this node to use the provided configuration. It +// updates the in-memory state and, when necessary, carries out additional +// actions such as reacting to the removal of nodes or changed quorum +// requirements. +// +// The inputs usually result from restoring a ConfState or applying a ConfChange. +func (r *raft) switchToConfig(cfg tracker.Config, prs tracker.ProgressMap) pb.ConfState { + r.prs.Config = cfg + r.prs.Progress = prs + + r.logger.Infof("%x switched to configuration %s", r.id, r.prs.Config) + cs := r.prs.ConfState() + pr, ok := r.prs.Progress[r.id] + + // Update whether the node itself is a learner, resetting to false when the + // node is removed. + r.isLearner = ok && pr.IsLearner + + if (!ok || r.isLearner) && r.state == StateLeader { + // This node is leader and was removed or demoted. We prevent demotions + // at the time writing but hypothetically we handle them the same way as + // removing the leader: stepping down into the next Term. + // + // TODO(tbg): step down (for sanity) and ask follower with largest Match + // to TimeoutNow (to avoid interruption). This might still drop some + // proposals but it's better than nothing. + // + // TODO(tbg): test this branch. It is untested at the time of writing. + return cs + } + + // The remaining steps only make sense if this node is the leader and there + // are other nodes. + if r.state != StateLeader || len(cs.Voters) == 0 { + return cs } - // The quorum size is now smaller, so see if any pending entries can - // be committed. if r.maybeCommit() { + // If the configuration change means that more entries are committed now, + // broadcast/append to everyone in the updated config. r.bcastAppend() - } - // If the removed node is the leadTransferee, then abort the leadership transferring. - if r.state == StateLeader && r.leadTransferee == id { + } else { + // Otherwise, still probe the newly added replicas; there's no reason to + // let them wait out a heartbeat interval (or the next incoming + // proposal). + r.prs.Visit(func(id uint64, pr *tracker.Progress) { + r.maybeSendAppend(id, false /* sendIfEmpty */) + }) + } + // If the the leadTransferee was removed, abort the leadership transfer. 
+ if _, tOK := r.prs.Progress[r.leadTransferee]; !tOK && r.leadTransferee != 0 { r.abortLeaderTransfer() } -} - -func (r *raft) resetPendingConf() { r.pendingConf = false } - -func (r *raft) setProgress(id, match, next uint64, isLearner bool) { - if !isLearner { - delete(r.learnerPrs, id) - r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)} - return - } - - if _, ok := r.prs[id]; ok { - panic(fmt.Sprintf("%x unexpected changing from voter to learner for %x", r.id, id)) - } - r.learnerPrs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight), IsLearner: true} -} -func (r *raft) delProgress(id uint64) { - delete(r.prs, id) - delete(r.learnerPrs, id) + return cs } func (r *raft) loadState(state pb.HardState) { @@ -1365,29 +1594,6 @@ func (r *raft) resetRandomizedElectionTimeout() { r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout) } -// checkQuorumActive returns true if the quorum is active from -// the view of the local raft state machine. Otherwise, it returns -// false. -// checkQuorumActive also resets all RecentActive to false. -func (r *raft) checkQuorumActive() bool { - var act int - - r.forEachProgress(func(id uint64, pr *Progress) { - if id == r.id { // self is always active - act++ - return - } - - if pr.RecentActive && !pr.IsLearner { - act++ - } - - pr.RecentActive = false - }) - - return act >= r.quorum() -} - func (r *raft) sendTimeoutNow(to uint64) { r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow}) } @@ -1396,6 +1602,49 @@ func (r *raft) abortLeaderTransfer() { r.leadTransferee = None } +// increaseUncommittedSize computes the size of the proposed entries and +// determines whether they would push leader over its maxUncommittedSize limit. +// If the new entries would exceed the limit, the method returns false. If not, +// the increase in uncommitted entry size is recorded and the method returns +// true. +func (r *raft) increaseUncommittedSize(ents []pb.Entry) bool { + var s uint64 + for _, e := range ents { + s += uint64(PayloadSize(e)) + } + + if r.uncommittedSize > 0 && r.uncommittedSize+s > r.maxUncommittedSize { + // If the uncommitted tail of the Raft log is empty, allow any size + // proposal. Otherwise, limit the size of the uncommitted tail of the + // log and drop any proposal that would push the size over the limit. + return false + } + r.uncommittedSize += s + return true +} + +// reduceUncommittedSize accounts for the newly committed entries by decreasing +// the uncommitted entry size limit. +func (r *raft) reduceUncommittedSize(ents []pb.Entry) { + if r.uncommittedSize == 0 { + // Fast-path for followers, who do not track or enforce the limit. + return + } + + var s uint64 + for _, e := range ents { + s += uint64(PayloadSize(e)) + } + if s > r.uncommittedSize { + // uncommittedSize may underestimate the size of the uncommitted Raft + // log tail but will never overestimate it. Saturate at 0 instead of + // allowing overflow. 
+ r.uncommittedSize = 0 + } else { + r.uncommittedSize -= s + } +} + func numOfPendingConf(ents []pb.Entry) int { n := 0 for i := range ents { diff --git a/vendor/go.etcd.io/etcd/raft/raftpb/confchange.go b/vendor/go.etcd.io/etcd/raft/raftpb/confchange.go new file mode 100644 index 000000000..46a7a7021 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/raftpb/confchange.go @@ -0,0 +1,170 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raftpb + +import ( + "fmt" + "strconv" + "strings" + + "github.com/gogo/protobuf/proto" +) + +// ConfChangeI abstracts over ConfChangeV2 and (legacy) ConfChange to allow +// treating them in a unified manner. +type ConfChangeI interface { + AsV2() ConfChangeV2 + AsV1() (ConfChange, bool) +} + +// MarshalConfChange calls Marshal on the underlying ConfChange or ConfChangeV2 +// and returns the result along with the corresponding EntryType. +func MarshalConfChange(c ConfChangeI) (EntryType, []byte, error) { + var typ EntryType + var ccdata []byte + var err error + if ccv1, ok := c.AsV1(); ok { + typ = EntryConfChange + ccdata, err = ccv1.Marshal() + } else { + ccv2 := c.AsV2() + typ = EntryConfChangeV2 + ccdata, err = ccv2.Marshal() + } + return typ, ccdata, err +} + +// AsV2 returns a V2 configuration change carrying out the same operation. +func (c ConfChange) AsV2() ConfChangeV2 { + return ConfChangeV2{ + Changes: []ConfChangeSingle{{ + Type: c.Type, + NodeID: c.NodeID, + }}, + Context: c.Context, + } +} + +// AsV1 returns the ConfChange and true. +func (c ConfChange) AsV1() (ConfChange, bool) { + return c, true +} + +// AsV2 is the identity. +func (c ConfChangeV2) AsV2() ConfChangeV2 { return c } + +// AsV1 returns ConfChange{} and false. +func (c ConfChangeV2) AsV1() (ConfChange, bool) { return ConfChange{}, false } + +// EnterJoint returns two bools. The second bool is true if and only if this +// config change will use Joint Consensus, which is the case if it contains more +// than one change or if the use of Joint Consensus was requested explicitly. +// The first bool can only be true if second one is, and indicates whether the +// Joint State will be left automatically. +func (c *ConfChangeV2) EnterJoint() (autoLeave bool, ok bool) { + // NB: in theory, more config changes could qualify for the "simple" + // protocol but it depends on the config on top of which the changes apply. + // For example, adding two learners is not OK if both nodes are part of the + // base config (i.e. two voters are turned into learners in the process of + // applying the conf change). In practice, these distinctions should not + // matter, so we keep it simple and use Joint Consensus liberally. + if c.Transition != ConfChangeTransitionAuto || len(c.Changes) > 1 { + // Use Joint Consensus. 
+ var autoLeave bool + switch c.Transition { + case ConfChangeTransitionAuto: + autoLeave = true + case ConfChangeTransitionJointImplicit: + autoLeave = true + case ConfChangeTransitionJointExplicit: + default: + panic(fmt.Sprintf("unknown transition: %+v", c)) + } + return autoLeave, true + } + return false, false +} + +// LeaveJoint is true if the configuration change leaves a joint configuration. +// This is the case if the ConfChangeV2 is zero, with the possible exception of +// the Context field. +func (c *ConfChangeV2) LeaveJoint() bool { + cpy := *c + cpy.Context = nil + return proto.Equal(&cpy, &ConfChangeV2{}) +} + +// ConfChangesFromString parses a Space-delimited sequence of operations into a +// slice of ConfChangeSingle. The supported operations are: +// - vn: make n a voter, +// - ln: make n a learner, +// - rn: remove n, and +// - un: update n. +func ConfChangesFromString(s string) ([]ConfChangeSingle, error) { + var ccs []ConfChangeSingle + toks := strings.Split(strings.TrimSpace(s), " ") + if toks[0] == "" { + toks = nil + } + for _, tok := range toks { + if len(tok) < 2 { + return nil, fmt.Errorf("unknown token %s", tok) + } + var cc ConfChangeSingle + switch tok[0] { + case 'v': + cc.Type = ConfChangeAddNode + case 'l': + cc.Type = ConfChangeAddLearnerNode + case 'r': + cc.Type = ConfChangeRemoveNode + case 'u': + cc.Type = ConfChangeUpdateNode + default: + return nil, fmt.Errorf("unknown input: %s", tok) + } + id, err := strconv.ParseUint(tok[1:], 10, 64) + if err != nil { + return nil, err + } + cc.NodeID = id + ccs = append(ccs, cc) + } + return ccs, nil +} + +// ConfChangesToString is the inverse to ConfChangesFromString. +func ConfChangesToString(ccs []ConfChangeSingle) string { + var buf strings.Builder + for i, cc := range ccs { + if i > 0 { + buf.WriteByte(' ') + } + switch cc.Type { + case ConfChangeAddNode: + buf.WriteByte('v') + case ConfChangeAddLearnerNode: + buf.WriteByte('l') + case ConfChangeRemoveNode: + buf.WriteByte('r') + case ConfChangeUpdateNode: + buf.WriteByte('u') + default: + buf.WriteString("unknown") + } + fmt.Fprintf(&buf, "%d", cc.NodeID) + } + return buf.String() +} diff --git a/vendor/go.etcd.io/etcd/raft/raftpb/confstate.go b/vendor/go.etcd.io/etcd/raft/raftpb/confstate.go new file mode 100644 index 000000000..4bda93214 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/raftpb/confstate.go @@ -0,0 +1,45 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raftpb + +import ( + "fmt" + "reflect" + "sort" +) + +// Equivalent returns a nil error if the inputs describe the same configuration. +// On mismatch, returns a descriptive error showing the differences. +func (cs ConfState) Equivalent(cs2 ConfState) error { + cs1 := cs + orig1, orig2 := cs1, cs2 + s := func(sl *[]uint64) { + *sl = append([]uint64(nil), *sl...) 
+ sort.Slice(*sl, func(i, j int) bool { return (*sl)[i] < (*sl)[j] }) + } + + for _, cs := range []*ConfState{&cs1, &cs2} { + s(&cs.Voters) + s(&cs.Learners) + s(&cs.VotersOutgoing) + s(&cs.LearnersNext) + cs.XXX_unrecognized = nil + } + + if !reflect.DeepEqual(cs1, cs2) { + return fmt.Errorf("ConfStates not equivalent after sorting:\n%+#v\n%+#v\nInputs were:\n%+#v\n%+#v", cs1, cs2, orig1, orig2) + } + return nil +} diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go similarity index 52% rename from vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go rename to vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go index 753bd84ac..fcf259c89 100644 --- a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go +++ b/vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go @@ -1,16 +1,35 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: raft.proto +/* + Package raftpb is a generated protocol buffer package. + + It is generated from these files: + raft.proto + + It has these top-level messages: + Entry + SnapshotMetadata + Snapshot + Message + HardState + ConfState + ConfChange + ConfChangeSingle + ConfChangeV2 +*/ package raftpb import ( - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. @@ -27,18 +46,20 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type EntryType int32 const ( - EntryNormal EntryType = 0 - EntryConfChange EntryType = 1 + EntryNormal EntryType = 0 + EntryConfChange EntryType = 1 + EntryConfChangeV2 EntryType = 2 ) var EntryType_name = map[int32]string{ 0: "EntryNormal", 1: "EntryConfChange", + 2: "EntryConfChangeV2", } - var EntryType_value = map[string]int32{ - "EntryNormal": 0, - "EntryConfChange": 1, + "EntryNormal": 0, + "EntryConfChange": 1, + "EntryConfChangeV2": 2, } func (x EntryType) Enum() *EntryType { @@ -46,11 +67,9 @@ func (x EntryType) Enum() *EntryType { *p = x return p } - func (x EntryType) String() string { return proto.EnumName(EntryType_name, int32(x)) } - func (x *EntryType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType") if err != nil { @@ -59,10 +78,7 @@ func (x *EntryType) UnmarshalJSON(data []byte) error { *x = EntryType(value) return nil } - -func (EntryType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{0} -} +func (EntryType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } type MessageType int32 @@ -109,7 +125,6 @@ var MessageType_name = map[int32]string{ 17: "MsgPreVote", 18: "MsgPreVoteResp", } - var MessageType_value = map[string]int32{ "MsgHup": 0, "MsgBeat": 1, @@ -137,11 +152,9 @@ func (x MessageType) Enum() *MessageType { *p = x return p } - func (x MessageType) String() string { return proto.EnumName(MessageType_name, int32(x)) } - func (x *MessageType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType") if err != nil { @@ -150,10 +163,58 @@ func (x *MessageType) UnmarshalJSON(data []byte) error { *x = MessageType(value) return nil } +func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +// ConfChangeTransition specifies the behavior of a configuration change with +// respect to joint consensus. 
+type ConfChangeTransition int32 + +const ( + // Automatically use the simple protocol if possible, otherwise fall back + // to ConfChangeJointImplicit. Most applications will want to use this. + ConfChangeTransitionAuto ConfChangeTransition = 0 + // Use joint consensus unconditionally, and transition out of them + // automatically (by proposing a zero configuration change). + // + // This option is suitable for applications that want to minimize the time + // spent in the joint configuration and do not store the joint configuration + // in the state machine (outside of InitialState). + ConfChangeTransitionJointImplicit ConfChangeTransition = 1 + // Use joint consensus and remain in the joint configuration until the + // application proposes a no-op configuration change. This is suitable for + // applications that want to explicitly control the transitions, for example + // to use a custom payload (via the Context field). + ConfChangeTransitionJointExplicit ConfChangeTransition = 2 +) + +var ConfChangeTransition_name = map[int32]string{ + 0: "ConfChangeTransitionAuto", + 1: "ConfChangeTransitionJointImplicit", + 2: "ConfChangeTransitionJointExplicit", +} +var ConfChangeTransition_value = map[string]int32{ + "ConfChangeTransitionAuto": 0, + "ConfChangeTransitionJointImplicit": 1, + "ConfChangeTransitionJointExplicit": 2, +} -func (MessageType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{1} +func (x ConfChangeTransition) Enum() *ConfChangeTransition { + p := new(ConfChangeTransition) + *p = x + return p } +func (x ConfChangeTransition) String() string { + return proto.EnumName(ConfChangeTransition_name, int32(x)) +} +func (x *ConfChangeTransition) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ConfChangeTransition_value, data, "ConfChangeTransition") + if err != nil { + return err + } + *x = ConfChangeTransition(value) + return nil +} +func (ConfChangeTransition) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } type ConfChangeType int32 @@ -170,7 +231,6 @@ var ConfChangeType_name = map[int32]string{ 2: "ConfChangeUpdateNode", 3: "ConfChangeAddLearnerNode", } - var ConfChangeType_value = map[string]int32{ "ConfChangeAddNode": 0, "ConfChangeRemoveNode": 1, @@ -183,11 +243,9 @@ func (x ConfChangeType) Enum() *ConfChangeType { *p = x return p } - func (x ConfChangeType) String() string { return proto.EnumName(ConfChangeType_name, int32(x)) } - func (x *ConfChangeType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType") if err != nil { @@ -196,318 +254,174 @@ func (x *ConfChangeType) UnmarshalJSON(data []byte) error { *x = ConfChangeType(value) return nil } - -func (ConfChangeType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{2} -} +func (ConfChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } type Entry struct { - Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` - Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` - Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` - Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Entry) Reset() { *m = Entry{} } -func (m *Entry) String() string { return proto.CompactTextString(m) } -func (*Entry) ProtoMessage() {} -func (*Entry) 
Descriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{0} -} -func (m *Entry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Entry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_Entry.Merge(m, src) -} -func (m *Entry) XXX_Size() int { - return m.Size() -} -func (m *Entry) XXX_DiscardUnknown() { - xxx_messageInfo_Entry.DiscardUnknown(m) + Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` + Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` + Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` + Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` + XXX_unrecognized []byte `json:"-"` } -var xxx_messageInfo_Entry proto.InternalMessageInfo +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } type SnapshotMetadata struct { - ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"` - Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` - Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } -func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } -func (*SnapshotMetadata) ProtoMessage() {} -func (*SnapshotMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{1} -} -func (m *SnapshotMetadata) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotMetadata.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SnapshotMetadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotMetadata.Merge(m, src) -} -func (m *SnapshotMetadata) XXX_Size() int { - return m.Size() -} -func (m *SnapshotMetadata) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotMetadata.DiscardUnknown(m) + ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"` + Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` + Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` + XXX_unrecognized []byte `json:"-"` } -var xxx_messageInfo_SnapshotMetadata proto.InternalMessageInfo +func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } +func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*SnapshotMetadata) ProtoMessage() {} +func (*SnapshotMetadata) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } type Snapshot struct { - Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` - Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m 
*Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{2} -} -func (m *Snapshot) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_Snapshot.Merge(m, src) -} -func (m *Snapshot) XXX_Size() int { - return m.Size() -} -func (m *Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_Snapshot.DiscardUnknown(m) + Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` + XXX_unrecognized []byte `json:"-"` } -var xxx_messageInfo_Snapshot proto.InternalMessageInfo +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } type Message struct { - Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` - To uint64 `protobuf:"varint,2,opt,name=to" json:"to"` - From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` - Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` - LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` - Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` - Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` - Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` - Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` - Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` - RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` - Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{3} -} -func (m *Message) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Message.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(m, src) -} -func (m *Message) XXX_Size() int { - return m.Size() -} -func (m *Message) XXX_DiscardUnknown() { - xxx_messageInfo_Message.DiscardUnknown(m) -} - -var xxx_messageInfo_Message proto.InternalMessageInfo + Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` + To uint64 `protobuf:"varint,2,opt,name=to" json:"to"` + From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` + Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` + 
LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` + Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` + Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` + Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` + Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` + Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` + RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` + Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } type HardState struct { - Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` - Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"` - Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HardState) Reset() { *m = HardState{} } -func (m *HardState) String() string { return proto.CompactTextString(m) } -func (*HardState) ProtoMessage() {} -func (*HardState) Descriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{4} -} -func (m *HardState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HardState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HardState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HardState) XXX_Merge(src proto.Message) { - xxx_messageInfo_HardState.Merge(m, src) -} -func (m *HardState) XXX_Size() int { - return m.Size() -} -func (m *HardState) XXX_DiscardUnknown() { - xxx_messageInfo_HardState.DiscardUnknown(m) + Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` + Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"` + Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"` + XXX_unrecognized []byte `json:"-"` } -var xxx_messageInfo_HardState proto.InternalMessageInfo +func (m *HardState) Reset() { *m = HardState{} } +func (m *HardState) String() string { return proto.CompactTextString(m) } +func (*HardState) ProtoMessage() {} +func (*HardState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} } type ConfState struct { - Nodes []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"` - Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConfState) Reset() { *m = ConfState{} } -func (m *ConfState) String() string { return proto.CompactTextString(m) } -func (*ConfState) ProtoMessage() {} -func (*ConfState) Descriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{5} -} -func (m *ConfState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConfState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ConfState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return 
b[:n], nil - } -} -func (m *ConfState) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfState.Merge(m, src) -} -func (m *ConfState) XXX_Size() int { - return m.Size() -} -func (m *ConfState) XXX_DiscardUnknown() { - xxx_messageInfo_ConfState.DiscardUnknown(m) -} - -var xxx_messageInfo_ConfState proto.InternalMessageInfo + // The voters in the incoming config. (If the configuration is not joint, + // then the outgoing config is empty). + Voters []uint64 `protobuf:"varint,1,rep,name=voters" json:"voters,omitempty"` + // The learners in the incoming config. + Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"` + // The voters in the outgoing config. + VotersOutgoing []uint64 `protobuf:"varint,3,rep,name=voters_outgoing,json=votersOutgoing" json:"voters_outgoing,omitempty"` + // The nodes that will become learners when the outgoing config is removed. + // These nodes are necessarily currently in nodes_joint (or they would have + // been added to the incoming config right away). + LearnersNext []uint64 `protobuf:"varint,4,rep,name=learners_next,json=learnersNext" json:"learners_next,omitempty"` + // If set, the config is joint and Raft will automatically transition into + // the final config (i.e. remove the outgoing config) when this is safe. + AutoLeave bool `protobuf:"varint,5,opt,name=auto_leave,json=autoLeave" json:"auto_leave"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfState) Reset() { *m = ConfState{} } +func (m *ConfState) String() string { return proto.CompactTextString(m) } +func (*ConfState) ProtoMessage() {} +func (*ConfState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} } type ConfChange struct { - ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` - Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"` - NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"` - Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConfChange) Reset() { *m = ConfChange{} } -func (m *ConfChange) String() string { return proto.CompactTextString(m) } -func (*ConfChange) ProtoMessage() {} -func (*ConfChange) Descriptor() ([]byte, []int) { - return fileDescriptor_b042552c306ae59b, []int{6} -} -func (m *ConfChange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConfChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ConfChange.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ConfChange) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfChange.Merge(m, src) -} -func (m *ConfChange) XXX_Size() int { - return m.Size() -} -func (m *ConfChange) XXX_DiscardUnknown() { - xxx_messageInfo_ConfChange.DiscardUnknown(m) -} - -var xxx_messageInfo_ConfChange proto.InternalMessageInfo + Type ConfChangeType `protobuf:"varint,2,opt,name=type,enum=raftpb.ConfChangeType" json:"type"` + NodeID uint64 `protobuf:"varint,3,opt,name=node_id,json=nodeId" json:"node_id"` + Context []byte `protobuf:"bytes,4,opt,name=context" json:"context,omitempty"` + // NB: this is used only by etcd to thread through a unique identifier. + // Ideally it should really use the Context instead. No counterpart to + // this field exists in ConfChangeV2. 
+ ID uint64 `protobuf:"varint,1,opt,name=id" json:"id"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChange) Reset() { *m = ConfChange{} } +func (m *ConfChange) String() string { return proto.CompactTextString(m) } +func (*ConfChange) ProtoMessage() {} +func (*ConfChange) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} } + +// ConfChangeSingle is an individual configuration change operation. Multiple +// such operations can be carried out atomically via a ConfChangeV2. +type ConfChangeSingle struct { + Type ConfChangeType `protobuf:"varint,1,opt,name=type,enum=raftpb.ConfChangeType" json:"type"` + NodeID uint64 `protobuf:"varint,2,opt,name=node_id,json=nodeId" json:"node_id"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChangeSingle) Reset() { *m = ConfChangeSingle{} } +func (m *ConfChangeSingle) String() string { return proto.CompactTextString(m) } +func (*ConfChangeSingle) ProtoMessage() {} +func (*ConfChangeSingle) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{7} } + +// ConfChangeV2 messages initiate configuration changes. They support both the +// simple "one at a time" membership change protocol and full Joint Consensus +// allowing for arbitrary changes in membership. +// +// The supplied context is treated as an opaque payload and can be used to +// attach an action on the state machine to the application of the config change +// proposal. Note that contrary to Joint Consensus as outlined in the Raft +// paper[1], configuration changes become active when they are *applied* to the +// state machine (not when they are appended to the log). +// +// The simple protocol can be used whenever only a single change is made. +// +// Non-simple changes require the use of Joint Consensus, for which two +// configuration changes are run. The first configuration change specifies the +// desired changes and transitions the Raft group into the joint configuration, +// in which quorum requires a majority of both the pre-changes and post-changes +// configuration. Joint Consensus avoids entering fragile intermediate +// configurations that could compromise survivability. For example, without the +// use of Joint Consensus and running across three availability zones with a +// replication factor of three, it is not possible to replace a voter without +// entering an intermediate configuration that does not survive the outage of +// one availability zone. +// +// The provided ConfChangeTransition specifies how (and whether) Joint Consensus +// is used, and assigns the task of leaving the joint configuration either to +// Raft or the application. Leaving the joint configuration is accomplished by +// proposing a ConfChangeV2 with only and optionally the Context field +// populated. 
+// +// For details on Raft membership changes, see: +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +type ConfChangeV2 struct { + Transition ConfChangeTransition `protobuf:"varint,1,opt,name=transition,enum=raftpb.ConfChangeTransition" json:"transition"` + Changes []ConfChangeSingle `protobuf:"bytes,2,rep,name=changes" json:"changes"` + Context []byte `protobuf:"bytes,3,opt,name=context" json:"context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChangeV2) Reset() { *m = ConfChangeV2{} } +func (m *ConfChangeV2) String() string { return proto.CompactTextString(m) } +func (*ConfChangeV2) ProtoMessage() {} +func (*ConfChangeV2) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{8} } func init() { - proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) - proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) - proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) proto.RegisterType((*Entry)(nil), "raftpb.Entry") proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata") proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot") @@ -515,69 +429,17 @@ func init() { proto.RegisterType((*HardState)(nil), "raftpb.HardState") proto.RegisterType((*ConfState)(nil), "raftpb.ConfState") proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange") + proto.RegisterType((*ConfChangeSingle)(nil), "raftpb.ConfChangeSingle") + proto.RegisterType((*ConfChangeV2)(nil), "raftpb.ConfChangeV2") + proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) + proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) + proto.RegisterEnum("raftpb.ConfChangeTransition", ConfChangeTransition_name, ConfChangeTransition_value) + proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) } - -func init() { proto.RegisterFile("raft.proto", fileDescriptor_b042552c306ae59b) } - -var fileDescriptor_b042552c306ae59b = []byte{ - // 816 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0x23, 0x45, - 0x10, 0xf6, 0x8c, 0xc7, 0x7f, 0x35, 0x8e, 0xd3, 0xa9, 0x35, 0xa8, 0x15, 0x45, 0xc6, 0xb2, 0x38, - 0x58, 0x41, 0x1b, 0x20, 0x07, 0x0e, 0x48, 0x1c, 0x36, 0x09, 0x52, 0x22, 0xad, 0xa3, 0xc5, 0x9b, - 0xe5, 0x80, 0x84, 0x50, 0xc7, 0x53, 0x9e, 0x18, 0x32, 0xd3, 0xa3, 0x9e, 0xf6, 0xb2, 0xb9, 0x20, - 0x1e, 0x80, 0x07, 0xe0, 0xc2, 0xfb, 0xe4, 0xb8, 0x12, 0x77, 0xc4, 0x86, 0x17, 0x41, 0xdd, 0xd3, - 0x63, 0xcf, 0x24, 0xb7, 0xae, 0xaf, 0x6a, 0xbe, 0xfa, 0xbe, 0xea, 0xea, 0x01, 0x50, 0x62, 0xa9, - 0x8f, 0x32, 0x25, 0xb5, 0xc4, 0xb6, 0x39, 0x67, 0xd7, 0xfb, 0xc3, 0x58, 0xc6, 0xd2, 0x42, 0x9f, - 0x9b, 0x53, 0x91, 0x9d, 0xfc, 0x06, 0xad, 0x6f, 0x53, 0xad, 0xee, 0x90, 0x43, 0x70, 0x45, 0x2a, - 0xe1, 0xfe, 0xd8, 0x9b, 0x06, 0x27, 0xc1, 0xfd, 0x3f, 0x9f, 0x34, 0xe6, 0x16, 0xc1, 0x7d, 0x68, - 0x5d, 0xa4, 0x11, 0xbd, 0xe3, 0xcd, 0x4a, 0xaa, 0x80, 0xf0, 0x33, 0x08, 0xae, 0xee, 0x32, 0xe2, - 0xde, 0xd8, 0x9b, 0x0e, 0x8e, 0xf7, 0x8e, 0x8a, 0x5e, 0x47, 0x96, 0xd2, 0x24, 0x36, 0x44, 0x77, - 0x19, 0x21, 0x42, 0x70, 0x26, 0xb4, 0xe0, 0xc1, 0xd8, 0x9b, 0xf6, 0xe7, 0xf6, 0x3c, 0xf9, 0xdd, - 0x03, 0xf6, 0x3a, 0x15, 0x59, 0x7e, 0x23, 0xf5, 0x8c, 0xb4, 0x88, 0x84, 0x16, 0xf8, 0x15, 0xc0, - 0x42, 0xa6, 0xcb, 0x9f, 0x72, 0x2d, 0x74, 0xc1, 0x1d, 0x6e, 0xb9, 0x4f, 0x65, 0xba, 0x7c, 0x6d, - 0x12, 0x8e, 0xbb, 0xb7, 0x28, 0x01, 0xa3, 0x74, 0x65, 0x95, 0x56, 0x4d, 0x14, 0x90, 0xf1, 0xa7, - 
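Aside, not part of the vendored patch: a minimal sketch of how an application could build and encode a multi-operation ConfChangeV2 using the helpers introduced above (EnterJoint, MarshalConfChange). The node IDs and the standalone main package here are assumptions for illustration only.

package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/raftpb"
)

func main() {
	// Three changes in one proposal: more than a single change means the
	// simple "one at a time" protocol cannot be used, so joint consensus is
	// selected; TransitionAuto lets raft leave the joint config automatically.
	cc := raftpb.ConfChangeV2{
		Transition: raftpb.ConfChangeTransitionAuto,
		Changes: []raftpb.ConfChangeSingle{
			{Type: raftpb.ConfChangeAddNode, NodeID: 2},
			{Type: raftpb.ConfChangeRemoveNode, NodeID: 3},
			{Type: raftpb.ConfChangeAddLearnerNode, NodeID: 4},
		},
	}
	if autoLeave, usesJoint := cc.EnterJoint(); usesJoint {
		// autoLeave is true for ConfChangeTransitionAuto.
		fmt.Println("joint consensus; auto-leave:", autoLeave)
	}
	// MarshalConfChange returns EntryConfChangeV2 here, since cc is not a
	// legacy single-step ConfChange (AsV1 reports ok == false).
	typ, data, err := raftpb.MarshalConfChange(cc)
	if err != nil {
		panic(err)
	}
	fmt.Println(typ, len(data)) // entry type and encoded payload size
}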
0x8d, 0xbf, 0xaa, 0x09, 0x8b, 0x4c, 0x7e, 0x80, 0x6e, 0xa9, 0xc0, 0x48, 0x34, 0x0a, 0x6c, 0xcf, - 0xfe, 0xdc, 0x9e, 0xf1, 0x6b, 0xe8, 0x26, 0x4e, 0x99, 0x25, 0x0e, 0x8f, 0x79, 0xa9, 0xe5, 0xb1, - 0x72, 0xc7, 0xbb, 0xa9, 0x9f, 0xfc, 0xd5, 0x84, 0xce, 0x8c, 0xf2, 0x5c, 0xc4, 0x84, 0xcf, 0x21, - 0xd0, 0xdb, 0x59, 0x3d, 0x2b, 0x39, 0x5c, 0xba, 0x3a, 0x2d, 0x53, 0x86, 0x43, 0xf0, 0xb5, 0xac, - 0x39, 0xf1, 0xb5, 0x34, 0x36, 0x96, 0x4a, 0x3e, 0xb2, 0x61, 0x90, 0x8d, 0xc1, 0xe0, 0xb1, 0x41, - 0x1c, 0x41, 0xe7, 0x56, 0xc6, 0xf6, 0x76, 0x5b, 0x95, 0x64, 0x09, 0x6e, 0xc7, 0xd6, 0x7e, 0x3a, - 0xb6, 0xe7, 0xd0, 0xa1, 0x54, 0xab, 0x15, 0xe5, 0xbc, 0x33, 0x6e, 0x4e, 0xc3, 0xe3, 0x9d, 0xda, - 0x1d, 0x97, 0x54, 0xae, 0x06, 0x0f, 0xa0, 0xbd, 0x90, 0x49, 0xb2, 0xd2, 0xbc, 0x5b, 0xe1, 0x72, - 0x18, 0x1e, 0x43, 0x37, 0x77, 0x13, 0xe3, 0x3d, 0x3b, 0x49, 0xf6, 0x78, 0x92, 0xe5, 0x04, 0xcb, - 0x3a, 0xc3, 0xa8, 0xe8, 0x67, 0x5a, 0x68, 0x0e, 0x63, 0x6f, 0xda, 0x2d, 0x19, 0x0b, 0x0c, 0x3f, - 0x05, 0x28, 0x4e, 0xe7, 0xab, 0x54, 0xf3, 0xb0, 0xd2, 0xb3, 0x82, 0x23, 0x87, 0xce, 0x42, 0xa6, - 0x9a, 0xde, 0x69, 0xde, 0xb7, 0x17, 0x5b, 0x86, 0x93, 0x1f, 0xa1, 0x77, 0x2e, 0x54, 0x54, 0xac, - 0x4f, 0x39, 0x41, 0xef, 0xc9, 0x04, 0x39, 0x04, 0x6f, 0xa5, 0xa6, 0xfa, 0xe3, 0x30, 0x48, 0xc5, - 0x70, 0xf3, 0xa9, 0xe1, 0xc9, 0x37, 0xd0, 0xdb, 0xac, 0x2b, 0x0e, 0xa1, 0x95, 0xca, 0x88, 0x72, - 0xee, 0x8d, 0x9b, 0xd3, 0x60, 0x5e, 0x04, 0xb8, 0x0f, 0xdd, 0x5b, 0x12, 0x2a, 0x25, 0x95, 0x73, - 0xdf, 0x26, 0x36, 0xf1, 0xe4, 0x0f, 0x0f, 0xc0, 0x7c, 0x7f, 0x7a, 0x23, 0xd2, 0xd8, 0x6e, 0xc4, - 0xc5, 0x59, 0x4d, 0x9d, 0x7f, 0x71, 0x86, 0x5f, 0xb8, 0x27, 0xe8, 0xdb, 0xb5, 0xfa, 0xb8, 0xfa, - 0x4c, 0x8a, 0xef, 0x9e, 0xbc, 0xc3, 0x03, 0x68, 0x5f, 0xca, 0x88, 0x2e, 0xce, 0xea, 0x9a, 0x0b, - 0xcc, 0x0c, 0xeb, 0xd4, 0x0d, 0xab, 0x78, 0xa8, 0x65, 0x78, 0xf8, 0x25, 0xf4, 0x36, 0x0f, 0x1b, - 0x77, 0x21, 0xb4, 0xc1, 0xa5, 0x54, 0x89, 0xb8, 0x65, 0x0d, 0x7c, 0x06, 0xbb, 0x16, 0xd8, 0x36, - 0x66, 0xde, 0xe1, 0xdf, 0x3e, 0x84, 0x95, 0x05, 0x47, 0x80, 0xf6, 0x2c, 0x8f, 0xcf, 0xd7, 0x19, - 0x6b, 0x60, 0x08, 0x9d, 0x59, 0x1e, 0x9f, 0x90, 0xd0, 0xcc, 0x73, 0xc1, 0x2b, 0x25, 0x33, 0xe6, - 0xbb, 0xaa, 0x17, 0x59, 0xc6, 0x9a, 0x38, 0x00, 0x28, 0xce, 0x73, 0xca, 0x33, 0x16, 0xb8, 0xc2, - 0xef, 0xa5, 0x26, 0xd6, 0x32, 0x22, 0x5c, 0x60, 0xb3, 0x6d, 0x97, 0x35, 0xcb, 0xc4, 0x3a, 0xc8, - 0xa0, 0x6f, 0x9a, 0x91, 0x50, 0xfa, 0xda, 0x74, 0xe9, 0xe2, 0x10, 0x58, 0x15, 0xb1, 0x1f, 0xf5, - 0x10, 0x61, 0x30, 0xcb, 0xe3, 0x37, 0xa9, 0x22, 0xb1, 0xb8, 0x11, 0xd7, 0xb7, 0xc4, 0x00, 0xf7, - 0x60, 0xc7, 0x11, 0x99, 0xcb, 0x5b, 0xe7, 0x2c, 0x74, 0x65, 0xa7, 0x37, 0xb4, 0xf8, 0xe5, 0xbb, - 0xb5, 0x54, 0xeb, 0x84, 0xf5, 0xf1, 0x23, 0xd8, 0x9b, 0xe5, 0xf1, 0x95, 0x12, 0x69, 0xbe, 0x24, - 0xf5, 0x92, 0x44, 0x44, 0x8a, 0xed, 0xb8, 0xaf, 0xaf, 0x56, 0x09, 0xc9, 0xb5, 0xbe, 0x94, 0xbf, - 0xb2, 0x81, 0x13, 0x33, 0x27, 0x11, 0xd9, 0x3f, 0x27, 0xdb, 0x75, 0x62, 0x36, 0x88, 0x15, 0xc3, - 0x9c, 0xdf, 0x57, 0x8a, 0xac, 0xc5, 0x3d, 0xd7, 0xd5, 0xc5, 0xb6, 0x06, 0x0f, 0xef, 0x60, 0x50, - 0xbf, 0x5e, 0xa3, 0x63, 0x8b, 0xbc, 0x88, 0x22, 0x73, 0x97, 0xac, 0x81, 0x1c, 0x86, 0x5b, 0x78, - 0x4e, 0x89, 0x7c, 0x4b, 0x36, 0xe3, 0xd5, 0x33, 0x6f, 0xb2, 0x48, 0xe8, 0x22, 0xe3, 0xe3, 0x01, - 0xf0, 0x1a, 0xd5, 0xcb, 0x62, 0x1b, 0x6d, 0xb6, 0x79, 0xc2, 0xef, 0x3f, 0x8c, 0x1a, 0xef, 0x3f, - 0x8c, 0x1a, 0xf7, 0x0f, 0x23, 0xef, 0xfd, 0xc3, 0xc8, 0xfb, 0xf7, 0x61, 0xe4, 0xfd, 0xf9, 0xdf, - 0xa8, 0xf1, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x30, 0xe1, 0x02, 0x69, 0x74, 0x06, 0x00, 0x00, -} - func (m *Entry) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -585,42 +447,35 @@ func (m *Entry) Marshal() (dAtA []byte, err error) { } func (m *Entry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) if m.Data != nil { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) - i-- dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) } - i = encodeVarintRaft(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x18 - i = encodeVarintRaft(dAtA, i, uint64(m.Term)) - i-- - dAtA[i] = 0x10 - i = encodeVarintRaft(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -628,42 +483,34 @@ func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) { } func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SnapshotMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ConfState.Size())) + n1, err := m.ConfState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i = encodeVarintRaft(dAtA, i, uint64(m.Term)) - i-- - dAtA[i] = 0x18 - i = encodeVarintRaft(dAtA, i, uint64(m.Index)) - i-- + i += n1 dAtA[i] = 0x10 - { - size, err := m.ConfState.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaft(dAtA, i, uint64(size)) + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *Snapshot) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -671,43 +518,34 @@ func (m *Snapshot) Marshal() (dAtA []byte, err error) { } func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= 
size - i = encodeVarintRaft(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 if m.Data != nil { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Metadata.Size())) + n2, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - return len(dAtA) - i, nil + return i, nil } func (m *Message) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -715,89 +553,78 @@ func (m *Message) Marshal() (dAtA []byte, err error) { } func (m *Message) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.To)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.From)) + dAtA[i] = 0x20 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x28 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm)) + dAtA[i] = 0x30 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if len(m.Entries) > 0 { + for _, msg := range m.Entries { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - if m.Context != nil { - i -= len(m.Context) - copy(dAtA[i:], m.Context) - i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) - i-- - dAtA[i] = 0x62 + dAtA[i] = 0x40 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + dAtA[i] = 0x4a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Snapshot.Size())) + n3, err := m.Snapshot.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint)) - i-- - dAtA[i] = 0x58 - i-- + i += n3 + dAtA[i] = 0x50 + i++ if m.Reject { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x50 - { - size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaft(dAtA, i, uint64(size)) + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint)) + if m.Context != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) } - i-- - dAtA[i] = 0x4a - i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) - i-- - dAtA[i] = 0x40 - if len(m.Entries) > 0 { - for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintRaft(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) } - i = encodeVarintRaft(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x30 - i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm)) - i-- - dAtA[i] = 0x28 - i = encodeVarintRaft(dAtA, i, uint64(m.Term)) - i-- - dAtA[i] = 0x20 - i = encodeVarintRaft(dAtA, i, 
uint64(m.From)) - i-- - dAtA[i] = 0x18 - i = encodeVarintRaft(dAtA, i, uint64(m.To)) - i-- - dAtA[i] = 0x10 - i = encodeVarintRaft(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } func (m *HardState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -805,35 +632,29 @@ func (m *HardState) Marshal() (dAtA []byte, err error) { } func (m *HardState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HardState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Vote)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) - i-- - dAtA[i] = 0x18 - i = encodeVarintRaft(dAtA, i, uint64(m.Vote)) - i-- - dAtA[i] = 0x10 - i = encodeVarintRaft(dAtA, i, uint64(m.Term)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } func (m *ConfState) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -841,40 +662,56 @@ func (m *ConfState) Marshal() (dAtA []byte, err error) { } func (m *ConfState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConfState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Voters) > 0 { + for _, num := range m.Voters { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } } if len(m.Learners) > 0 { - for iNdEx := len(m.Learners) - 1; iNdEx >= 0; iNdEx-- { - i = encodeVarintRaft(dAtA, i, uint64(m.Learners[iNdEx])) - i-- + for _, num := range m.Learners { dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) } } - if len(m.Nodes) > 0 { - for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- { - i = encodeVarintRaft(dAtA, i, uint64(m.Nodes[iNdEx])) - i-- - dAtA[i] = 0x8 + if len(m.VotersOutgoing) > 0 { + for _, num := range m.VotersOutgoing { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if len(m.LearnersNext) > 0 { + for _, num := range m.LearnersNext { + dAtA[i] = 0x20 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) } } - return len(dAtA) - i, nil + dAtA[i] = 0x28 + i++ + if m.AutoLeave { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func (m *ConfChange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -882,53 +719,110 @@ func (m *ConfChange) Marshal() (dAtA []byte, err error) { } func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConfChange) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) if m.Context != nil { - i -= len(m.Context) - copy(dAtA[i:], m.Context) - i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) - i-- dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) } - i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) - i-- - dAtA[i] = 0x18 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChangeSingle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChangeSingle) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ i = encodeVarintRaft(dAtA, i, uint64(m.Type)) - i-- dAtA[i] = 0x10 - i = encodeVarintRaft(dAtA, i, uint64(m.ID)) - i-- + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChangeV2) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChangeV2) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Transition)) + if len(m.Changes) > 0 { + for _, msg := range m.Changes { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Context != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { - offset -= sovRaft(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *Entry) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRaft(uint64(m.Type)) @@ -945,9 +839,6 @@ func (m *Entry) Size() (n int) { } func (m *SnapshotMetadata) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ConfState.Size() @@ -961,9 +852,6 @@ func (m *SnapshotMetadata) Size() (n int) { } func (m *Snapshot) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Data != nil { @@ -979,9 +867,6 @@ func (m *Snapshot) Size() (n int) { } func (m *Message) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRaft(uint64(m.Type)) @@ -1012,9 +897,6 @@ func (m *Message) Size() (n int) { } func (m *HardState) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRaft(uint64(m.Term)) @@ -1027,13 +909,10 @@ func (m *HardState) Size() (n int) { } func (m *ConfState) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if len(m.Nodes) > 0 { - for _, e := range m.Nodes { + if len(m.Voters) > 0 { + for _, 
e := range m.Voters { n += 1 + sovRaft(uint64(e)) } } @@ -1042,6 +921,17 @@ func (m *ConfState) Size() (n int) { n += 1 + sovRaft(uint64(e)) } } + if len(m.VotersOutgoing) > 0 { + for _, e := range m.VotersOutgoing { + n += 1 + sovRaft(uint64(e)) + } + } + if len(m.LearnersNext) > 0 { + for _, e := range m.LearnersNext { + n += 1 + sovRaft(uint64(e)) + } + } + n += 2 if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1049,9 +939,6 @@ func (m *ConfState) Size() (n int) { } func (m *ConfChange) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRaft(uint64(m.ID)) @@ -1067,8 +954,46 @@ func (m *ConfChange) Size() (n int) { return n } +func (m *ConfChangeSingle) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.NodeID)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChangeV2) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Transition)) + if len(m.Changes) > 0 { + for _, e := range m.Changes { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovRaft(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozRaft(x uint64) (n int) { return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1088,7 +1013,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1116,7 +1041,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= EntryType(b&0x7F) << shift + m.Type |= (EntryType(b) & 0x7F) << shift if b < 0x80 { break } @@ -1135,7 +1060,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Term |= uint64(b&0x7F) << shift + m.Term |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1154,7 +1079,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Index |= uint64(b&0x7F) << shift + m.Index |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1173,7 +1098,7 @@ func (m *Entry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1182,9 +1107,6 @@ func (m *Entry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1202,9 +1124,6 @@ func (m *Entry) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaft } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaft - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1233,7 +1152,7 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1261,7 +1180,7 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1270,9 +1189,6 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -1294,7 +1210,7 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Index |= uint64(b&0x7F) << shift + m.Index |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1313,7 +1229,7 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Term |= uint64(b&0x7F) << shift + m.Term |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1327,9 +1243,6 @@ func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaft } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaft - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1358,7 +1271,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1386,7 +1299,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1395,9 +1308,6 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1420,7 +1330,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1429,9 +1339,6 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1448,9 +1355,6 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaft } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaft - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1479,7 +1383,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1507,7 +1411,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= MessageType(b&0x7F) << shift + m.Type |= (MessageType(b) & 0x7F) << shift if b < 0x80 { break } @@ -1526,7 +1430,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.To |= uint64(b&0x7F) << shift + m.To |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1545,7 +1449,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.From |= uint64(b&0x7F) << shift + m.From |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1564,7 +1468,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Term |= uint64(b&0x7F) << shift + m.Term |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1583,7 +1487,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.LogTerm |= uint64(b&0x7F) << shift + m.LogTerm |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1602,7 +1506,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Index |= uint64(b&0x7F) << shift + m.Index |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1621,7 +1525,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ 
-1630,9 +1534,6 @@ func (m *Message) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1655,7 +1556,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Commit |= uint64(b&0x7F) << shift + m.Commit |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1674,7 +1575,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1683,9 +1584,6 @@ func (m *Message) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1707,7 +1605,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1727,7 +1625,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RejectHint |= uint64(b&0x7F) << shift + m.RejectHint |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1746,7 +1644,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1755,9 +1653,6 @@ func (m *Message) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1765,7 +1660,434 @@ func (m *Message) Unmarshal(dAtA []byte) error { if m.Context == nil { m.Context = []byte{} } - iNdEx = postIndex + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HardState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HardState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + m.Vote = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Vote |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Voters = append(m.Voters, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Voters = append(m.Voters, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Voters", wireType) + } + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType) + } + case 3: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.VotersOutgoing = append(m.VotersOutgoing, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.VotersOutgoing = append(m.VotersOutgoing, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field VotersOutgoing", wireType) + } + case 4: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LearnersNext = append(m.LearnersNext, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LearnersNext = append(m.LearnersNext, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LearnersNext", wireType) + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoLeave", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.AutoLeave = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRaft(dAtA[iNdEx:]) @@ -1775,9 +2097,6 @@ func (m *Message) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaft } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaft - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1791,7 +2110,7 @@ func (m *Message) Unmarshal(dAtA []byte) error { } return nil } -func (m *HardState) Unmarshal(dAtA []byte) error { +func (m *ConfChange) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1806,7 +2125,7 @@ func (m *HardState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1814,17 +2133,17 @@ func (m *HardState) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HardState: wiretype end group for non-group") + return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if 
wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } - m.Term = 0 + m.ID = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRaft @@ -1834,16 +2153,16 @@ func (m *HardState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Term |= uint64(b&0x7F) << shift + m.ID |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Vote = 0 + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRaft @@ -1853,16 +2172,35 @@ func (m *HardState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Vote |= uint64(b&0x7F) << shift + m.Type |= (ConfChangeType(b) & 0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) } - m.Commit = 0 + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRaft @@ -1872,11 +2210,23 @@ func (m *HardState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Commit |= uint64(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRaft(dAtA[iNdEx:]) @@ -1886,9 +2236,6 @@ func (m *HardState) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaft } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaft - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1902,7 +2249,7 @@ func (m *HardState) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfState) Unmarshal(dAtA []byte) error { +func (m *ConfChangeSingle) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1917,7 +2264,7 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1925,163 +2272,49 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfState: wiretype end group for non-group") + return fmt.Errorf("proto: ConfChangeSingle: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfChangeSingle: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Nodes = append(m.Nodes, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthRaft + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.Nodes) == 0 { - m.Nodes = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Nodes = append(m.Nodes, v) + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) } case 2: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Learners = append(m.Learners, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthRaft + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.Learners) == 0 { - m.Learners = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Learners = append(m.Learners, v) + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType) } default: iNdEx = preIndex @@ -2092,9 +2325,6 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaft } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaft - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2108,7 +2338,7 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfChange) Unmarshal(dAtA []byte) error { +func (m *ConfChangeV2) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2123,7 +2353,7 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2131,17 +2361,17 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") + return fmt.Errorf("proto: ConfChangeV2: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConfChangeV2: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Transition", wireType) } - m.ID = 0 + m.Transition = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRaft @@ -2151,16 +2381,16 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= uint64(b&0x7F) << shift + m.Transition |= (ConfChangeTransition(b) & 0x7F) << shift if b < 0x80 { break } } case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) } - m.Type = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRaft @@ -2170,31 +2400,24 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= ConfChangeType(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - case 
3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + if msglen < 0 { + return ErrInvalidLengthRaft } - m.NodeID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NodeID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF } - case 4: + m.Changes = append(m.Changes, ConfChangeSingle{}) + if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } @@ -2208,7 +2431,7 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2217,9 +2440,6 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRaft } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRaft - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2237,9 +2457,6 @@ func (m *ConfChange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRaft } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRaft - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2307,11 +2524,8 @@ func skipRaft(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthRaft - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthRaft } return iNdEx, nil @@ -2342,9 +2556,6 @@ func skipRaft(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthRaft - } } return iNdEx, nil case 4: @@ -2363,3 +2574,73 @@ var ( ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") ) + +func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) } + +var fileDescriptorRaft = []byte{ + // 1009 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcd, 0x6e, 0xe3, 0x36, + 0x17, 0xb5, 0x64, 0xc5, 0x3f, 0xd7, 0x8e, 0xc3, 0xdc, 0xc9, 0x37, 0x20, 0x82, 0xc0, 0xe3, 0xcf, + 0xd3, 0x62, 0x8c, 0x14, 0x93, 0x16, 0x5e, 0x14, 0x45, 0x77, 0xf9, 0x19, 0x20, 0x29, 0xe2, 0x74, + 0xea, 0x64, 0xb2, 0x28, 0x50, 0x04, 0x8c, 0x45, 0x2b, 0x6a, 0x2d, 0x51, 0xa0, 0xe8, 0x34, 0xd9, + 0x14, 0x45, 0x9f, 0xa2, 0x9b, 0xd9, 0xf6, 0x01, 0xfa, 0x14, 0x59, 0x0e, 0xd0, 0xfd, 0xa0, 0x93, + 0xbe, 0x48, 0x41, 0x8a, 0xb2, 0x65, 0x27, 0x98, 0x45, 0x77, 0xe4, 0x39, 0x87, 0xf7, 0x9e, 0x7b, + 0x79, 0x45, 0x01, 0x48, 0x36, 0x56, 0x3b, 0x89, 0x14, 0x4a, 0x60, 0x45, 0xaf, 0x93, 0xcb, 0xcd, + 0x8d, 0x40, 0x04, 0xc2, 0x40, 0x9f, 0xeb, 0x55, 0xc6, 0x76, 0x7f, 0x81, 0x95, 0x57, 0xb1, 0x92, + 0xb7, 0xf8, 0x19, 0x78, 0x67, 0xb7, 0x09, 0xa7, 0x4e, 0xc7, 0xe9, 0xb5, 0xfa, 0xeb, 0x3b, 0xd9, + 0xa9, 0x1d, 0x43, 0x6a, 0x62, 0xcf, 0xbb, 0x7b, 0xff, 0xac, 0x34, 0x34, 0x22, 0xa4, 0xe0, 0x9d, + 0x71, 0x19, 0x51, 0xb7, 0xe3, 0xf4, 0xbc, 0x19, 0xc3, 0x65, 0x84, 0x9b, 0xb0, 0x72, 0x14, 0xfb, + 0xfc, 0x86, 0x96, 0x0b, 0x54, 0x06, 0x21, 0x82, 0x77, 0xc0, 0x14, 0xa3, 0x5e, 0xc7, 0xe9, 0x35, + 0x87, 0x66, 0xdd, 0xfd, 0xd5, 0x01, 0x72, 0x1a, 0xb3, 0x24, 0xbd, 0x12, 0x6a, 0xc0, 0x15, 0xf3, + 0x99, 0x62, 0xf8, 0x25, 0xc0, 0x48, 0xc4, 0xe3, 0x8b, 0x54, 0x31, 0x95, 
0x39, 0x6a, 0xcc, 0x1d, + 0xed, 0x8b, 0x78, 0x7c, 0xaa, 0x09, 0x1b, 0xbc, 0x3e, 0xca, 0x01, 0x9d, 0x3c, 0x34, 0xc9, 0x8b, + 0xbe, 0x32, 0x48, 0x5b, 0x56, 0xda, 0x72, 0xd1, 0x97, 0x41, 0xba, 0xdf, 0x43, 0x2d, 0x77, 0xa0, + 0x2d, 0x6a, 0x07, 0x26, 0x67, 0x73, 0x68, 0xd6, 0xf8, 0x35, 0xd4, 0x22, 0xeb, 0xcc, 0x04, 0x6e, + 0xf4, 0x69, 0xee, 0x65, 0xd9, 0xb9, 0x8d, 0x3b, 0xd3, 0x77, 0xdf, 0x96, 0xa1, 0x3a, 0xe0, 0x69, + 0xca, 0x02, 0x8e, 0x2f, 0xc1, 0x53, 0xf3, 0x0e, 0x3f, 0xc9, 0x63, 0x58, 0xba, 0xd8, 0x63, 0x2d, + 0xc3, 0x0d, 0x70, 0x95, 0x58, 0xa8, 0xc4, 0x55, 0x42, 0x97, 0x31, 0x96, 0x62, 0xa9, 0x0c, 0x8d, + 0xcc, 0x0a, 0xf4, 0x96, 0x0b, 0xc4, 0x36, 0x54, 0x27, 0x22, 0x30, 0x17, 0xb6, 0x52, 0x20, 0x73, + 0x70, 0xde, 0xb6, 0xca, 0xc3, 0xb6, 0xbd, 0x84, 0x2a, 0x8f, 0x95, 0x0c, 0x79, 0x4a, 0xab, 0x9d, + 0x72, 0xaf, 0xd1, 0x5f, 0x5d, 0x98, 0x8c, 0x3c, 0x94, 0xd5, 0xe0, 0x16, 0x54, 0x46, 0x22, 0x8a, + 0x42, 0x45, 0x6b, 0x85, 0x58, 0x16, 0xc3, 0x3e, 0xd4, 0x52, 0xdb, 0x31, 0x5a, 0x37, 0x9d, 0x24, + 0xcb, 0x9d, 0xcc, 0x3b, 0x98, 0xeb, 0x74, 0x44, 0xc9, 0x7f, 0xe4, 0x23, 0x45, 0xa1, 0xe3, 0xf4, + 0x6a, 0x79, 0xc4, 0x0c, 0xc3, 0x4f, 0x00, 0xb2, 0xd5, 0x61, 0x18, 0x2b, 0xda, 0x28, 0xe4, 0x2c, + 0xe0, 0x48, 0xa1, 0x3a, 0x12, 0xb1, 0xe2, 0x37, 0x8a, 0x36, 0xcd, 0xc5, 0xe6, 0xdb, 0xee, 0x0f, + 0x50, 0x3f, 0x64, 0xd2, 0xcf, 0xc6, 0x27, 0xef, 0xa0, 0xf3, 0xa0, 0x83, 0x14, 0xbc, 0x6b, 0xa1, + 0xf8, 0xe2, 0xbc, 0x6b, 0xa4, 0x50, 0x70, 0xf9, 0x61, 0xc1, 0xdd, 0x3f, 0x1d, 0xa8, 0xcf, 0xe6, + 0x15, 0x9f, 0x42, 0x45, 0x9f, 0x91, 0x29, 0x75, 0x3a, 0xe5, 0x9e, 0x37, 0xb4, 0x3b, 0xdc, 0x84, + 0xda, 0x84, 0x33, 0x19, 0x6b, 0xc6, 0x35, 0xcc, 0x6c, 0x8f, 0x2f, 0x60, 0x2d, 0x53, 0x5d, 0x88, + 0xa9, 0x0a, 0x44, 0x18, 0x07, 0xb4, 0x6c, 0x24, 0xad, 0x0c, 0xfe, 0xd6, 0xa2, 0xf8, 0x1c, 0x56, + 0xf3, 0x43, 0x17, 0xb1, 0xae, 0xd4, 0x33, 0xb2, 0x66, 0x0e, 0x9e, 0xf0, 0x1b, 0x85, 0xcf, 0x01, + 0xd8, 0x54, 0x89, 0x8b, 0x09, 0x67, 0xd7, 0xdc, 0x0c, 0x43, 0xde, 0xd0, 0xba, 0xc6, 0x8f, 0x35, + 0xdc, 0x7d, 0xeb, 0x00, 0x68, 0xd3, 0xfb, 0x57, 0x2c, 0x0e, 0xf4, 0x47, 0xe5, 0x86, 0xbe, 0xed, + 0x09, 0x68, 0xed, 0xfd, 0xfb, 0x67, 0xee, 0xd1, 0xc1, 0xd0, 0x0d, 0x7d, 0xfc, 0xc2, 0x8e, 0xb4, + 0x6b, 0x46, 0xfa, 0x69, 0xf1, 0x13, 0xcd, 0x4e, 0x3f, 0x98, 0xea, 0x17, 0x50, 0x8d, 0x85, 0xcf, + 0x2f, 0x42, 0xdf, 0x36, 0xac, 0x65, 0x43, 0x56, 0x4e, 0x84, 0xcf, 0x8f, 0x0e, 0x86, 0x15, 0x4d, + 0x1f, 0xf9, 0xc5, 0x3b, 0xf3, 0x16, 0xef, 0x2c, 0x02, 0x32, 0x4f, 0x70, 0x1a, 0xc6, 0xc1, 0x84, + 0xcf, 0x8c, 0x38, 0xff, 0xc5, 0x88, 0xfb, 0x31, 0x23, 0xdd, 0x3f, 0x1c, 0x68, 0xce, 0xe3, 0x9c, + 0xf7, 0x71, 0x0f, 0x40, 0x49, 0x16, 0xa7, 0xa1, 0x0a, 0x45, 0x6c, 0x33, 0x6e, 0x3d, 0x92, 0x71, + 0xa6, 0xc9, 0x27, 0x72, 0x7e, 0x0a, 0xbf, 0x82, 0xea, 0xc8, 0xa8, 0xb2, 0x1b, 0x2f, 0x3c, 0x29, + 0xcb, 0xa5, 0xe5, 0x5f, 0x98, 0x95, 0x17, 0xfb, 0x52, 0x5e, 0xe8, 0xcb, 0xf6, 0x21, 0xd4, 0x67, + 0xaf, 0x35, 0xae, 0x41, 0xc3, 0x6c, 0x4e, 0x84, 0x8c, 0xd8, 0x84, 0x94, 0xf0, 0x09, 0xac, 0x19, + 0x60, 0x1e, 0x9f, 0x38, 0xf8, 0x3f, 0x58, 0x5f, 0x02, 0xcf, 0xfb, 0xc4, 0xdd, 0xfe, 0xcb, 0x85, + 0x46, 0xe1, 0x59, 0x42, 0x80, 0xca, 0x20, 0x0d, 0x0e, 0xa7, 0x09, 0x29, 0x61, 0x03, 0xaa, 0x83, + 0x34, 0xd8, 0xe3, 0x4c, 0x11, 0xc7, 0x6e, 0x5e, 0x4b, 0x91, 0x10, 0xd7, 0xaa, 0x76, 0x93, 0x84, + 0x94, 0xb1, 0x05, 0x90, 0xad, 0x87, 0x3c, 0x4d, 0x88, 0x67, 0x85, 0xe7, 0x42, 0x71, 0xb2, 0xa2, + 0xbd, 0xd9, 0x8d, 0x61, 0x2b, 0x96, 0xd5, 0x4f, 0x00, 0xa9, 0x22, 0x81, 0xa6, 0x4e, 0xc6, 0x99, + 0x54, 0x97, 0x3a, 0x4b, 0x0d, 0x37, 0x80, 0x14, 0x11, 0x73, 0xa8, 0x8e, 0x08, 0xad, 0x41, 0x1a, + 
0xbc, 0x89, 0x25, 0x67, 0xa3, 0x2b, 0x76, 0x39, 0xe1, 0x04, 0x70, 0x1d, 0x56, 0x6d, 0x20, 0xfd, + 0xc5, 0x4d, 0x53, 0xd2, 0xb0, 0xb2, 0xfd, 0x2b, 0x3e, 0xfa, 0xe9, 0xbb, 0xa9, 0x90, 0xd3, 0x88, + 0x34, 0x75, 0xd9, 0x83, 0x34, 0x30, 0x17, 0x34, 0xe6, 0xf2, 0x98, 0x33, 0x9f, 0x4b, 0xb2, 0x6a, + 0x4f, 0x9f, 0x85, 0x11, 0x17, 0x53, 0x75, 0x22, 0x7e, 0x26, 0x2d, 0x6b, 0x66, 0xc8, 0x99, 0x6f, + 0x7e, 0x61, 0x64, 0xcd, 0x9a, 0x99, 0x21, 0xc6, 0x0c, 0xb1, 0xf5, 0xbe, 0x96, 0xdc, 0x94, 0xb8, + 0x6e, 0xb3, 0xda, 0xbd, 0xd1, 0xe0, 0xf6, 0x6f, 0x0e, 0x6c, 0x3c, 0x36, 0x1e, 0xb8, 0x05, 0xf4, + 0x31, 0x7c, 0x77, 0xaa, 0x04, 0x29, 0xe1, 0xa7, 0xf0, 0xff, 0xc7, 0xd8, 0x6f, 0x44, 0x18, 0xab, + 0xa3, 0x28, 0x99, 0x84, 0xa3, 0x50, 0x5f, 0xc5, 0xc7, 0x64, 0xaf, 0x6e, 0xac, 0xcc, 0xdd, 0xbe, + 0x85, 0xd6, 0xe2, 0x47, 0xa1, 0x9b, 0x31, 0x47, 0x76, 0x7d, 0x5f, 0x8f, 0x3f, 0x29, 0x21, 0x2d, + 0x9a, 0x1d, 0xf2, 0x48, 0x5c, 0x73, 0xc3, 0x38, 0x8b, 0xcc, 0x9b, 0xc4, 0x67, 0x2a, 0x63, 0xdc, + 0xc5, 0x42, 0x76, 0x7d, 0xff, 0x38, 0x7b, 0x7b, 0x0c, 0x5b, 0xde, 0xa3, 0x77, 0x1f, 0xda, 0xa5, + 0x77, 0x1f, 0xda, 0xa5, 0xbb, 0xfb, 0xb6, 0xf3, 0xee, 0xbe, 0xed, 0xfc, 0x7d, 0xdf, 0x76, 0x7e, + 0xff, 0xa7, 0x5d, 0xfa, 0x37, 0x00, 0x00, 0xff, 0xff, 0x87, 0x11, 0x6d, 0xd6, 0xaf, 0x08, 0x00, + 0x00, +} diff --git a/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto b/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto new file mode 100644 index 000000000..23d62ec2f --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto @@ -0,0 +1,177 @@ +syntax = "proto2"; +package raftpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +enum EntryType { + EntryNormal = 0; + EntryConfChange = 1; // corresponds to pb.ConfChange + EntryConfChangeV2 = 2; // corresponds to pb.ConfChangeV2 +} + +message Entry { + optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional EntryType Type = 1 [(gogoproto.nullable) = false]; + optional bytes Data = 4; +} + +message SnapshotMetadata { + optional ConfState conf_state = 1 [(gogoproto.nullable) = false]; + optional uint64 index = 2 [(gogoproto.nullable) = false]; + optional uint64 term = 3 [(gogoproto.nullable) = false]; +} + +message Snapshot { + optional bytes data = 1; + optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false]; +} + +enum MessageType { + MsgHup = 0; + MsgBeat = 1; + MsgProp = 2; + MsgApp = 3; + MsgAppResp = 4; + MsgVote = 5; + MsgVoteResp = 6; + MsgSnap = 7; + MsgHeartbeat = 8; + MsgHeartbeatResp = 9; + MsgUnreachable = 10; + MsgSnapStatus = 11; + MsgCheckQuorum = 12; + MsgTransferLeader = 13; + MsgTimeoutNow = 14; + MsgReadIndex = 15; + MsgReadIndexResp = 16; + MsgPreVote = 17; + MsgPreVoteResp = 18; +} + +message Message { + optional MessageType type = 1 [(gogoproto.nullable) = false]; + optional uint64 to = 2 [(gogoproto.nullable) = false]; + optional uint64 from = 3 [(gogoproto.nullable) = false]; + optional uint64 term = 4 [(gogoproto.nullable) = false]; + optional uint64 logTerm = 5 [(gogoproto.nullable) = false]; + optional uint64 index = 6 [(gogoproto.nullable) = false]; + repeated Entry entries = 7 [(gogoproto.nullable) = false]; + optional uint64 commit = 8 [(gogoproto.nullable) = false]; + 
optional Snapshot snapshot = 9 [(gogoproto.nullable) = false]; + optional bool reject = 10 [(gogoproto.nullable) = false]; + optional uint64 rejectHint = 11 [(gogoproto.nullable) = false]; + optional bytes context = 12; +} + +message HardState { + optional uint64 term = 1 [(gogoproto.nullable) = false]; + optional uint64 vote = 2 [(gogoproto.nullable) = false]; + optional uint64 commit = 3 [(gogoproto.nullable) = false]; +} + +// ConfChangeTransition specifies the behavior of a configuration change with +// respect to joint consensus. +enum ConfChangeTransition { + // Automatically use the simple protocol if possible, otherwise fall back + // to ConfChangeJointImplicit. Most applications will want to use this. + ConfChangeTransitionAuto = 0; + // Use joint consensus unconditionally, and transition out of them + // automatically (by proposing a zero configuration change). + // + // This option is suitable for applications that want to minimize the time + // spent in the joint configuration and do not store the joint configuration + // in the state machine (outside of InitialState). + ConfChangeTransitionJointImplicit = 1; + // Use joint consensus and remain in the joint configuration until the + // application proposes a no-op configuration change. This is suitable for + // applications that want to explicitly control the transitions, for example + // to use a custom payload (via the Context field). + ConfChangeTransitionJointExplicit = 2; +} + +message ConfState { + // The voters in the incoming config. (If the configuration is not joint, + // then the outgoing config is empty). + repeated uint64 voters = 1; + // The learners in the incoming config. + repeated uint64 learners = 2; + // The voters in the outgoing config. + repeated uint64 voters_outgoing = 3; + // The nodes that will become learners when the outgoing config is removed. + // These nodes are necessarily currently in nodes_joint (or they would have + // been added to the incoming config right away). + repeated uint64 learners_next = 4; + // If set, the config is joint and Raft will automatically transition into + // the final config (i.e. remove the outgoing config) when this is safe. + optional bool auto_leave = 5 [(gogoproto.nullable) = false]; +} + +enum ConfChangeType { + ConfChangeAddNode = 0; + ConfChangeRemoveNode = 1; + ConfChangeUpdateNode = 2; + ConfChangeAddLearnerNode = 3; +} + +message ConfChange { + optional ConfChangeType type = 2 [(gogoproto.nullable) = false]; + optional uint64 node_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID" ]; + optional bytes context = 4; + + // NB: this is used only by etcd to thread through a unique identifier. + // Ideally it should really use the Context instead. No counterpart to + // this field exists in ConfChangeV2. + optional uint64 id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID" ]; +} + +// ConfChangeSingle is an individual configuration change operation. Multiple +// such operations can be carried out atomically via a ConfChangeV2. +message ConfChangeSingle { + optional ConfChangeType type = 1 [(gogoproto.nullable) = false]; + optional uint64 node_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID"]; +} + +// ConfChangeV2 messages initiate configuration changes. They support both the +// simple "one at a time" membership change protocol and full Joint Consensus +// allowing for arbitrary changes in membership. 
+// +// The supplied context is treated as an opaque payload and can be used to +// attach an action on the state machine to the application of the config change +// proposal. Note that contrary to Joint Consensus as outlined in the Raft +// paper[1], configuration changes become active when they are *applied* to the +// state machine (not when they are appended to the log). +// +// The simple protocol can be used whenever only a single change is made. +// +// Non-simple changes require the use of Joint Consensus, for which two +// configuration changes are run. The first configuration change specifies the +// desired changes and transitions the Raft group into the joint configuration, +// in which quorum requires a majority of both the pre-changes and post-changes +// configuration. Joint Consensus avoids entering fragile intermediate +// configurations that could compromise survivability. For example, without the +// use of Joint Consensus and running across three availability zones with a +// replication factor of three, it is not possible to replace a voter without +// entering an intermediate configuration that does not survive the outage of +// one availability zone. +// +// The provided ConfChangeTransition specifies how (and whether) Joint Consensus +// is used, and assigns the task of leaving the joint configuration either to +// Raft or the application. Leaving the joint configuration is accomplished by +// proposing a ConfChangeV2 with only and optionally the Context field +// populated. +// +// For details on Raft membership changes, see: +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +message ConfChangeV2 { + optional ConfChangeTransition transition = 1 [(gogoproto.nullable) = false]; + repeated ConfChangeSingle changes = 2 [(gogoproto.nullable) = false]; + optional bytes context = 3; +} diff --git a/vendor/github.com/coreos/etcd/raft/rawnode.go b/vendor/go.etcd.io/etcd/raft/rawnode.go similarity index 60% rename from vendor/github.com/coreos/etcd/raft/rawnode.go rename to vendor/go.etcd.io/etcd/raft/rawnode.go index 925cb851c..90eb69493 100644 --- a/vendor/github.com/coreos/etcd/raft/rawnode.go +++ b/vendor/go.etcd.io/etcd/raft/rawnode.go @@ -17,7 +17,8 @@ package raft import ( "errors" - pb "github.com/coreos/etcd/raft/raftpb" + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" ) // ErrStepLocalMsg is returned when try to step a local raft message @@ -36,85 +37,20 @@ type RawNode struct { prevHardSt pb.HardState } -func (rn *RawNode) newReady() Ready { - return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt) -} - -func (rn *RawNode) commitReady(rd Ready) { - if rd.SoftState != nil { - rn.prevSoftSt = rd.SoftState - } - if !IsEmptyHardState(rd.HardState) { - rn.prevHardSt = rd.HardState - } - if rn.prevHardSt.Commit != 0 { - // In most cases, prevHardSt and rd.HardState will be the same - // because when there are new entries to apply we just sent a - // HardState with an updated Commit value. However, on initial - // startup the two are different because we don't send a HardState - // until something changes, but we do send any un-applied but - // committed entries (and previously-committed entries may be - // incorporated into the snapshot, even if rd.CommittedEntries is - // empty). Therefore we mark all committed entries as applied - // whether they were included in rd.HardState or not. 
- rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit) - } - if len(rd.Entries) > 0 { - e := rd.Entries[len(rd.Entries)-1] - rn.raft.raftLog.stableTo(e.Index, e.Term) - } - if !IsEmptySnap(rd.Snapshot) { - rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index) - } - if len(rd.ReadStates) != 0 { - rn.raft.readStates = nil - } -} - -// NewRawNode returns a new RawNode given configuration and a list of raft peers. -func NewRawNode(config *Config, peers []Peer) (*RawNode, error) { - if config.ID == 0 { - panic("config.ID must not be zero") - } +// NewRawNode instantiates a RawNode from the given configuration. +// +// See Bootstrap() for bootstrapping an initial state; this replaces the former +// 'peers' argument to this method (with identical behavior). However, It is +// recommended that instead of calling Bootstrap, applications bootstrap their +// state manually by setting up a Storage that has a first index > 1 and which +// stores the desired ConfState as its InitialState. +func NewRawNode(config *Config) (*RawNode, error) { r := newRaft(config) rn := &RawNode{ raft: r, } - lastIndex, err := config.Storage.LastIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - // If the log is empty, this is a new RawNode (like StartNode); otherwise it's - // restoring an existing RawNode (like RestartNode). - // TODO(bdarnell): rethink RawNode initialization and whether the application needs - // to be able to tell us when it expects the RawNode to exist. - if lastIndex == 0 { - r.becomeFollower(1, None) - ents := make([]pb.Entry, len(peers)) - for i, peer := range peers { - cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} - data, err := cc.Marshal() - if err != nil { - panic("unexpected marshal error") - } - - ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data} - } - r.raftLog.append(ents...) - r.raftLog.committed = uint64(len(ents)) - for _, peer := range peers { - r.addNode(peer.ID) - } - } - - // Set the initial hard and soft states after performing all initialization. rn.prevSoftSt = r.softState() - if lastIndex == 0 { - rn.prevHardSt = emptyState - } else { - rn.prevHardSt = r.hardState() - } - + rn.prevHardSt = r.hardState() return rn, nil } @@ -152,39 +88,20 @@ func (rn *RawNode) Propose(data []byte) error { }}) } -// ProposeConfChange proposes a config change. -func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error { - data, err := cc.Marshal() +// ProposeConfChange proposes a config change. See (Node).ProposeConfChange for +// details. +func (rn *RawNode) ProposeConfChange(cc pb.ConfChangeI) error { + m, err := confChangeToMsg(cc) if err != nil { return err } - return rn.raft.Step(pb.Message{ - Type: pb.MsgProp, - Entries: []pb.Entry{ - {Type: pb.EntryConfChange, Data: data}, - }, - }) + return rn.raft.Step(m) } // ApplyConfChange applies a config change to the local node. 
-func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState { - if cc.NodeID == None { - rn.raft.resetPendingConf() - return &pb.ConfState{Nodes: rn.raft.nodes()} - } - switch cc.Type { - case pb.ConfChangeAddNode: - rn.raft.addNode(cc.NodeID) - case pb.ConfChangeAddLearnerNode: - rn.raft.addLearner(cc.NodeID) - case pb.ConfChangeRemoveNode: - rn.raft.removeNode(cc.NodeID) - case pb.ConfChangeUpdateNode: - rn.raft.resetPendingConf() - default: - panic("unexpected conf type") - } - return &pb.ConfState{Nodes: rn.raft.nodes()} +func (rn *RawNode) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState { + cs := rn.raft.applyConfChange(cc.AsV2()) + return &cs } // Step advances the state machine using the given message. @@ -193,19 +110,41 @@ func (rn *RawNode) Step(m pb.Message) error { if IsLocalMsg(m.Type) { return ErrStepLocalMsg } - if pr := rn.raft.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) { + if pr := rn.raft.prs.Progress[m.From]; pr != nil || !IsResponseMsg(m.Type) { return rn.raft.Step(m) } return ErrStepPeerNotFound } -// Ready returns the current point-in-time state of this RawNode. +// Ready returns the outstanding work that the application needs to handle. This +// includes appending and applying entries or a snapshot, updating the HardState, +// and sending messages. The returned Ready() *must* be handled and subsequently +// passed back via Advance(). func (rn *RawNode) Ready() Ready { - rd := rn.newReady() - rn.raft.msgs = nil + rd := rn.readyWithoutAccept() + rn.acceptReady(rd) return rd } +// readyWithoutAccept returns a Ready. This is a read-only operation, i.e. there +// is no obligation that the Ready must be handled. +func (rn *RawNode) readyWithoutAccept() Ready { + return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt) +} + +// acceptReady is called when the consumer of the RawNode has decided to go +// ahead and handle a Ready. Nothing must alter the state of the RawNode between +// this call and the prior call to Ready(). +func (rn *RawNode) acceptReady(rd Ready) { + if rd.SoftState != nil { + rn.prevSoftSt = rd.SoftState + } + if len(rd.ReadStates) != 0 { + rn.raft.readStates = nil + } + rn.raft.msgs = nil +} + // HasReady called when RawNode user need to check if any Ready pending. // Checking logic in this method should be consistent with Ready.containsUpdates(). func (rn *RawNode) HasReady() bool { @@ -231,13 +170,47 @@ func (rn *RawNode) HasReady() bool { // Advance notifies the RawNode that the application has applied and saved progress in the // last Ready results. func (rn *RawNode) Advance(rd Ready) { - rn.commitReady(rd) + if !IsEmptyHardState(rd.HardState) { + rn.prevHardSt = rd.HardState + } + rn.raft.advance(rd) } -// Status returns the current status of the given group. -func (rn *RawNode) Status() *Status { +// Status returns the current status of the given group. This allocates, see +// BasicStatus and WithProgress for allocation-friendlier choices. +func (rn *RawNode) Status() Status { status := getStatus(rn.raft) - return &status + return status +} + +// BasicStatus returns a BasicStatus. Notably this does not contain the +// Progress map; see WithProgress for an allocation-free way to inspect it. +func (rn *RawNode) BasicStatus() BasicStatus { + return getBasicStatus(rn.raft) +} + +// ProgressType indicates the type of replica a Progress corresponds to. +type ProgressType byte + +const ( + // ProgressTypePeer accompanies a Progress for a regular peer replica. 
+ ProgressTypePeer ProgressType = iota + // ProgressTypeLearner accompanies a Progress for a learner replica. + ProgressTypeLearner +) + +// WithProgress is a helper to introspect the Progress for this node and its +// peers. +func (rn *RawNode) WithProgress(visitor func(id uint64, typ ProgressType, pr tracker.Progress)) { + rn.raft.prs.Visit(func(id uint64, pr *tracker.Progress) { + typ := ProgressTypePeer + if pr.IsLearner { + typ = ProgressTypeLearner + } + p := *pr + p.Inflights = nil + visitor(id, typ, p) + }) } // ReportUnreachable reports the given node is not reachable for the last send. diff --git a/vendor/github.com/coreos/etcd/raft/read_only.go b/vendor/go.etcd.io/etcd/raft/read_only.go similarity index 79% rename from vendor/github.com/coreos/etcd/raft/read_only.go rename to vendor/go.etcd.io/etcd/raft/read_only.go index ae746fa73..6987f1bd7 100644 --- a/vendor/github.com/coreos/etcd/raft/read_only.go +++ b/vendor/go.etcd.io/etcd/raft/read_only.go @@ -14,7 +14,7 @@ package raft -import pb "github.com/coreos/etcd/raft/raftpb" +import pb "go.etcd.io/etcd/raft/raftpb" // ReadState provides state for read only query. // It's caller's responsibility to call ReadIndex first before getting @@ -29,7 +29,11 @@ type ReadState struct { type readIndexStatus struct { req pb.Message index uint64 - acks map[uint64]struct{} + // NB: this never records 'false', but it's more convenient to use this + // instead of a map[uint64]struct{} due to the API of quorum.VoteResult. If + // this becomes performance sensitive enough (doubtful), quorum.VoteResult + // can change to an API that is closer to that of CommittedIndex. + acks map[uint64]bool } type readOnly struct { @@ -50,26 +54,25 @@ func newReadOnly(option ReadOnlyOption) *readOnly { // the read only request. // `m` is the original read only request message from the local or remote node. func (ro *readOnly) addRequest(index uint64, m pb.Message) { - ctx := string(m.Entries[0].Data) - if _, ok := ro.pendingReadIndex[ctx]; ok { + s := string(m.Entries[0].Data) + if _, ok := ro.pendingReadIndex[s]; ok { return } - ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})} - ro.readIndexQueue = append(ro.readIndexQueue, ctx) + ro.pendingReadIndex[s] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]bool)} + ro.readIndexQueue = append(ro.readIndexQueue, s) } // recvAck notifies the readonly struct that the raft state machine received // an acknowledgment of the heartbeat that attached with the read only request // context. -func (ro *readOnly) recvAck(m pb.Message) int { - rs, ok := ro.pendingReadIndex[string(m.Context)] +func (ro *readOnly) recvAck(id uint64, context []byte) map[uint64]bool { + rs, ok := ro.pendingReadIndex[string(context)] if !ok { - return 0 + return nil } - rs.acks[m.From] = struct{}{} - // add one to include an ack from local node - return len(rs.acks) + 1 + rs.acks[id] = true + return rs.acks } // advance advances the read only request queue kept by the readonly struct. 
diff --git a/vendor/github.com/coreos/etcd/raft/status.go b/vendor/go.etcd.io/etcd/raft/status.go similarity index 68% rename from vendor/github.com/coreos/etcd/raft/status.go rename to vendor/go.etcd.io/etcd/raft/status.go index f4d3d86a4..adc60486d 100644 --- a/vendor/github.com/coreos/etcd/raft/status.go +++ b/vendor/go.etcd.io/etcd/raft/status.go @@ -17,44 +17,62 @@ package raft import ( "fmt" - pb "github.com/coreos/etcd/raft/raftpb" + pb "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/raft/tracker" ) +// Status contains information about this Raft peer and its view of the system. +// The Progress is only populated on the leader. type Status struct { + BasicStatus + Config tracker.Config + Progress map[uint64]tracker.Progress +} + +// BasicStatus contains basic information about the Raft peer. It does not allocate. +type BasicStatus struct { ID uint64 pb.HardState SoftState - Applied uint64 - Progress map[uint64]Progress + Applied uint64 LeadTransferee uint64 } -// getStatus gets a copy of the current raft status. -func getStatus(r *raft) Status { - s := Status{ +func getProgressCopy(r *raft) map[uint64]tracker.Progress { + m := make(map[uint64]tracker.Progress) + r.prs.Visit(func(id uint64, pr *tracker.Progress) { + var p tracker.Progress + p = *pr + p.Inflights = pr.Inflights.Clone() + pr = nil + + m[id] = p + }) + return m +} + +func getBasicStatus(r *raft) BasicStatus { + s := BasicStatus{ ID: r.id, LeadTransferee: r.leadTransferee, } - s.HardState = r.hardState() s.SoftState = *r.softState() - s.Applied = r.raftLog.applied + return s +} +// getStatus gets a copy of the current raft status. +func getStatus(r *raft) Status { + var s Status + s.BasicStatus = getBasicStatus(r) if s.RaftState == StateLeader { - s.Progress = make(map[uint64]Progress) - for id, p := range r.prs { - s.Progress[id] = *p - } - - for id, p := range r.learnerPrs { - s.Progress[id] = *p - } + s.Progress = getProgressCopy(r) } - + s.Config = r.prs.Config.Clone() return s } diff --git a/vendor/github.com/coreos/etcd/raft/storage.go b/vendor/go.etcd.io/etcd/raft/storage.go similarity index 98% rename from vendor/github.com/coreos/etcd/raft/storage.go rename to vendor/go.etcd.io/etcd/raft/storage.go index 69c3a7d90..6be574590 100644 --- a/vendor/github.com/coreos/etcd/raft/storage.go +++ b/vendor/go.etcd.io/etcd/raft/storage.go @@ -18,7 +18,7 @@ import ( "errors" "sync" - pb "github.com/coreos/etcd/raft/raftpb" + pb "go.etcd.io/etcd/raft/raftpb" ) // ErrCompacted is returned by Storage.Entries/Compact when a requested @@ -44,6 +44,8 @@ var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unav // become inoperable and refuse to participate in elections; the // application is responsible for cleanup and recovery in this case. type Storage interface { + // TODO(tbg): split this into two interfaces, LogStorage and StateStorage. + // InitialState returns the saved HardState and ConfState information. InitialState() (pb.HardState, pb.ConfState, error) // Entries returns a slice of log entries in the range [lo,hi). diff --git a/vendor/go.etcd.io/etcd/raft/tracker/inflights.go b/vendor/go.etcd.io/etcd/raft/tracker/inflights.go new file mode 100644 index 000000000..1a056341a --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/tracker/inflights.go @@ -0,0 +1,132 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +// Inflights limits the number of MsgApp (represented by the largest index +// contained within) sent to followers but not yet acknowledged by them. Callers +// use Full() to check whether more messages can be sent, call Add() whenever +// they are sending a new append, and release "quota" via FreeLE() whenever an +// ack is received. +type Inflights struct { + // the starting index in the buffer + start int + // number of inflights in the buffer + count int + + // the size of the buffer + size int + + // buffer contains the index of the last entry + // inside one message. + buffer []uint64 +} + +// NewInflights sets up an Inflights that allows up to 'size' inflight messages. +func NewInflights(size int) *Inflights { + return &Inflights{ + size: size, + } +} + +// Clone returns an *Inflights that is identical to but shares no memory with +// the receiver. +func (in *Inflights) Clone() *Inflights { + ins := *in + ins.buffer = append([]uint64(nil), in.buffer...) + return &ins +} + +// Add notifies the Inflights that a new message with the given index is being +// dispatched. Full() must be called prior to Add() to verify that there is room +// for one more message, and consecutive calls to add Add() must provide a +// monotonic sequence of indexes. +func (in *Inflights) Add(inflight uint64) { + if in.Full() { + panic("cannot add into a Full inflights") + } + next := in.start + in.count + size := in.size + if next >= size { + next -= size + } + if next >= len(in.buffer) { + in.grow() + } + in.buffer[next] = inflight + in.count++ +} + +// grow the inflight buffer by doubling up to inflights.size. We grow on demand +// instead of preallocating to inflights.size to handle systems which have +// thousands of Raft groups per process. +func (in *Inflights) grow() { + newSize := len(in.buffer) * 2 + if newSize == 0 { + newSize = 1 + } else if newSize > in.size { + newSize = in.size + } + newBuffer := make([]uint64, newSize) + copy(newBuffer, in.buffer) + in.buffer = newBuffer +} + +// FreeLE frees the inflights smaller or equal to the given `to` flight. +func (in *Inflights) FreeLE(to uint64) { + if in.count == 0 || to < in.buffer[in.start] { + // out of the left side of the window + return + } + + idx := in.start + var i int + for i = 0; i < in.count; i++ { + if to < in.buffer[idx] { // found the first large inflight + break + } + + // increase index and maybe rotate + size := in.size + if idx++; idx >= size { + idx -= size + } + } + // free i inflights and set new start index + in.count -= i + in.start = idx + if in.count == 0 { + // inflights is empty, reset the start index so that we don't grow the + // buffer unnecessarily. + in.start = 0 + } +} + +// FreeFirstOne releases the first inflight. This is a no-op if nothing is +// inflight. +func (in *Inflights) FreeFirstOne() { in.FreeLE(in.buffer[in.start]) } + +// Full returns true if no more messages can be sent at the moment. +func (in *Inflights) Full() bool { + return in.count == in.size +} + +// Count returns the number of inflight messages. 
+func (in *Inflights) Count() int { return in.count } + +// reset frees all inflights. +func (in *Inflights) reset() { + in.count = 0 + in.start = 0 +} diff --git a/vendor/go.etcd.io/etcd/raft/tracker/progress.go b/vendor/go.etcd.io/etcd/raft/tracker/progress.go new file mode 100644 index 000000000..62c81f45a --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/tracker/progress.go @@ -0,0 +1,259 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +import ( + "fmt" + "sort" + "strings" +) + +// Progress represents a follower’s progress in the view of the leader. Leader +// maintains progresses of all followers, and sends entries to the follower +// based on its progress. +// +// NB(tbg): Progress is basically a state machine whose transitions are mostly +// strewn around `*raft.raft`. Additionally, some fields are only used when in a +// certain State. All of this isn't ideal. +type Progress struct { + Match, Next uint64 + // State defines how the leader should interact with the follower. + // + // When in StateProbe, leader sends at most one replication message + // per heartbeat interval. It also probes actual progress of the follower. + // + // When in StateReplicate, leader optimistically increases next + // to the latest entry sent after sending replication message. This is + // an optimized state for fast replicating log entries to the follower. + // + // When in StateSnapshot, leader should have sent out snapshot + // before and stops sending any replication message. + State StateType + + // PendingSnapshot is used in StateSnapshot. + // If there is a pending snapshot, the pendingSnapshot will be set to the + // index of the snapshot. If pendingSnapshot is set, the replication process of + // this Progress will be paused. raft will not resend snapshot until the pending one + // is reported to be failed. + PendingSnapshot uint64 + + // RecentActive is true if the progress is recently active. Receiving any messages + // from the corresponding follower indicates the progress is active. + // RecentActive can be reset to false after an election timeout. + // + // TODO(tbg): the leader should always have this set to true. + RecentActive bool + + // ProbeSent is used while this follower is in StateProbe. When ProbeSent is + // true, raft should pause sending replication message to this peer until + // ProbeSent is reset. See ProbeAcked() and IsPaused(). + ProbeSent bool + + // Inflights is a sliding window for the inflight messages. + // Each inflight message contains one or more log entries. + // The max number of entries per message is defined in raft config as MaxSizePerMsg. + // Thus inflight effectively limits both the number of inflight messages + // and the bandwidth each Progress can use. + // When inflights is Full, no more message should be sent. + // When a leader sends out a message, the index of the last + // entry should be added to inflights. The index MUST be added + // into inflights in order. 
+ // When a leader receives a reply, the previous inflights should + // be freed by calling inflights.FreeLE with the index of the last + // received entry. + Inflights *Inflights + + // IsLearner is true if this progress is tracked for a learner. + IsLearner bool +} + +// ResetState moves the Progress into the specified State, resetting ProbeSent, +// PendingSnapshot, and Inflights. +func (pr *Progress) ResetState(state StateType) { + pr.ProbeSent = false + pr.PendingSnapshot = 0 + pr.State = state + pr.Inflights.reset() +} + +func max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func min(a, b uint64) uint64 { + if a > b { + return b + } + return a +} + +// ProbeAcked is called when this peer has accepted an append. It resets +// ProbeSent to signal that additional append messages should be sent without +// further delay. +func (pr *Progress) ProbeAcked() { + pr.ProbeSent = false +} + +// BecomeProbe transitions into StateProbe. Next is reset to Match+1 or, +// optionally and if larger, the index of the pending snapshot. +func (pr *Progress) BecomeProbe() { + // If the original state is StateSnapshot, progress knows that + // the pending snapshot has been sent to this peer successfully, then + // probes from pendingSnapshot + 1. + if pr.State == StateSnapshot { + pendingSnapshot := pr.PendingSnapshot + pr.ResetState(StateProbe) + pr.Next = max(pr.Match+1, pendingSnapshot+1) + } else { + pr.ResetState(StateProbe) + pr.Next = pr.Match + 1 + } +} + +// BecomeReplicate transitions into StateReplicate, resetting Next to Match+1. +func (pr *Progress) BecomeReplicate() { + pr.ResetState(StateReplicate) + pr.Next = pr.Match + 1 +} + +// BecomeSnapshot moves the Progress to StateSnapshot with the specified pending +// snapshot index. +func (pr *Progress) BecomeSnapshot(snapshoti uint64) { + pr.ResetState(StateSnapshot) + pr.PendingSnapshot = snapshoti +} + +// MaybeUpdate is called when an MsgAppResp arrives from the follower, with the +// index acked by it. The method returns false if the given n index comes from +// an outdated message. Otherwise it updates the progress and returns true. +func (pr *Progress) MaybeUpdate(n uint64) bool { + var updated bool + if pr.Match < n { + pr.Match = n + updated = true + pr.ProbeAcked() + } + if pr.Next < n+1 { + pr.Next = n + 1 + } + return updated +} + +// OptimisticUpdate signals that appends all the way up to and including index n +// are in-flight. As a result, Next is increased to n+1. +func (pr *Progress) OptimisticUpdate(n uint64) { pr.Next = n + 1 } + +// MaybeDecrTo adjusts the Progress to the receipt of a MsgApp rejection. The +// arguments are the index the follower rejected to append to its log, and its +// last index. +// +// Rejections can happen spuriously as messages are sent out of order or +// duplicated. In such cases, the rejection pertains to an index that the +// Progress already knows were previously acknowledged, and false is returned +// without changing the Progress. +// +// If the rejection is genuine, Next is lowered sensibly, and the Progress is +// cleared for sending log entries. +func (pr *Progress) MaybeDecrTo(rejected, last uint64) bool { + if pr.State == StateReplicate { + // The rejection must be stale if the progress has matched and "rejected" + // is smaller than "match". + if rejected <= pr.Match { + return false + } + // Directly decrease next to match + 1. + // + // TODO(tbg): why not use last if it's larger? 
+ pr.Next = pr.Match + 1 + return true + } + + // The rejection must be stale if "rejected" does not match next - 1. This + // is because non-replicating followers are probed one entry at a time. + if pr.Next-1 != rejected { + return false + } + + if pr.Next = min(rejected, last+1); pr.Next < 1 { + pr.Next = 1 + } + pr.ProbeSent = false + return true +} + +// IsPaused returns whether sending log entries to this node has been throttled. +// This is done when a node has rejected recent MsgApps, is currently waiting +// for a snapshot, or has reached the MaxInflightMsgs limit. In normal +// operation, this is false. A throttled node will be contacted less frequently +// until it has reached a state in which it's able to accept a steady stream of +// log entries again. +func (pr *Progress) IsPaused() bool { + switch pr.State { + case StateProbe: + return pr.ProbeSent + case StateReplicate: + return pr.Inflights.Full() + case StateSnapshot: + return true + default: + panic("unexpected state") + } +} + +func (pr *Progress) String() string { + var buf strings.Builder + fmt.Fprintf(&buf, "%s match=%d next=%d", pr.State, pr.Match, pr.Next) + if pr.IsLearner { + fmt.Fprint(&buf, " learner") + } + if pr.IsPaused() { + fmt.Fprint(&buf, " paused") + } + if pr.PendingSnapshot > 0 { + fmt.Fprintf(&buf, " pendingSnap=%d", pr.PendingSnapshot) + } + if !pr.RecentActive { + fmt.Fprintf(&buf, " inactive") + } + if n := pr.Inflights.Count(); n > 0 { + fmt.Fprintf(&buf, " inflight=%d", n) + if pr.Inflights.Full() { + fmt.Fprint(&buf, "[full]") + } + } + return buf.String() +} + +// ProgressMap is a map of *Progress. +type ProgressMap map[uint64]*Progress + +// String prints the ProgressMap in sorted key order, one Progress per line. +func (m ProgressMap) String() string { + ids := make([]uint64, 0, len(m)) + for k := range m { + ids = append(ids, k) + } + sort.Slice(ids, func(i, j int) bool { + return ids[i] < ids[j] + }) + var buf strings.Builder + for _, id := range ids { + fmt.Fprintf(&buf, "%d: %s\n", id, m[id]) + } + return buf.String() +} diff --git a/vendor/go.etcd.io/etcd/raft/tracker/state.go b/vendor/go.etcd.io/etcd/raft/tracker/state.go new file mode 100644 index 000000000..285b4b8f5 --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/tracker/state.go @@ -0,0 +1,42 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +// StateType is the state of a tracked follower. +type StateType uint64 + +const ( + // StateProbe indicates a follower whose last index isn't known. Such a + // follower is "probed" (i.e. an append sent periodically) to narrow down + // its last index. In the ideal (and common) case, only one round of probing + // is necessary as the follower will react with a hint. Followers that are + // probed over extended periods of time are often offline. + StateProbe StateType = iota + // StateReplicate is the state steady in which a follower eagerly receives + // log entries to append to its log. 
+ StateReplicate + // StateSnapshot indicates a follower that needs log entries not available + // from the leader's Raft log. Such a follower needs a full snapshot to + // return to StateReplicate. + StateSnapshot +) + +var prstmap = [...]string{ + "StateProbe", + "StateReplicate", + "StateSnapshot", +} + +func (st StateType) String() string { return prstmap[uint64(st)] } diff --git a/vendor/go.etcd.io/etcd/raft/tracker/tracker.go b/vendor/go.etcd.io/etcd/raft/tracker/tracker.go new file mode 100644 index 000000000..a4581143d --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/tracker/tracker.go @@ -0,0 +1,288 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracker + +import ( + "fmt" + "sort" + "strings" + + "go.etcd.io/etcd/raft/quorum" + pb "go.etcd.io/etcd/raft/raftpb" +) + +// Config reflects the configuration tracked in a ProgressTracker. +type Config struct { + Voters quorum.JointConfig + // AutoLeave is true if the configuration is joint and a transition to the + // incoming configuration should be carried out automatically by Raft when + // this is possible. If false, the configuration will be joint until the + // application initiates the transition manually. + AutoLeave bool + // Learners is a set of IDs corresponding to the learners active in the + // current configuration. + // + // Invariant: Learners and Voters does not intersect, i.e. if a peer is in + // either half of the joint config, it can't be a learner; if it is a + // learner it can't be in either half of the joint config. This invariant + // simplifies the implementation since it allows peers to have clarity about + // its current role without taking into account joint consensus. + Learners map[uint64]struct{} + // When we turn a voter into a learner during a joint consensus transition, + // we cannot add the learner directly when entering the joint state. This is + // because this would violate the invariant that the intersection of + // voters and learners is empty. For example, assume a Voter is removed and + // immediately re-added as a learner (or in other words, it is demoted): + // + // Initially, the configuration will be + // + // voters: {1 2 3} + // learners: {} + // + // and we want to demote 3. Entering the joint configuration, we naively get + // + // voters: {1 2} & {1 2 3} + // learners: {3} + // + // but this violates the invariant (3 is both voter and learner). Instead, + // we get + // + // voters: {1 2} & {1 2 3} + // learners: {} + // next_learners: {3} + // + // Where 3 is now still purely a voter, but we are remembering the intention + // to make it a learner upon transitioning into the final configuration: + // + // voters: {1 2} + // learners: {3} + // next_learners: {} + // + // Note that next_learners is not used while adding a learner that is not + // also a voter in the joint config. In this case, the learner is added + // right away when entering the joint configuration, so that it is caught up + // as soon as possible. 
+ LearnersNext map[uint64]struct{} +} + +func (c Config) String() string { + var buf strings.Builder + fmt.Fprintf(&buf, "voters=%s", c.Voters) + if c.Learners != nil { + fmt.Fprintf(&buf, " learners=%s", quorum.MajorityConfig(c.Learners).String()) + } + if c.LearnersNext != nil { + fmt.Fprintf(&buf, " learners_next=%s", quorum.MajorityConfig(c.LearnersNext).String()) + } + if c.AutoLeave { + fmt.Fprintf(&buf, " autoleave") + } + return buf.String() +} + +// Clone returns a copy of the Config that shares no memory with the original. +func (c *Config) Clone() Config { + clone := func(m map[uint64]struct{}) map[uint64]struct{} { + if m == nil { + return nil + } + mm := make(map[uint64]struct{}, len(m)) + for k := range m { + mm[k] = struct{}{} + } + return mm + } + return Config{ + Voters: quorum.JointConfig{clone(c.Voters[0]), clone(c.Voters[1])}, + Learners: clone(c.Learners), + LearnersNext: clone(c.LearnersNext), + } +} + +// ProgressTracker tracks the currently active configuration and the information +// known about the nodes and learners in it. In particular, it tracks the match +// index for each peer which in turn allows reasoning about the committed index. +type ProgressTracker struct { + Config + + Progress ProgressMap + + Votes map[uint64]bool + + MaxInflight int +} + +// MakeProgressTracker initializes a ProgressTracker. +func MakeProgressTracker(maxInflight int) ProgressTracker { + p := ProgressTracker{ + MaxInflight: maxInflight, + Config: Config{ + Voters: quorum.JointConfig{ + quorum.MajorityConfig{}, + nil, // only populated when used + }, + Learners: nil, // only populated when used + LearnersNext: nil, // only populated when used + }, + Votes: map[uint64]bool{}, + Progress: map[uint64]*Progress{}, + } + return p +} + +// ConfState returns a ConfState representing the active configuration. +func (p *ProgressTracker) ConfState() pb.ConfState { + return pb.ConfState{ + Voters: p.Voters[0].Slice(), + VotersOutgoing: p.Voters[1].Slice(), + Learners: quorum.MajorityConfig(p.Learners).Slice(), + LearnersNext: quorum.MajorityConfig(p.LearnersNext).Slice(), + AutoLeave: p.AutoLeave, + } +} + +// IsSingleton returns true if (and only if) there is only one voting member +// (i.e. the leader) in the current configuration. +func (p *ProgressTracker) IsSingleton() bool { + return len(p.Voters[0]) == 1 && len(p.Voters[1]) == 0 +} + +type matchAckIndexer map[uint64]*Progress + +var _ quorum.AckedIndexer = matchAckIndexer(nil) + +// AckedIndex implements IndexLookuper. +func (l matchAckIndexer) AckedIndex(id uint64) (quorum.Index, bool) { + pr, ok := l[id] + if !ok { + return 0, false + } + return quorum.Index(pr.Match), true +} + +// Committed returns the largest log index known to be committed based on what +// the voting members of the group have acknowledged. +func (p *ProgressTracker) Committed() uint64 { + return uint64(p.Voters.CommittedIndex(matchAckIndexer(p.Progress))) +} + +func insertionSort(sl []uint64) { + a, b := 0, len(sl) + for i := a + 1; i < b; i++ { + for j := i; j > a && sl[j] < sl[j-1]; j-- { + sl[j], sl[j-1] = sl[j-1], sl[j] + } + } +} + +// Visit invokes the supplied closure for all tracked progresses in stable order. +func (p *ProgressTracker) Visit(f func(id uint64, pr *Progress)) { + n := len(p.Progress) + // We need to sort the IDs and don't want to allocate since this is hot code. + // The optimization here mirrors that in `(MajorityConfig).CommittedIndex`, + // see there for details. 
+ var sl [7]uint64 + ids := sl[:] + if len(sl) >= n { + ids = sl[:n] + } else { + ids = make([]uint64, n) + } + for id := range p.Progress { + n-- + ids[n] = id + } + insertionSort(ids) + for _, id := range ids { + f(id, p.Progress[id]) + } +} + +// QuorumActive returns true if the quorum is active from the view of the local +// raft state machine. Otherwise, it returns false. +func (p *ProgressTracker) QuorumActive() bool { + votes := map[uint64]bool{} + p.Visit(func(id uint64, pr *Progress) { + if pr.IsLearner { + return + } + votes[id] = pr.RecentActive + }) + + return p.Voters.VoteResult(votes) == quorum.VoteWon +} + +// VoterNodes returns a sorted slice of voters. +func (p *ProgressTracker) VoterNodes() []uint64 { + m := p.Voters.IDs() + nodes := make([]uint64, 0, len(m)) + for id := range m { + nodes = append(nodes, id) + } + sort.Slice(nodes, func(i, j int) bool { return nodes[i] < nodes[j] }) + return nodes +} + +// LearnerNodes returns a sorted slice of learners. +func (p *ProgressTracker) LearnerNodes() []uint64 { + if len(p.Learners) == 0 { + return nil + } + nodes := make([]uint64, 0, len(p.Learners)) + for id := range p.Learners { + nodes = append(nodes, id) + } + sort.Slice(nodes, func(i, j int) bool { return nodes[i] < nodes[j] }) + return nodes +} + +// ResetVotes prepares for a new round of vote counting via recordVote. +func (p *ProgressTracker) ResetVotes() { + p.Votes = map[uint64]bool{} +} + +// RecordVote records that the node with the given id voted for this Raft +// instance if v == true (and declined it otherwise). +func (p *ProgressTracker) RecordVote(id uint64, v bool) { + _, ok := p.Votes[id] + if !ok { + p.Votes[id] = v + } +} + +// TallyVotes returns the number of granted and rejected Votes, and whether the +// election outcome is known. +func (p *ProgressTracker) TallyVotes() (granted int, rejected int, _ quorum.VoteResult) { + // Make sure to populate granted/rejected correctly even if the Votes slice + // contains members no longer part of the configuration. This doesn't really + // matter in the way the numbers are used (they're informational), but might + // as well get it right. + for id, pr := range p.Progress { + if pr.IsLearner { + continue + } + v, voted := p.Votes[id] + if !voted { + continue + } + if v { + granted++ + } else { + rejected++ + } + } + result := p.Voters.VoteResult(p.Votes) + return granted, rejected, result +} diff --git a/vendor/go.etcd.io/etcd/raft/util.go b/vendor/go.etcd.io/etcd/raft/util.go new file mode 100644 index 000000000..785cf735d --- /dev/null +++ b/vendor/go.etcd.io/etcd/raft/util.go @@ -0,0 +1,233 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package raft + +import ( + "bytes" + "fmt" + "strings" + + pb "go.etcd.io/etcd/raft/raftpb" +) + +func (st StateType) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("%q", st.String())), nil +} + +func min(a, b uint64) uint64 { + if a > b { + return b + } + return a +} + +func max(a, b uint64) uint64 { + if a > b { + return a + } + return b +} + +func IsLocalMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable || + msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum +} + +func IsResponseMsg(msgt pb.MessageType) bool { + return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp +} + +// voteResponseType maps vote and prevote message types to their corresponding responses. +func voteRespMsgType(msgt pb.MessageType) pb.MessageType { + switch msgt { + case pb.MsgVote: + return pb.MsgVoteResp + case pb.MsgPreVote: + return pb.MsgPreVoteResp + default: + panic(fmt.Sprintf("not a vote message: %s", msgt)) + } +} + +func DescribeHardState(hs pb.HardState) string { + var buf strings.Builder + fmt.Fprintf(&buf, "Term:%d", hs.Term) + if hs.Vote != 0 { + fmt.Fprintf(&buf, " Vote:%d", hs.Vote) + } + fmt.Fprintf(&buf, " Commit:%d", hs.Commit) + return buf.String() +} + +func DescribeSoftState(ss SoftState) string { + return fmt.Sprintf("Lead:%d State:%s", ss.Lead, ss.RaftState) +} + +func DescribeConfState(state pb.ConfState) string { + return fmt.Sprintf( + "Voters:%v VotersOutgoing:%v Learners:%v LearnersNext:%v AutoLeave:%v", + state.Voters, state.VotersOutgoing, state.Learners, state.LearnersNext, state.AutoLeave, + ) +} + +func DescribeSnapshot(snap pb.Snapshot) string { + m := snap.Metadata + return fmt.Sprintf("Index:%d Term:%d ConfState:%s", m.Index, m.Term, DescribeConfState(m.ConfState)) +} + +func DescribeReady(rd Ready, f EntryFormatter) string { + var buf strings.Builder + if rd.SoftState != nil { + fmt.Fprint(&buf, DescribeSoftState(*rd.SoftState)) + buf.WriteByte('\n') + } + if !IsEmptyHardState(rd.HardState) { + fmt.Fprintf(&buf, "HardState %s", DescribeHardState(rd.HardState)) + buf.WriteByte('\n') + } + if len(rd.ReadStates) > 0 { + fmt.Fprintf(&buf, "ReadStates %v\n", rd.ReadStates) + } + if len(rd.Entries) > 0 { + buf.WriteString("Entries:\n") + fmt.Fprint(&buf, DescribeEntries(rd.Entries, f)) + } + if !IsEmptySnap(rd.Snapshot) { + fmt.Fprintf(&buf, "Snapshot %s\n", DescribeSnapshot(rd.Snapshot)) + } + if len(rd.CommittedEntries) > 0 { + buf.WriteString("CommittedEntries:\n") + fmt.Fprint(&buf, DescribeEntries(rd.CommittedEntries, f)) + } + if len(rd.Messages) > 0 { + buf.WriteString("Messages:\n") + for _, msg := range rd.Messages { + fmt.Fprint(&buf, DescribeMessage(msg, f)) + buf.WriteByte('\n') + } + } + if buf.Len() > 0 { + return fmt.Sprintf("Ready MustSync=%t:\n%s", rd.MustSync, buf.String()) + } + return "" +} + +// EntryFormatter can be implemented by the application to provide human-readable formatting +// of entry data. Nil is a valid EntryFormatter and will use a default format. +type EntryFormatter func([]byte) string + +// DescribeMessage returns a concise human-readable description of a +// Message for debugging. 
+func DescribeMessage(m pb.Message, f EntryFormatter) string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index) + if m.Reject { + fmt.Fprintf(&buf, " Rejected (Hint: %d)", m.RejectHint) + } + if m.Commit != 0 { + fmt.Fprintf(&buf, " Commit:%d", m.Commit) + } + if len(m.Entries) > 0 { + fmt.Fprintf(&buf, " Entries:[") + for i, e := range m.Entries { + if i != 0 { + buf.WriteString(", ") + } + buf.WriteString(DescribeEntry(e, f)) + } + fmt.Fprintf(&buf, "]") + } + if !IsEmptySnap(m.Snapshot) { + fmt.Fprintf(&buf, " Snapshot: %s", DescribeSnapshot(m.Snapshot)) + } + return buf.String() +} + +// PayloadSize is the size of the payload of this Entry. Notably, it does not +// depend on its Index or Term. +func PayloadSize(e pb.Entry) int { + return len(e.Data) +} + +// DescribeEntry returns a concise human-readable description of an +// Entry for debugging. +func DescribeEntry(e pb.Entry, f EntryFormatter) string { + if f == nil { + f = func(data []byte) string { return fmt.Sprintf("%q", data) } + } + + formatConfChange := func(cc pb.ConfChangeI) string { + // TODO(tbg): give the EntryFormatter a type argument so that it gets + // a chance to expose the Context. + return pb.ConfChangesToString(cc.AsV2().Changes) + } + + var formatted string + switch e.Type { + case pb.EntryNormal: + formatted = f(e.Data) + case pb.EntryConfChange: + var cc pb.ConfChange + if err := cc.Unmarshal(e.Data); err != nil { + formatted = err.Error() + } else { + formatted = formatConfChange(cc) + } + case pb.EntryConfChangeV2: + var cc pb.ConfChangeV2 + if err := cc.Unmarshal(e.Data); err != nil { + formatted = err.Error() + } else { + formatted = formatConfChange(cc) + } + } + if formatted != "" { + formatted = " " + formatted + } + return fmt.Sprintf("%d/%d %s%s", e.Term, e.Index, e.Type, formatted) +} + +// DescribeEntries calls DescribeEntry for each Entry, adding a newline to +// each. +func DescribeEntries(ents []pb.Entry, f EntryFormatter) string { + var buf bytes.Buffer + for _, e := range ents { + _, _ = buf.WriteString(DescribeEntry(e, f) + "\n") + } + return buf.String() +} + +func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry { + if len(ents) == 0 { + return ents + } + size := ents[0].Size() + var limit int + for limit = 1; limit < len(ents); limit++ { + size += ents[limit].Size() + if uint64(size) > maxSize { + break + } + } + return ents[:limit] +} + +func assertConfStatesEquivalent(l Logger, cs1, cs2 pb.ConfState) { + err := cs1.Equivalent(cs2) + if err == nil { + return + } + l.Panic(err) +} diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/go.etcd.io/etcd/version/version.go similarity index 98% rename from vendor/github.com/coreos/etcd/version/version.go rename to vendor/go.etcd.io/etcd/version/version.go index dfc61c221..7d3a57202 100644 --- a/vendor/github.com/coreos/etcd/version/version.go +++ b/vendor/go.etcd.io/etcd/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. 
MinClusterVersion = "3.0.0" - Version = "3.3.22" + Version = "3.4.3" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/github.com/coreos/etcd/wal/decoder.go b/vendor/go.etcd.io/etcd/wal/decoder.go similarity index 96% rename from vendor/github.com/coreos/etcd/wal/decoder.go rename to vendor/go.etcd.io/etcd/wal/decoder.go index 6a217f897..f2f01fd88 100644 --- a/vendor/github.com/coreos/etcd/wal/decoder.go +++ b/vendor/go.etcd.io/etcd/wal/decoder.go @@ -21,10 +21,10 @@ import ( "io" "sync" - "github.com/coreos/etcd/pkg/crc" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/wal/walpb" + "go.etcd.io/etcd/pkg/crc" + "go.etcd.io/etcd/pkg/pbutil" + "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/wal/walpb" ) const minSectorSize = 512 diff --git a/vendor/github.com/coreos/etcd/wal/doc.go b/vendor/go.etcd.io/etcd/wal/doc.go similarity index 97% rename from vendor/github.com/coreos/etcd/wal/doc.go rename to vendor/go.etcd.io/etcd/wal/doc.go index a3abd6961..7ea348e4a 100644 --- a/vendor/github.com/coreos/etcd/wal/doc.go +++ b/vendor/go.etcd.io/etcd/wal/doc.go @@ -21,7 +21,7 @@ segmented WAL files. Inside of each file the raft state and entries are appended to it with the Save method: metadata := []byte{} - w, err := wal.Create("/var/lib/etcd", metadata) + w, err := wal.Create(zap.NewExample(), "/var/lib/etcd", metadata) ... err := w.Save(s, ents) diff --git a/vendor/github.com/coreos/etcd/wal/encoder.go b/vendor/go.etcd.io/etcd/wal/encoder.go similarity index 89% rename from vendor/github.com/coreos/etcd/wal/encoder.go rename to vendor/go.etcd.io/etcd/wal/encoder.go index e8890d88a..d3877ed5c 100644 --- a/vendor/github.com/coreos/etcd/wal/encoder.go +++ b/vendor/go.etcd.io/etcd/wal/encoder.go @@ -21,9 +21,9 @@ import ( "os" "sync" - "github.com/coreos/etcd/pkg/crc" - "github.com/coreos/etcd/pkg/ioutil" - "github.com/coreos/etcd/wal/walpb" + "go.etcd.io/etcd/pkg/crc" + "go.etcd.io/etcd/pkg/ioutil" + "go.etcd.io/etcd/wal/walpb" ) // walPageBytes is the alignment for flushing records to the backing Writer. @@ -92,8 +92,7 @@ func (e *encoder) encode(rec *walpb.Record) error { if padBytes != 0 { data = append(data, make([]byte, padBytes)...) 
} - n, err = e.bw.Write(data) - walWriteBytes.Add(float64(n)) + _, err = e.bw.Write(data) return err } @@ -109,16 +108,13 @@ func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) { func (e *encoder) flush() error { e.mu.Lock() - n, err := e.bw.FlushN() - e.mu.Unlock() - walWriteBytes.Add(float64(n)) - return err + defer e.mu.Unlock() + return e.bw.Flush() } func writeUint64(w io.Writer, n uint64, buf []byte) error { // http://golang.org/src/encoding/binary/binary.go binary.LittleEndian.PutUint64(buf, n) - nv, err := w.Write(buf) - walWriteBytes.Add(float64(nv)) + _, err := w.Write(buf) return err } diff --git a/vendor/github.com/coreos/etcd/wal/file_pipeline.go b/vendor/go.etcd.io/etcd/wal/file_pipeline.go similarity index 84% rename from vendor/github.com/coreos/etcd/wal/file_pipeline.go rename to vendor/go.etcd.io/etcd/wal/file_pipeline.go index 3a1c57c1c..e1e1c557b 100644 --- a/vendor/github.com/coreos/etcd/wal/file_pipeline.go +++ b/vendor/go.etcd.io/etcd/wal/file_pipeline.go @@ -19,11 +19,15 @@ import ( "os" "path/filepath" - "github.com/coreos/etcd/pkg/fileutil" + "go.etcd.io/etcd/pkg/fileutil" + + "go.uber.org/zap" ) // filePipeline pipelines allocating disk space type filePipeline struct { + lg *zap.Logger + // dir to put files dir string // size of files to make, in bytes @@ -36,8 +40,9 @@ type filePipeline struct { donec chan struct{} } -func newFilePipeline(dir string, fileSize int64) *filePipeline { +func newFilePipeline(lg *zap.Logger, dir string, fileSize int64) *filePipeline { fp := &filePipeline{ + lg: lg, dir: dir, size: fileSize, filec: make(chan *fileutil.LockedFile), @@ -70,7 +75,11 @@ func (fp *filePipeline) alloc() (f *fileutil.LockedFile, err error) { return nil, err } if err = fileutil.Preallocate(f.File, fp.size, true); err != nil { - plog.Errorf("failed to allocate space when creating new wal file (%v)", err) + if fp.lg != nil { + fp.lg.Warn("failed to preallocate space when creating a new WAL", zap.Int64("size", fp.size), zap.Error(err)) + } else { + plog.Errorf("failed to allocate space when creating new wal file (%v)", err) + } f.Close() return nil, err } diff --git a/vendor/github.com/coreos/etcd/wal/metrics.go b/vendor/go.etcd.io/etcd/wal/metrics.go similarity index 62% rename from vendor/github.com/coreos/etcd/wal/metrics.go rename to vendor/go.etcd.io/etcd/wal/metrics.go index 726154416..22cb8003c 100644 --- a/vendor/github.com/coreos/etcd/wal/metrics.go +++ b/vendor/go.etcd.io/etcd/wal/metrics.go @@ -17,22 +17,18 @@ package wal import "github.com/prometheus/client_golang/prometheus" var ( - syncDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + walFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{ Namespace: "etcd", Subsystem: "disk", Name: "wal_fsync_duration_seconds", - Help: "The latency distributions of fsync called by wal.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) - walWriteBytes = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "disk", - Name: "wal_write_bytes_total", - Help: "Total number of bytes written in WAL.", + Help: "The latency distributions of fsync called by WAL.", + + // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2 + // highest bucket start of 0.001 sec * 2^13 == 8.192 sec + Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), }) ) func init() { - prometheus.MustRegister(syncDurations) - prometheus.MustRegister(walWriteBytes) + prometheus.MustRegister(walFsyncSec) } diff --git a/vendor/github.com/coreos/etcd/wal/repair.go 
b/vendor/go.etcd.io/etcd/wal/repair.go similarity index 51% rename from vendor/github.com/coreos/etcd/wal/repair.go rename to vendor/go.etcd.io/etcd/wal/repair.go index f1e507683..15afed017 100644 --- a/vendor/github.com/coreos/etcd/wal/repair.go +++ b/vendor/go.etcd.io/etcd/wal/repair.go @@ -18,21 +18,28 @@ import ( "io" "os" "path/filepath" - "time" - "github.com/coreos/etcd/pkg/fileutil" - "github.com/coreos/etcd/wal/walpb" + "go.etcd.io/etcd/pkg/fileutil" + "go.etcd.io/etcd/wal/walpb" + + "go.uber.org/zap" ) // Repair tries to repair ErrUnexpectedEOF in the // last wal file by truncating. -func Repair(dirpath string) bool { - f, err := openLast(dirpath) +func Repair(lg *zap.Logger, dirpath string) bool { + f, err := openLast(lg, dirpath) if err != nil { return false } defer f.Close() + if lg != nil { + lg.Info("repairing", zap.String("path", f.Name())) + } else { + plog.Noticef("repairing %v", f.Name()) + } + rec := &walpb.Record{} decoder := newDecoder(f) for { @@ -52,50 +59,80 @@ func Repair(dirpath string) bool { decoder.updateCRC(rec.Crc) } continue + case io.EOF: + if lg != nil { + lg.Info("repaired", zap.String("path", f.Name()), zap.Error(io.EOF)) + } return true + case io.ErrUnexpectedEOF: - plog.Noticef("repairing %v", f.Name()) bf, bferr := os.Create(f.Name() + ".broken") if bferr != nil { - plog.Errorf("could not repair %v, failed to create backup file", f.Name()) + if lg != nil { + lg.Warn("failed to create backup file", zap.String("path", f.Name()+".broken"), zap.Error(bferr)) + } else { + plog.Errorf("could not repair %v, failed to create backup file", f.Name()) + } return false } defer bf.Close() if _, err = f.Seek(0, io.SeekStart); err != nil { - plog.Errorf("could not repair %v, failed to read file", f.Name()) + if lg != nil { + lg.Warn("failed to read file", zap.String("path", f.Name()), zap.Error(err)) + } else { + plog.Errorf("could not repair %v, failed to read file", f.Name()) + } return false } if _, err = io.Copy(bf, f); err != nil { - plog.Errorf("could not repair %v, failed to copy file", f.Name()) + if lg != nil { + lg.Warn("failed to copy", zap.String("from", f.Name()+".broken"), zap.String("to", f.Name()), zap.Error(err)) + } else { + plog.Errorf("could not repair %v, failed to copy file", f.Name()) + } return false } - if err = f.Truncate(int64(lastOffset)); err != nil { - plog.Errorf("could not repair %v, failed to truncate file", f.Name()) + if err = f.Truncate(lastOffset); err != nil { + if lg != nil { + lg.Warn("failed to truncate", zap.String("path", f.Name()), zap.Error(err)) + } else { + plog.Errorf("could not repair %v, failed to truncate file", f.Name()) + } return false } - start := time.Now() if err = fileutil.Fsync(f.File); err != nil { - plog.Errorf("could not repair %v, failed to sync file", f.Name()) + if lg != nil { + lg.Warn("failed to fsync", zap.String("path", f.Name()), zap.Error(err)) + } else { + plog.Errorf("could not repair %v, failed to sync file", f.Name()) + } return false } - syncDurations.Observe(time.Since(start).Seconds()) + if lg != nil { + lg.Info("repaired", zap.String("path", f.Name()), zap.Error(io.ErrUnexpectedEOF)) + } return true + default: - plog.Errorf("could not repair error (%v)", err) + if lg != nil { + lg.Warn("failed to repair", zap.String("path", f.Name()), zap.Error(err)) + } else { + plog.Errorf("could not repair error (%v)", err) + } return false } } } // openLast opens the last wal file for read and write. 
-func openLast(dirpath string) (*fileutil.LockedFile, error) { - names, err := readWalNames(dirpath) +func openLast(lg *zap.Logger, dirpath string) (*fileutil.LockedFile, error) { + names, err := readWALNames(lg, dirpath) if err != nil { return nil, err } diff --git a/vendor/github.com/coreos/etcd/wal/util.go b/vendor/go.etcd.io/etcd/wal/util.go similarity index 58% rename from vendor/github.com/coreos/etcd/wal/util.go rename to vendor/go.etcd.io/etcd/wal/util.go index 5c56e2288..a3f314bb1 100644 --- a/vendor/github.com/coreos/etcd/wal/util.go +++ b/vendor/go.etcd.io/etcd/wal/util.go @@ -19,15 +19,16 @@ import ( "fmt" "strings" - "github.com/coreos/etcd/pkg/fileutil" -) + "go.etcd.io/etcd/pkg/fileutil" -var ( - badWalName = errors.New("bad wal name") + "go.uber.org/zap" ) -func Exist(dirpath string) bool { - names, err := fileutil.ReadDir(dirpath) +var errBadWALName = errors.New("bad wal name") + +// Exist returns true if there are any files in a given directory. +func Exist(dir string) bool { + names, err := fileutil.ReadDir(dir, fileutil.WithExt(".wal")) if err != nil { return false } @@ -37,12 +38,16 @@ func Exist(dirpath string) bool { // searchIndex returns the last array index of names whose raft index section is // equal to or smaller than the given index. // The given names MUST be sorted. -func searchIndex(names []string, index uint64) (int, bool) { +func searchIndex(lg *zap.Logger, names []string, index uint64) (int, bool) { for i := len(names) - 1; i >= 0; i-- { name := names[i] - _, curIndex, err := parseWalName(name) + _, curIndex, err := parseWALName(name) if err != nil { - plog.Panicf("parse correct name should never fail: %v", err) + if lg != nil { + lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err)) + } else { + plog.Panicf("parse correct name should never fail: %v", err) + } } if index >= curIndex { return i, true @@ -53,12 +58,16 @@ func searchIndex(names []string, index uint64) (int, bool) { // names should have been sorted based on sequence number. // isValidSeq checks whether seq increases continuously. 
-func isValidSeq(names []string) bool { +func isValidSeq(lg *zap.Logger, names []string) bool { var lastSeq uint64 for _, name := range names { - curSeq, _, err := parseWalName(name) + curSeq, _, err := parseWALName(name) if err != nil { - plog.Panicf("parse correct name should never fail: %v", err) + if lg != nil { + lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err)) + } else { + plog.Panicf("parse correct name should never fail: %v", err) + } } if lastSeq != 0 && lastSeq != curSeq-1 { return false @@ -67,25 +76,33 @@ func isValidSeq(names []string) bool { } return true } -func readWalNames(dirpath string) ([]string, error) { + +func readWALNames(lg *zap.Logger, dirpath string) ([]string, error) { names, err := fileutil.ReadDir(dirpath) if err != nil { return nil, err } - wnames := checkWalNames(names) + wnames := checkWalNames(lg, names) if len(wnames) == 0 { return nil, ErrFileNotFound } return wnames, nil } -func checkWalNames(names []string) []string { +func checkWalNames(lg *zap.Logger, names []string) []string { wnames := make([]string, 0) for _, name := range names { - if _, _, err := parseWalName(name); err != nil { + if _, _, err := parseWALName(name); err != nil { // don't complain about left over tmp files if !strings.HasSuffix(name, ".tmp") { - plog.Warningf("ignored file %v in wal", name) + if lg != nil { + lg.Warn( + "ignored file in WAL directory", + zap.String("path", name), + ) + } else { + plog.Warningf("ignored file %v in wal", name) + } } continue } @@ -94,9 +111,9 @@ func checkWalNames(names []string) []string { return wnames } -func parseWalName(str string) (seq, index uint64, err error) { +func parseWALName(str string) (seq, index uint64, err error) { if !strings.HasSuffix(str, ".wal") { - return 0, 0, badWalName + return 0, 0, errBadWALName } _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index) return seq, index, err diff --git a/vendor/github.com/coreos/etcd/wal/wal.go b/vendor/go.etcd.io/etcd/wal/wal.go similarity index 77% rename from vendor/github.com/coreos/etcd/wal/wal.go rename to vendor/go.etcd.io/etcd/wal/wal.go index 95870d183..5f6f21e3a 100644 --- a/vendor/github.com/coreos/etcd/wal/wal.go +++ b/vendor/go.etcd.io/etcd/wal/wal.go @@ -25,13 +25,14 @@ import ( "sync" "time" - "github.com/coreos/etcd/pkg/fileutil" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/wal/walpb" + "go.etcd.io/etcd/pkg/fileutil" + "go.etcd.io/etcd/pkg/pbutil" + "go.etcd.io/etcd/raft" + "go.etcd.io/etcd/raft/raftpb" + "go.etcd.io/etcd/wal/walpb" "github.com/coreos/pkg/capnslog" + "go.uber.org/zap" ) const ( @@ -53,7 +54,7 @@ var ( // so that tests can set a different segment size. SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "wal") + plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "wal") ErrMetadataConflict = errors.New("wal: conflicting metadata found") ErrFileNotFound = errors.New("wal: file not found") @@ -69,6 +70,8 @@ var ( // A just opened WAL is in read mode, and ready for reading records. // The WAL will be ready for appending after reading out all the previous records. type WAL struct { + lg *zap.Logger + dir string // the living directory of the underlay files // dirFile is a fd for the wal directory for syncing on Rename @@ -91,7 +94,7 @@ type WAL struct { // Create creates a WAL ready for appending records. 
The given metadata is // recorded at the head of each WAL file, and can be retrieved with ReadAll. -func Create(dirpath string, metadata []byte) (*WAL, error) { +func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) { if Exist(dirpath) { return nil, os.ErrExist } @@ -104,22 +107,53 @@ func Create(dirpath string, metadata []byte) (*WAL, error) { } } if err := fileutil.CreateDirAll(tmpdirpath); err != nil { + if lg != nil { + lg.Warn( + "failed to create a temporary WAL directory", + zap.String("tmp-dir-path", tmpdirpath), + zap.String("dir-path", dirpath), + zap.Error(err), + ) + } return nil, err } p := filepath.Join(tmpdirpath, walName(0, 0)) f, err := fileutil.LockFile(p, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode) if err != nil { + if lg != nil { + lg.Warn( + "failed to flock an initial WAL file", + zap.String("path", p), + zap.Error(err), + ) + } return nil, err } if _, err = f.Seek(0, io.SeekEnd); err != nil { + if lg != nil { + lg.Warn( + "failed to seek an initial WAL file", + zap.String("path", p), + zap.Error(err), + ) + } return nil, err } if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil { + if lg != nil { + lg.Warn( + "failed to preallocate an initial WAL file", + zap.String("path", p), + zap.Int64("segment-bytes", SegmentSizeBytes), + zap.Error(err), + ) + } return nil, err } w := &WAL{ + lg: lg, dir: dirpath, metadata: metadata, } @@ -138,30 +172,89 @@ func Create(dirpath string, metadata []byte) (*WAL, error) { return nil, err } - if w, err = w.renameWal(tmpdirpath); err != nil { + if w, err = w.renameWAL(tmpdirpath); err != nil { + if lg != nil { + lg.Warn( + "failed to rename the temporary WAL directory", + zap.String("tmp-dir-path", tmpdirpath), + zap.String("dir-path", w.dir), + zap.Error(err), + ) + } return nil, err } + var perr error + defer func() { + if perr != nil { + w.cleanupWAL(lg) + } + }() + // directory was renamed; sync parent dir to persist rename pdir, perr := fileutil.OpenDir(filepath.Dir(w.dir)) if perr != nil { + if lg != nil { + lg.Warn( + "failed to open the parent data directory", + zap.String("parent-dir-path", filepath.Dir(w.dir)), + zap.String("dir-path", w.dir), + zap.Error(perr), + ) + } return nil, perr } - - start := time.Now() if perr = fileutil.Fsync(pdir); perr != nil { + if lg != nil { + lg.Warn( + "failed to fsync the parent data directory file", + zap.String("parent-dir-path", filepath.Dir(w.dir)), + zap.String("dir-path", w.dir), + zap.Error(perr), + ) + } return nil, perr } - syncDurations.Observe(time.Since(start).Seconds()) - - if perr = pdir.Close(); err != nil { + if perr = pdir.Close(); perr != nil { + if lg != nil { + lg.Warn( + "failed to close the parent data directory file", + zap.String("parent-dir-path", filepath.Dir(w.dir)), + zap.String("dir-path", w.dir), + zap.Error(perr), + ) + } return nil, perr } return w, nil } -func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) { +func (w *WAL) cleanupWAL(lg *zap.Logger) { + var err error + if err = w.Close(); err != nil { + if lg != nil { + lg.Panic("failed to close WAL during cleanup", zap.Error(err)) + } else { + plog.Panicf("failed to close WAL during cleanup: %v", err) + } + } + brokenDirName := fmt.Sprintf("%s.broken.%v", w.dir, time.Now().Format("20060102.150405.999999")) + if err = os.Rename(w.dir, brokenDirName); err != nil { + if lg != nil { + lg.Panic( + "failed to rename WAL during cleanup", + zap.Error(err), + zap.String("source-path", w.dir), + zap.String("rename-path", brokenDirName), + ) + } else { + 
plog.Panicf("failed to rename WAL during cleanup: %v", err) + } + } +} + +func (w *WAL) renameWAL(tmpdirpath string) (*WAL, error) { if err := os.RemoveAll(w.dir); err != nil { return nil, err } @@ -173,26 +266,36 @@ func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) { // process holds the lock. if err := os.Rename(tmpdirpath, w.dir); err != nil { if _, ok := err.(*os.LinkError); ok { - return w.renameWalUnlock(tmpdirpath) + return w.renameWALUnlock(tmpdirpath) } return nil, err } - w.fp = newFilePipeline(w.dir, SegmentSizeBytes) + w.fp = newFilePipeline(w.lg, w.dir, SegmentSizeBytes) df, err := fileutil.OpenDir(w.dir) w.dirFile = df return w, err } -func (w *WAL) renameWalUnlock(tmpdirpath string) (*WAL, error) { +func (w *WAL) renameWALUnlock(tmpdirpath string) (*WAL, error) { // rename of directory with locked files doesn't work on windows/cifs; // close the WAL to release the locks so the directory can be renamed. - plog.Infof("releasing file lock to rename %q to %q", tmpdirpath, w.dir) + if w.lg != nil { + w.lg.Info( + "closing WAL to release flock and retry directory renaming", + zap.String("from", tmpdirpath), + zap.String("to", w.dir), + ) + } else { + plog.Infof("releasing file lock to rename %q to %q", tmpdirpath, w.dir) + } w.Close() + if err := os.Rename(tmpdirpath, w.dir); err != nil { return nil, err } + // reopen and relock - newWAL, oerr := Open(w.dir, walpb.Snapshot{}) + newWAL, oerr := Open(w.lg, w.dir, walpb.Snapshot{}) if oerr != nil { return nil, oerr } @@ -209,8 +312,8 @@ func (w *WAL) renameWalUnlock(tmpdirpath string) (*WAL, error) { // The returned WAL is ready to read and the first record will be the one after // the given snap. The WAL cannot be appended to before reading out all of its // previous records. -func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) { - w, err := openAtIndex(dirpath, snap, true) +func Open(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) { + w, err := openAtIndex(lg, dirpath, snap, true) if err != nil { return nil, err } @@ -222,23 +325,24 @@ func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) { // OpenForRead only opens the wal files for read. // Write on a read only wal panics. 
-func OpenForRead(dirpath string, snap walpb.Snapshot) (*WAL, error) { - return openAtIndex(dirpath, snap, false) +func OpenForRead(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) { + return openAtIndex(lg, dirpath, snap, false) } -func openAtIndex(dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) { - names, nameIndex, err := selectWALFiles(dirpath, snap) +func openAtIndex(lg *zap.Logger, dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) { + names, nameIndex, err := selectWALFiles(lg, dirpath, snap) if err != nil { return nil, err } - rs, ls, closer, err := openWALFiles(dirpath, names, nameIndex, write) + rs, ls, closer, err := openWALFiles(lg, dirpath, names, nameIndex, write) if err != nil { return nil, err } // create a WAL ready for reading w := &WAL{ + lg: lg, dir: dirpath, start: snap, decoder: newDecoder(rs...), @@ -250,24 +354,24 @@ func openAtIndex(dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) // write reuses the file descriptors from read; don't close so // WAL can append without dropping the file lock w.readClose = nil - if _, _, err := parseWalName(filepath.Base(w.tail().Name())); err != nil { + if _, _, err := parseWALName(filepath.Base(w.tail().Name())); err != nil { closer() return nil, err } - w.fp = newFilePipeline(w.dir, SegmentSizeBytes) + w.fp = newFilePipeline(lg, w.dir, SegmentSizeBytes) } return w, nil } -func selectWALFiles(dirpath string, snap walpb.Snapshot) ([]string, int, error) { - names, err := readWalNames(dirpath) +func selectWALFiles(lg *zap.Logger, dirpath string, snap walpb.Snapshot) ([]string, int, error) { + names, err := readWALNames(lg, dirpath) if err != nil { return nil, -1, err } - nameIndex, ok := searchIndex(names, snap.Index) - if !ok || !isValidSeq(names[nameIndex:]) { + nameIndex, ok := searchIndex(lg, names, snap.Index) + if !ok || !isValidSeq(lg, names[nameIndex:]) { err = ErrFileNotFound return nil, -1, err } @@ -275,7 +379,7 @@ func selectWALFiles(dirpath string, snap walpb.Snapshot) ([]string, int, error) return names, nameIndex, nil } -func openWALFiles(dirpath string, names []string, nameIndex int, write bool) ([]io.Reader, []*fileutil.LockedFile, func() error, error) { +func openWALFiles(lg *zap.Logger, dirpath string, names []string, nameIndex int, write bool) ([]io.Reader, []*fileutil.LockedFile, func() error, error) { rcs := make([]io.ReadCloser, 0) rs := make([]io.Reader, 0) ls := make([]*fileutil.LockedFile, 0) @@ -332,14 +436,17 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. ents = append(ents[:e.Index-w.start.Index-1], e) } w.enti = e.Index + case stateType: state = mustUnmarshalState(rec.Data) + case metadataType: if metadata != nil && !bytes.Equal(metadata, rec.Data) { state.Reset() return nil, state, nil, ErrMetadataConflict } metadata = rec.Data + case crcType: crc := decoder.crc.Sum32() // current crc of decoder must match the crc of the record. @@ -349,6 +456,7 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. return nil, state, nil, ErrCRCMismatch } decoder.updateCRC(rec.Crc) + case snapshotType: var snap walpb.Snapshot pbutil.MustUnmarshal(&snap, rec.Data) @@ -359,6 +467,7 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. } match = true } + default: state.Reset() return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type) @@ -420,71 +529,6 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. 
return metadata, state, ents, err } -// ValidSnapshotEntries returns all the valid snapshot entries in the wal logs in the given directory. -// Snapshot entries are valid if their index is less than or equal to the most recent committed hardstate. -func ValidSnapshotEntries(walDir string) ([]walpb.Snapshot, error) { - var snaps []walpb.Snapshot - var state raftpb.HardState - var err error - - rec := &walpb.Record{} - names, err := readWalNames(walDir) - if err != nil { - return nil, err - } - - // open wal files in read mode, so that there is no conflict - // when the same WAL is opened elsewhere in write mode - rs, _, closer, err := openWALFiles(walDir, names, 0, false) - if err != nil { - return nil, err - } - defer func() { - if closer != nil { - closer() - } - }() - - // create a new decoder from the readers on the WAL files - decoder := newDecoder(rs...) - - for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) { - switch rec.Type { - case snapshotType: - var loadedSnap walpb.Snapshot - pbutil.MustUnmarshal(&loadedSnap, rec.Data) - snaps = append(snaps, loadedSnap) - case stateType: - state = mustUnmarshalState(rec.Data) - case crcType: - crc := decoder.crc.Sum32() - // current crc of decoder must match the crc of the record. - // do no need to match 0 crc, since the decoder is a new one at this case. - if crc != 0 && rec.Validate(crc) != nil { - return nil, ErrCRCMismatch - } - decoder.updateCRC(rec.Crc) - } - } - // We do not have to read out all the WAL entries - // as the decoder is opened in read mode. - if err != io.EOF && err != io.ErrUnexpectedEOF { - return nil, err - } - - // filter out any snaps that are newer than the committed hardstate - n := 0 - for _, s := range snaps { - if s.Index <= state.Commit { - snaps[n] = s - n++ - } - } - snaps = snaps[:n:n] - - return snaps, nil -} - // Verify reads through the given WAL and verifies that it is not corrupted. // It creates a new decoder to read through the records of the given WAL. // It does not conflict with any open WAL, but it is recommended not to @@ -492,21 +536,21 @@ func ValidSnapshotEntries(walDir string) ([]walpb.Snapshot, error) { // If it cannot read out the expected snap, it will return ErrSnapshotNotFound. // If the loaded snap doesn't match with the expected one, it will // return error ErrSnapshotMismatch. 
-func Verify(walDir string, snap walpb.Snapshot) error { +func Verify(lg *zap.Logger, walDir string, snap walpb.Snapshot) error { var metadata []byte var err error var match bool rec := &walpb.Record{} - names, nameIndex, err := selectWALFiles(walDir, snap) + names, nameIndex, err := selectWALFiles(lg, walDir, snap) if err != nil { return err } // open wal files in read mode, so that there is no conflict // when the same WAL is opened elsewhere in write mode - rs, _, closer, err := openWALFiles(walDir, names, nameIndex, false) + rs, _, closer, err := openWALFiles(lg, walDir, names, nameIndex, false) if err != nil { return err } @@ -573,9 +617,11 @@ func (w *WAL) cut() error { if serr != nil { return serr } + if err := w.tail().Truncate(off); err != nil { return err } + if err := w.sync(); err != nil { return err } @@ -595,15 +641,19 @@ func (w *WAL) cut() error { if err != nil { return err } + if err = w.saveCrc(prevCrc); err != nil { return err } + if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil { return err } + if err = w.saveState(&w.state); err != nil { return err } + // atomically move temp wal file to wal file if err = w.sync(); err != nil { return err @@ -617,12 +667,9 @@ func (w *WAL) cut() error { if err = os.Rename(newTail.Name(), fpath); err != nil { return err } - - start := time.Now() if err = fileutil.Fsync(w.dirFile); err != nil { return err } - syncDurations.Observe(time.Since(start).Seconds()) // reopen newTail with its new path so calls to Name() match the wal filename format newTail.Close() @@ -642,7 +689,11 @@ func (w *WAL) cut() error { return err } - plog.Infof("segmented wal file %v is created", fpath) + if w.lg != nil { + w.lg.Info("created a new WAL segment", zap.String("path", fpath)) + } else { + plog.Infof("segmented wal file %v is created", fpath) + } return nil } @@ -655,19 +706,23 @@ func (w *WAL) sync() error { start := time.Now() err := fileutil.Fdatasync(w.tail().File) - duration := time.Since(start) - if duration > warnSyncDuration { - plog.Warningf("sync duration of %v, expected less than %v", duration, warnSyncDuration) + took := time.Since(start) + if took > warnSyncDuration { + if w.lg != nil { + w.lg.Warn( + "slow fdatasync", + zap.Duration("took", took), + zap.Duration("expected-duration", warnSyncDuration), + ) + } else { + plog.Warningf("sync duration of %v, expected less than %v", took, warnSyncDuration) + } } - syncDurations.Observe(duration.Seconds()) + walFsyncSec.Observe(took.Seconds()) return err } -func (w *WAL) Sync() error { - return w.sync() -} - // ReleaseLockTo releases the locks, which has smaller index than the given index // except the largest one among them. // For example, if WAL is holding lock 1,2,3,4,5,6, ReleaseLockTo(4) will release @@ -682,9 +737,8 @@ func (w *WAL) ReleaseLockTo(index uint64) error { var smaller int found := false - for i, l := range w.locks { - _, lockIndex, err := parseWalName(filepath.Base(l.Name())) + _, lockIndex, err := parseWALName(filepath.Base(l.Name())) if err != nil { return err } @@ -716,6 +770,7 @@ func (w *WAL) ReleaseLockTo(index uint64) error { return nil } +// Close closes the current WAL file and directory. 
func (w *WAL) Close() error { w.mu.Lock() defer w.mu.Unlock() @@ -735,7 +790,11 @@ func (w *WAL) Close() error { continue } if err := l.Close(); err != nil { - plog.Errorf("failed to unlock during closing wal: %s", err) + if w.lg != nil { + w.lg.Warn("failed to close WAL", zap.Error(err)) + } else { + plog.Errorf("failed to unlock during closing wal: %s", err) + } } } @@ -831,9 +890,13 @@ func (w *WAL) seq() uint64 { if t == nil { return 0 } - seq, _, err := parseWalName(filepath.Base(t.Name())) + seq, _, err := parseWALName(filepath.Base(t.Name())) if err != nil { - plog.Fatalf("bad wal name %s (%v)", t.Name(), err) + if w.lg != nil { + w.lg.Fatal("failed to parse WAL name", zap.String("name", t.Name()), zap.Error(err)) + } else { + plog.Fatalf("bad wal name %s (%v)", t.Name(), err) + } } return seq } diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.go b/vendor/go.etcd.io/etcd/wal/walpb/record.go similarity index 100% rename from vendor/github.com/coreos/etcd/wal/walpb/record.go rename to vendor/go.etcd.io/etcd/wal/walpb/record.go diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go b/vendor/go.etcd.io/etcd/wal/walpb/record.pb.go similarity index 71% rename from vendor/github.com/coreos/etcd/wal/walpb/record.pb.go rename to vendor/go.etcd.io/etcd/wal/walpb/record.pb.go index 10ee41702..3ce63ddc2 100644 --- a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go +++ b/vendor/go.etcd.io/etcd/wal/walpb/record.pb.go @@ -1,16 +1,28 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: record.proto +/* + Package walpb is a generated protocol buffer package. + + It is generated from these files: + record.proto + + It has these top-level messages: + Record + Snapshot +*/ package walpb import ( - fmt "fmt" - io "io" + "fmt" + + proto "github.com/golang/protobuf/proto" + math "math" - math_bits "math/bits" _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" + + io "io" ) // Reference imports to suppress errors if they are not otherwise used. 
@@ -25,115 +37,36 @@ var _ = math.Inf const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type Record struct { - Type int64 `protobuf:"varint,1,opt,name=type" json:"type"` - Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"` - Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Type int64 `protobuf:"varint,1,opt,name=type" json:"type"` + Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"` + Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` + XXX_unrecognized []byte `json:"-"` } -func (m *Record) Reset() { *m = Record{} } -func (m *Record) String() string { return proto.CompactTextString(m) } -func (*Record) ProtoMessage() {} -func (*Record) Descriptor() ([]byte, []int) { - return fileDescriptor_bf94fd919e302a1d, []int{0} -} -func (m *Record) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Record) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Record.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Record) XXX_Merge(src proto.Message) { - xxx_messageInfo_Record.Merge(m, src) -} -func (m *Record) XXX_Size() int { - return m.Size() -} -func (m *Record) XXX_DiscardUnknown() { - xxx_messageInfo_Record.DiscardUnknown(m) -} - -var xxx_messageInfo_Record proto.InternalMessageInfo +func (m *Record) Reset() { *m = Record{} } +func (m *Record) String() string { return proto.CompactTextString(m) } +func (*Record) ProtoMessage() {} +func (*Record) Descriptor() ([]byte, []int) { return fileDescriptorRecord, []int{0} } type Snapshot struct { - Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"` - Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_bf94fd919e302a1d, []int{1} -} -func (m *Snapshot) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_Snapshot.Merge(m, src) -} -func (m *Snapshot) XXX_Size() int { - return m.Size() -} -func (m *Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_Snapshot.DiscardUnknown(m) + Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"` + Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"` + XXX_unrecognized []byte `json:"-"` } -var xxx_messageInfo_Snapshot proto.InternalMessageInfo +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRecord, []int{1} } func init() { proto.RegisterType((*Record)(nil), "walpb.Record") proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot") } - 
-func init() { proto.RegisterFile("record.proto", fileDescriptor_bf94fd919e302a1d) } - -var fileDescriptor_bf94fd919e302a1d = []byte{ - // 186 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce, - 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92, - 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0x25, 0x3f, 0x2e, 0xb6, - 0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, - 0x66, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, 0xc0, 0x22, 0x42, 0x62, 0x5c, 0xcc, 0xc9, 0x45, - 0xc9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xbc, 0x50, 0x09, 0x90, 0x80, 0x90, 0x10, 0x17, 0x4b, 0x4a, - 0x62, 0x49, 0xa2, 0x04, 0xb3, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x98, 0xad, 0xe4, 0xc0, 0xc5, 0x11, - 0x9c, 0x97, 0x58, 0x50, 0x9c, 0x91, 0x5f, 0x22, 0x24, 0xc5, 0xc5, 0x9a, 0x99, 0x97, 0x92, 0x5a, - 0x01, 0x36, 0x92, 0x05, 0xaa, 0x13, 0x22, 0x04, 0xb6, 0x2d, 0xb5, 0x28, 0x17, 0x6c, 0x28, 0x0b, - 0xdc, 0xb6, 0xd4, 0xa2, 0x5c, 0x27, 0x91, 0x13, 0x0f, 0xe5, 0x18, 0x4e, 0x3c, 0x92, 0x63, 0xbc, - 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f, 0xe5, 0x18, 0x00, 0x01, 0x00, 0x00, - 0xff, 0xff, 0x7f, 0x5e, 0x5c, 0x46, 0xd3, 0x00, 0x00, 0x00, -} - func (m *Record) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -141,39 +74,32 @@ func (m *Record) Marshal() (dAtA []byte, err error) { } func (m *Record) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Record) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } + dAtA[i] = 0x8 + i++ + i = encodeVarintRecord(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRecord(dAtA, i, uint64(m.Crc)) if m.Data != nil { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintRecord(dAtA, i, uint64(len(m.Data))) - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintRecord(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) } - i = encodeVarintRecord(dAtA, i, uint64(m.Crc)) - i-- - dAtA[i] = 0x10 - i = encodeVarintRecord(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil } func (m *Snapshot) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -181,43 +107,32 @@ func (m *Snapshot) Marshal() (dAtA []byte, err error) { } func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRecord(dAtA, i, uint64(m.Index)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRecord(dAtA, i, uint64(m.Term)) if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } - i = encodeVarintRecord(dAtA, i, uint64(m.Term)) - i-- - dAtA[i] = 0x10 - i = encodeVarintRecord(dAtA, i, uint64(m.Index)) 
- i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } func encodeVarintRecord(dAtA []byte, offset int, v uint64) int { - offset -= sovRecord(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *Record) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRecord(uint64(m.Type)) @@ -233,9 +148,6 @@ func (m *Record) Size() (n int) { } func (m *Snapshot) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovRecord(uint64(m.Index)) @@ -247,7 +159,14 @@ func (m *Snapshot) Size() (n int) { } func sovRecord(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozRecord(x uint64) (n int) { return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -267,7 +186,7 @@ func (m *Record) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -295,7 +214,7 @@ func (m *Record) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= int64(b&0x7F) << shift + m.Type |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -314,7 +233,7 @@ func (m *Record) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Crc |= uint32(b&0x7F) << shift + m.Crc |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } @@ -333,7 +252,7 @@ func (m *Record) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -342,9 +261,6 @@ func (m *Record) Unmarshal(dAtA []byte) error { return ErrInvalidLengthRecord } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthRecord - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -362,9 +278,6 @@ func (m *Record) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRecord } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRecord - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -393,7 +306,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -421,7 +334,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Index |= uint64(b&0x7F) << shift + m.Index |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -440,7 +353,7 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Term |= uint64(b&0x7F) << shift + m.Term |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -454,9 +367,6 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthRecord } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthRecord - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -524,11 +434,8 @@ func skipRecord(dAtA []byte) (n int, err error) { break } } - if length < 0 { - return 0, ErrInvalidLengthRecord - } iNdEx += length - if iNdEx < 0 { + if length < 0 { return 0, ErrInvalidLengthRecord } return iNdEx, nil @@ -559,9 +466,6 @@ func skipRecord(dAtA []byte) (n int, err error) { return 0, err } iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthRecord - } } return iNdEx, nil case 4: @@ -580,3 +484,21 @@ var ( ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowRecord = fmt.Errorf("proto: integer overflow") ) 
+ +func init() { proto.RegisterFile("record.proto", fileDescriptorRecord) } + +var fileDescriptorRecord = []byte{ + // 186 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce, + 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92, + 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0x25, 0x3f, 0x2e, 0xb6, + 0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, + 0x66, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, 0xc0, 0x22, 0x42, 0x62, 0x5c, 0xcc, 0xc9, 0x45, + 0xc9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xbc, 0x50, 0x09, 0x90, 0x80, 0x90, 0x10, 0x17, 0x4b, 0x4a, + 0x62, 0x49, 0xa2, 0x04, 0xb3, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x98, 0xad, 0xe4, 0xc0, 0xc5, 0x11, + 0x9c, 0x97, 0x58, 0x50, 0x9c, 0x91, 0x5f, 0x22, 0x24, 0xc5, 0xc5, 0x9a, 0x99, 0x97, 0x92, 0x5a, + 0x01, 0x36, 0x92, 0x05, 0xaa, 0x13, 0x22, 0x04, 0xb6, 0x2d, 0xb5, 0x28, 0x17, 0x6c, 0x28, 0x0b, + 0xdc, 0xb6, 0xd4, 0xa2, 0x5c, 0x27, 0x91, 0x13, 0x0f, 0xe5, 0x18, 0x4e, 0x3c, 0x92, 0x63, 0xbc, + 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f, 0xe5, 0x18, 0x00, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x7f, 0x5e, 0x5c, 0x46, 0xd3, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.proto b/vendor/go.etcd.io/etcd/wal/walpb/record.proto similarity index 100% rename from vendor/github.com/coreos/etcd/wal/walpb/record.proto rename to vendor/go.etcd.io/etcd/wal/walpb/record.proto diff --git a/vendor/modules.txt b/vendor/modules.txt index b937f4e33..ff6a60c0e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -135,84 +135,8 @@ github.com/containerd/containerd/sys github.com/containerd/containerd/version # github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41 github.com/containerd/continuity/pathdriver -# github.com/coreos/bbolt v1.3.3 => github.com/coreos/bbolt v1.3.3 +# github.com/coreos/etcd v3.3.22+incompatible => go.etcd.io/etcd v0.5.0-alpha.5.0.20200824191128-ae9734ed278b ## explicit -github.com/coreos/bbolt -# github.com/coreos/etcd v3.3.22+incompatible => github.com/coreos/etcd v3.3.22+incompatible -## explicit -github.com/coreos/etcd/alarm -github.com/coreos/etcd/auth -github.com/coreos/etcd/auth/authpb -github.com/coreos/etcd/client -github.com/coreos/etcd/clientv3 -github.com/coreos/etcd/clientv3/balancer -github.com/coreos/etcd/clientv3/balancer/connectivity -github.com/coreos/etcd/clientv3/balancer/picker -github.com/coreos/etcd/clientv3/balancer/resolver/endpoint -github.com/coreos/etcd/clientv3/concurrency -github.com/coreos/etcd/clientv3/credentials -github.com/coreos/etcd/compactor -github.com/coreos/etcd/discovery -github.com/coreos/etcd/embed -github.com/coreos/etcd/error -github.com/coreos/etcd/etcdserver -github.com/coreos/etcd/etcdserver/api -github.com/coreos/etcd/etcdserver/api/etcdhttp -github.com/coreos/etcd/etcdserver/api/v2http -github.com/coreos/etcd/etcdserver/api/v2http/httptypes -github.com/coreos/etcd/etcdserver/api/v2v3 -github.com/coreos/etcd/etcdserver/api/v3client -github.com/coreos/etcd/etcdserver/api/v3election -github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb -github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw -github.com/coreos/etcd/etcdserver/api/v3lock -github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb -github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw -github.com/coreos/etcd/etcdserver/api/v3rpc 
-github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes -github.com/coreos/etcd/etcdserver/auth -github.com/coreos/etcd/etcdserver/etcdserverpb -github.com/coreos/etcd/etcdserver/etcdserverpb/gw -github.com/coreos/etcd/etcdserver/membership -github.com/coreos/etcd/etcdserver/stats -github.com/coreos/etcd/lease -github.com/coreos/etcd/lease/leasehttp -github.com/coreos/etcd/lease/leasepb -github.com/coreos/etcd/mvcc -github.com/coreos/etcd/mvcc/backend -github.com/coreos/etcd/mvcc/mvccpb -github.com/coreos/etcd/pkg/adt -github.com/coreos/etcd/pkg/contention -github.com/coreos/etcd/pkg/cors -github.com/coreos/etcd/pkg/cpuutil -github.com/coreos/etcd/pkg/crc -github.com/coreos/etcd/pkg/debugutil -github.com/coreos/etcd/pkg/fileutil -github.com/coreos/etcd/pkg/httputil -github.com/coreos/etcd/pkg/idutil -github.com/coreos/etcd/pkg/ioutil -github.com/coreos/etcd/pkg/logutil -github.com/coreos/etcd/pkg/netutil -github.com/coreos/etcd/pkg/pathutil -github.com/coreos/etcd/pkg/pbutil -github.com/coreos/etcd/pkg/runtime -github.com/coreos/etcd/pkg/schedule -github.com/coreos/etcd/pkg/srv -github.com/coreos/etcd/pkg/systemd -github.com/coreos/etcd/pkg/tlsutil -github.com/coreos/etcd/pkg/transport -github.com/coreos/etcd/pkg/types -github.com/coreos/etcd/pkg/wait -github.com/coreos/etcd/proxy/grpcproxy/adapter -github.com/coreos/etcd/raft -github.com/coreos/etcd/raft/raftpb -github.com/coreos/etcd/rafthttp -github.com/coreos/etcd/snap -github.com/coreos/etcd/snap/snappb -github.com/coreos/etcd/store -github.com/coreos/etcd/version -github.com/coreos/etcd/wal -github.com/coreos/etcd/wal/walpb # github.com/coreos/go-semver v0.3.0 github.com/coreos/go-semver/semver # github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf @@ -566,6 +490,88 @@ github.com/xeipuuv/gojsonreference github.com/xeipuuv/gojsonschema # github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 github.com/xiang90/probing +# go.etcd.io/bbolt v1.3.3 +## explicit +go.etcd.io/bbolt +# go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 +## explicit +go.etcd.io/etcd/auth +go.etcd.io/etcd/auth/authpb +go.etcd.io/etcd/client +go.etcd.io/etcd/clientv3 +go.etcd.io/etcd/clientv3/balancer +go.etcd.io/etcd/clientv3/balancer/connectivity +go.etcd.io/etcd/clientv3/balancer/picker +go.etcd.io/etcd/clientv3/balancer/resolver/endpoint +go.etcd.io/etcd/clientv3/concurrency +go.etcd.io/etcd/clientv3/credentials +go.etcd.io/etcd/embed +go.etcd.io/etcd/etcdserver +go.etcd.io/etcd/etcdserver/api +go.etcd.io/etcd/etcdserver/api/etcdhttp +go.etcd.io/etcd/etcdserver/api/membership +go.etcd.io/etcd/etcdserver/api/rafthttp +go.etcd.io/etcd/etcdserver/api/snap +go.etcd.io/etcd/etcdserver/api/snap/snappb +go.etcd.io/etcd/etcdserver/api/v2auth +go.etcd.io/etcd/etcdserver/api/v2discovery +go.etcd.io/etcd/etcdserver/api/v2error +go.etcd.io/etcd/etcdserver/api/v2http +go.etcd.io/etcd/etcdserver/api/v2http/httptypes +go.etcd.io/etcd/etcdserver/api/v2stats +go.etcd.io/etcd/etcdserver/api/v2store +go.etcd.io/etcd/etcdserver/api/v2v3 +go.etcd.io/etcd/etcdserver/api/v3alarm +go.etcd.io/etcd/etcdserver/api/v3client +go.etcd.io/etcd/etcdserver/api/v3compactor +go.etcd.io/etcd/etcdserver/api/v3election +go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb +go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/gw +go.etcd.io/etcd/etcdserver/api/v3lock +go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb +go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/gw +go.etcd.io/etcd/etcdserver/api/v3rpc +go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes 
+go.etcd.io/etcd/etcdserver/etcdserverpb +go.etcd.io/etcd/etcdserver/etcdserverpb/gw +go.etcd.io/etcd/lease +go.etcd.io/etcd/lease/leasehttp +go.etcd.io/etcd/lease/leasepb +go.etcd.io/etcd/mvcc +go.etcd.io/etcd/mvcc/backend +go.etcd.io/etcd/mvcc/mvccpb +go.etcd.io/etcd/pkg/adt +go.etcd.io/etcd/pkg/contention +go.etcd.io/etcd/pkg/cpuutil +go.etcd.io/etcd/pkg/crc +go.etcd.io/etcd/pkg/debugutil +go.etcd.io/etcd/pkg/fileutil +go.etcd.io/etcd/pkg/flags +go.etcd.io/etcd/pkg/httputil +go.etcd.io/etcd/pkg/idutil +go.etcd.io/etcd/pkg/ioutil +go.etcd.io/etcd/pkg/logutil +go.etcd.io/etcd/pkg/netutil +go.etcd.io/etcd/pkg/pathutil +go.etcd.io/etcd/pkg/pbutil +go.etcd.io/etcd/pkg/runtime +go.etcd.io/etcd/pkg/schedule +go.etcd.io/etcd/pkg/srv +go.etcd.io/etcd/pkg/systemd +go.etcd.io/etcd/pkg/tlsutil +go.etcd.io/etcd/pkg/traceutil +go.etcd.io/etcd/pkg/transport +go.etcd.io/etcd/pkg/types +go.etcd.io/etcd/pkg/wait +go.etcd.io/etcd/proxy/grpcproxy/adapter +go.etcd.io/etcd/raft +go.etcd.io/etcd/raft/confchange +go.etcd.io/etcd/raft/quorum +go.etcd.io/etcd/raft/raftpb +go.etcd.io/etcd/raft/tracker +go.etcd.io/etcd/version +go.etcd.io/etcd/wal +go.etcd.io/etcd/wal/walpb # go.opencensus.io v0.22.1 ## explicit go.opencensus.io @@ -647,7 +653,6 @@ golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry # golang.org/x/text v0.3.3 -## explicit golang.org/x/text/encoding golang.org/x/text/encoding/charmap golang.org/x/text/encoding/htmlindex @@ -1088,11 +1093,10 @@ sigs.k8s.io/structured-merge-diff/v3/value # sigs.k8s.io/yaml v1.2.0 sigs.k8s.io/yaml # github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.3.2+incompatible -# github.com/coreos/bbolt => github.com/coreos/bbolt v1.3.3 -# github.com/coreos/etcd => github.com/coreos/etcd v3.3.22+incompatible +# github.com/coreos/etcd => go.etcd.io/etcd v0.5.0-alpha.5.0.20200824191128-ae9734ed278b # github.com/docker/docker => github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309 -# google.golang.org/grpc => google.golang.org/grpc v1.26.0 # github.com/golang/protobuf => github.com/golang/protobuf v1.3.5 +# google.golang.org/grpc => google.golang.org/grpc v1.26.0 # k8s.io/api => k8s.io/api v0.18.0 # k8s.io/apimachinery => k8s.io/apimachinery v0.18.0 # k8s.io/client-go => k8s.io/client-go v0.18.0
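
Note (illustrative, not part of the patch above): after this migration the vendored WAL package lives under go.etcd.io/etcd and its exported entry points (Create, Open, OpenForRead, Verify) take a *zap.Logger as their first argument, falling back to the legacy capnslog logger when nil is passed. The sketch below shows how a caller might exercise the renamed API; the directory path and metadata values are placeholders chosen for the example and do not come from this repository.

// Illustrative sketch only — placeholder paths and metadata, not code from this patch.
package main

import (
	"log"

	"go.etcd.io/etcd/wal"
	"go.etcd.io/etcd/wal/walpb"
	"go.uber.org/zap"
)

func main() {
	// A zap logger is now threaded through the WAL API; passing nil falls
	// back to the package-level capnslog logger, as the patched code shows.
	lg, _ := zap.NewProduction()
	defer lg.Sync()

	walDir := "/tmp/example-wal" // placeholder directory

	// Create takes the logger as its first argument and writes the initial
	// crc, metadata, and empty-snapshot records.
	w, err := wal.Create(lg, walDir, []byte("example-metadata"))
	if err != nil {
		log.Fatalf("failed to create WAL: %v", err)
	}
	w.Close()

	// OpenForRead follows the same pattern; an empty walpb.Snapshot starts
	// reading from the beginning of the WAL.
	r, err := wal.OpenForRead(lg, walDir, walpb.Snapshot{})
	if err != nil {
		log.Fatalf("failed to open WAL for read: %v", err)
	}
	defer r.Close()

	if _, _, _, err := r.ReadAll(); err != nil {
		log.Fatalf("failed to read WAL records: %v", err)
	}
}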